author    Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:24:12 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:24:12 -0800
commit    5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree      cc5c2d0a898769fd59549594fedb3ee6f84e59a0
parent    36289a03bcd3aabdf66de75cb6d1b4ee15726438 (diff)
parent    d1fabc68f8e0541d41657096dc713cb01775652d (diff)
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:

   - Add dedicated kmem_cache for typical/small skb->head, avoid having
     to access struct page at kfree time, and improve memory use.

   - Introduce sysctl to set default RPS configuration for new netdevs.

   - Define Netlink protocol specification format which can be used to
     describe messages used by each family and auto-generate parsers.
     Add tools for generating kernel data structures and uAPI headers.

   - Expose all net/core sysctls inside netns.

   - Remove 4s sleep in netpoll if carrier is instantly detected on
     boot.

   - Add configurable limit of MDB entries per port, and per port-vlan.

   - Continue populating drop reasons throughout the stack.

   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:

   - Support IPv4 big TCP (TSO frames larger than 64kB).

   - Add IP_LOCAL_PORT_RANGE socket option, to control the local port
     range on a socket-by-socket basis (a userspace sketch appears
     after the commit list at the end of this message).

   - Track and report in procfs the number of MPTCP sockets used.

   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path
     manager.

   - IPv6: don't check net.ipv6.route.max_size and rely on garbage
     collection to free memory (similarly to IPv4).

   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC 8986).

   - ICMP: add per-rate limit counters.

   - Add support for user scanning requests in ieee802154.

   - Remove static WEP support.

   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate
     reporting.

   - Wi-Fi 7 EHT channel puncturing support (client & AP).

  BPF:

   - Add an rbtree data structure following the "next-gen data
     structure" precedent set by the recently added linked list, that
     is, by using kfunc + kptr instead of adding a new BPF map type.

   - Expose XDP hints via kfuncs, with initial support for RX hash and
     timestamp metadata (see the BPF-side sketch right after this
     list).

   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to
     better support decap on GRE tunnel devices not operating in
     collect-metadata mode.

   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.

   - Remove the need for trace_printk_lock for the bpf_trace_printk
     and bpf_trace_vprintk helpers.

   - Extend libbpf's bpf_tracing.h support for tracing arguments of
     kprobes/uprobes and syscalls as a special case.

   - Significantly reduce the search time for module symbols by
     livepatch and BPF.

   - Enable cpumasks to be used as kptrs, which is useful for tracing
     programs tracking which tasks end up running on which CPUs in
     different time intervals.

   - Add support for BPF trampoline on s390x and riscv64.

   - Add capability to export the XDP features supported by the NIC.

   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.

   - Add cgroup.memory=nobpf kernel parameter option to disable BPF
     memory accounting for container environments.
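  [Editor's note] To make the XDP-hints item above concrete, here is a
  minimal BPF-side sketch. It assumes the kfunc declarations as merged
  this cycle (bpf_xdp_metadata_rx_timestamp() and
  bpf_xdp_metadata_rx_hash(), the latter without a separate RSS-type
  argument) and a driver that actually implements the metadata ops;
  see Documentation/networking/xdp-rx-metadata.rst, added in this
  merge (file list below), for authoritative usage.

      // SPDX-License-Identifier: GPL-2.0
      /* Sketch: read RX hints from an XDP program via kfuncs. */
      #include <linux/bpf.h>
      #include <bpf/bpf_helpers.h>

      extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                               __u64 *timestamp) __ksym;
      extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
                                          __u32 *hash) __ksym;

      SEC("xdp")
      int read_rx_hints(struct xdp_md *ctx)
      {
              __u64 ts = 0;
              __u32 hash = 0;

              /* Each call returns 0 on success, or an error such as
               * -EOPNOTSUPP when the driver does not provide the hint. */
              if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
                      bpf_printk("rx timestamp: %llu", ts);
              if (!bpf_xdp_metadata_rx_hash(ctx, &hash))
                      bpf_printk("rx hash: %u", hash);
              return XDP_PASS;
      }

      char _license[] SEC("license") = "GPL";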
  Netfilter:

   - Remove the CLUSTERIP target. It has been marked as obsolete for
     years, and we still have WARN splats wrt races of the out-of-band
     /proc interface installed by this target.

   - Add 'destroy' commands to nf_tables. They are identical to the
     existing 'delete' commands, but do not return an error if the
     referenced object (set, chain, rule...) did not exist.

  Driver API:

   - Improve cpumask_local_spread() locality to help NICs set the right
     IRQ affinity on AMD platforms.

   - Separate C22 and C45 MDIO bus transactions more clearly (a sketch
     of the new bus callbacks follows the new-hardware list below).

   - Introduce new DCB table to control DSCP rewrite on egress.

   - Support configuration of Physical Layer Collision Avoidance (PLCA)
     Reconciliation Sublayer (RS) (802.3cg-2019), a modern version of
     shared-medium Ethernet.

   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing
     preemption of low-priority frames by high-priority frames.

   - Add support for controlling MACsec offload using netlink SET.

   - Rework devlink instance refcounts to allow registration and
     de-registration under the instance lock. Split the code into
     multiple files, drop some of the unnecessarily granular locks and
     factor out common parts of netlink operation handling.

   - Add TX frame aggregation parameters (for USB drivers).

   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning
     messages with notifications for debug.

   - Allow offloading of UDP NEW connections via act_ct.

   - Add support for per-action HW stats in TC.

   - Support hardware miss to TC action (continue processing in SW from
     a specific point in the action chain).

   - Warn if the old Wireless Extensions user space interface is used
     with modern cfg80211/mac80211 drivers. Do not support Wireless
     Extensions for Wi-Fi 7 devices at all. Everyone should switch to
     using the nl80211 interface instead.

   - Improve the CAN bit timing configuration. Use extack to return
     error messages directly to user space, update the SJW handling,
     including the definition of a new default value that will benefit
     CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:

   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux

   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)

   - CAN:
      - Renesas R-Car V4H
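  [Editor's note] As a sketch of the C22/C45 split from the "Driver
  API" list above: an MDIO bus driver now publishes separate Clause 22
  and Clause 45 callbacks instead of overloading a single op with an
  address flag. The foo_* callbacks are hypothetical, and the
  read_c45/write_c45 field names are assumptions based on this cycle's
  include/linux/phy.h; double-check against your tree before copying.

      // SPDX-License-Identifier: GPL-2.0
      /* Sketch: separate C22 and C45 ops on a hypothetical MDIO bus. */
      #include <linux/phy.h>

      static int foo_mdio_read_c22(struct mii_bus *bus, int addr, int regnum)
      {
              /* Plain Clause 22 access: 5-bit PHY address, 5-bit register. */
              return 0; /* would return the value read from hardware */
      }

      static int foo_mdio_read_c45(struct mii_bus *bus, int addr,
                                   int devad, int regnum)
      {
              /* Clause 45 access: devad selects the MMD, regnum is 16 bits. */
              return 0;
      }

      static void foo_mdio_init(struct mii_bus *bus)
      {
              bus->read = foo_mdio_read_c22;
              bus->read_c45 = foo_mdio_read_c45;
              /* PHY drivers then call mdiobus_read() or mdiobus_c45_read()
               * and the MDIO core routes to the matching op. */
      }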
  Drivers:

   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.

   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi-buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support

   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules,
           make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS
           etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q,
           8.6.5.1)

   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper PHYs
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: improve TX timestamp accuracy

   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio
        on platforms with integrated WiFi) related parameters from the
        BIOS to the firmware.

   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support

   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator

   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence

   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
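[Editor's note] Returning to the IP_LOCAL_PORT_RANGE item in the
"Protocols" list above, a minimal userspace sketch follows. It assumes
the uapi value (51) and encoding from this series: a 32-bit value with
the low port in bits 0-15 and the high port in bits 16-31, where 0
restores the system-wide range. Verify against include/uapi/linux/in.h
before relying on it.

    /* Sketch: restrict one socket's ephemeral ports to 40000-40500
     * instead of the system-wide net.ipv4.ip_local_port_range. */
    #include <stdio.h>
    #include <stdint.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef IP_LOCAL_PORT_RANGE
    #define IP_LOCAL_PORT_RANGE 51  /* assumed value, from 6.3 uapi */
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            uint32_t range = 40000 | (40500u << 16);

            if (setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
                           &range, sizeof(range)) < 0)
                    perror("setsockopt(IP_LOCAL_PORT_RANGE)");
            return 0;
    }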
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-peak_usb19
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt1
-rw-r--r--Documentation/admin-guide/sysctl/net.rst6
-rw-r--r--Documentation/bpf/bpf_design_QA.rst25
-rw-r--r--Documentation/bpf/cpumasks.rst393
-rw-r--r--Documentation/bpf/graph_ds_impl.rst267
-rw-r--r--Documentation/bpf/index.rst1
-rw-r--r--Documentation/bpf/instruction-set.rst136
-rw-r--r--Documentation/bpf/kfuncs.rst219
-rw-r--r--Documentation/bpf/libbpf/libbpf_naming_convention.rst6
-rw-r--r--Documentation/bpf/map_sockmap.rst498
-rw-r--r--Documentation/bpf/map_xskmap.rst2
-rw-r--r--Documentation/bpf/other.rst3
-rw-r--r--Documentation/bpf/ringbuf.rst4
-rw-r--r--Documentation/bpf/verifier.rst297
-rw-r--r--Documentation/conf.py3
-rw-r--r--Documentation/core-api/index.rst1
-rw-r--r--Documentation/core-api/netlink.rst101
-rw-r--r--Documentation/core-api/packing.rst2
-rw-r--r--Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml9
-rw-r--r--Documentation/devicetree/bindings/net/amlogic,g12a-mdio-mux.yaml80
-rw-r--r--Documentation/devicetree/bindings/net/amlogic,gxl-mdio-mux.yaml64
-rw-r--r--Documentation/devicetree/bindings/net/asix,ax88796c.yaml3
-rw-r--r--Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml16
-rw-r--r--Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml15
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa-port.yaml30
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.yaml49
-rw-r--r--Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml58
-rw-r--r--Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/qca8k.yaml14
-rw-r--r--Documentation/devicetree/bindings/net/dsa/realtek.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-switch-port.yaml26
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-switch.yaml62
-rw-r--r--Documentation/devicetree/bindings/net/fsl,fec.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/maxlinear,gpy2xx.yaml47
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt48
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz90x1.txt1
-rw-r--r--Documentation/devicetree/bindings/net/motorcomm,yt8xxx.yaml117
-rw-r--r--Documentation/devicetree/bindings/net/mscc,vsc7514-switch.yaml140
-rw-r--r--Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/rfkill-gpio.yaml51
-rw-r--r--Documentation/devicetree/bindings/net/rockchip-dwmac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml33
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml20
-rw-r--r--Documentation/devicetree/bindings/net/wireless/ieee80211.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt4
-rw-r--r--Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml12
-rw-r--r--Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml70
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml4
-rw-r--r--Documentation/isdn/interface_capi.rst2
-rw-r--r--Documentation/isdn/m_isdn.rst2
-rw-r--r--Documentation/netlink/genetlink-c.yaml331
-rw-r--r--Documentation/netlink/genetlink-legacy.yaml361
-rw-r--r--Documentation/netlink/genetlink.yaml296
-rw-r--r--Documentation/netlink/specs/ethtool.yaml397
-rw-r--r--Documentation/netlink/specs/fou.yaml128
-rw-r--r--Documentation/netlink/specs/netdev.yaml100
-rw-r--r--Documentation/networking/af_xdp.rst4
-rw-r--r--Documentation/networking/arcnet-hardware.rst2
-rw-r--r--Documentation/networking/batman-adv.rst2
-rw-r--r--Documentation/networking/can.rst2
-rw-r--r--Documentation/networking/can_ucan_protocol.rst2
-rw-r--r--Documentation/networking/cdc_mbim.rst2
-rw-r--r--Documentation/networking/device_drivers/atm/iphase.rst2
-rw-r--r--Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst4
-rw-r--r--Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg4
-rw-r--r--Documentation/networking/device_drivers/ethernet/3com/vortex.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst6
-rw-r--r--Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/index.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/ice.rst16
-rw-r--r--Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst746
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst1302
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst224
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst26
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst168
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst239
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/tracepoints.rst229
-rw-r--r--Documentation/networking/device_drivers/ethernet/pensando/ionic.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/ti/cpsw_switchdev.rst2
-rw-r--r--Documentation/networking/device_drivers/wwan/iosm.rst2
-rw-r--r--Documentation/networking/devlink/devlink-health.rst23
-rw-r--r--Documentation/networking/devlink/ice.rst4
-rw-r--r--Documentation/networking/devlink/index.rst1
-rw-r--r--Documentation/networking/devlink/mlx5.rst18
-rw-r--r--Documentation/networking/devlink/netdevsim.rst2
-rw-r--r--Documentation/networking/devlink/prestera.rst2
-rw-r--r--Documentation/networking/devlink/sfc.rst57
-rw-r--r--Documentation/networking/dsa/configuration.rst2
-rw-r--r--Documentation/networking/ethtool-netlink.rst272
-rw-r--r--Documentation/networking/gtp.rst2
-rw-r--r--Documentation/networking/ieee802154.rst2
-rw-r--r--Documentation/networking/index.rst1
-rw-r--r--Documentation/networking/ip-sysctl.rst17
-rw-r--r--Documentation/networking/ipvlan.rst2
-rw-r--r--Documentation/networking/j1939.rst2
-rw-r--r--Documentation/networking/net_failover.rst2
-rw-r--r--Documentation/networking/netconsole.rst2
-rw-r--r--Documentation/networking/page_pool.rst6
-rw-r--r--Documentation/networking/phonet.rst2
-rw-r--r--Documentation/networking/phy.rst2
-rw-r--r--Documentation/networking/regulatory.rst4
-rw-r--r--Documentation/networking/rxrpc.rst2
-rw-r--r--Documentation/networking/snmp_counter.rst4
-rw-r--r--Documentation/networking/statistics.rst1
-rw-r--r--Documentation/networking/sysfs-tagging.rst2
-rw-r--r--Documentation/networking/xdp-rx-metadata.rst110
-rw-r--r--Documentation/networking/xfrm_device.rst4
-rw-r--r--Documentation/userspace-api/netlink/c-code-gen.rst107
-rw-r--r--Documentation/userspace-api/netlink/genetlink-legacy.rst178
-rw-r--r--Documentation/userspace-api/netlink/index.rst6
-rw-r--r--Documentation/userspace-api/netlink/intro-specs.rst80
-rw-r--r--Documentation/userspace-api/netlink/specs.rst425
-rw-r--r--MAINTAINERS55
-rw-r--r--arch/arm/include/asm/checksum.h1
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts78
-rw-r--r--arch/arm64/boot/dts/freescale/imx93.dtsi48
-rw-r--r--arch/loongarch/net/bpf_jit.c2
-rw-r--r--arch/loongarch/net/bpf_jit.h21
-rw-r--r--arch/riscv/include/asm/patch.h2
-rw-r--r--arch/riscv/kernel/patch.c19
-rw-r--r--arch/riscv/kernel/probes/kprobes.c15
-rw-r--r--arch/riscv/net/bpf_jit.h5
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c435
-rw-r--r--arch/s390/net/bpf_jit_comp.c715
-rw-r--r--arch/sh/include/asm/checksum_32.h1
-rw-r--r--arch/x86/include/asm/checksum_64.h1
-rw-r--r--arch/x86/net/bpf_jit_comp.c171
-rw-r--r--crypto/asymmetric_keys/x509_loader.c1
-rw-r--r--drivers/base/regmap/regmap-mdio.c41
-rw-r--r--drivers/bluetooth/btintel.c116
-rw-r--r--drivers/bluetooth/btintel.h13
-rw-r--r--drivers/bluetooth/btusb.c16
-rw-r--r--drivers/bluetooth/hci_qca.c11
-rw-r--r--drivers/i2c/i2c-core-acpi.c13
-rw-r--r--drivers/i2c/i2c-core-base.c98
-rw-r--r--drivers/i2c/i2c-core-of.c66
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c635
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c3
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c18
-rw-r--r--drivers/infiniband/hw/mlx5/main.c78
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h3
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c3
-rw-r--r--drivers/mfd/ocelot-core.c68
-rw-r--r--drivers/net/Kconfig13
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/bonding/bond_main.c10
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c4
-rw-r--r--drivers/net/can/dev/bittiming.c120
-rw-r--r--drivers/net/can/dev/calc_bittiming.c34
-rw-r--r--drivers/net/can/dev/dev.c21
-rw-r--r--drivers/net/can/dev/netlink.c49
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c225
-rw-r--r--drivers/net/can/sja1000/ems_pci.c154
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c18
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h26
-rw-r--r--drivers/net/can/usb/esd_usb.c70
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c44
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c122
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h12
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c68
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c30
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h1
-rw-r--r--drivers/net/dsa/lan9303-core.c169
-rw-r--r--drivers/net/dsa/microchip/Kconfig10
-rw-r--r--drivers/net/dsa/microchip/Makefile5
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c25
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h2
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h33
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c246
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h69
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c1201
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h86
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp_reg.h142
-rw-r--r--drivers/net/dsa/microchip/lan937x.h1
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c9
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h3
-rw-r--r--drivers/net/dsa/mt7530.c87
-rw-r--r--drivers/net/dsa/mt7530.h15
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c201
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h23
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c66
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h18
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c32
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c46
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/switchdev.c83
-rw-r--r--drivers/net/dsa/mv88e6xxx/switchdev.h19
-rw-r--r--drivers/net/dsa/ocelot/Kconfig32
-rw-r--r--drivers/net/dsa/ocelot/Makefile13
-rw-r--r--drivers/net/dsa/ocelot/felix.c59
-rw-r--r--drivers/net/dsa/ocelot/felix.h2
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c64
-rw-r--r--drivers/net/dsa/ocelot/ocelot_ext.c163
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c1
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c92
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c49
-rw-r--r--drivers/net/dsa/qca/qca8k.h5
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h16
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c137
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c24
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c6
-rw-r--r--drivers/net/ethernet/adi/adin1110.c1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h49
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c94
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c24
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c415
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c5
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c474
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c11
-rw-r--r--drivers/net/ethernet/cadence/macb.h29
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c177
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c83
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c34
-rw-r--r--drivers/net/ethernet/engleder/Makefile2
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h16
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c479
-rw-r--r--drivers/net/ethernet/engleder/tsnep_tc.c21
-rw-r--r--drivers/net/ethernet/engleder/tsnep_xdp.c19
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig14
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile7
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c746
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h40
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c232
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h137
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c119
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c113
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c27
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c182
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c149
-rw-r--r--drivers/net/ethernet/fungible/funeth/Kconfig2
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c192
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c29
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c9
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c68
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c1038
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c65
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c56
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h46
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c94
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c421
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c252
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h643
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c157
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c32
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c70
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c1897
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h445
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c124
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c1916
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h328
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c377
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1005
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c1071
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c74
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c133
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c463
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c264
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h75
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c183
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c206
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c32
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c29
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c39
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c56
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c237
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c30
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c309
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c56
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c8
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c482
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h38
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c27
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_regs.h6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c43
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h9
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c11
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c312
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c227
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c197
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c678
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c213
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c337
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c755
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c166
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c167
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_goto.c10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h32
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c198
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c16
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c94
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c46
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c121
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h124
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h2511
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c53
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pool.c81
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c102
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h41
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c332
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c59
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c335
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h74
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c1260
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c2531
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c291
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c1356
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h120
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c4
-rw-r--r--drivers/net/ethernet/microchip/vcap/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_ag_api.h499
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c1201
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h13
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h13
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c77
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c19
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c127
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_private.h15
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c1910
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h10
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.c412
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.h32
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c2
-rw-r--r--drivers/net/ethernet/mscc/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Makefile1
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c66
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c31
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mm.c215
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c332
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c190
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c159
-rw-r--r--drivers/net/ethernet/netronome/Kconfig2
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile4
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c50
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/dcb.c571
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.h46
-rw-r--r--drivers/net/ethernet/ni/nixge.c141
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c67
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h13
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c117
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c165
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h40
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c14
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h20
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c18
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h6
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c191
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c54
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c24
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c554
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h50
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c37
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c105
-rw-r--r--drivers/net/ethernet/sfc/Kconfig1
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c30
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c114
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.h7
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c57
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h10
-rw-r--r--drivers/net/ethernet/sfc/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c731
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.h47
-rw-r--r--drivers/net/ethernet/sfc/mae.c218
-rw-r--r--drivers/net/ethernet/sfc/mae.h40
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c72
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h8
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h8
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c4
-rw-r--r--drivers/net/ethernet/socionext/netsec.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c334
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c20
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.c6
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c85
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c22
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c170
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c50
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c1197
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h42
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c2004
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h32
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h409
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe.h79
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c22
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c70
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c583
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c286
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h12
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h98
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe.h43
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c19
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c116
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h6
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c569
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h35
-rw-r--r--drivers/net/hamradio/baycom_epp.c8
-rw-r--r--drivers/net/hyperv/netvsc.c18
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c90
-rw-r--r--drivers/net/ieee802154/cc2520.c136
-rw-r--r--drivers/net/ipa/Makefile9
-rw-r--r--drivers/net/ipa/gsi.c486
-rw-r--r--drivers/net/ipa/gsi.h7
-rw-r--r--drivers/net/ipa/gsi_reg.c151
-rw-r--r--drivers/net/ipa/gsi_reg.h504
-rw-r--r--drivers/net/ipa/ipa.h4
-rw-r--r--drivers/net/ipa/ipa_cmd.c38
-rw-r--r--drivers/net/ipa/ipa_endpoint.c585
-rw-r--r--drivers/net/ipa/ipa_endpoint.h4
-rw-r--r--drivers/net/ipa/ipa_interrupt.c142
-rw-r--r--drivers/net/ipa/ipa_interrupt.h48
-rw-r--r--drivers/net/ipa/ipa_main.c122
-rw-r--r--drivers/net/ipa/ipa_mem.c22
-rw-r--r--drivers/net/ipa/ipa_mem.h8
-rw-r--r--drivers/net/ipa/ipa_power.c19
-rw-r--r--drivers/net/ipa/ipa_power.h12
-rw-r--r--drivers/net/ipa/ipa_reg.c90
-rw-r--r--drivers/net/ipa/ipa_reg.h190
-rw-r--r--drivers/net/ipa/ipa_resource.c16
-rw-r--r--drivers/net/ipa/ipa_table.c68
-rw-r--r--drivers/net/ipa/ipa_uc.c27
-rw-r--r--drivers/net/ipa/ipa_uc.h8
-rw-r--r--drivers/net/ipa/ipa_version.h6
-rw-r--r--drivers/net/ipa/reg.h133
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.1.c291
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.5.1.c303
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.0.c308
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.11.c313
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.5.c311
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.9.c312
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c283
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c269
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c271
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c255
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c287
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.7.c271
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c271
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/macsec.c125
-rw-r--r--drivers/net/mdio/Kconfig11
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/fwnode_mdio.c8
-rw-r--r--drivers/net/mdio/mdio-aspeed.c48
-rw-r--r--drivers/net/mdio/mdio-bitbang.c77
-rw-r--r--drivers/net/mdio/mdio-cavium.c111
-rw-r--r--drivers/net/mdio/mdio-cavium.h9
-rw-r--r--drivers/net/mdio/mdio-i2c.c38
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c154
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c8
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c6
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c54
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c38
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c164
-rw-r--r--drivers/net/mdio/mdio-mvusb.c6
-rw-r--r--drivers/net/mdio/mdio-octeon.c6
-rw-r--r--drivers/net/mdio/mdio-thunder.c6
-rw-r--r--drivers/net/netdevsim/bpf.c4
-rw-r--r--drivers/net/netdevsim/dev.c50
-rw-r--r--drivers/net/netdevsim/health.c20
-rw-r--r--drivers/net/netdevsim/ipsec.c14
-rw-r--r--drivers/net/netdevsim/netdev.c1
-rw-r--r--drivers/net/pcs/pcs-lynx.c20
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c6
-rw-r--r--drivers/net/pcs/pcs-xpcs.c4
-rw-r--r--drivers/net/phy/Kconfig9
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/mdio-open-alliance.h46
-rw-r--r--drivers/net/phy/mdio_bus.c464
-rw-r--r--drivers/net/phy/micrel.c870
-rw-r--r--drivers/net/phy/microchip_t1.c70
-rw-r--r--drivers/net/phy/motorcomm.c559
-rw-r--r--drivers/net/phy/mxl-gpy.c5
-rw-r--r--drivers/net/phy/ncn26000.c171
-rw-r--r--drivers/net/phy/phy-c45.c514
-rw-r--r--drivers/net/phy/phy-core.c5
-rw-r--r--drivers/net/phy/phy.c417
-rw-r--r--drivers/net/phy/phy_device.c56
-rw-r--r--drivers/net/phy/phylink.c23
-rw-r--r--drivers/net/phy/sfp.c39
-rw-r--r--drivers/net/tap.c2
-rw-r--r--drivers/net/thunderbolt/Kconfig12
-rw-r--r--drivers/net/thunderbolt/Makefile6
-rw-r--r--drivers/net/thunderbolt/main.c (renamed from drivers/net/thunderbolt.c)48
-rw-r--r--drivers/net/thunderbolt/trace.c10
-rw-r--r--drivers/net/thunderbolt/trace.h141
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/cdc_ether.c114
-rw-r--r--drivers/net/usb/r8152.c179
-rw-r--r--drivers/net/usb/usbnet.c29
-rw-r--r--drivers/net/veth.c91
-rw-r--r--drivers/net/virtio_net.c428
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c47
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h16
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c93
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c48
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c24
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c371
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c104
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig34
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile27
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c964
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h184
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c939
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h822
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c357
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c102
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h67
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c1580
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h1816
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c2596
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h106
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c4234
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h145
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c1211
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c2222
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h1142
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h2961
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c850
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h704
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.c145
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h194
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h144
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c789
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.h316
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c1041
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h312
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c7038
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h76
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c616
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h46
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c1374
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h135
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c342
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h67
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c3087
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h569
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c732
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h95
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h1441
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h152
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c6600
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h4803
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c74
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h82
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h148
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h8
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c11
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c16
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c14
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h145
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c5
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.c1
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c76
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/types.h21
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c132
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c124
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c194
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c193
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c110
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c416
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c249
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h142
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c1899
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c24
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c13
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c45
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c28
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c450
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h46
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c52
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c50
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c41
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1813
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c130
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h295
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c43
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c146
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h54
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c99
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c19
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c353
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c26
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c2
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_rf.h3
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nvme/host/tcp.c3
-rw-r--r--drivers/nvme/target/tcp.c5
-rw-r--r--drivers/ptp/ptp_qoriq.c50
-rw-r--r--drivers/s390/net/ctcm_fsms.c32
-rw-r--r--drivers/s390/net/ctcm_main.c16
-rw-r--r--drivers/s390/net/ctcm_mpc.c15
-rw-r--r--drivers/s390/net/ism.h19
-rw-r--r--drivers/s390/net/ism_drv.c376
-rw-r--r--drivers/s390/net/qeth_core_main.c14
-rw-r--r--drivers/s390/net/qeth_core_sys.c66
-rw-r--r--drivers/s390/net/qeth_ethtool.c6
-rw-r--r--drivers/s390/net/qeth_l2_main.c53
-rw-r--r--drivers/s390/net/qeth_l2_sys.c28
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_sys.c83
-rw-r--r--drivers/scsi/iscsi_tcp.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c1
-rw-r--r--drivers/soc/qcom/qmi_interface.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/vhost/vsock.c214
-rw-r--r--drivers/xen/pvcalls-back.c5
-rw-r--r--fs/dlm/lowcomms.c5
-rw-r--r--fs/ocfs2/cluster/tcp.c5
-rw-r--r--include/linux/acpi.h15
-rw-r--r--include/linux/avf/virtchnl.h159
-rw-r--r--include/linux/bitfield.h26
-rw-r--r--include/linux/bpf.h156
-rw-r--r--include/linux/bpf_verifier.h83
-rw-r--r--include/linux/btf.h23
-rw-r--r--include/linux/can/bittiming.h12
-rw-r--r--include/linux/cpumask.h20
-rw-r--r--include/linux/dsa/ksz_common.h53
-rw-r--r--include/linux/ethtool.h265
-rw-r--r--include/linux/ethtool_netlink.h42
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/find.h33
-rw-r--r--include/linux/fsl/enetc_mdio.h21
-rw-r--r--include/linux/fsl/ptp_qoriq.h1
-rw-r--r--include/linux/i2c.h24
-rw-r--r--include/linux/ieee80211.h1
-rw-r--r--include/linux/ieee802154.h7
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/ip.h21
-rw-r--r--include/linux/ism.h98
-rw-r--r--include/linux/mdio-bitbang.h6
-rw-r--r--include/linux/mdio.h150
-rw-r--r--include/linux/memcontrol.h11
-rw-r--r--include/linux/micrel_phy.h3
-rw-r--r--include/linux/mlx4/qp.h1
-rw-r--r--include/linux/mlx5/device.h6
-rw-r--r--include/linux/mlx5/driver.h24
-rw-r--r--include/linux/mlx5/fs.h5
-rw-r--r--include/linux/mlx5/mlx5_ifc.h297
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/netdevice.h22
-rw-r--r--include/linux/netfilter.h3
-rw-r--r--include/linux/netlink.h14
-rw-r--r--include/linux/phy.h116
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/ptp_classify.h73
-rw-r--r--include/linux/regmap.h8
-rw-r--r--include/linux/skbuff.h49
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h3
-rw-r--r--include/linux/spi/at86rf230.h20
-rw-r--r--include/linux/spi/cc2520.h21
-rw-r--r--include/linux/string_helpers.h5
-rw-r--r--include/linux/topology.h33
-rw-r--r--include/linux/u64_stats_sync.h12
-rw-r--r--include/linux/virtio_vsock.h129
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/bluetooth/hci.h4
-rw-r--r--include/net/bluetooth/mgmt.h2
-rw-r--r--include/net/cfg80211.h148
-rw-r--r--include/net/cfg802154.h78
-rw-r--r--include/net/checksum.h4
-rw-r--r--include/net/dcbnl.h18
-rw-r--r--include/net/devlink.h55
-rw-r--r--include/net/dropreason.h26
-rw-r--r--include/net/dsa.h11
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/flow.h5
-rw-r--r--include/net/flow_offload.h6
-rw-r--r--include/net/ieee802154_netdev.h52
-rw-r--r--include/net/inet_sock.h4
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ip_vs.h1
-rw-r--r--include/net/ipv6.h3
-rw-r--r--include/net/mac80211.h81
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h12
-rw-r--r--include/net/netfilter/nf_flow_table.h8
-rw-r--r--include/net/netfilter/nf_tables_core.h16
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h4
-rw-r--r--include/net/netlink.h3
-rw-r--r--include/net/netns/core.h5
-rw-r--r--include/net/nl802154.h61
-rw-r--r--include/net/page_pool.h14
-rw-r--r--include/net/pkt_cls.h74
-rw-r--r--include/net/pkt_sched.h21
-rw-r--r--include/net/raw.h13
-rw-r--r--include/net/route.h3
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/smc.h24
-rw-r--r--include/net/sock.h35
-rw-r--r--include/net/tc_act/tc_connmark.h9
-rw-r--r--include/net/tc_act/tc_nat.h10
-rw-r--r--include/net/tc_act/tc_pedit.h81
-rw-r--r--include/net/tc_wrapper.h15
-rw-r--r--include/net/xdp.h36
-rw-r--r--include/net/xsk_buff_pool.h5
-rw-r--r--include/soc/mscc/ocelot.h64
-rw-r--r--include/soc/mscc/ocelot_dev.h23
-rw-r--r--include/soc/mscc/vsc7514_regs.h18
-rw-r--r--include/trace/events/bridge.h58
-rw-r--r--include/trace/events/devlink.h2
-rw-r--r--include/trace/events/rxrpc.h492
-rw-r--r--include/trace/events/skb.h10
-rw-r--r--include/trace/events/sock.h69
-rw-r--r--include/uapi/linux/batadv_packet.h2
-rw-r--r--include/uapi/linux/bpf.h35
-rw-r--r--include/uapi/linux/dcbnl.h2
-rw-r--r--include/uapi/linux/ethtool.h48
-rw-r--r--include/uapi/linux/ethtool_netlink.h79
-rw-r--r--include/uapi/linux/fou.h54
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_link.h5
-rw-r--r--include/uapi/linux/if_packet.h1
-rw-r--r--include/uapi/linux/in.h1
-rw-r--r--include/uapi/linux/ioam6.h2
-rw-r--r--include/uapi/linux/mdio.h8
-rw-r--r--include/uapi/linux/netdev.h59
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h14
-rw-r--r--include/uapi/linux/nl80211.h36
-rw-r--r--include/uapi/linux/rpl.h4
-rw-r--r--include/uapi/linux/rtnetlink.h1
-rw-r--r--include/uapi/linux/snmp.h3
-rw-r--r--init/Kconfig2
-rw-r--r--kernel/bpf/Makefile1
-rw-r--r--kernel/bpf/bpf_local_storage.c8
-rw-r--r--kernel/bpf/btf.c394
-rw-r--r--kernel/bpf/core.c25
-rw-r--r--kernel/bpf/cpumap.c2
-rw-r--r--kernel/bpf/cpumask.c479
-rw-r--r--kernel/bpf/devmap.c16
-rw-r--r--kernel/bpf/hashtab.c4
-rw-r--r--kernel/bpf/helpers.c203
-rw-r--r--kernel/bpf/memalloc.c5
-rw-r--r--kernel/bpf/offload.c419
-rw-r--r--kernel/bpf/preload/bpf_preload_kern.c6
-rw-r--r--kernel/bpf/preload/iterators/Makefile12
-rw-r--r--kernel/bpf/preload/iterators/README5
-rw-r--r--kernel/bpf/preload/iterators/iterators.lskel-big-endian.h419
-rw-r--r--kernel/bpf/preload/iterators/iterators.lskel-little-endian.h (renamed from kernel/bpf/preload/iterators/iterators.lskel.h)0
-rw-r--r--kernel/bpf/syscall.c106
-rw-r--r--kernel/bpf/verifier.c1293
-rw-r--r--kernel/cgroup/rstat.c4
-rw-r--r--kernel/kexec_core.c3
-rw-r--r--kernel/livepatch/core.c10
-rw-r--r--kernel/module/kallsyms.c13
-rw-r--r--kernel/sched/topology.c95
-rw-r--r--kernel/trace/bpf_trace.c157
-rw-r--r--kernel/trace/ftrace.c2
-rw-r--r--lib/Kconfig.debug9
-rw-r--r--lib/cpumask.c52
-rw-r--r--lib/find_bit.c9
-rw-r--r--mm/memcontrol.c18
-rw-r--r--net/Makefile1
-rw-r--r--net/batman-adv/bat_iv_ogm.c1
-rw-r--r--net/batman-adv/bat_v_elp.c1
-rw-r--r--net/batman-adv/bat_v_ogm.c5
-rw-r--r--net/batman-adv/distributed-arp-table.c2
-rw-r--r--net/batman-adv/gateway_common.c2
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/multicast.c251
-rw-r--r--net/batman-adv/multicast.h38
-rw-r--r--net/batman-adv/network-coding.c4
-rw-r--r--net/batman-adv/routing.c7
-rw-r--r--net/batman-adv/soft-interface.c26
-rw-r--r--net/batman-adv/translation-table.c4
-rw-r--r--net/batman-adv/tvlv.c71
-rw-r--r--net/batman-adv/tvlv.h9
-rw-r--r--net/batman-adv/types.h6
-rw-r--r--net/bluetooth/hci_conn.c23
-rw-r--r--net/bluetooth/l2cap_core.c24
-rw-r--r--net/bluetooth/l2cap_sock.c8
-rw-r--r--net/bluetooth/mgmt.c12
-rw-r--r--net/bluetooth/rfcomm/core.c4
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c18
-rw-r--r--net/bpf/test_run.c77
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/bridge/br_mdb.c66
-rw-r--r--net/bridge/br_multicast.c179
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netlink.c19
-rw-r--r--net/bridge/br_netlink_tunnel.c3
-rw-r--r--net/bridge/br_private.h12
-rw-r--r--net/bridge/br_switchdev.c10
-rw-r--r--net/bridge/br_vlan.c11
-rw-r--r--net/bridge/br_vlan_options.c27
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c4
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/can/gw.c7
-rw-r--r--net/can/isotp.c3
-rw-r--r--net/can/raw.c11
-rw-r--r--net/ceph/messenger.c4
-rw-r--r--net/core/Makefile4
-rw-r--r--net/core/dev.c30
-rw-r--r--net/core/dev.h20
-rw-r--r--net/core/dst.c8
-rw-r--r--net/core/filter.c116
-rw-r--r--net/core/gro.c12
-rw-r--r--net/core/neighbour.c14
-rw-r--r--net/core/net-sysfs.c92
-rw-r--r--net/core/net-traces.c3
-rw-r--r--net/core/netdev-genl-gen.c48
-rw-r--r--net/core/netdev-genl-gen.h23
-rw-r--r--net/core/netdev-genl.c179
-rw-r--r--net/core/netpoll.c12
-rw-r--r--net/core/page_pool.c6
-rw-r--r--net/core/rtnetlink.c35
-rw-r--r--net/core/scm.c2
-rw-r--r--net/core/skbuff.c237
-rw-r--r--net/core/skmsg.c5
-rw-r--r--net/core/sock.c56
-rw-r--r--net/core/sysctl_net_core.c111
-rw-r--r--net/core/xdp.c88
-rw-r--r--net/dcb/dcbnl.c272
-rw-r--r--net/devlink/Makefile3
-rw-r--r--net/devlink/core.c320
-rw-r--r--net/devlink/dev.c1346
-rw-r--r--net/devlink/devl_internal.h239
-rw-r--r--net/devlink/health.c1333
-rw-r--r--net/devlink/leftover.c (renamed from net/core/devlink.c)4621
-rw-r--r--net/devlink/netlink.c251
-rw-r--r--net/dsa/master.c6
-rw-r--r--net/dsa/slave.c50
-rw-r--r--net/dsa/tag_ksz.c216
-rw-r--r--net/ethtool/Makefile4
-rw-r--r--net/ethtool/channels.c92
-rw-r--r--net/ethtool/coalesce.c114
-rw-r--r--net/ethtool/common.c8
-rw-r--r--net/ethtool/common.h2
-rw-r--r--net/ethtool/debug.c71
-rw-r--r--net/ethtool/eee.c78
-rw-r--r--net/ethtool/fec.c83
-rw-r--r--net/ethtool/linkinfo.c81
-rw-r--r--net/ethtool/linkmodes.c91
-rw-r--r--net/ethtool/mm.c251
-rw-r--r--net/ethtool/module.c89
-rw-r--r--net/ethtool/netlink.c135
-rw-r--r--net/ethtool/netlink.h74
-rw-r--r--net/ethtool/pause.c125
-rw-r--r--net/ethtool/plca.c248
-rw-r--r--net/ethtool/privflags.c84
-rw-r--r--net/ethtool/pse-pd.c81
-rw-r--r--net/ethtool/rings.c118
-rw-r--r--net/ethtool/stats.c159
-rw-r--r--net/ethtool/wol.c79
-rw-r--r--net/ieee802154/header_ops.c24
-rw-r--r--net/ieee802154/nl802154.c283
-rw-r--r--net/ieee802154/nl802154.h4
-rw-r--r--net/ieee802154/rdev-ops.h56
-rw-r--r--net/ieee802154/trace.h61
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c10
-rw-r--r--net/ipv4/bpf_tcp_ca.c3
-rw-r--r--net/ipv4/cipso_ipv4.c2
-rw-r--r--net/ipv4/fou_core.c (renamed from net/ipv4/fou.c)47
-rw-r--r--net/ipv4/fou_nl.c48
-rw-r--r--net/ipv4/fou_nl.h25
-rw-r--r--net/ipv4/icmp.c3
-rw-r--r--net/ipv4/inet_connection_sock.c30
-rw-r--r--net/ipv4/inet_hashtables.c14
-rw-r--r--net/ipv4/inet_timewait_sock.c3
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c18
-rw-r--r--net/ipv4/netfilter/Kconfig14
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c929
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c1
-rw-r--r--net/ipv4/proc.c8
-rw-r--r--net/ipv4/raw.c21
-rw-r--r--net/ipv4/tcp_bbr.c16
-rw-r--r--net/ipv4/tcp_cong.c10
-rw-r--r--net/ipv4/tcp_cubic.c12
-rw-r--r--net/ipv4/tcp_dctcp.c12
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv6/af_inet6.c10
-rw-r--r--net/ipv6/icmp.c49
-rw-r--r--net/ipv6/ipv6_sockglue.c12
-rw-r--r--net/ipv6/ndisc.c168
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c1
-rw-r--r--net/ipv6/proc.c1
-rw-r--r--net/ipv6/raw.c16
-rw-r--r--net/ipv6/route.c23
-rw-r--r--net/ipv6/rpl_iptunnel.c2
-rw-r--r--net/ipv6/seg6_local.c352
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/kcm/kcmsock.c3
-rw-r--r--net/l2tp/l2tp_ppp.c125
-rw-r--r--net/mac80211/cfg.c86
-rw-r--r--net/mac80211/chan.c2
-rw-r--r--net/mac80211/debugfs_netdev.c3
-rw-r--r--net/mac80211/ieee80211_i.h6
-rw-r--r--net/mac80211/link.c3
-rw-r--r--net/mac80211/mlme.c167
-rw-r--r--net/mac80211/rx.c416
-rw-r--r--net/mac80211/sta_info.c14
-rw-r--r--net/mac80211/sta_info.h27
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c26
-rw-r--r--net/mac80211/vht.c25
-rw-r--r--net/mac802154/Makefile2
-rw-r--r--net/mac802154/cfg.c60
-rw-r--r--net/mac802154/ieee802154_i.h61
-rw-r--r--net/mac802154/iface.c6
-rw-r--r--net/mac802154/llsec.c5
-rw-r--r--net/mac802154/main.c37
-rw-r--r--net/mac802154/rx.c36
-rw-r--r--net/mac802154/scan.c456
-rw-r--r--net/mac802154/tx.c42
-rw-r--r--net/mptcp/options.c3
-rw-r--r--net/mptcp/pm_netlink.c63
-rw-r--r--net/mptcp/pm_userspace.c5
-rw-r--r--net/mptcp/protocol.c39
-rw-r--r--net/mptcp/protocol.h2
-rw-r--r--net/mptcp/sockopt.c3
-rw-r--r--net/mptcp/subflow.c3
-rw-r--r--net/mptcp/token.c14
-rw-r--r--net/mptcp/token_test.c3
-rw-r--r--net/netfilter/Kconfig3
-rw-r--r--net/netfilter/Makefile7
-rw-r--r--net/netfilter/core.c16
-rw-r--r--net/netfilter/ipset/Kconfig2
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c2
-rw-r--r--net/netfilter/nf_conntrack_bpf.c20
-rw-r--r--net/netfilter/nf_conntrack_core.c69
-rw-r--r--net/netfilter/nf_conntrack_helper.c98
-rw-r--r--net/netfilter/nf_conntrack_netlink.c2
-rw-r--r--net/netfilter/nf_conntrack_ovs.c178
-rw-r--r--net/netfilter/nf_conntrack_proto.c20
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c44
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c44
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c10
-rw-r--r--net/netfilter/nf_conntrack_standalone.c12
-rw-r--r--net/netfilter/nf_flow_table_core.c5
-rw-r--r--net/netfilter/nf_flow_table_inet.c2
-rw-r--r--net/netfilter/nf_flow_table_offload.c18
-rw-r--r--net/netfilter/nf_log_syslog.c2
-rw-r--r--net/netfilter/nf_nat_bpf.c6
-rw-r--r--net/netfilter/nf_tables_api.c114
-rw-r--r--net/netfilter/nf_tables_core.c35
-rw-r--r--net/netfilter/nft_ct.c39
-rw-r--r--net/netfilter/nft_ct_fast.c56
-rw-r--r--net/netfilter/nft_objref.c12
-rw-r--r--net/netfilter/xt_length.c2
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/openvswitch/Kconfig1
-rw-r--r--net/openvswitch/conntrack.c85
-rw-r--r--net/openvswitch/flow.c12
-rw-r--r--net/openvswitch/flow.h2
-rw-r--r--net/openvswitch/flow_table.c8
-rw-r--r--net/packet/af_packet.c8
-rw-r--r--net/phonet/pep-gprs.c4
-rw-r--r--net/qrtr/ns.c3
-rw-r--r--net/rds/ib_recv.c1
-rw-r--r--net/rds/message.c2
-rw-r--r--net/rds/recv.c1
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rds/tcp_recv.c2
-rw-r--r--net/rfkill/core.c16
-rw-r--r--net/rfkill/rfkill-gpio.c20
-rw-r--r--net/rxrpc/Kconfig9
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-internal.h15
-rw-r--r--net/rxrpc/call_accept.c2
-rw-r--r--net/rxrpc/call_event.c15
-rw-r--r--net/rxrpc/call_object.c13
-rw-r--r--net/rxrpc/conn_event.c2
-rw-r--r--net/rxrpc/conn_service.c7
-rw-r--r--net/rxrpc/input.c62
-rw-r--r--net/rxrpc/io_thread.c48
-rw-r--r--net/rxrpc/local_object.c7
-rw-r--r--net/rxrpc/misc.c7
-rw-r--r--net/rxrpc/output.c79
-rw-r--r--net/rxrpc/proc.c4
-rw-r--r--net/rxrpc/recvmsg.c36
-rw-r--r--net/rxrpc/skbuff.c4
-rw-r--r--net/rxrpc/sysctl.c17
-rw-r--r--net/rxrpc/txbuf.c12
-rw-r--r--net/sched/Kconfig91
-rw-r--r--net/sched/Makefile7
-rw-r--r--net/sched/act_api.c57
-rw-r--r--net/sched/act_connmark.c107
-rw-r--r--net/sched/act_ct.c141
-rw-r--r--net/sched/act_gate.c30
-rw-r--r--net/sched/act_mirred.c23
-rw-r--r--net/sched/act_nat.c72
-rw-r--r--net/sched/act_pedit.c300
-rw-r--r--net/sched/cls_api.c304
-rw-r--r--net/sched/cls_flower.c80
-rw-r--r--net/sched/cls_matchall.c6
-rw-r--r--net/sched/cls_rsvp.c26
-rw-r--r--net/sched/cls_rsvp.h764
-rw-r--r--net/sched/cls_rsvp6.c26
-rw-r--r--net/sched/cls_tcindex.c742
-rw-r--r--net/sched/sch_api.c87
-rw-r--r--net/sched/sch_atm.c706
-rw-r--r--net/sched/sch_cake.c2
-rw-r--r--net/sched/sch_cbq.c1727
-rw-r--r--net/sched/sch_dsmark.c518
-rw-r--r--net/sched/sch_mqprio.c291
-rw-r--r--net/sched/sch_mqprio_lib.c117
-rw-r--r--net/sched/sch_mqprio_lib.h18
-rw-r--r--net/sched/sch_taprio.c745
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/smc/af_smc.c40
-rw-r--r--net/smc/smc_clc.c11
-rw-r--r--net/smc/smc_core.c105
-rw-r--r--net/smc/smc_core.h6
-rw-r--r--net/smc/smc_diag.c3
-rw-r--r--net/smc/smc_ism.c180
-rw-r--r--net/smc/smc_ism.h3
-rw-r--r--net/smc/smc_llc.c34
-rw-r--r--net/smc/smc_pnet.c40
-rw-r--r--net/smc/smc_rx.c4
-rw-r--r--net/socket.c33
-rw-r--r--net/sunrpc/svcsock.c5
-rw-r--r--net/sunrpc/xprtsock.c3
-rw-r--r--net/tipc/netlink_compat.c16
-rw-r--r--net/tipc/socket.c3
-rw-r--r--net/tipc/topsrv.c5
-rw-r--r--net/tls/tls_sw.c3
-rw-r--r--net/unix/af_unix.c21
-rw-r--r--net/vmw_vsock/af_vsock.c3
-rw-r--r--net/vmw_vsock/virtio_transport.c149
-rw-r--r--net/vmw_vsock/virtio_transport_common.c422
-rw-r--r--net/vmw_vsock/vsock_loopback.c51
-rw-r--r--net/wireless/ap.c2
-rw-r--r--net/wireless/chan.c69
-rw-r--r--net/wireless/core.h4
-rw-r--r--net/wireless/ibss.c5
-rw-r--r--net/wireless/mlme.c5
-rw-r--r--net/wireless/nl80211.c162
-rw-r--r--net/wireless/nl80211.h2
-rw-r--r--net/wireless/reg.c57
-rw-r--r--net/wireless/sme.c54
-rw-r--r--net/wireless/trace.h309
-rw-r--r--net/wireless/util.c185
-rw-r--r--net/wireless/wext-compat.c2
-rw-r--r--net/wireless/wext-core.c20
-rw-r--r--net/wireless/wext-sme.c2
-rw-r--r--net/xdp/xsk.c73
-rw-r--r--net/xdp/xsk_buff_pool.c7
-rw-r--r--net/xdp/xsk_queue.c11
-rw-r--r--net/xdp/xsk_queue.h1
-rw-r--r--net/xfrm/espintcp.c3
-rw-r--r--net/xfrm/xfrm_device.c8
-rw-r--r--net/xfrm/xfrm_interface_bpf.c7
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--samples/bpf/Makefile24
-rw-r--r--samples/bpf/gnu/stubs.h1
-rw-r--r--samples/bpf/lwt_len_hist.bpf.c (renamed from samples/bpf/lwt_len_hist_kern.c)29
-rwxr-xr-xsamples/bpf/lwt_len_hist.sh4
-rw-r--r--samples/bpf/map_perf_test.bpf.c (renamed from samples/bpf/map_perf_test_kern.c)48
-rw-r--r--samples/bpf/map_perf_test_user.c2
-rw-r--r--samples/bpf/net_shared.h32
-rw-r--r--samples/bpf/sock_flags.bpf.c (renamed from samples/bpf/sock_flags_kern.c)24
-rw-r--r--samples/bpf/syscall_tp_kern.c14
-rwxr-xr-xsamples/bpf/tc_l2_redirect.sh3
-rwxr-xr-xsamples/bpf/test_cgrp2_sock.sh16
-rwxr-xr-xsamples/bpf/test_cgrp2_sock2.sh9
-rw-r--r--samples/bpf/test_cgrp2_tc.bpf.c (renamed from samples/bpf/test_cgrp2_tc_kern.c)34
-rwxr-xr-xsamples/bpf/test_cgrp2_tc.sh8
-rw-r--r--samples/bpf/test_current_task_under_cgroup.bpf.c (renamed from samples/bpf/test_current_task_under_cgroup_kern.c)11
-rw-r--r--samples/bpf/test_current_task_under_cgroup_user.c8
-rw-r--r--samples/bpf/test_lru_dist.c5
-rw-r--r--samples/bpf/test_lwt_bpf.c50
-rwxr-xr-xsamples/bpf/test_lwt_bpf.sh19
-rw-r--r--samples/bpf/test_map_in_map.bpf.c (renamed from samples/bpf/test_map_in_map_kern.c)8
-rw-r--r--samples/bpf/test_map_in_map_user.c4
-rw-r--r--samples/bpf/test_overhead_kprobe.bpf.c (renamed from samples/bpf/test_overhead_kprobe_kern.c)6
-rw-r--r--samples/bpf/test_overhead_raw_tp.bpf.c (renamed from samples/bpf/test_overhead_raw_tp_kern.c)4
-rw-r--r--samples/bpf/test_overhead_tp.bpf.c (renamed from samples/bpf/test_overhead_tp_kern.c)29
-rw-r--r--samples/bpf/test_overhead_user.c34
-rw-r--r--samples/bpf/test_probe_write_user.bpf.c (renamed from samples/bpf/test_probe_write_user_kern.c)20
-rw-r--r--samples/bpf/test_probe_write_user_user.c2
-rw-r--r--samples/bpf/trace_common.h13
-rw-r--r--samples/bpf/trace_output.bpf.c (renamed from samples/bpf/trace_output_kern.c)6
-rw-r--r--samples/bpf/trace_output_user.c2
-rw-r--r--samples/bpf/tracex2.bpf.c (renamed from samples/bpf/tracex2_kern.c)13
-rw-r--r--samples/bpf/tracex2_user.c2
-rw-r--r--samples/bpf/tracex4_user.c4
-rw-r--r--samples/bpf/xdp1_user.c2
-rw-r--r--samples/bpf/xdp_adjust_tail_user.c2
-rw-r--r--samples/bpf/xdp_fwd_user.c4
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c4
-rw-r--r--samples/bpf/xdp_rxq_info_user.c2
-rw-r--r--samples/bpf/xdp_sample.bpf.h22
-rw-r--r--samples/bpf/xdp_sample_pkts_user.c2
-rw-r--r--samples/bpf/xdp_tx_iptunnel_user.c2
-rwxr-xr-xscripts/bpf_doc.py2
-rwxr-xr-xscripts/pahole-flags.sh4
-rw-r--r--tools/bpf/bpftool/Makefile8
-rw-r--r--tools/bpf/bpftool/btf.c13
-rw-r--r--tools/bpf/bpftool/btf_dumper.c4
-rw-r--r--tools/bpf/bpftool/cgroup.c4
-rw-r--r--tools/bpf/bpftool/common.c13
-rw-r--r--tools/bpf/bpftool/feature.c8
-rw-r--r--tools/bpf/bpftool/link.c4
-rw-r--r--tools/bpf/bpftool/main.h3
-rw-r--r--tools/bpf/bpftool/map.c8
-rw-r--r--tools/bpf/bpftool/prog.c60
-rw-r--r--tools/bpf/bpftool/struct_ops.c6
-rw-r--r--tools/bpf/resolve_btfids/Build4
-rw-r--r--tools/bpf/resolve_btfids/Makefile47
-rw-r--r--tools/bpf/resolve_btfids/main.c2
-rw-r--r--tools/bpf/runqslower/Makefile2
-rw-r--r--tools/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--tools/include/uapi/linux/bpf.h35
-rw-r--r--tools/include/uapi/linux/netdev.h59
-rw-r--r--tools/lib/bpf/bpf.c20
-rw-r--r--tools/lib/bpf/bpf.h9
-rw-r--r--tools/lib/bpf/bpf_core_read.h4
-rw-r--r--tools/lib/bpf/bpf_helpers.h2
-rw-r--r--tools/lib/bpf/bpf_tracing.h320
-rw-r--r--tools/lib/bpf/btf.c24
-rw-r--r--tools/lib/bpf/btf_dump.c199
-rw-r--r--tools/lib/bpf/libbpf.c72
-rw-r--r--tools/lib/bpf/libbpf.h126
-rw-r--r--tools/lib/bpf/libbpf.map8
-rw-r--r--tools/lib/bpf/libbpf_errno.c16
-rw-r--r--tools/lib/bpf/libbpf_internal.h5
-rw-r--r--tools/lib/bpf/libbpf_probes.c83
-rw-r--r--tools/lib/bpf/libbpf_version.h2
-rw-r--r--tools/lib/bpf/netlink.c120
-rw-r--r--tools/lib/bpf/nlattr.c2
-rw-r--r--tools/lib/bpf/nlattr.h12
-rw-r--r--tools/lib/bpf/ringbuf.c4
-rw-r--r--tools/lib/bpf/usdt.bpf.h5
-rwxr-xr-xtools/net/ynl/cli.py52
-rw-r--r--tools/net/ynl/lib/__init__.py7
-rw-r--r--tools/net/ynl/lib/nlspec.py310
-rw-r--r--tools/net/ynl/lib/ynl.py528
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py2357
-rwxr-xr-xtools/net/ynl/ynl-regen.sh30
-rw-r--r--tools/testing/selftests/bpf/.gitignore2
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x67
-rw-r--r--tools/testing/selftests/bpf/Makefile87
-rw-r--r--tools/testing/selftests/bpf/bench.c59
-rw-r--r--tools/testing/selftests/bpf/bench.h2
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c5
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c5
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c283
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_loop.c1
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage.c3
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c16
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_ringbufs.c4
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_strncmp.c2
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh2
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh2
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h24
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c2
-rw-r--r--tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c2
-rw-r--r--tools/testing/selftests/bpf/netcnt_common.h6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_cookie.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c20
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c69
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/check_mtu.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cpumask.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/decap_sanity.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dynptr.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/enable_stats.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c22
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fib_lookup.c187
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/htab_reuse.c101
-rw-r--r--tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_list.c51
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/metadata.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mmap.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/nested_trust.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_link.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pinning.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_opts.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rbtree.c117
-rw-r--r--tools/testing/selftests/bpf/prog_tests/recursion.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/setget_sockopt.c73
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_kfunc.c71
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_local_storage.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_bpf.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_global_funcs.c133
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trampoline_count.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c47
-rw-r--r--tools/testing/selftests/bpf/prog_tests/usdt.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/user_ringbuf.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verif_stats.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_attach.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_info.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_link.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_metadata.c409
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c63
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h32
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_syscall_macro.c26
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c2
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c80
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c162
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c38
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c17
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_common.h114
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_failure.c126
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_success.c426
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c27
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_success.c (renamed from tools/testing/selftests/bpf/progs/dummy_st_ops.c)19
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c455
-rw-r--r--tools/testing/selftests/bpf/progs/fib_lookup.c22
-rw-r--r--tools/testing/selftests/bpf/progs/htab_reuse.c19
-rw-r--r--tools/testing/selftests/bpf/progs/jit_probe_mem.c61
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c29
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.c2
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list_fail.c100
-rw-r--r--tools/testing/selftests/bpf/progs/lsm.c7
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr.c12
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_common.h12
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_failure.c33
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_success.c19
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h62
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree.c176
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c52
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c49
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c322
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c8
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h2
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_failure.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func1.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func10.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func11.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func12.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func13.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func14.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func15.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func16.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func17.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func2.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func3.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func4.c55
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func5.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func6.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func7.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func8.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func9.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c104
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c91
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c64
-rw-r--r--tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_vmlinux.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_vlan.c4
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_fail.c31
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_features.c269
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_hw_metadata.c85
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata.c64
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata2.c23
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xsk_xdp_progs.c30
-rw-r--r--tools/testing/selftests/bpf/test_cpp.cpp2
-rw-r--r--tools/testing/selftests/bpf/test_maps.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py10
-rw-r--r--tools/testing/selftests/bpf/test_progs.c42
-rw-r--r--tools/testing/selftests/bpf/test_progs.h2
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh15
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_tunnel.sh40
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c12
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_features.sh107
-rwxr-xr-xtools/testing/selftests/bpf/test_xsk.sh42
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c110
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_st_mem.c67
-rw-r--r--tools/testing/selftests/bpf/verifier/sleepable.c91
-rw-r--r--tools/testing/selftests/bpf/veristat.c4
-rwxr-xr-xtools/testing/selftests/bpf/vmtest.sh2
-rw-r--r--tools/testing/selftests/bpf/xdp_features.c699
-rw-r--r--tools/testing/selftests/bpf/xdp_features.h20
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c445
-rw-r--r--tools/testing/selftests/bpf/xdp_metadata.h15
-rw-r--r--tools/testing/selftests/bpf/xdp_synproxy.c16
-rw-r--r--tools/testing/selftests/bpf/xsk.c677
-rw-r--r--tools/testing/selftests/bpf/xsk.h97
-rwxr-xr-xtools/testing/selftests/bpf/xsk_prereqs.sh12
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c382
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h17
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh68
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh23
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh27
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh18
-rw-r--r--tools/testing/selftests/net/Makefile54
-rw-r--r--tools/testing/selftests/net/bpf/Makefile51
-rw-r--r--tools/testing/selftests/net/config3
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile1
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mdb.sh159
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mdb_max.sh1336
-rwxr-xr-xtools/testing/selftests/net/forwarding/lib.sh237
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_actions.sh53
-rw-r--r--tools/testing/selftests/net/ip_local_port_range.c447
-rwxr-xr-xtools/testing/selftests/net/ip_local_port_range.sh5
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh56
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c4
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh53
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh153
-rw-r--r--tools/testing/selftests/net/nat6to4.c (renamed from tools/testing/selftests/net/bpf/nat6to4.c)0
-rwxr-xr-xtools/testing/selftests/net/rps_default_mask.sh74
-rwxr-xr-xtools/testing/selftests/net/srv6_end_flavors_test.sh869
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c3
-rwxr-xr-xtools/testing/selftests/net/udpgro_frglist.sh8
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c6
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/rsvp.json203
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json227
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json94
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json184
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json140
-rw-r--r--tools/testing/vsock/Makefile3
-rw-r--r--tools/testing/vsock/README34
-rw-r--r--tools/testing/vsock/control.c28
-rw-r--r--tools/testing/vsock/control.h2
-rw-r--r--tools/testing/vsock/util.c13
-rw-r--r--tools/testing/vsock/util.h1
-rw-r--r--tools/testing/vsock/vsock_perf.c427
-rw-r--r--tools/testing/vsock/vsock_test.c197
1823 files changed, 155979 insertions, 43114 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net-peak_usb b/Documentation/ABI/testing/sysfs-class-net-peak_usb
new file mode 100644
index 000000000000..9e3d0bf4d4b2
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-peak_usb
@@ -0,0 +1,19 @@
+
+What: /sys/class/net/<iface>/peak_usb/can_channel_id
+Date: November 2022
+KernelVersion: 6.2
+Contact: Stephane Grosjean <s.grosjean@peak-system.com>
+Description:
+ PEAK PCAN-USB devices support user-configurable CAN channel
+ identifiers. Unlike a USB serial number, these identifiers
+ are writable and can be set per CAN interface. This means that
+ if a USB device exports multiple CAN interfaces, each of them
+ can be assigned a unique channel ID.
+ This attribute provides read-only access to the currently
+ configured value of the channel identifier. Depending on the
+ device type, the identifier is 8 or 32 bits long. The value
+ read from this attribute is always an 8-digit, 32-bit
+ hexadecimal value in big-endian format. If the device only
+ supports an 8-bit identifier, the upper 24 bits of the value
+ are set to zero.
+
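+ For example, a minimal user-space C sketch that reads back the
+ configured identifier (the interface name "can0" is assumed):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		char id[16];
+		FILE *f = fopen("/sys/class/net/can0/peak_usb/can_channel_id", "r");
+
+		if (!f)
+			return 1;
+		if (fgets(id, sizeof(id), f))
+			printf("channel id: %s", id);	/* e.g. "0000002a" */
+		return fclose(f) ? 1 : 0;
+	}
+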
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 993f3b6c10ff..5e4ee29cf393 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -557,6 +557,7 @@
Format: <string>
nosocket -- Disable socket memory accounting.
nokmem -- Disable kernel memory accounting.
+ nobpf -- Disable BPF memory accounting.
checkreqprot= [SELINUX] Set initial checkreqprot flag value.
Format: { "0" | "1" }
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 6394f5dc2303..466c560b0c30 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -215,6 +215,12 @@ rmem_max
The maximum receive socket buffer size in bytes.
+rps_default_mask
+----------------
+
+The default RPS CPU mask used on newly created network devices. An empty
+mask means RPS is disabled by default.
+
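+A minimal C sketch that enables RPS on CPUs 0 and 1 for all subsequently
+created devices (the mask is written as a hexadecimal CPU bitmap)::
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		FILE *f = fopen("/proc/sys/net/core/rps_default_mask", "w");
+
+		if (!f)
+			return 1;
+		fprintf(f, "3\n");	/* bits 0 and 1 => CPUs 0 and 1 */
+		return fclose(f) ? 1 : 0;
+	}
+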
tstamp_allow_data
-----------------
Allow processes to receive tx timestamps looped together with the original
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index cec2371173d7..bfff0e7e37c2 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -208,6 +208,10 @@ data structures and compile with kernel internal headers. Both of these
kernel internals are subject to change and can break with newer kernels
such that the program needs to be adapted accordingly.
+New BPF functionality is generally added through the use of kfuncs instead of
+new helpers. Kfuncs are not considered part of the stable API, and have their own
+lifecycle expectations as described in :ref:`BPF_kfunc_lifecycle_expectations`.
+
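+For example, a BPF program typically declares the kfuncs it intends to call as
+``extern`` symbols tagged with ``__ksym`` (a minimal sketch; the cpumask
+kfuncs shown here are documented in Documentation/bpf/cpumasks.rst)::
+
+    extern struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
+    extern void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
+
+    SEC("tp_btf/task_newtask")
+    int BPF_PROG(kfunc_example, struct task_struct *task, u64 clone_flags)
+    {
+            struct bpf_cpumask *mask = bpf_cpumask_create();
+
+            if (!mask)
+                    return -ENOMEM;
+            bpf_cpumask_release(mask);
+            return 0;
+    }
+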
Q: Are tracepoints part of the stable ABI?
------------------------------------------
A: NO. Tracepoints are tied to internal implementation details hence they are
@@ -236,8 +240,8 @@ A: NO. Classic BPF programs are converted into extended BPF instructions.
Q: Can BPF call arbitrary kernel functions?
-------------------------------------------
-A: NO. BPF programs can only call a set of helper functions which
-is defined for every program type.
+A: NO. BPF programs can only call specific functions exposed as BPF helpers or
+kfuncs. The set of available functions is defined for every program type.
Q: Can BPF overwrite arbitrary kernel memory?
---------------------------------------------
@@ -263,7 +267,12 @@ Q: New functionality via kernel modules?
Q: Can BPF functionality such as new program or map types, new
helpers, etc be added out of kernel module code?
-A: NO.
+A: Yes, through kfuncs and kptrs.
+
+The core BPF functionality such as program types, maps and helpers cannot be
+extended by kernel modules. However, modules can expose functionality to BPF
+programs by exporting kfuncs (which may return pointers to module-internal
+data structures as kptrs).
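+
+A minimal module-side sketch of exposing a kfunc (the ``bpf_foo_get()`` kfunc,
+the ``struct foo`` type and the lookup helper are all hypothetical)::
+
+    __bpf_kfunc struct foo *bpf_foo_get(u32 id)
+    {
+            /* Look up and take a reference on a module-internal object. */
+            return foo_find_and_get(id);   /* hypothetical helper */
+    }
+
+    BTF_SET8_START(foo_kfunc_ids)
+    BTF_ID_FLAGS(func, bpf_foo_get, KF_ACQUIRE | KF_RET_NULL)
+    BTF_SET8_END(foo_kfunc_ids)
+
+    static const struct btf_kfunc_id_set foo_kfunc_set = {
+            .owner = THIS_MODULE,
+            .set   = &foo_kfunc_ids,
+    };
+
+    /* Called from the module's init function: */
+    err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &foo_kfunc_set);
+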
Q: Directly calling kernel function is an ABI?
----------------------------------------------
@@ -278,7 +287,8 @@ kernel functions have already been used by other kernel tcp
cc (congestion-control) implementations. If any of these kernel
functions has changed, both the in-tree and out-of-tree kernel tcp cc
implementations have to be changed. The same goes for the bpf
-programs and they have to be adjusted accordingly.
+programs and they have to be adjusted accordingly. See
+:ref:`BPF_kfunc_lifecycle_expectations` for details.
Q: Attaching to arbitrary kernel functions is an ABI?
-----------------------------------------------------
@@ -340,6 +350,7 @@ compatibility for these features?
A: NO.
-Unlike map value types, there are no stability guarantees for this case. The
-whole API to work with allocated objects and any support for special fields
-inside them is unstable (since it is exposed through kfuncs).
+Unlike map value types, the API to work with allocated objects and any support
+for special fields inside them is exposed through kfuncs, and thus has the same
+lifecycle expectations as the kfuncs themselves. See
+:ref:`BPF_kfunc_lifecycle_expectations` for details.
diff --git a/Documentation/bpf/cpumasks.rst b/Documentation/bpf/cpumasks.rst
new file mode 100644
index 000000000000..24bef9cbbeee
--- /dev/null
+++ b/Documentation/bpf/cpumasks.rst
@@ -0,0 +1,393 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _cpumasks-header-label:
+
+==================
+BPF cpumask kfuncs
+==================
+
+1. Introduction
+===============
+
+``struct cpumask`` is a bitmap data structure in the kernel whose indices
+reflect the CPUs on the system. Commonly, cpumasks are used to track which CPUs
+a task is affinitized to, but they can also be used to track, for example,
+which cores are associated with a scheduling domain or which cores on a
+machine are idle.
+
+BPF provides programs with a set of :ref:`kfuncs-header-label` that can be
+used to allocate, mutate, query, and free cpumasks.
+
+2. BPF cpumask objects
+======================
+
+There are two different types of cpumasks that can be used by BPF programs.
+
+2.1 ``struct bpf_cpumask *``
+----------------------------
+
+``struct bpf_cpumask *`` is a cpumask that is allocated by BPF, on behalf of a
+BPF program, and whose lifecycle is entirely controlled by BPF. These cpumasks
+are RCU-protected, can be mutated, can be used as kptrs, and can be safely cast
+to a ``struct cpumask *``.
+
+2.1.1 ``struct bpf_cpumask *`` lifecycle
+----------------------------------------
+
+A ``struct bpf_cpumask *`` is allocated, acquired, and released, using the
+following functions:
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_create
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_acquire
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_release
+
+For example:
+
+.. code-block:: c
+
+ struct cpumask_map_value {
+ struct bpf_cpumask __kptr_ref * cpumask;
+ };
+
+ struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct cpumask_map_value);
+ __uint(max_entries, 65536);
+ } cpumask_map SEC(".maps");
+
+ static int cpumask_map_insert(struct bpf_cpumask *mask, u32 pid)
+ {
+ struct cpumask_map_value local, *v;
+ long status;
+ struct bpf_cpumask *old;
+ u32 key = pid;
+
+ local.cpumask = NULL;
+ status = bpf_map_update_elem(&cpumask_map, &key, &local, 0);
+ if (status) {
+ bpf_cpumask_release(mask);
+ return status;
+ }
+
+ v = bpf_map_lookup_elem(&cpumask_map, &key);
+ if (!v) {
+ bpf_cpumask_release(mask);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->cpumask, mask);
+ if (old)
+ bpf_cpumask_release(old);
+
+ return 0;
+ }
+
+ /**
+ * A sample tracepoint showing how a task's cpumask can be queried and
+ * recorded as a kptr.
+ */
+ SEC("tp_btf/task_newtask")
+ int BPF_PROG(record_task_cpumask, struct task_struct *task, u64 clone_flags)
+ {
+ struct bpf_cpumask *cpumask;
+ int ret;
+
+ cpumask = bpf_cpumask_create();
+ if (!cpumask)
+ return -ENOMEM;
+
+ if (!bpf_cpumask_full(task->cpus_ptr))
+ bpf_printk("task %s has restricted CPU affinity", task->comm);
+
+ bpf_cpumask_copy(cpumask, task->cpus_ptr);
+ return cpumask_map_insert(cpumask, task->pid);
+ }
+
+----
+
+2.1.2 ``struct bpf_cpumask *`` as kptrs
+---------------------------------------
+
+As mentioned and illustrated above, these ``struct bpf_cpumask *`` objects can
+also be stored in a map and used as kptrs. If a ``struct bpf_cpumask *`` is in
+a map, the reference can be removed from the map with bpf_kptr_xchg(), or
+opportunistically acquired with bpf_cpumask_kptr_get():
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_kptr_get
+
+Here is an example of a ``struct bpf_cpumask *`` being retrieved from a map:
+
+.. code-block:: c
+
+ /* struct containing the struct bpf_cpumask kptr which is stored in the map. */
+ struct cpumasks_kfunc_map_value {
+ struct bpf_cpumask __kptr_ref * bpf_cpumask;
+ };
+
+ /* The map containing struct cpumasks_kfunc_map_value entries. */
+ struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct cpumasks_kfunc_map_value);
+ __uint(max_entries, 1);
+ } cpumasks_kfunc_map SEC(".maps");
+
+ /* ... */
+
+ /**
+ * A simple example tracepoint program showing how a
+ * struct bpf_cpumask * kptr that is stored in a map can
+ * be acquired using the bpf_cpumask_kptr_get() kfunc.
+ */
+ SEC("tp_btf/cgroup_mkdir")
+ int BPF_PROG(cgrp_ancestor_example, struct cgroup *cgrp, const char *path)
+ {
+ struct bpf_cpumask *kptr;
+ struct cpumasks_kfunc_map_value *v;
+ u32 key = 0;
+
+ /* Assume a bpf_cpumask * kptr was previously stored in the map. */
+ v = bpf_map_lookup_elem(&cpumasks_kfunc_map, &key);
+ if (!v)
+ return -ENOENT;
+
+ /* Acquire a reference to the bpf_cpumask * kptr that's already stored in the map. */
+ kptr = bpf_cpumask_kptr_get(&v->bpf_cpumask);
+ if (!kptr)
+ /* If no bpf_cpumask was present in the map, it's because
+ * we're racing with another CPU that removed it with
+ * bpf_kptr_xchg() between the bpf_map_lookup_elem()
+ * above, and our call to bpf_cpumask_kptr_get().
+ * bpf_cpumask_kptr_get() internally safely handles this
+ * race, and will return NULL if the cpumask is no longer
+ * present in the map by the time we invoke the kfunc.
+ */
+ return -EBUSY;
+
+ /* Free the reference we just took above. Note that the
+ * original struct bpf_cpumask * kptr is still in the map. It will
+ * be freed either at a later time if another context deletes
+ * it from the map, or automatically by the BPF subsystem if
+ * it's still present when the map is destroyed.
+ */
+ bpf_cpumask_release(kptr);
+
+ return 0;
+ }
+
+----
+
+2.2 ``struct cpumask``
+----------------------
+
+``struct cpumask`` is the object that actually contains the cpumask bitmap
+being queried, mutated, etc. A ``struct bpf_cpumask`` wraps a ``struct
+cpumask``, which is why it's safe to cast it as such (note however that it is
+**not** safe to cast a ``struct cpumask *`` to a ``struct bpf_cpumask *``, and
+the verifier will reject any program that tries to do so).
+
+As we'll see below, any kfunc that mutates its cpumask argument will take a
+``struct bpf_cpumask *`` as that argument. Any argument that simply queries the
+cpumask will instead take a ``struct cpumask *``.
+
+3. cpumask kfuncs
+=================
+
+Above, we described the kfuncs that can be used to allocate, acquire, release,
+etc a ``struct bpf_cpumask *``. This section of the document will describe the
+kfuncs for mutating and querying cpumasks.
+
+3.1 Mutating cpumasks
+---------------------
+
+Some cpumask kfuncs are "read-only" in that they don't mutate any of their
+arguments, whereas others mutate at least one argument (which means that the
+argument must be a ``struct bpf_cpumask *``, as described above).
+
+This section will describe all of the cpumask kfuncs which mutate at least one
+argument. :ref:`cpumasks-querying-label` below describes the read-only kfuncs.
+
+3.1.1 Setting and clearing CPUs
+-------------------------------
+
+bpf_cpumask_set_cpu() and bpf_cpumask_clear_cpu() can be used to set and clear
+a CPU in a ``struct bpf_cpumask`` respectively:
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_set_cpu bpf_cpumask_clear_cpu
+
+These kfuncs are pretty straightforward, and can be used, for example, as
+follows:
+
+.. code-block:: c
+
+ /**
+ * A sample tracepoint showing how a cpumask can be queried.
+ */
+ SEC("tp_btf/task_newtask")
+ int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
+ {
+ struct bpf_cpumask *cpumask;
+
+ cpumask = bpf_cpumask_create();
+ if (!cpumask)
+ return -ENOMEM;
+
+ bpf_cpumask_set_cpu(0, cpumask);
+ if (!bpf_cpumask_test_cpu(0, (const struct cpumask *)cpumask))
+ /* Should never happen. */
+ goto release_exit;
+
+ bpf_cpumask_clear_cpu(0, cpumask);
+ if (bpf_cpumask_test_cpu(0, (const struct cpumask *)cpumask))
+ /* Should never happen. */
+ goto release_exit;
+
+ /* struct cpumask * pointers such as task->cpus_ptr can also be queried. */
+ if (bpf_cpumask_test_cpu(0, task->cpus_ptr))
+ bpf_printk("task %s can use CPU %d", task->comm, 0);
+
+ release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+ }
+
+----
+
+bpf_cpumask_test_and_set_cpu() and bpf_cpumask_test_and_clear_cpu() are
+complementary kfuncs that allow callers to atomically test and set (or clear)
+CPUs:
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_test_and_set_cpu bpf_cpumask_test_and_clear_cpu
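+
+For example, a minimal sketch (the program name here is hypothetical) that
+atomically tests and sets, and then tests and clears, CPU 0:
+
+.. code-block:: c
+
+    SEC("tp_btf/task_newtask")
+    int BPF_PROG(test_test_and_set, struct task_struct *task, u64 clone_flags)
+    {
+        struct bpf_cpumask *cpumask;
+
+        cpumask = bpf_cpumask_create();
+        if (!cpumask)
+            return -ENOMEM;
+
+        /* The mask is newly created, so CPU 0 cannot yet be set. */
+        if (bpf_cpumask_test_and_set_cpu(0, cpumask))
+            bpf_printk("CPU 0 was unexpectedly set");
+
+        /* CPU 0 was just set above, so this clears it and returns true. */
+        if (!bpf_cpumask_test_and_clear_cpu(0, cpumask))
+            bpf_printk("CPU 0 was unexpectedly clear");
+
+        bpf_cpumask_release(cpumask);
+        return 0;
+    }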
+
+----
+
+We can also set and clear entire ``struct bpf_cpumask *`` objects in one
+operation using bpf_cpumask_setall() and bpf_cpumask_clear():
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_setall bpf_cpumask_clear
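+
+As an illustrative sketch (the program name is hypothetical), an entire mask
+can be filled and then emptied as follows:
+
+.. code-block:: c
+
+    SEC("tp_btf/task_newtask")
+    int BPF_PROG(test_setall_clear, struct task_struct *task, u64 clone_flags)
+    {
+        struct bpf_cpumask *cpumask;
+
+        cpumask = bpf_cpumask_create();
+        if (!cpumask)
+            return -ENOMEM;
+
+        bpf_cpumask_setall(cpumask);
+        if (!bpf_cpumask_full((const struct cpumask *)cpumask))
+            bpf_printk("mask should be full");
+
+        bpf_cpumask_clear(cpumask);
+        if (!bpf_cpumask_empty((const struct cpumask *)cpumask))
+            bpf_printk("mask should be empty");
+
+        bpf_cpumask_release(cpumask);
+        return 0;
+    }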
+
+3.1.2 Operations between cpumasks
+---------------------------------
+
+In addition to setting and clearing individual CPUs in a single cpumask,
+callers can also perform bitwise operations between multiple cpumasks using
+bpf_cpumask_and(), bpf_cpumask_or(), and bpf_cpumask_xor():
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_and bpf_cpumask_or bpf_cpumask_xor
+
+The following is an example of how they may be used. Note that some of the
+kfuncs shown in this example will be covered in more detail below.
+
+.. code-block:: c
+
+ /**
+ * A sample tracepoint showing how a cpumask can be mutated using
+ * bitwise operators (and queried).
+ */
+ SEC("tp_btf/task_newtask")
+ int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
+ {
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+
+ mask1 = bpf_cpumask_create();
+ if (!mask1)
+ return -ENOMEM;
+
+ mask2 = bpf_cpumask_create();
+ if (!mask2) {
+ bpf_cpumask_release(mask1);
+ return -ENOMEM;
+ }
+
+ /* ...Safely create the other two (dst1 and dst2) masks... */
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+ bpf_cpumask_and(dst1, (const struct cpumask *)mask1, (const struct cpumask *)mask2);
+ if (!bpf_cpumask_empty((const struct cpumask *)dst1))
+ /* Should never happen. */
+ goto release_exit;
+
+ bpf_cpumask_or(dst1, (const struct cpumask *)mask1, (const struct cpumask *)mask2);
+ if (!bpf_cpumask_test_cpu(0, (const struct cpumask *)dst1))
+ /* Should never happen. */
+ goto release_exit;
+
+ if (!bpf_cpumask_test_cpu(1, (const struct cpumask *)dst1))
+ /* Should never happen. */
+ goto release_exit;
+
+ bpf_cpumask_xor(dst2, (const struct cpumask *)mask1, (const struct cpumask *)mask2);
+ if (!bpf_cpumask_equal((const struct cpumask *)dst1,
+ (const struct cpumask *)dst2))
+ /* Should never happen. */
+ goto release_exit;
+
+ release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+ }
+
+----
+
+The contents of an entire cpumask may be copied to another using
+bpf_cpumask_copy():
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_copy
+
+----
+
+.. _cpumasks-querying-label:
+
+3.2 Querying cpumasks
+---------------------
+
+In addition to the above kfuncs, there is also a set of read-only kfuncs that
+can be used to query the contents of cpumasks.
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_first bpf_cpumask_first_zero bpf_cpumask_test_cpu
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_equal bpf_cpumask_intersects bpf_cpumask_subset
+ bpf_cpumask_empty bpf_cpumask_full
+
+.. kernel-doc:: kernel/bpf/cpumask.c
+ :identifiers: bpf_cpumask_any bpf_cpumask_any_and
+
+----
+
+Some example usages of these querying kfuncs were shown above. We will not
+replicate those examples here. Note, however, that all of the aforementioned
+kfuncs are tested in `tools/testing/selftests/bpf/progs/cpumask_success.c`_, so
+please take a look there if you're looking for more examples of how they can be
+used.
+
+.. _tools/testing/selftests/bpf/progs/cpumask_success.c:
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/tools/testing/selftests/bpf/progs/cpumask_success.c
+
+
+4. Adding BPF cpumask kfuncs
+============================
+
+The set of supported BPF cpumask kfuncs is not (yet) a 1-1 match with the
+cpumask operations in include/linux/cpumask.h. Any of those cpumask operations
+could easily be encapsulated in a new kfunc if and when required. If you'd like
+to support a new cpumask operation, please feel free to submit a patch. If you
+do add a new cpumask kfunc, please document it here, and add any relevant
+selftest testcases to the cpumask selftest suite.
diff --git a/Documentation/bpf/graph_ds_impl.rst b/Documentation/bpf/graph_ds_impl.rst
new file mode 100644
index 000000000000..61274622b71d
--- /dev/null
+++ b/Documentation/bpf/graph_ds_impl.rst
@@ -0,0 +1,267 @@
+=========================
+BPF Graph Data Structures
+=========================
+
+This document describes implementation details of new-style "graph" data
+structures (linked_list, rbtree), with particular focus on the verifier's
+implementation of semantics specific to those data structures.
+
+Although no specific verifier code is referred to in this document, the document
+assumes that the reader has general knowledge of BPF verifier internals, BPF
+maps, and BPF program writing.
+
+Note that the intent of this document is to describe the current state of
+these graph data structures. **No guarantees** of stability for either
+semantics or APIs are made or implied here.
+
+.. contents::
+ :local:
+ :depth: 2
+
+Introduction
+------------
+
+The BPF map API has historically been the main way to expose data structures
+of various types for use within BPF programs. Some data structures fit naturally
+with the map API (HASH, ARRAY), others less so. Consequentially, programs
+interacting with the latter group of data structures can be hard to parse
+for kernel programmers without previous BPF experience.
+
+Luckily, some restrictions which necessitated the use of BPF map semantics are
+no longer relevant. With the introduction of kfuncs, kptrs, and the any-context
+BPF allocator, it is now possible to implement BPF data structures whose API
+and semantics more closely match those exposed to the rest of the kernel.
+
+Two such data structures - linked_list and rbtree - have many verification
+details in common. Because both have "root"s ("head" for linked_list) and
+"node"s, the verifier code and this document refer to common functionality
+as "graph_api", "graph_root", "graph_node", etc.
+
+Unless otherwise stated, examples and semantics below apply to both graph data
+structures.
+
+Unstable API
+------------
+
+Data structures implemented using the BPF map API have historically used BPF
+helper functions - either standard map API helpers like ``bpf_map_update_elem``
+or map-specific helpers. The new-style graph data structures instead use kfuncs
+to define their manipulation helpers. Because there are no stability guarantees
+for kfuncs, the API and semantics for these data structures can be evolved in
+a way that breaks backwards compatibility if necessary.
+
+Root and node types for the new data structures are opaquely defined in the
+``uapi/linux/bpf.h`` header.
+
+Locking
+-------
+
+The new-style data structures are intrusive and are defined similarly to their
+vanilla kernel counterparts:
+
+.. code-block:: c
+
+ struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+ };
+
+ struct bpf_spin_lock glock;
+ struct bpf_rb_root groot __contains(node_data, node);
+
+The "root" type for both linked_list and rbtree expects to be in a map_value
+which also contains a ``bpf_spin_lock`` - in the above example both global
+variables are placed in a single-value arraymap. The verifier considers this
+spin_lock to be associated with the ``bpf_rb_root`` by virtue of both being in
+the same map_value and will enforce that the correct lock is held when
+verifying BPF programs that manipulate the tree. Since this lock checking
+happens at verification time, there is no runtime penalty.
+
+Non-owning references
+---------------------
+
+**Motivation**
+
+Consider the following BPF code:
+
+.. code-block:: c
+
+ struct node_data *n = bpf_obj_new(typeof(*n)); /* ACQUIRED */
+
+ bpf_spin_lock(&lock);
+
+ bpf_rbtree_add(&tree, n); /* PASSED */
+
+ bpf_spin_unlock(&lock);
+
+From the verifier's perspective, the pointer ``n`` returned from ``bpf_obj_new``
+has type ``PTR_TO_BTF_ID | MEM_ALLOC``, with a ``btf_id`` of
+``struct node_data`` and a nonzero ``ref_obj_id``. Because it holds ``n``, the
+program owns the lifetime of the pointee (the object pointed to by ``n``).
+The BPF program must pass off ownership before exiting - either via
+``bpf_obj_drop``, which ``free``'s the object, or by adding it to ``tree`` with
+``bpf_rbtree_add``.
+
+(``ACQUIRED`` and ``PASSED`` comments in the example denote statements where
+"ownership is acquired" and "ownership is passed", respectively)
+
+What should the verifier do with ``n`` after ownership is passed off? If the
+object was ``free``'d with ``bpf_obj_drop`` the answer is obvious: the verifier
+should reject programs which attempt to access ``n`` after ``bpf_obj_drop`` as
+the object is no longer valid. The underlying memory may have been reused for
+some other allocation, unmapped, etc.
+
+When ownership is passed to ``tree`` via ``bpf_rbtree_add`` the answer is less
+obvious. The verifier could enforce the same semantics as for ``bpf_obj_drop``,
+but that would result in programs with useful, common coding patterns being
+rejected, e.g.:
+
+.. code-block:: c
+
+ int x;
+ struct node_data *n = bpf_obj_new(typeof(*n)); /* ACQUIRED */
+
+ bpf_spin_lock(&lock);
+
+ bpf_rbtree_add(&tree, n); /* PASSED */
+ x = n->data;
+ n->data = 42;
+
+ bpf_spin_unlock(&lock);
+
+Both the read from and write to ``n->data`` would be rejected. The verifier
+can do better, though, by taking advantage of two details:
+
+ * Graph data structure APIs can only be used when the ``bpf_spin_lock``
+ associated with the graph root is held
+
+ * Both graph data structures have pointer stability
+
+ * Because graph nodes are allocated with ``bpf_obj_new`` and
+ adding / removing from the root involves fiddling with the
+ ``bpf_{list,rb}_node`` field of the node struct, a graph node will
+ remain at the same address after either operation.
+
+Because the associated ``bpf_spin_lock`` must be held by any program adding
+or removing, if we're in the critical section bounded by that lock, we know
+that no other program can add or remove until the end of the critical section.
+This combined with pointer stability means that, until the critical section
+ends, we can safely access the graph node through ``n`` even after it was used
+to pass ownership.
+
+The verifier considers such a reference a *non-owning reference*. The ref
+returned by ``bpf_obj_new`` is accordingly considered an *owning reference*.
+Both terms currently only have meaning in the context of graph nodes and the
+graph API.
+
+**Details**
+
+Let's enumerate the properties of both types of references.
+
+*owning reference*
+
+ * This reference controls the lifetime of the pointee
+
+ * Ownership of pointee must be 'released' by passing it to some graph API
+ kfunc, or via ``bpf_obj_drop``, which ``free``'s the pointee
+
+ * If not released before program ends, verifier considers program invalid
+
+ * Access to the pointee's memory will not page fault
+
+*non-owning reference*
+
+ * This reference does not own the pointee
+
+ * It cannot be used to add the graph node to a graph root, nor ``free``'d via
+ ``bpf_obj_drop``
+
+ * No explicit control of lifetime, but can infer valid lifetime based on
+ non-owning ref existence (see explanation below)
+
+ * Access to the pointee's memory will not page fault
+
+From verifier's perspective non-owning references can only exist
+between spin_lock and spin_unlock. Why? After spin_unlock another program
+can do arbitrary operations on the data structure like removing and ``free``-ing
+via bpf_obj_drop. A non-owning ref to some chunk of memory that was removed,
+``free``'d, and reused via bpf_obj_new would point to an entirely different thing.
+Or the memory could go away.
+
+To prevent this logic violation all non-owning references are invalidated by the
+verifier after a critical section ends. This is necessary to ensure the "will
+not page fault" property of non-owning references. So if the verifier hasn't
+invalidated a non-owning ref, accessing it will not page fault.
+
+Currently ``bpf_obj_drop`` is not allowed in the critical section, so
+if there's a valid non-owning ref, we must be in a critical section, and can
+conclude that the ref's memory hasn't been dropped-and-``free``'d or
+dropped-and-reused.
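+
+To make this invalidation concrete, here is a sketch of a program that the
+verifier would reject for using a non-owning reference after the critical
+section has ended:
+
+.. code-block:: c
+
+    struct node_data *n = bpf_obj_new(typeof(*n));
+
+    bpf_spin_lock(&lock);
+
+    bpf_rbtree_add(&tree, n); /* n is now a non-owning reference */
+
+    bpf_spin_unlock(&lock); /* all non-owning references are invalidated */
+
+    n->data = 42; /* REJECTED: n is no longer a valid reference */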
+
+Any reference to a node that is in an rbtree _must_ be non-owning, since
+the tree has control of the pointee's lifetime. Similarly, any ref to a node
+that isn't in rbtree _must_ be owning. This results in a nice property:
+graph API add / remove implementations don't need to check if a node
+has already been added (or already removed), as the ownership model
+allows the verifier to prevent such a state from being valid by simply checking
+types.
+
+However, pointer aliasing poses an issue for the above "nice property".
+Consider the following example:
+
+.. code-block:: c
+
+ struct node_data *n, *m, *o, *p;
+ n = bpf_obj_new(typeof(*n)); /* 1 */
+
+ bpf_spin_lock(&lock);
+
+ bpf_rbtree_add(&tree, n); /* 2 */
+ m = bpf_rbtree_first(&tree); /* 3 */
+
+ o = bpf_rbtree_remove(&tree, n); /* 4 */
+ p = bpf_rbtree_remove(&tree, m); /* 5 */
+
+ bpf_spin_unlock(&lock);
+
+ bpf_obj_drop(o);
+ bpf_obj_drop(p); /* 6 */
+
+Assume the tree is empty before this program runs. If we track verifier state
+changes here using numbers in above comments:
+
+ 1) n is an owning reference
+
+ 2) n is a non-owning reference, it's been added to the tree
+
+ 3) n and m are non-owning references, they both point to the same node
+
+ 4) o is an owning reference, n and m non-owning, all point to same node
+
+ 5) o and p are owning, n and m non-owning, all point to the same node
+
+ 6) a double-free has occurred, since o and p point to same node and o was
+ ``free``'d in previous statement
+
+States 4 and 5 violate our "nice property", as there are non-owning refs to
+a node which is not in an rbtree. Statement 5 will try to remove a node which
+has already been removed as a result of this violation. State 6 is a dangerous
+double-free.
+
+At a minimum we should prevent state 6 from being possible. If we can't also
+prevent state 5 then we must abandon our "nice property" and check whether a
+node has already been removed at runtime.
+
+We prevent both by generalizing the "invalidate non-owning references" behavior
+of ``bpf_spin_unlock`` and doing similar invalidation after
+``bpf_rbtree_remove``. The logic here being that any graph API kfunc which:
+
+ * takes an arbitrary node argument
+
+ * removes it from the data structure
+
+ * returns an owning reference to the removed node
+
+May result in a state where some other non-owning reference points to the same
+node. So ``remove``-type kfuncs must be considered a non-owning reference
+invalidation point as well.
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index b81533d8b061..dbb39e8f9889 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -20,6 +20,7 @@ that goes into great technical depth about the BPF Architecture.
syscall_api
helpers
kfuncs
+ cpumasks
programs
maps
bpf_prog_run
diff --git a/Documentation/bpf/instruction-set.rst b/Documentation/bpf/instruction-set.rst
index e672d5ec6cc7..af515de5fc38 100644
--- a/Documentation/bpf/instruction-set.rst
+++ b/Documentation/bpf/instruction-set.rst
@@ -7,6 +7,11 @@ eBPF Instruction Set Specification, v1.0
This document specifies version 1.0 of the eBPF instruction set.
+Documentation conventions
+=========================
+
+For brevity, this document uses the type notation "u64", "u32", etc.
+to mean an unsigned integer whose width is the specified number of bits.
Registers and calling convention
================================
@@ -30,20 +35,56 @@ Instruction encoding
eBPF has two instruction encodings:
* the basic instruction encoding, which uses 64 bits to encode an instruction
-* the wide instruction encoding, which appends a second 64-bit immediate value
- (imm64) after the basic instruction for a total of 128 bits.
+* the wide instruction encoding, which appends a second 64-bit immediate (i.e.,
+ constant) value after the basic instruction for a total of 128 bits.
+
+The basic instruction encoding is as follows, where MSB and LSB mean the most significant
+bits and least significant bits, respectively:
+
+============= ======= ======= ======= ============
+32 bits (MSB) 16 bits 4 bits 4 bits 8 bits (LSB)
+============= ======= ======= ======= ============
+imm offset src_reg dst_reg opcode
+============= ======= ======= ======= ============
+
+**imm**
+ signed integer immediate value
-The basic instruction encoding looks as follows:
+**offset**
+ signed integer offset used with pointer arithmetic
-============= ======= =============== ==================== ============
-32 bits (MSB) 16 bits 4 bits 4 bits 8 bits (LSB)
-============= ======= =============== ==================== ============
-immediate offset source register destination register opcode
-============= ======= =============== ==================== ============
+**src_reg**
+ the source register number (0-10), except where otherwise specified
+ (`64-bit immediate instructions`_ reuse this field for other purposes)
+
+**dst_reg**
+ destination register number (0-10)
+
+**opcode**
+ operation to perform
Note that most instructions do not use all of the fields.
Unused fields shall be cleared to zero.
+As discussed below in `64-bit immediate instructions`_, a 64-bit immediate
+instruction uses a 64-bit immediate value that is constructed as follows.
+The 64 bits following the basic instruction contain a pseudo instruction
+using the same format but with opcode, dst_reg, src_reg, and offset all set to zero,
+and imm containing the high 32 bits of the immediate value.
+
+================= ==================
+64 bits (MSB) 64 bits (LSB)
+================= ==================
+basic instruction pseudo instruction
+================= ==================
+
+Thus the 64-bit immediate value is constructed as follows:
+
+ imm64 = (next_imm << 32) | imm
+
+where 'next_imm' refers to the imm value of the pseudo instruction
+following the basic instruction.
+
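+As an illustrative sketch (using ``struct bpf_insn`` from the Linux UAPI
+headers), a loader could reassemble the 64-bit immediate from such an
+instruction pair like so:
+
+.. code-block:: c
+
+    #include <stdint.h>
+    #include <linux/bpf.h>
+
+    /* insn points at the basic instruction; insn[1] is the pseudo
+     * instruction whose imm field holds the high 32 bits.
+     */
+    static uint64_t bpf_imm64(const struct bpf_insn *insn)
+    {
+        return ((uint64_t)(uint32_t)insn[1].imm << 32) | (uint32_t)insn[0].imm;
+    }
+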
Instruction classes
-------------------
@@ -71,27 +112,32 @@ For arithmetic and jump instructions (``BPF_ALU``, ``BPF_ALU64``, ``BPF_JMP`` an
============== ====== =================
4 bits (MSB) 1 bit 3 bits (LSB)
============== ====== =================
-operation code source instruction class
+code source instruction class
============== ====== =================
-The 4th bit encodes the source operand:
+**code**
+ the operation code, whose meaning varies by instruction class
- ====== ===== ========================================
- source value description
- ====== ===== ========================================
- BPF_K 0x00 use 32-bit immediate as source operand
- BPF_X 0x08 use 'src_reg' register as source operand
- ====== ===== ========================================
+**source**
+ the source operand location, which unless otherwise specified is one of:
-The four MSB bits store the operation code.
+ ====== ===== ==============================================
+ source value description
+ ====== ===== ==============================================
+ BPF_K 0x00 use 32-bit 'imm' value as source operand
+ BPF_X 0x08 use 'src_reg' register value as source operand
+ ====== ===== ==============================================
+**instruction class**
+ the instruction class (see `Instruction classes`_)
Arithmetic instructions
-----------------------
``BPF_ALU`` uses 32-bit wide operands while ``BPF_ALU64`` uses 64-bit wide operands for
otherwise identical operations.
-The 'code' field encodes the operation as below:
+The 'code' field encodes the operation as below, where 'src' and 'dst' refer
+to the values of the source and destination registers, respectively.
======== ===== ==========================================================
code value description
@@ -99,35 +145,49 @@ code value description
BPF_ADD 0x00 dst += src
BPF_SUB 0x10 dst -= src
BPF_MUL 0x20 dst \*= src
-BPF_DIV 0x30 dst /= src
+BPF_DIV 0x30 dst = (src != 0) ? (dst / src) : 0
BPF_OR 0x40 dst \|= src
BPF_AND 0x50 dst &= src
BPF_LSH 0x60 dst <<= src
BPF_RSH 0x70 dst >>= src
BPF_NEG 0x80 dst = ~src
-BPF_MOD 0x90 dst %= src
+BPF_MOD 0x90 dst = (src != 0) ? (dst % src) : dst
BPF_XOR 0xa0 dst ^= src
BPF_MOV 0xb0 dst = src
BPF_ARSH 0xc0 sign extending shift right
BPF_END 0xd0 byte swap operations (see `Byte swap instructions`_ below)
======== ===== ==========================================================
+Underflow and overflow are allowed during arithmetic operations, meaning
+the 64-bit or 32-bit value will wrap. If eBPF program execution would
+result in division by zero, the destination register is instead set to zero.
+If execution would result in modulo by zero, for ``BPF_ALU64`` the value of
+the destination register is unchanged whereas for ``BPF_ALU`` the upper
+32 bits of the destination register are zeroed.
+
``BPF_ADD | BPF_X | BPF_ALU`` means::
- dst_reg = (u32) dst_reg + (u32) src_reg;
+ dst = (u32) ((u32) dst + (u32) src)
+
+where '(u32)' indicates that the upper 32 bits are zeroed.
``BPF_ADD | BPF_X | BPF_ALU64`` means::
- dst_reg = dst_reg + src_reg
+ dst = dst + src
``BPF_XOR | BPF_K | BPF_ALU`` means::
- dst_reg = (u32) dst_reg ^ (u32) imm32
+ dst = (u32) dst ^ (u32) imm32
``BPF_XOR | BPF_K | BPF_ALU64`` means::
- dst_reg = dst_reg ^ imm32
+ dst = dst ^ imm32
+Also note that the division and modulo operations are unsigned. Thus, for
+``BPF_ALU``, 'imm' is first interpreted as an unsigned 32-bit value, whereas
+for ``BPF_ALU64``, 'imm' is first sign extended to 64 bits and the result
+interpreted as an unsigned 64-bit value. There are no instructions for
+signed division or modulo.
Byte swap instructions
~~~~~~~~~~~~~~~~~~~~~~
@@ -155,11 +215,11 @@ Examples:
``BPF_ALU | BPF_TO_LE | BPF_END`` with imm = 16 means::
- dst_reg = htole16(dst_reg)
+ dst = htole16(dst)
``BPF_ALU | BPF_TO_BE | BPF_END`` with imm = 64 means::
- dst_reg = htobe64(dst_reg)
+ dst = htobe64(dst)
Jump instructions
-----------------
@@ -234,15 +294,15 @@ instructions that transfer data between a register and memory.
``BPF_MEM | <size> | BPF_STX`` means::
- *(size *) (dst_reg + off) = src_reg
+ *(size *) (dst + offset) = src
``BPF_MEM | <size> | BPF_ST`` means::
- *(size *) (dst_reg + off) = imm32
+ *(size *) (dst + offset) = imm32
``BPF_MEM | <size> | BPF_LDX`` means::
- dst_reg = *(size *) (src_reg + off)
+ dst = *(size *) (src + offset)
Where size is one of: ``BPF_B``, ``BPF_H``, ``BPF_W``, or ``BPF_DW``.
@@ -276,11 +336,11 @@ BPF_XOR 0xa0 atomic xor
``BPF_ATOMIC | BPF_W | BPF_STX`` with 'imm' = BPF_ADD means::
- *(u32 *)(dst_reg + off16) += src_reg
+ *(u32 *)(dst + offset) += src
``BPF_ATOMIC | BPF_DW | BPF_STX`` with 'imm' = BPF ADD means::
- *(u64 *)(dst_reg + off16) += src_reg
+ *(u64 *)(dst + offset) += src
In addition to the simple atomic operations, there also is a modifier and
two complex atomic operations:
@@ -295,16 +355,16 @@ BPF_CMPXCHG 0xf0 | BPF_FETCH atomic compare and exchange
The ``BPF_FETCH`` modifier is optional for simple atomic operations, and
always set for the complex atomic operations. If the ``BPF_FETCH`` flag
-is set, then the operation also overwrites ``src_reg`` with the value that
+is set, then the operation also overwrites ``src`` with the value that
was in memory before it was modified.
-The ``BPF_XCHG`` operation atomically exchanges ``src_reg`` with the value
-addressed by ``dst_reg + off``.
+The ``BPF_XCHG`` operation atomically exchanges ``src`` with the value
+addressed by ``dst + offset``.
The ``BPF_CMPXCHG`` operation atomically compares the value addressed by
-``dst_reg + off`` with ``R0``. If they match, the value addressed by
-``dst_reg + off`` is replaced with ``src_reg``. In either case, the
-value that was at ``dst_reg + off`` before the operation is zero-extended
+``dst + offset`` with ``R0``. If they match, the value addressed by
+``dst + offset`` is replaced with ``src``. In either case, the
+value that was at ``dst + offset`` before the operation is zero-extended
and loaded back to ``R0``.
64-bit immediate instructions
@@ -317,7 +377,7 @@ There is currently only one such instruction.
``BPF_LD | BPF_DW | BPF_IMM`` means::
- dst_reg = imm64
+ dst = imm64
Legacy BPF Packet access instructions
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index 9fd7fb539f85..ca96ef3f6896 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -1,3 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _kfuncs-header-label:
+
=============================
BPF Kernel Functions (kfuncs)
=============================
@@ -9,7 +13,7 @@ BPF Kernel Functions or more commonly known as kfuncs are functions in the Linux
kernel which are exposed for use by BPF programs. Unlike normal BPF helpers,
kfuncs do not have a stable interface and can change from one kernel release to
another. Hence, BPF programs need to be updated in response to changes in the
-kernel.
+kernel. See :ref:`BPF_kfunc_lifecycle_expectations` for more information.
2. Defining a kfunc
===================
@@ -37,7 +41,7 @@ An example is given below::
__diag_ignore_all("-Wmissing-prototypes",
"Global kfuncs as their definitions will be in BTF");
- struct task_struct *bpf_find_get_task_by_vpid(pid_t nr)
+ __bpf_kfunc struct task_struct *bpf_find_get_task_by_vpid(pid_t nr)
{
return find_get_task_by_vpid(nr);
}
@@ -62,7 +66,7 @@ kfunc with a __tag, where tag may be one of the supported annotations.
This annotation is used to indicate a memory and size pair in the argument list.
An example is given below::
- void bpf_memzero(void *mem, int mem__sz)
+ __bpf_kfunc void bpf_memzero(void *mem, int mem__sz)
{
...
}
@@ -82,7 +86,7 @@ safety of the program.
An example is given below::
- void *bpf_obj_new(u32 local_type_id__k, ...)
+ __bpf_kfunc void *bpf_obj_new(u32 local_type_id__k, ...)
{
...
}
@@ -121,6 +125,20 @@ flags on a set of kfuncs as follows::
This set encodes the BTF ID of each kfunc listed above, and encodes the flags
along with it. Of course, it is also allowed to specify no flags.
+kfunc definitions should also always be annotated with the ``__bpf_kfunc``
+macro. This prevents issues such as the compiler inlining the kfunc if it's a
+static kernel function, or the function being elided in an LTO build as it's
+not used in the rest of the kernel. Developers should not manually add
+annotations to their kfunc to prevent these issues. If an annotation is
+required to prevent such an issue with your kfunc, it is a bug and should be
+added to the definition of the macro so that other kfuncs are similarly
+protected. An example is given below::
+
+ __bpf_kfunc struct task_struct *bpf_get_task_pid(s32 pid)
+ {
+ ...
+ }
+
2.4.1 KF_ACQUIRE flag
---------------------
@@ -163,7 +181,8 @@ KF_ACQUIRE and KF_RET_NULL flags.
The KF_TRUSTED_ARGS flag is used for kfuncs taking pointer arguments. It
indicates that the all pointer arguments are valid, and that all pointers to
BTF objects have been passed in their unmodified form (that is, at a zero
-offset, and without having been obtained from walking another pointer).
+offset, and without having been obtained from walking another pointer, with one
+exception described below).
There are two types of pointers to kernel objects which are considered "valid":
@@ -176,6 +195,25 @@ KF_TRUSTED_ARGS kfuncs, and may have a non-zero offset.
The definition of "valid" pointers is subject to change at any time, and has
absolutely no ABI stability guarantees.
+As mentioned above, a nested pointer obtained from walking a trusted pointer is
+no longer trusted, with one exception. If a struct type has a field that is
+guaranteed to be valid as long as its parent pointer is trusted, the
+``BTF_TYPE_SAFE_NESTED`` macro can be used to express that to the verifier as
+follows:
+
+.. code-block:: c
+
+ BTF_TYPE_SAFE_NESTED(struct task_struct) {
+ const cpumask_t *cpus_ptr;
+ };
+
+In other words, you must:
+
+1. Wrap the trusted pointer type in the ``BTF_TYPE_SAFE_NESTED`` macro.
+
+2. Specify the type and name of the trusted nested field. This field must match
+ the field in the original type definition exactly.
+
2.4.6 KF_SLEEPABLE flag
-----------------------
@@ -200,6 +238,28 @@ single argument which must be a trusted argument or a MEM_RCU pointer.
The argument may have reference count of 0 and the kfunc must take this
into consideration.
+.. _KF_deprecated_flag:
+
+2.4.9 KF_DEPRECATED flag
+------------------------
+
+The KF_DEPRECATED flag is used for kfuncs which are scheduled to be
+changed or removed in a subsequent kernel release. A kfunc that is
+marked with KF_DEPRECATED should also have any relevant information
+captured in its kernel doc. Such information typically includes the
+kfunc's expected remaining lifespan, a recommendation for new
+functionality that can replace it if any is available, and possibly a
+rationale for why it is being removed.
+
+Note that while on some occasions, a KF_DEPRECATED kfunc may continue to be
+supported and have its KF_DEPRECATED flag removed, it is likely to be far more
+difficult to remove a KF_DEPRECATED flag after it's been added than it is to
+prevent it from being added in the first place. As described in
+:ref:`BPF_kfunc_lifecycle_expectations`, users that rely on specific kfuncs are
+encouraged to make their use-cases known as early as possible, and participate
+in upstream discussions regarding whether to keep, change, deprecate, or remove
+those kfuncs if and when such discussions occur.
+
2.5 Registering the kfuncs
--------------------------
@@ -223,14 +283,150 @@ type. An example is shown below::
}
late_initcall(init_subsystem);
-3. Core kfuncs
+2.6 Specifying no-cast aliases with ___init
+--------------------------------------------
+
+The verifier will always enforce that the BTF type of a pointer passed to a
+kfunc by a BPF program, matches the type of pointer specified in the kfunc
+definition. The verifier does, however, allow types that are equivalent
+according to the C standard to be passed to the same kfunc arg, even if their
+BTF_IDs differ.
+
+For example, for the following type definition:
+
+.. code-block:: c
+
+ struct bpf_cpumask {
+ cpumask_t cpumask;
+ refcount_t usage;
+ };
+
+The verifier would allow a ``struct bpf_cpumask *`` to be passed to a kfunc
+taking a ``cpumask_t *`` (which is a typedef of ``struct cpumask *``). For
+instance, both ``struct cpumask *`` and ``struct bpf_cpumask *`` can be passed
+to bpf_cpumask_test_cpu().
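+
+For instance, a minimal sketch that passes the verifier's type check despite
+the cast from ``struct bpf_cpumask *``:
+
+.. code-block:: c
+
+    struct bpf_cpumask *mask = bpf_cpumask_create();
+
+    if (mask) {
+        /* Allowed: struct bpf_cpumask is treated as equivalent to
+         * the cpumask_t that it wraps.
+         */
+        bpf_cpumask_test_cpu(0, (const struct cpumask *)mask);
+        bpf_cpumask_release(mask);
+    }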
+
+In some cases, this type-aliasing behavior is not desired. ``struct
+nf_conn___init`` is one such example:
+
+.. code-block:: c
+
+ struct nf_conn___init {
+ struct nf_conn ct;
+ };
+
+The C standard would consider these types to be equivalent, but it would not
+always be safe to pass either type to a trusted kfunc. ``struct
+nf_conn___init`` represents an allocated ``struct nf_conn`` object that has
+*not yet been initialized*, so it would therefore be unsafe to pass a ``struct
+nf_conn___init *`` to a kfunc that's expecting a fully initialized ``struct
+nf_conn *`` (e.g. ``bpf_ct_change_timeout()``).
+
+In order to accommodate such requirements, the verifier will enforce strict
+PTR_TO_BTF_ID type matching if two types have the exact same name, with one
+being suffixed with ``___init``.
+
+.. _BPF_kfunc_lifecycle_expectations:
+
+3. kfunc lifecycle expectations
+===============================
+
+kfuncs provide a kernel <-> kernel API, and thus are not bound by any of the
+strict stability restrictions associated with kernel <-> user UAPIs. This means
+they can be thought of as similar to EXPORT_SYMBOL_GPL, and can therefore be
+modified or removed by a maintainer of the subsystem they're defined in when
+it's deemed necessary.
+
+Like any other change to the kernel, maintainers will not change or remove a
+kfunc without having a reasonable justification. Whether or not they'll choose
+to change a kfunc will ultimately depend on a variety of factors, such as how
+widely used the kfunc is, how long the kfunc has been in the kernel, whether an
+alternative kfunc exists, what the norm is in terms of stability for the
+subsystem in question, and of course what the technical cost is of continuing
+to support the kfunc.
+
+There are several implications of this:
+
+a) kfuncs that are widely used or have been in the kernel for a long time will
+ be more difficult to justify being changed or removed by a maintainer. In
+ other words, kfuncs that are known to have a lot of users and provide
+ significant value provide stronger incentives for maintainers to invest the
+ time and complexity in supporting them. It is therefore important for
+ developers that are using kfuncs in their BPF programs to communicate and
+ explain how and why those kfuncs are being used, and to participate in
+ discussions regarding those kfuncs when they occur upstream.
+
+b) Unlike regular kernel symbols marked with EXPORT_SYMBOL_GPL, BPF programs
+ that call kfuncs are generally not part of the kernel tree. This means that
+ refactoring cannot typically change callers in-place when a kfunc changes,
+ as is done for e.g. an upstreamed driver being updated in place when a
+ kernel symbol is changed.
+
+ Unlike with regular kernel symbols, this is expected behavior for BPF
+ symbols, and out-of-tree BPF programs that use kfuncs should be considered
+ relevant to discussions and decisions around modifying and removing those
+ kfuncs. The BPF community will take an active role in participating in
+ upstream discussions when necessary to ensure that the perspectives of such
+ users are taken into account.
+
+c) A kfunc will never have any hard stability guarantees. BPF APIs cannot and
+ will not ever hard-block a change in the kernel purely for stability
+ reasons. That being said, kfuncs are features that are meant to solve
+ problems and provide value to users. The decision of whether to change or
+ remove a kfunc is a multivariate technical decision that is made on a
+ case-by-case basis, and which is informed by data points such as those
+ mentioned above. It is expected that a kfunc being removed or changed with
+ no warning will not be a common occurrence or take place without sound
+ justification, but it is a possibility that must be accepted if one is to
+ use kfuncs.
+
+3.1 kfunc deprecation
+---------------------
+
+As described above, while sometimes a maintainer may find that a kfunc must be
+changed or removed immediately to accommodate some changes in their subsystem,
+usually kfuncs will be able to accommodate a longer and more measured
+deprecation process. For example, if a new kfunc comes along which provides
+superior functionality to an existing kfunc, the existing kfunc may be
+deprecated for some period of time to allow users to migrate their BPF programs
+to use the new one. Or, if a kfunc has no known users, a decision may be made
+to remove the kfunc (without providing an alternative API) after some
+deprecation period so as to provide users with a window to notify the kfunc
+maintainer if it turns out that the kfunc is actually being used.
+
+It's expected that the common case will be that kfuncs will go through a
+deprecation period rather than being changed or removed without warning. As
+described in :ref:`KF_deprecated_flag`, the kfunc framework provides the
+KF_DEPRECATED flag to kfunc developers to signal to users that a kfunc has been
+deprecated. Once a kfunc has been marked with KF_DEPRECATED, the following
+procedure is followed for removal:
+
+1. Any relevant information for deprecated kfuncs is documented in the kfunc's
+ kernel docs. This documentation will typically include the kfunc's expected
+ remaining lifespan, a recommendation for new functionality that can replace
+ the usage of the deprecated function (or an explanation as to why no such
+ replacement exists), etc.
+
+2. The deprecated kfunc is kept in the kernel for some period of time after it
+ was first marked as deprecated. This time period will be chosen on a
+ case-by-case basis, and will typically depend on how widespread the use of
+ the kfunc is, how long it has been in the kernel, and how hard it is to move
+ to alternatives. This deprecation time period is "best effort", and as
+ described :ref:`above<BPF_kfunc_lifecycle_expectations>`, circumstances may
+ sometimes dictate that the kfunc be removed before the full intended
+ deprecation period has elapsed.
+
+3. After the deprecation period the kfunc will be removed. At this point, BPF
+ programs calling the kfunc will be rejected by the verifier.
+
+4. Core kfuncs
==============
The BPF subsystem provides a number of "core" kfuncs that are potentially
applicable to a wide variety of different possible use cases and programs.
Those kfuncs are documented here.
-3.1 struct task_struct * kfuncs
+4.1 struct task_struct * kfuncs
-------------------------------
There are a number of kfuncs that allow ``struct task_struct *`` objects to be
@@ -306,7 +502,7 @@ Here is an example of it being used:
return 0;
}
-3.2 struct cgroup * kfuncs
+4.2 struct cgroup * kfuncs
--------------------------
``struct cgroup *`` objects also have acquire and release functions:
@@ -420,3 +616,10 @@ the verifier. bpf_cgroup_ancestor() can be used as follows:
bpf_cgroup_release(parent);
return 0;
}
+
+4.3 struct cpumask * kfuncs
+---------------------------
+
+BPF provides a set of kfuncs that can be used to query, allocate, mutate, and
+destroy struct cpumask * objects. Please refer to :ref:`cpumasks-header-label`
+for more details.
diff --git a/Documentation/bpf/libbpf/libbpf_naming_convention.rst b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
index c5ac97f3d4c4..b5b41b61b3c0 100644
--- a/Documentation/bpf/libbpf/libbpf_naming_convention.rst
+++ b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
@@ -83,8 +83,8 @@ This prevents from accidentally exporting a symbol, that is not supposed
to be a part of ABI what, in turn, improves both libbpf developer- and
user-experiences.
-ABI versionning
----------------
+ABI versioning
+--------------
To make future ABI extensions possible libbpf ABI is versioned.
Versioning is implemented by ``libbpf.map`` version script that is
@@ -148,7 +148,7 @@ API documentation convention
The libbpf API is documented via comments above definitions in
header files. These comments can be rendered by doxygen and sphinx
for well organized html output. This section describes the
-convention in which these comments should be formated.
+convention in which these comments should be formatted.
Here is an example from btf.h:
diff --git a/Documentation/bpf/map_sockmap.rst b/Documentation/bpf/map_sockmap.rst
new file mode 100644
index 000000000000..cc92047c6630
--- /dev/null
+++ b/Documentation/bpf/map_sockmap.rst
@@ -0,0 +1,498 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+.. Copyright Red Hat
+
+==============================================
+BPF_MAP_TYPE_SOCKMAP and BPF_MAP_TYPE_SOCKHASH
+==============================================
+
+.. note::
+ - ``BPF_MAP_TYPE_SOCKMAP`` was introduced in kernel version 4.14
+ - ``BPF_MAP_TYPE_SOCKHASH`` was introduced in kernel version 4.18
+
+``BPF_MAP_TYPE_SOCKMAP`` and ``BPF_MAP_TYPE_SOCKHASH`` maps can be used to
+redirect skbs between sockets or to apply policy at the socket level based on
+the result of a BPF (verdict) program with the help of the BPF helpers
+``bpf_sk_redirect_map()``, ``bpf_sk_redirect_hash()``,
+``bpf_msg_redirect_map()`` and ``bpf_msg_redirect_hash()``.
+
+``BPF_MAP_TYPE_SOCKMAP`` is backed by an array that uses an integer key as the
+index to look up a reference to a ``struct sock``. The map values are socket
+descriptors. Similarly, ``BPF_MAP_TYPE_SOCKHASH`` is a hash backed BPF map that
+holds references to sockets via their socket descriptors.
+
+.. note::
+ The value type is either __u32 or __u64; the latter (__u64) is to support
+ returning socket cookies to userspace. Returning the ``struct sock *`` that
+ the map holds to user-space is neither safe nor useful.
+
+These maps may have BPF programs attached to them, specifically a parser program
+and a verdict program. The parser program determines how much data has been
+parsed and therefore how much data needs to be queued to come to a verdict. The
+verdict program is essentially the redirect program and can return a verdict
+of ``__SK_DROP``, ``__SK_PASS``, or ``__SK_REDIRECT``.
+
+When a socket is inserted into one of these maps, its socket callbacks are
+replaced and a ``struct sk_psock`` is attached to it. Additionally, this
+``sk_psock`` inherits the programs that are attached to the map.
+
+A sock object may be in multiple maps, but can only inherit a single
+parser or verdict program. If adding a sock object to a map would result
+in having multiple parser programs, the update will return an EBUSY error.
+
+The supported programs to attach to these maps are:
+
+.. code-block:: c
+
+ struct sk_psock_progs {
+ struct bpf_prog *msg_parser;
+ struct bpf_prog *stream_parser;
+ struct bpf_prog *stream_verdict;
+ struct bpf_prog *skb_verdict;
+ };
+
+.. note::
+ Users are not allowed to attach ``stream_verdict`` and ``skb_verdict``
+ programs to the same map.
+
+The attach types for the map programs are:
+
+- ``msg_parser`` program - ``BPF_SK_MSG_VERDICT``.
+- ``stream_parser`` program - ``BPF_SK_SKB_STREAM_PARSER``.
+- ``stream_verdict`` program - ``BPF_SK_SKB_STREAM_VERDICT``.
+- ``skb_verdict`` program - ``BPF_SK_SKB_VERDICT``.
+
+There are additional helpers available to use with the parser and verdict
+programs: ``bpf_msg_apply_bytes()`` and ``bpf_msg_cork_bytes()``. With
+``bpf_msg_apply_bytes()`` BPF programs can tell the infrastructure how many
+bytes the given verdict should apply to. The helper ``bpf_msg_cork_bytes()``
+handles a different case where a BPF program cannot reach a verdict on a msg
+until it receives more bytes AND the program doesn't want to forward the packet
+until it is known to be good.
+
+Finally, the helpers ``bpf_msg_pull_data()`` and ``bpf_msg_push_data()`` are
+available to ``BPF_PROG_TYPE_SK_MSG`` BPF programs to pull in data and set the
+start and end pointers to given values or to add metadata to the ``struct
+sk_msg_buff *msg``.
+
+All these helpers will be described in more detail below.
+
+Usage
+=====
+Kernel BPF
+----------
+bpf_msg_redirect_map()
+^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
+
+This helper is used in programs implementing policies at the socket level. If
+the message ``msg`` is allowed to pass (i.e., if the verdict BPF program
+returns ``SK_PASS``), redirect it to the socket referenced by ``map`` (of type
+``BPF_MAP_TYPE_SOCKMAP``) at index ``key``. Both ingress and egress interfaces
+can be used for redirection. The ``BPF_F_INGRESS`` value in ``flags`` is used
+to select the ingress path otherwise the egress path is selected. This is the
+only flag supported for now.
+
+Returns ``SK_PASS`` on success, or ``SK_DROP`` on error.
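+
+A minimal sketch of this helper in a ``BPF_PROG_TYPE_SK_MSG`` program (the
+map and program names are hypothetical):
+
+.. code-block:: c
+
+    struct {
+        __uint(type, BPF_MAP_TYPE_SOCKMAP);
+        __uint(max_entries, 1);
+        __type(key, __u32);
+        __type(value, __u64);
+    } sock_map_tx SEC(".maps");
+
+    SEC("sk_msg")
+    int bpf_prog_msg_verdict(struct sk_msg_md *msg)
+    {
+        __u32 idx = 0;
+
+        /* Redirect the message to the socket at index 0, ingress side. */
+        return bpf_msg_redirect_map(msg, &sock_map_tx, idx, BPF_F_INGRESS);
+    }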
+
+bpf_sk_redirect_map()
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
+
+Redirect the packet to the socket referenced by ``map`` (of type
+``BPF_MAP_TYPE_SOCKMAP``) at index ``key``. Both ingress and egress interfaces
+can be used for redirection. The ``BPF_F_INGRESS`` value in ``flags`` is used
+to select the ingress path otherwise the egress path is selected. This is the
+only flag supported for now.
+
+Returns ``SK_PASS`` on success, or ``SK_DROP`` on error.
+
+bpf_map_lookup_elem()
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
+
+Socket entries of type ``struct sock *`` can be retrieved using the
+``bpf_map_lookup_elem()`` helper.
+
+bpf_sock_map_update()
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+
+Add an entry to, or update a ``map`` referencing sockets. The ``skops`` is used
+as a new value for the entry associated to ``key``. The ``flags`` argument can
+be one of the following:
+
+- ``BPF_ANY``: Create a new element or update an existing element.
+- ``BPF_NOEXIST``: Create a new element only if it did not exist.
+- ``BPF_EXIST``: Update an existing element.
+
+If the ``map`` has BPF programs (parser and verdict), those will be inherited
+by the socket being added. If the socket is already attached to BPF programs,
+this results in an error.
+
+Returns 0 on success, or a negative error in case of failure.
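+
+For example, a sketch of a ``BPF_PROG_TYPE_SOCK_OPS`` program that inserts
+newly established passive sockets into a sockmap (``sock_map_rx`` is assumed
+to be declared as in the Examples section below):
+
+.. code-block:: c
+
+    SEC("sockops")
+    int bpf_prog_sockops(struct bpf_sock_ops *skops)
+    {
+        __u32 key = 0;
+
+        if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
+            bpf_sock_map_update(skops, &sock_map_rx, &key, BPF_NOEXIST);
+
+        return 0;
+    }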
+
+bpf_sock_hash_update()
+^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+
+Add an entry to, or update a sockhash ``map`` referencing sockets. The ``skops``
+is used as a new value for the entry associated to ``key``.
+
+The ``flags`` argument can be one of the following:
+
+- ``BPF_ANY``: Create a new element or update an existing element.
+- ``BPF_NOEXIST``: Create a new element only if it did not exist.
+- ``BPF_EXIST``: Update an existing element.
+
+If the ``map`` has BPF programs (parser and verdict), those will be inherited
+by the socket being added. If the socket is already attached to BPF programs,
+this results in an error.
+
+Returns 0 on success, or a negative error in case of failure.
+
+bpf_msg_redirect_hash()
+^^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
+
+This helper is used in programs implementing policies at the socket level. If
+the message ``msg`` is allowed to pass (i.e., if the verdict BPF program returns
+``SK_PASS``), redirect it to the socket referenced by ``map`` (of type
+``BPF_MAP_TYPE_SOCKHASH``) using hash ``key``. Both ingress and egress
+interfaces can be used for redirection. The ``BPF_F_INGRESS`` value in
+``flags`` is used to select the ingress path otherwise the egress path is
+selected. This is the only flag supported for now.
+
+Returns ``SK_PASS`` on success, or ``SK_DROP`` on error.
+
+bpf_sk_redirect_hash()
+^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
+
+This helper is used in programs implementing policies at the skb socket level.
+If the sk_buff ``skb`` is allowed to pass (i.e., if the verdict BPF program
+returns ``SK_PASS``), redirect it to the socket referenced by ``map`` (of type
+``BPF_MAP_TYPE_SOCKHASH``) using hash ``key``. Both ingress and egress
+interfaces can be used for redirection. The ``BPF_F_INGRESS`` value in
+``flags`` is used to select the ingress path otherwise the egress path is
+selected. This is the only flag supported for now.
+
+Returns ``SK_PASS`` on success, or ``SK_DROP`` on error.
+
+bpf_msg_apply_bytes()
+^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
+
+For socket policies, apply the verdict of the BPF program to the next (number
+of ``bytes``) of message ``msg``. For example, this helper can be used in the
+following cases:
+
+- A single ``sendmsg()`` or ``sendfile()`` system call contains multiple
+ logical messages that the BPF program is supposed to read and for which it
+ should apply a verdict.
+- A BPF program only cares to read the first ``bytes`` of a ``msg``. If the
+ message has a large payload, then setting up and calling the BPF program
+ repeatedly for all bytes, even though the verdict is already known, would
+ create unnecessary overhead.
+
+Returns 0
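+
+For example, a minimal sketch (program name hypothetical) in which the
+verdict for the next 1024 bytes is decided up front:
+
+.. code-block:: c
+
+    SEC("sk_msg")
+    int bpf_prog_apply_bytes(struct sk_msg_md *msg)
+    {
+        /* Apply this verdict to the next 1024 bytes so the program is
+         * not re-run for every chunk of a large payload.
+         */
+        bpf_msg_apply_bytes(msg, 1024);
+
+        return SK_PASS;
+    }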
+
+bpf_msg_cork_bytes()
+^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
+
+For socket policies, prevent the execution of the verdict BPF program for
+message ``msg`` until the number of ``bytes`` have been accumulated.
+
+This can be used when one needs a specific number of bytes before a verdict can
+be assigned, even if the data spans multiple ``sendmsg()`` or ``sendfile()``
+calls.
+
+Returns 0
+
+bpf_msg_pull_data()
+^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
+
+For socket policies, pull in non-linear data from user space for ``msg`` and set
+pointers ``msg->data`` and ``msg->data_end`` to ``start`` and ``end`` bytes
+offsets into ``msg``, respectively.
+
+If a program of type ``BPF_PROG_TYPE_SK_MSG`` is run on a ``msg`` it can only
+parse data that the (``data``, ``data_end``) pointers have already consumed.
+For ``sendmsg()`` hooks this is likely the first scatterlist element. But for
+calls relying on the ``sendpage`` handler (e.g., ``sendfile()``) this will be
+the range (**0**, **0**) because the data is shared with user space and by
+default the objective is to avoid allowing user space to modify data while (or
+after) BPF verdict is being decided. This helper can be used to pull in data
+and to set the start and end pointers to given values. Data will be copied if
+necessary (i.e., if data was not linear and if start and end pointers do not
+point to the same chunk).
+
+A call to this helper may change the underlying packet buffer.
+Therefore, at load time, all checks on pointers previously done by the verifier
+are invalidated and must be performed again, if the helper is used in
+combination with direct packet access.
+
+All values for ``flags`` are reserved for future usage, and must be left at
+zero.
+
+Returns 0 on success, or a negative error in case of failure.
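+
+The following sketch (program name hypothetical) pulls in the first 16 bytes
+of a message so they can be read through the data pointers:
+
+.. code-block:: c
+
+    SEC("sk_msg")
+    int bpf_prog_pull(struct sk_msg_md *msg)
+    {
+        void *data, *data_end;
+
+        /* Make bytes [0, 16) of the message linear and readable. */
+        if (bpf_msg_pull_data(msg, 0, 16, 0))
+            return SK_DROP;
+
+        data = msg->data;
+        data_end = msg->data_end;
+        if (data + 16 > data_end)
+            return SK_DROP;
+
+        /* ... inspect the first 16 bytes here ... */
+        return SK_PASS;
+    }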
+
+bpf_map_lookup_elem()
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: c
+
+ void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
+
+Look up a socket entry in the sockmap or sockhash map.
+
+Returns the socket entry associated to ``key``, or NULL if no entry was found.
+
+bpf_map_update_elem()
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
+
+Add or update a socket entry in a sockmap or sockhash.
+
+The flags argument can be one of the following:
+
+- BPF_ANY: Create a new element or update an existing element.
+- BPF_NOEXIST: Create a new element only if it did not exist.
+- BPF_EXIST: Update an existing element.
+
+Returns 0 on success, or a negative error in case of failure.
+
+bpf_map_delete_elem()
+^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ long bpf_map_delete_elem(struct bpf_map *map, const void *key)
+
+Delete a socket entry from a sockmap or a sockhash.
+
+Returns 0 on success, or a negative error in case of failure.
+
+User space
+----------
+bpf_map_update_elem()
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags)
+
+Sockmap entries can be added or updated using the ``bpf_map_update_elem()``
+function. The ``key`` parameter is the index value of the sockmap array, and
+the ``value`` parameter is the FD of that socket.
+
+Under the hood, the sockmap update function uses the socket FD value to
+retrieve the associated socket and its attached psock.
+
+The flags argument can be one of the following:
+
+- BPF_ANY: Create a new element or update an existing element.
+- BPF_NOEXIST: Create a new element only if it did not exist.
+- BPF_EXIST: Update an existing element.
+
+bpf_map_lookup_elem()
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ int bpf_map_lookup_elem(int fd, const void *key, void *value)
+
+Sockmap entries can be retrieved using the ``bpf_map_lookup_elem()`` function.
+
+.. note::
+ The entry returned is a socket cookie rather than a socket itself.
+
+bpf_map_delete_elem()
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: c
+
+ int bpf_map_delete_elem(int fd, const void *key)
+
+Sockmap entries can be deleted using the ``bpf_map_delete_elem()``
+function.
+
+Returns 0 on success, or a negative error in case of failure.
+
+Examples
+========
+
+Kernel BPF
+----------
+Several examples of the use of sockmap APIs can be found in:
+
+- `tools/testing/selftests/bpf/progs/test_sockmap_kern.h`_
+- `tools/testing/selftests/bpf/progs/sockmap_parse_prog.c`_
+- `tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c`_
+- `tools/testing/selftests/bpf/progs/test_sockmap_listen.c`_
+- `tools/testing/selftests/bpf/progs/test_sockmap_update.c`_
+
+The following code snippet shows how to declare a sockmap.
+
+.. code-block:: c
+
+ struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+ } sock_map_rx SEC(".maps");
+
+The following code snippet shows a sample parser program.
+
+.. code-block:: c
+
+ SEC("sk_skb/stream_parser")
+ int bpf_prog_parser(struct __sk_buff *skb)
+ {
+ return skb->len;
+ }
+
+The following code snippet shows a simple verdict program that interacts with a
+sockmap to redirect traffic to another socket based on the local port.
+
+.. code-block:: c
+
+ SEC("sk_skb/stream_verdict")
+ int bpf_prog_verdict(struct __sk_buff *skb)
+ {
+ __u32 lport = skb->local_port;
+ __u32 idx = 0;
+
+ if (lport == 10000)
+ return bpf_sk_redirect_map(skb, &sock_map_rx, idx, 0);
+
+ return SK_PASS;
+ }
+
+The following code snippet shows how to declare a sockhash map.
+
+.. code-block:: c
+
+ struct socket_key {
+ __u32 src_ip;
+ __u32 dst_ip;
+ __u32 src_port;
+ __u32 dst_port;
+ };
+
+ struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 1);
+ __type(key, struct socket_key);
+ __type(value, __u64);
+ } sock_hash_rx SEC(".maps");
+
+The following code snippet shows a simple verdict program that interacts with a
+sockhash to redirect traffic to another socket based on a hash of some of the
+skb parameters.
+
+.. code-block:: c
+
+ static inline
+ void extract_socket_key(struct __sk_buff *skb, struct socket_key *key)
+ {
+ key->src_ip = skb->remote_ip4;
+ key->dst_ip = skb->local_ip4;
+ key->src_port = skb->remote_port >> 16;
+ key->dst_port = (bpf_htonl(skb->local_port)) >> 16;
+ }
+
+ SEC("sk_skb/stream_verdict")
+ int bpf_prog_verdict(struct __sk_buff *skb)
+ {
+ struct socket_key key;
+
+ extract_socket_key(skb, &key);
+
+ return bpf_sk_redirect_hash(skb, &sock_hash_rx, &key, 0);
+ }
+
+User space
+----------
+Several examples of the use of sockmap APIs can be found in:
+
+- `tools/testing/selftests/bpf/prog_tests/sockmap_basic.c`_
+- `tools/testing/selftests/bpf/test_sockmap.c`_
+- `tools/testing/selftests/bpf/test_maps.c`_
+
+The following code sample shows how to create a sockmap, attach parser and
+verdict programs, and add a socket entry.
+
+.. code-block:: c
+
+ int create_sample_sockmap(int sock, int parse_prog_fd, int verdict_prog_fd)
+ {
+ int index = 0;
+ int map, err;
+
+ map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int), sizeof(int), 1, NULL);
+ if (map < 0) {
+ fprintf(stderr, "Failed to create sockmap: %s\n", strerror(errno));
+ return -1;
+ }
+
+ err = bpf_prog_attach(parse_prog_fd, map, BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err) {
+ fprintf(stderr, "Failed to attach parser program to map: %s\n", strerror(errno));
+ goto out;
+ }
+
+ err = bpf_prog_attach(verdict_prog_fd, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (err) {
+ fprintf(stderr, "Failed to attach verdict program to map: %s\n", strerror(errno));
+ goto out;
+ }
+
+ err = bpf_map_update_elem(map, &index, &sock, BPF_NOEXIST);
+ if (err) {
+ fprintf(stderr, "Failed to update sockmap: %s\n", strerror(errno));
+ goto out;
+ }
+
+ out:
+ close(map);
+ return err;
+ }
+
+References
+==========
+
+- https://github.com/jrfastab/linux-kernel-xdp/commit/c89fd73cb9d2d7f3c716c3e00836f07b1aeb261f
+- https://lwn.net/Articles/731133/
+- http://vger.kernel.org/lpc_net2018_talks/ktls_bpf_paper.pdf
+- https://lwn.net/Articles/748628/
+- https://lore.kernel.org/bpf/20200218171023.844439-7-jakub@cloudflare.com/
+
+.. _`tools/testing/selftests/bpf/progs/test_sockmap_kern.h`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
+.. _`tools/testing/selftests/bpf/progs/sockmap_parse_prog.c`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
+.. _`tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
+.. _`tools/testing/selftests/bpf/prog_tests/sockmap_basic.c`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+.. _`tools/testing/selftests/bpf/test_sockmap.c`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/test_sockmap.c
+.. _`tools/testing/selftests/bpf/test_maps.c`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/test_maps.c
+.. _`tools/testing/selftests/bpf/progs/test_sockmap_listen.c`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+.. _`tools/testing/selftests/bpf/progs/test_sockmap_update.c`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/progs/test_sockmap_update.c
diff --git a/Documentation/bpf/map_xskmap.rst b/Documentation/bpf/map_xskmap.rst
index 7093b8208451..dc143edd9233 100644
--- a/Documentation/bpf/map_xskmap.rst
+++ b/Documentation/bpf/map_xskmap.rst
@@ -178,7 +178,7 @@ The following code snippet shows how to update an XSKMAP with an XSK entry.
For an example on how create AF_XDP sockets, please see the AF_XDP-example and
AF_XDP-forwarding programs in the `bpf-examples`_ directory in the `libxdp`_ repository.
-For a detailed explaination of the AF_XDP interface please see:
+For a detailed explanation of the AF_XDP interface please see:
- `libxdp-readme`_.
- `AF_XDP`_ kernel documentation.
diff --git a/Documentation/bpf/other.rst b/Documentation/bpf/other.rst
index 3d61963403b4..7e6b12018802 100644
--- a/Documentation/bpf/other.rst
+++ b/Documentation/bpf/other.rst
@@ -6,4 +6,5 @@ Other
:maxdepth: 1
ringbuf
- llvm_reloc \ No newline at end of file
+ llvm_reloc
+ graph_ds_impl
diff --git a/Documentation/bpf/ringbuf.rst b/Documentation/bpf/ringbuf.rst
index 6a615cd62bda..a99cd05d79d4 100644
--- a/Documentation/bpf/ringbuf.rst
+++ b/Documentation/bpf/ringbuf.rst
@@ -124,7 +124,7 @@ buffer. Currently 4 are supported:
- ``BPF_RB_AVAIL_DATA`` returns amount of unconsumed data in ring buffer;
- ``BPF_RB_RING_SIZE`` returns the size of ring buffer;
-- ``BPF_RB_CONS_POS``/``BPF_RB_PROD_POS`` returns current logical possition
+- ``BPF_RB_CONS_POS``/``BPF_RB_PROD_POS`` returns current logical position
of consumer/producer, respectively.
Returned values are momentarily snapshots of ring buffer state and could be
@@ -146,7 +146,7 @@ Design and Implementation
This reserve/commit schema allows a natural way for multiple producers, either
on different CPUs or even on the same CPU/in the same BPF program, to reserve
independent records and work with them without blocking other producers. This
-means that if BPF program was interruped by another BPF program sharing the
+means that if BPF program was interrupted by another BPF program sharing the
same ring buffer, they will both get a record reserved (provided there is
enough space left) and can work with it and submit it independently. This
applies to NMI context as well, except that due to using a spinlock during
diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst
index d4326caf01f9..f0ec19db301c 100644
--- a/Documentation/bpf/verifier.rst
+++ b/Documentation/bpf/verifier.rst
@@ -192,7 +192,7 @@ checked and found to be non-NULL, all copies can become PTR_TO_MAP_VALUEs.
As well as range-checking, the tracked information is also used for enforcing
alignment of pointer accesses. For instance, on most systems the packet pointer
is 2 bytes after a 4-byte alignment. If a program adds 14 bytes to that to jump
-over the Ethernet header, then reads IHL and addes (IHL * 4), the resulting
+over the Ethernet header, then reads IHL and adds (IHL * 4), the resulting
pointer will have a variable offset known to be 4n+2 for some n, so adding the 2
bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through
that pointer are safe.
@@ -316,6 +316,301 @@ Pruning considers not only the registers but also the stack (and any spilled
registers it may hold). They must all be safe for the branch to be pruned.
This is implemented in states_equal().
+Some technical details of the state pruning implementation can be found below.
+
+Register liveness tracking
+--------------------------
+
+In order to make state pruning effective, liveness state is tracked for each
+register and stack slot. The basic idea is to track which registers and stack
+slots are actually used during subsequent execution of the program, until
+program exit is reached. Registers and stack slots that were never used can be
+removed from the cached state, thus making more states equivalent to the
+cached state. This can be illustrated by the following program::
+
+ 0: call bpf_get_prandom_u32()
+ 1: r1 = 0
+ 2: if r0 == 0 goto +1
+ 3: r0 = 1
+ --- checkpoint ---
+ 4: r0 = r1
+ 5: exit
+
+Suppose that a state cache entry is created at instruction #4 (such entries are
+also called "checkpoints" in the text below). The verifier could reach the
+instruction with one of two possible register states:
+
+* r0 = 1, r1 = 0
+* r0 = 0, r1 = 0
+
+However, only the value of register ``r1`` is important to successfully finish
+verification. The goal of the liveness tracking algorithm is to spot this fact
+and figure out that both states are actually equivalent.
+
+Data structures
+~~~~~~~~~~~~~~~
+
+Liveness is tracked using the following data structures::
+
+ enum bpf_reg_liveness {
+ REG_LIVE_NONE = 0,
+ REG_LIVE_READ32 = 0x1,
+ REG_LIVE_READ64 = 0x2,
+ REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
+ REG_LIVE_WRITTEN = 0x4,
+ REG_LIVE_DONE = 0x8,
+ };
+
+ struct bpf_reg_state {
+ ...
+ struct bpf_reg_state *parent;
+ ...
+ enum bpf_reg_liveness live;
+ ...
+ };
+
+ struct bpf_stack_state {
+ struct bpf_reg_state spilled_ptr;
+ ...
+ };
+
+ struct bpf_func_state {
+ struct bpf_reg_state regs[MAX_BPF_REG];
+ ...
+ struct bpf_stack_state *stack;
+ }
+
+ struct bpf_verifier_state {
+ struct bpf_func_state *frame[MAX_CALL_FRAMES];
+ struct bpf_verifier_state *parent;
+ ...
+ }
+
+* ``REG_LIVE_NONE`` is an initial value assigned to ``->live`` fields upon new
+ verifier state creation;
+
+* ``REG_LIVE_WRITTEN`` means that the value of the register (or stack slot) is
+ defined by some instruction verified between this verifier state's parent and
+ verifier state itself;
+
+* ``REG_LIVE_READ{32,64}`` means that the value of the register (or stack slot)
+ is read by some child state of this verifier state;
+
+* ``REG_LIVE_DONE`` is a marker used by ``clean_verifier_state()`` to avoid
+ processing the same verifier state multiple times and for some sanity checks;
+
+* ``->live`` field values are formed by combining ``enum bpf_reg_liveness``
+ values using bitwise or.
+
+Register parentage chains
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to propagate information between parent and child states, a *register
+parentage chain* is established. Each register or stack slot is linked to a
+corresponding register or stack slot in its parent state via a ``->parent``
+pointer. This link is established upon state creation in ``is_state_visited()``
+and might be modified by ``set_callee_state()`` called from
+``__check_func_call()``.
+
+The rules for correspondence between registers / stack slots are as follows:
+
+* For the current stack frame, registers and stack slots of the new state are
+ linked to the registers and stack slots of the parent state with the same
+ indices.
+
+* For the outer stack frames, only callee-saved registers (r6-r9) and stack
+ slots are linked to the registers and stack slots of the parent state with the
+ same indices.
+
+* When a function call is processed, a new ``struct bpf_func_state`` instance
+ is allocated; it encapsulates a new set of registers and stack slots. For
+ this new frame, parent links for r6-r9 and stack slots are set to nil, while
+ parent links for r1-r5 are set to match the caller's r1-r5 parent links.
+
+This can be illustrated by the following diagram (arrows stand for
+``->parent`` pointers)::
+
+ ... ; Frame #0, some instructions
+ --- checkpoint #0 ---
+ 1 : r6 = 42 ; Frame #0
+ --- checkpoint #1 ---
+ 2 : call foo() ; Frame #0
+ ... ; Frame #1, instructions from foo()
+ --- checkpoint #2 ---
+ ... ; Frame #1, instructions from foo()
+ --- checkpoint #3 ---
+ exit ; Frame #1, return from foo()
+ 3 : r1 = r6 ; Frame #0 <- current state
+
+ +-------------------------------+-------------------------------+
+ | Frame #0 | Frame #1 |
+ Checkpoint +-------------------------------+-------------------------------+
+ #0 | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+
+ ^ ^ ^ ^
+ | | | |
+ Checkpoint +-------------------------------+
+ #1 | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+
+ ^ ^ ^
+ |_______|_______|_______________
+ | | |
+ nil nil | | | nil nil
+ | | | | | | |
+ Checkpoint +-------------------------------+-------------------------------+
+ #2 | r0 | r1-r5 | r6-r9 | fp-8 ... | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+-------------------------------+
+ ^ ^ ^ ^ ^
+ nil nil | | | | |
+ | | | | | | |
+ Checkpoint +-------------------------------+-------------------------------+
+ #3 | r0 | r1-r5 | r6-r9 | fp-8 ... | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+-------------------------------+
+ ^ ^
+ nil nil | |
+ | | | |
+ Current +-------------------------------+
+ state | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+
+ \
+ r6 read mark is propagated via these links
+ all the way up to checkpoint #1.
+ The checkpoint #1 contains a write mark for r6
+ because of instruction (1), thus read propagation
+ does not reach checkpoint #0 (see section below).
+
+Liveness marks tracking
+~~~~~~~~~~~~~~~~~~~~~~~
+
+For each processed instruction, the verifier tracks read and written registers
+and stack slots. The main idea of the algorithm is that read marks propagate
+back along the state parentage chain until they hit a write mark, which 'screens
+off' earlier states from the read. The information about reads is propagated by
+function ``mark_reg_read()``, which can be summarized as follows::
+
+ mark_reg_read(struct bpf_reg_state *state, ...):
+ parent = state->parent
+ while parent:
+ if state->live & REG_LIVE_WRITTEN:
+ break
+ if parent->live & REG_LIVE_READ64:
+ break
+ parent->live |= REG_LIVE_READ64
+ state = parent
+ parent = state->parent
+
+Notes:
+
+* The read marks are applied to the **parent** state while write marks are
+ applied to the **current** state. The write mark on a register or stack slot
+ means that it is updated by some instruction in the straight-line code leading
+ from the parent state to the current state.
+
+* Details about REG_LIVE_READ32 are omitted.
+
+* Function ``propagate_liveness()`` (see section :ref:`read_marks_for_cache_hits`)
+ might override the first parent link. Please refer to the comments in the
+ ``propagate_liveness()`` and ``mark_reg_read()`` source code for further
+ details.
+
+Because stack writes can have different sizes, ``REG_LIVE_WRITTEN`` marks are
+applied conservatively: stack slots are marked as written only if the write
+size corresponds to the size of the register; e.g. see function
+``save_register_state()``.
+
+Consider the following example::
+
+ 0: *(u64 *)(r10 - 8) = 0 ; define 8 bytes of fp-8
+ --- checkpoint #0 ---
+ 1: *(u32 *)(r10 - 8) = 1 ; redefine lower 4 bytes
+ 2: r1 = *(u32 *)(r10 - 8) ; read lower 4 bytes defined at (1)
+ 3: r2 = *(u32 *)(r10 - 4) ; read upper 4 bytes defined at (0)
+
+As stated above, the write at (1) does not count as ``REG_LIVE_WRITTEN``. Were
+it otherwise, the algorithm above would not be able to propagate the read mark
+from (3) to checkpoint #0.
+
+Once the ``BPF_EXIT`` instruction is reached, ``update_branch_counts()`` is
+called to update the ``->branches`` counter for each verifier state in a chain
+of parent verifier states. When the ``->branches`` counter reaches zero, the
+verifier state becomes a valid entry in a set of cached verifier states.
+
+Each entry of the verifier states cache is post-processed by the function
+``clean_live_states()``. This function marks all registers and stack slots
+without ``REG_LIVE_READ{32,64}`` marks as ``NOT_INIT`` or ``STACK_INVALID``.
+Registers/stack slots marked in this way are ignored in function ``stacksafe()``
+called from ``states_equal()`` when a state cache entry is considered for
+equivalence with a current state.
+
+Now it is possible to explain how the example from the beginning of the section
+works::
+
+ 0: call bpf_get_prandom_u32()
+ 1: r1 = 0
+ 2: if r0 == 0 goto +1
+ 3: r0 = 1
+ --- checkpoint[0] ---
+ 4: r0 = r1
+ 5: exit
+
+* At instruction #2, a branching point is reached and state ``{ r0 == 0, r1 == 0, pc == 4 }``
+ is pushed to the states processing queue (pc stands for program counter).
+
+* At instruction #4:
+
+ * ``checkpoint[0]`` states cache entry is created: ``{ r0 == 1, r1 == 0, pc == 4 }``;
+ * ``checkpoint[0].r0`` is marked as written;
+ * ``checkpoint[0].r1`` is marked as read;
+
+* At instruction #5 exit is reached and ``checkpoint[0]`` can now be processed
+ by ``clean_live_states()``. After this processing ``checkpoint[0].r1`` has a
+ read mark and all other registers and stack slots are marked as ``NOT_INIT``
+ or ``STACK_INVALID``.
+
+* The state ``{ r0 == 0, r1 == 0, pc == 4 }`` is popped from the states queue
+ and is compared against the cached state ``{ r1 == 0, pc == 4 }``; the states
+ are considered equivalent.
+
+.. _read_marks_for_cache_hits:
+
+Read marks propagation for cache hits
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Another point is the handling of read marks when a previously verified state
+is found in the states cache. Upon a cache hit, the verifier must behave in
+the same way as if the current state had been verified to the program exit.
+This means that all read marks present on registers and stack slots of the
+cached state must be propagated over the parentage chain of the current state.
+The example below shows why this is important. Function
+``propagate_liveness()`` handles this case.
+
+Consider the following state parentage chain (S is a starting state, A-E are
+derived states, -> arrows show which state is derived from which)::
+
+ r1 read
+ <------------- A[r1] == 0
+ C[r1] == 0
+ S ---> A ---> B ---> exit E[r1] == 1
+ |
+ ` ---> C ---> D
+ |
+ ` ---> E ^
+ |___ suppose all these
+ ^ states are at insn #Y
+ |
+ suppose all these
+ states are at insn #X
+
+* The chain of states ``S -> A -> B -> exit`` is verified first.
+
+* While ``B -> exit`` is verified, register ``r1`` is read and this read mark is
+ propagated up to state ``A``.
+
+* When the chain of states ``C -> D`` is verified, the state ``D`` turns out
+ to be equivalent to state ``B``.
+
+* The read mark for ``r1`` has to be propagated to state ``C``, otherwise state
+ ``C`` might get mistakenly marked as equivalent to state ``E`` even though
+ values for register ``r1`` differ between ``C`` and ``E``.
+
Understanding eBPF verifier messages
====================================
diff --git a/Documentation/conf.py b/Documentation/conf.py
index d927737e3c10..8b4e5451a02d 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -116,6 +116,9 @@ if major >= 3:
# include/linux/linkage.h:
"asmlinkage",
+
+ # include/linux/btf.h
+ "__bpf_kfunc",
]
else:
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 77eb775b8b42..7a3a08d81f11 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -127,6 +127,7 @@ Documents that don't fit elsewhere or which have yet to be categorized.
:maxdepth: 1
librs
+ netlink
.. only:: subproject and html
diff --git a/Documentation/core-api/netlink.rst b/Documentation/core-api/netlink.rst
new file mode 100644
index 000000000000..e4a938a05cc9
--- /dev/null
+++ b/Documentation/core-api/netlink.rst
@@ -0,0 +1,101 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+
+.. _kernel_netlink:
+
+===================================
+Netlink notes for kernel developers
+===================================
+
+General guidance
+================
+
+Attribute enums
+---------------
+
+Older families often define "null" attributes and commands with a value
+of ``0``, named ``unspec``. This is supported (``type: unused``)
+but should be avoided in new families. The ``unspec`` enum values are
+not used in practice, so just set the value of the first attribute to ``1``.
+
+Message enums
+-------------
+
+Use the same command IDs for requests and replies. This makes it easier
+to match them up, and we have plenty of ID space.
+
+Use separate command IDs for notifications. This makes it easier to tell
+notifications apart from replies (and to present them to the user
+application via a different API).
+
+Answer requests
+---------------
+
+Older families do not reply to all of the commands, especially NEW / ADD
+commands. The user only learns whether the operation succeeded or not via
+the ACK. Try to find useful data to return. Once the command is added,
+whether it replies with a full message or only an ACK is uAPI and cannot
+be changed. It's better to err on the side of replying.
+
+Specifically NEW and ADD commands should reply with information identifying
+the created object such as the allocated object's ID (without having to
+resort to using ``NLM_F_ECHO``).
+
+NLM_F_ECHO
+----------
+
+Make sure to pass the request info to genl_notify() to allow ``NLM_F_ECHO``
+to take effect. This is useful for programs that need precise feedback
+from the kernel (for example for logging purposes).
+
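+A rough sketch for a hypothetical ``foo`` family follows; apart from
+``genl_notify()`` itself, all names are illustrative:
+
+.. code-block:: c
+
+ static int foo_cmd_doit(struct sk_buff *skb, struct genl_info *info)
+ {
+ struct sk_buff *msg;
+
+ /* ... allocate msg and fill in the notification attributes ... */
+
+ /* passing info along lets the core honor NLM_F_ECHO for the requester */
+ genl_notify(&foo_family, msg, info, FOO_MCGRP_EVENT, GFP_KERNEL);
+ return 0;
+ }
+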
+Support dump consistency
+------------------------
+
+If iterating over objects during a dump may skip over objects or repeat
+them, make sure to report the dump inconsistency with ``NLM_F_DUMP_INTR``.
+This is usually implemented by maintaining a generation id for the
+structure and recording it in the ``seq`` member of struct netlink_callback.
+
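+A minimal sketch, assuming a hypothetical ``foo_gen_id`` counter that is
+incremented on every change to the dumped structure:
+
+.. code-block:: c
+
+ static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ struct nlmsghdr *nlh;
+
+ /* record the generation this dump pass is based on */
+ cb->seq = foo_gen_id;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, FOO_CMD_GET, 0, NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ /* sets NLM_F_DUMP_INTR on the message if cb->seq changed mid-dump */
+ nl_dump_check_consistent(cb, nlh);
+
+ /* ... fill in object attributes ... */
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+ }
+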
+Netlink specification
+=====================
+
+Documentation of the Netlink specification parts which are only relevant
+to the kernel space.
+
+Globals
+-------
+
+kernel-policy
+~~~~~~~~~~~~~
+
+Defines if the kernel validation policy is per operation (``per-op``)
+or for the entire family (``global``). New families should use ``per-op``
+(default) to be able to narrow down the attributes accepted by a specific
+command.
+
+checks
+------
+
+Documentation for the ``checks`` sub-sections of attribute specs.
+
+unterminated-ok
+~~~~~~~~~~~~~~~
+
+Accept strings without null-termination (for legacy families only).
+Switches from the ``NLA_NUL_STRING`` to ``NLA_STRING`` policy type.
+
+max-len
+~~~~~~~
+
+Defines the max length for a binary or string attribute (corresponding
+to the ``len`` member of struct nla_policy). For string attributes the
+terminating null character is not counted towards ``max-len``.
+
+The field may either be a literal integer value or a name of a defined
+constant. String types may reduce the constant by one
+(i.e. specify ``max-len: CONST - 1``) to reserve space for the terminating
+character, so implementations should recognize such a pattern.
+
+min-len
+~~~~~~~
+
+Similar to ``max-len`` but defines the minimum length.
diff --git a/Documentation/core-api/packing.rst b/Documentation/core-api/packing.rst
index d8c341fe383e..3ed13bc9a195 100644
--- a/Documentation/core-api/packing.rst
+++ b/Documentation/core-api/packing.rst
@@ -161,6 +161,6 @@ xxx_packing() that calls it using the proper QUIRK_* one-hot bits set.
The packing() function returns an int-encoded error code, which protects the
programmer against incorrect API use. The errors are not expected to occur
-durring runtime, therefore it is reasonable for xxx_packing() to return void
+during runtime, therefore it is reasonable for xxx_packing() to return void
and simply swallow those errors. Optionally it can dump stack or print the
error description.
diff --git a/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml b/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
index 1d1fee1a16c1..8bd1abfc44d9 100644
--- a/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
+++ b/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
@@ -57,6 +57,15 @@ patternProperties:
enum:
- mscc,ocelot-miim
+ "^ethernet-switch@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/net/mscc,vsc7514-switch.yaml
+ unevaluatedProperties: false
+ properties:
+ compatible:
+ enum:
+ - mscc,vsc7512-switch
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/amlogic,g12a-mdio-mux.yaml b/Documentation/devicetree/bindings/net/amlogic,g12a-mdio-mux.yaml
new file mode 100644
index 000000000000..ec5c038ce6a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/amlogic,g12a-mdio-mux.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/amlogic,g12a-mdio-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MDIO bus multiplexer/glue of Amlogic G12a SoC family
+
+description:
+ This is a special case of an MDIO bus multiplexer. It allows choosing
+ between the internal MDIO bus leading to the embedded 10/100 PHY and the
+ external MDIO bus.
+
+maintainers:
+ - Neil Armstrong <neil.armstrong@linaro.org>
+
+allOf:
+ - $ref: mdio-mux.yaml#
+
+properties:
+ compatible:
+ const: amlogic,g12a-mdio-mux
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: peripheral clock
+ - description: platform crystal
+ - description: SoC 50MHz MPLL
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: clkin0
+ - const: clkin1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ mdio-multiplexer@4c000 {
+ compatible = "amlogic,g12a-mdio-mux";
+ reg = <0x4c000 0xa4>;
+ clocks = <&clkc_eth_phy>, <&xtal>, <&clkc_mpll>;
+ clock-names = "pclk", "clkin0", "clkin1";
+ mdio-parent-bus = <&mdio0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mdio@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@8 {
+ compatible = "ethernet-phy-id0180.3301",
+ "ethernet-phy-ieee802.3-c22";
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <8>;
+ max-speed = <100>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/net/amlogic,gxl-mdio-mux.yaml b/Documentation/devicetree/bindings/net/amlogic,gxl-mdio-mux.yaml
new file mode 100644
index 000000000000..27ae004dbea0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/amlogic,gxl-mdio-mux.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/amlogic,gxl-mdio-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic GXL MDIO bus multiplexer
+
+maintainers:
+ - Jerome Brunet <jbrunet@baylibre.com>
+
+description:
+ This is a special case of an MDIO bus multiplexer. It allows choosing
+ between the internal MDIO bus leading to the embedded 10/100 PHY and the
+ external MDIO bus on the Amlogic GXL SoC family.
+
+allOf:
+ - $ref: mdio-mux.yaml#
+
+properties:
+ compatible:
+ const: amlogic,gxl-mdio-mux
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ eth_phy_mux: mdio@558 {
+ compatible = "amlogic,gxl-mdio-mux";
+ reg = <0x558 0xc>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&refclk>;
+ clock-names = "ref";
+ mdio-parent-bus = <&mdio0>;
+
+ external_mdio: mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ internal_mdio: mdio@1 {
+ reg = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/asix,ax88796c.yaml b/Documentation/devicetree/bindings/net/asix,ax88796c.yaml
index 699ebf452479..164d1ff9e83c 100644
--- a/Documentation/devicetree/bindings/net/asix,ax88796c.yaml
+++ b/Documentation/devicetree/bindings/net/asix,ax88796c.yaml
@@ -19,6 +19,7 @@ description: |
allOf:
- $ref: ethernet-controller.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml
properties:
compatible:
@@ -39,8 +40,8 @@ properties:
it should be marked GPIO_ACTIVE_LOW.
maxItems: 1
+ controller-data: true
local-mac-address: true
-
mac-address: true
required:
diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
index 1eb98c9a1a26..d3f45d29fa0a 100644
--- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
@@ -30,13 +30,17 @@ properties:
- items:
- enum:
+ - renesas,r8a779a0-canfd # R-Car V3U
+ - renesas,r8a779g0-canfd # R-Car V4H
+ - const: renesas,rcar-gen4-canfd # R-Car Gen4
+
+ - items:
+ - enum:
- renesas,r9a07g043-canfd # RZ/G2UL and RZ/Five
- renesas,r9a07g044-canfd # RZ/G2{L,LC}
- renesas,r9a07g054-canfd # RZ/V2L
- const: renesas,rzg2l-canfd # RZ/G2L family
- - const: renesas,r8a779a0-canfd # R-Car V3U
-
reg:
maxItems: 1
@@ -60,7 +64,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/flag
description:
The controller can operate in either CAN FD only mode (default) or
- Classical CAN only mode. The mode is global to both the channels.
+ Classical CAN only mode. The mode is global to all channels.
Specify this property to put the controller in Classical CAN only mode.
assigned-clocks:
@@ -80,6 +84,10 @@ patternProperties:
The controller supports multiple channels and each is represented as a
child node. Each channel can be enabled/disabled individually.
+ properties:
+ phys:
+ maxItems: 1
+
additionalProperties: false
required:
@@ -159,7 +167,7 @@ allOf:
properties:
compatible:
contains:
- const: renesas,r8a779a0-canfd
+ const: renesas,rcar-gen4-canfd
then:
patternProperties:
"^channel[2-7]$": false
diff --git a/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml b/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
index 2a6d126606ca..9565a7402146 100644
--- a/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Arrow SpeedChips XRS7000 Series Switch
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
maintainers:
- George McCollister <george.mccollister@gmail.com>
diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
index 1219b830b1a4..5bef4128d175 100644
--- a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
@@ -66,7 +66,7 @@ required:
- reg
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml
index d159ac78cec1..eed16e216fb6 100644
--- a/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml
@@ -85,11 +85,16 @@ properties:
ports:
type: object
- properties:
- brcm,use-bcm-hdr:
- description: if present, indicates that the switch port has Broadcom
- tags enabled (per-packet metadata)
- type: boolean
+ patternProperties:
+ '^port@[0-9a-f]$':
+ $ref: dsa-port.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ brcm,use-bcm-hdr:
+ description: if present, indicates that the switch port has Broadcom
+ tags enabled (per-packet metadata)
+ type: boolean
required:
- reg
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml b/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml
index b173fceb8998..480120469953 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml
@@ -4,18 +4,19 @@
$id: http://devicetree.org/schemas/net/dsa/dsa-port.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Ethernet Switch port
+title: Generic DSA Switch Port
maintainers:
- Andrew Lunn <andrew@lunn.ch>
- Florian Fainelli <f.fainelli@gmail.com>
- - Vivien Didelot <vivien.didelot@gmail.com>
+ - Vladimir Oltean <olteanv@gmail.com>
description:
- Ethernet switch port Description
+ A DSA switch port is a component of a switch that manages one MAC, and can
+ pass Ethernet frames. It can act as a standard Ethernet switch port, or have
+ DSA-specific functionality.
-allOf:
- - $ref: /schemas/net/ethernet-controller.yaml#
+$ref: /schemas/net/ethernet-switch-port.yaml#
properties:
reg:
@@ -58,25 +59,6 @@ properties:
- rtl8_4t
- seville
- phy-handle: true
-
- phy-mode: true
-
- fixed-link: true
-
- mac-address: true
-
- sfp: true
-
- managed: true
-
- rx-internal-delay-ps: true
-
- tx-internal-delay-ps: true
-
-required:
- - reg
-
# CPU and DSA ports must have phylink-compatible link descriptions
if:
oneOf:
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.yaml b/Documentation/devicetree/bindings/net/dsa/dsa.yaml
index 5469ae8a4389..8d971813bab6 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.yaml
@@ -9,7 +9,7 @@ title: Ethernet Switch
maintainers:
- Andrew Lunn <andrew@lunn.ch>
- Florian Fainelli <f.fainelli@gmail.com>
- - Vivien Didelot <vivien.didelot@gmail.com>
+ - Vladimir Oltean <olteanv@gmail.com>
description:
This binding represents Ethernet Switches which have a dedicated CPU
@@ -18,10 +18,9 @@ description:
select: false
-properties:
- $nodename:
- pattern: "^(ethernet-)?switch(@.*)?$"
+$ref: /schemas/net/ethernet-switch.yaml#
+properties:
dsa,member:
minItems: 2
maxItems: 2
@@ -32,30 +31,28 @@ properties:
(single device hanging off a CPU port) must not specify this property
$ref: /schemas/types.yaml#/definitions/uint32-array
-patternProperties:
- "^(ethernet-)?ports$":
- type: object
- properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
+additionalProperties: true
+
+$defs:
+ ethernet-ports:
+ description: A DSA switch without any extra port properties
+ $ref: '#/'
patternProperties:
- "^(ethernet-)?port@[0-9]+$":
+ "^(ethernet-)?ports$":
type: object
- description: Ethernet switch ports
-
- $ref: dsa-port.yaml#
-
- unevaluatedProperties: false
-
-oneOf:
- - required:
- - ports
- - required:
- - ethernet-ports
-
-additionalProperties: true
+ additionalProperties: false
+
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^(ethernet-)?port@[0-9]+$":
+ description: Ethernet switch ports
+ $ref: dsa-port.yaml#
+ unevaluatedProperties: false
...
diff --git a/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml b/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
index 447589b01e8e..4021b054f684 100644
--- a/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Hirschmann Hellcreek TSN Switch
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
maintainers:
- Andrew Lunn <andrew@lunn.ch>
diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
index f2e9ff3f580b..449ee0735012 100644
--- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
@@ -24,56 +24,46 @@ description: |
There is only the standalone version of MT7531.
- Port 5 on MT7530 has got various ways of configuration.
-
- For standalone MT7530:
+ Port 5 on MT7530 has got various ways of configuration:
- Port 5 can be used as a CPU port.
- - PHY 0 or 4 of the switch can be muxed to connect to the gmac of the SoC
- which port 5 is wired to. Usually used for connecting the wan port
- directly to the CPU to achieve 2 Gbps routing in total.
+ - PHY 0 or 4 of the switch can be muxed to gmac5 of the switch. Therefore,
+ the gmac of the SoC which is wired to port 5 can connect to the PHY.
+ This is usually used for connecting the wan port directly to the CPU to
+ achieve 2 Gbps routing in total.
- The driver looks up the reg on the ethernet-phy node which the phy-handle
- property refers to on the gmac node to mux the specified phy.
+ The driver looks up the reg on the ethernet-phy node, which the phy-handle
+ property on the gmac node refers to, to mux the specified phy.
The driver requires the gmac of the SoC to have "mediatek,eth-mac" as the
- compatible string and the reg must be 1. So, for now, only gmac1 of an
+ compatible string and the reg must be 1. So, for now, only gmac1 of a
+ MediaTek SoC can benefit from this. Banana Pi BPI-R2 suits this.
- Check out example 5 for a similar configuration.
-
- - Port 5 can be wired to an external phy. Port 5 becomes a DSA slave.
- Check out example 7 for a similar configuration.
-
- For multi-chip module MT7530:
-
- - Port 5 can be used as a CPU port.
-
- - PHY 0 or 4 of the switch can be muxed to connect to gmac1 of the SoC.
- Usually used for connecting the wan port directly to the CPU to achieve 2
- Gbps routing in total.
-
- The driver looks up the reg on the ethernet-phy node which the phy-handle
- property refers to on the gmac node to mux the specified phy.
For the MT7621 SoCs, rgmii2 group must be claimed with rgmii2 function.
+
Check out example 5.
- - In case of an external phy wired to gmac1 of the SoC, port 5 must not be
- enabled.
+ - For the multi-chip module MT7530, in case of an external phy wired to
+ gmac1 of the SoC, port 5 must not be enabled.
In case of muxing PHY 0 or 4, the external phy must not be enabled.
For the MT7621 SoCs, rgmii2 group must be claimed with rgmii2 function.
+
Check out example 6.
- - Port 5 can be muxed to an external phy. Port 5 becomes a DSA slave.
- The external phy must be wired TX to TX to gmac1 of the SoC for this to
- work. Ubiquiti EdgeRouter X SFP is wired this way.
+ - Port 5 can be wired to an external phy. Port 5 becomes a DSA slave.
- Muxing PHY 0 or 4 won't work when the external phy is connected TX to TX.
+ For the multi-chip module MT7530, the external phy must be wired TX to TX
+ to gmac1 of the SoC for this to work. Ubiquiti EdgeRouter X SFP is wired
+ this way.
+
+ For the multi-chip module MT7530, muxing PHY 0 or 4 won't work when the
+ external phy is connected TX to TX.
For the MT7621 SoCs, rgmii2 group must be claimed with gpio function.
+
Check out example 7.
properties:
@@ -157,9 +147,6 @@ patternProperties:
patternProperties:
"^(ethernet-)?port@[0-9]+$":
type: object
- description: Ethernet switch ports
-
- unevaluatedProperties: false
properties:
reg:
@@ -168,7 +155,6 @@ patternProperties:
for user ports.
allOf:
- - $ref: dsa-port.yaml#
- if:
required: [ ethernet ]
then:
@@ -238,7 +224,7 @@ $defs:
- sgmii
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
- if:
required:
- mediatek,mcm
@@ -605,7 +591,7 @@ examples:
label = "lan4";
};
- /* Commented out, phy4 is muxed to gmac1.
+ /* Commented out, phy4 is connected to gmac1.
port@4 {
reg = <4>;
label = "wan";
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index 4da75b1f9533..a4b53434c85c 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -11,7 +11,7 @@ maintainers:
- Woojung Huh <Woojung.Huh@microchip.com>
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
- $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml
index b34de303966b..8d7e878b84dc 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml
@@ -10,7 +10,7 @@ maintainers:
- UNGLinuxDriver@microchip.com
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml b/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml
index 347a0e1b3d3f..fe02d05196e4 100644
--- a/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml
@@ -78,7 +78,7 @@ required:
- reg
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
index df98a16e4e75..9a64ed658745 100644
--- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
@@ -13,7 +13,7 @@ description:
depends on the SPI bus master driver.
allOf:
- - $ref: "dsa.yaml#"
+ - $ref: dsa.yaml#/$defs/ethernet-ports
- $ref: /schemas/spi/spi-peripheral-props.yaml#
maintainers:
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
index 978162df51f7..389892592aac 100644
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
@@ -66,15 +66,11 @@ properties:
With the legacy mapping the reg corresponding to the internal
mdio is the switch reg with an offset of -1.
+$ref: "dsa.yaml#"
+
patternProperties:
"^(ethernet-)?ports$":
type: object
- properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
-
patternProperties:
"^(ethernet-)?port@[0-6]$":
type: object
@@ -116,7 +112,7 @@ required:
- compatible
- reg
-additionalProperties: true
+unevaluatedProperties: false
examples:
- |
@@ -148,8 +144,6 @@ examples:
switch@10 {
compatible = "qca,qca8337";
- #address-cells = <1>;
- #size-cells = <0>;
reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>;
reg = <0x10>;
@@ -209,8 +203,6 @@ examples:
switch@10 {
compatible = "qca,qca8337";
- #address-cells = <1>;
- #size-cells = <0>;
reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>;
reg = <0x10>;
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek.yaml b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
index 1a7d45a8ad66..cfd69c2604ea 100644
--- a/Documentation/devicetree/bindings/net/dsa/realtek.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Realtek switches for unmanaged switches
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
maintainers:
- Linus Walleij <linus.walleij@linaro.org>
diff --git a/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml b/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml
index 0a0d62b6c00e..833d2f68daa1 100644
--- a/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml
@@ -14,7 +14,7 @@ description: |
handles 4 ports + 1 CPU management port.
allOf:
- - $ref: dsa.yaml#
+ - $ref: dsa.yaml#/$defs/ethernet-ports
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/ethernet-switch-port.yaml b/Documentation/devicetree/bindings/net/ethernet-switch-port.yaml
new file mode 100644
index 000000000000..d5cf7e40e3c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ethernet-switch-port.yaml
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ethernet-switch-port.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic Ethernet Switch Port
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Vladimir Oltean <olteanv@gmail.com>
+
+description:
+ An Ethernet switch port is a component of a switch that manages one MAC, and
+ can pass Ethernet frames.
+
+$ref: ethernet-controller.yaml#
+
+properties:
+ reg:
+ description: Port number
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/net/ethernet-switch.yaml b/Documentation/devicetree/bindings/net/ethernet-switch.yaml
new file mode 100644
index 000000000000..a04f8ef744aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ethernet-switch.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ethernet-switch.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic Ethernet Switch
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Vladimir Oltean <olteanv@gmail.com>
+
+description:
+ Ethernet switches are multi-port Ethernet controllers. Each port has
+ its own number and is represented as its own Ethernet controller.
+ The minimum required functionality is to pass packets to software.
+ They may or may not be able to forward packets autonomously between
+ ports.
+
+select: false
+
+properties:
+ $nodename:
+ pattern: "^(ethernet-)?switch(@.*)?$"
+
+patternProperties:
+ "^(ethernet-)?ports$":
+ type: object
+ unevaluatedProperties: false
+
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^(ethernet-)?port@[0-9]+$":
+ type: object
+ description: Ethernet switch ports
+
+oneOf:
+ - required:
+ - ports
+ - required:
+ - ethernet-ports
+
+additionalProperties: true
+
+$defs:
+ base:
+ description: An Ethernet switch without any extra port properties
+ $ref: '#/'
+
+ patternProperties:
+ "^(ethernet-)?port@[0-9]+$":
+ description: Ethernet switch ports
+ $ref: ethernet-switch-port.yaml#
+ unevaluatedProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml
index 77e5f32cb62f..e6f2045f05de 100644
--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -51,6 +51,7 @@ properties:
- fsl,imx8mm-fec
- fsl,imx8mn-fec
- fsl,imx8mp-fec
+ - fsl,imx93-fec
- const: fsl,imx8mq-fec
- const: fsl,imx6sx-fec
- items:
diff --git a/Documentation/devicetree/bindings/net/maxlinear,gpy2xx.yaml b/Documentation/devicetree/bindings/net/maxlinear,gpy2xx.yaml
new file mode 100644
index 000000000000..d71fa9de2b64
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/maxlinear,gpy2xx.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/maxlinear,gpy2xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MaxLinear GPY2xx PHY
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Michael Walle <michael@walle.cc>
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ maxlinear,use-broken-interrupts:
+ description: |
+ Interrupts are broken on some GPY2xx PHYs in that they keep the
+ interrupt line asserted even after the interrupt status register is
+ cleared. This blocks the interrupt line, which is usually bad for
+ shared lines. By default, interrupts are disabled for this PHY and
+ polling mode is used. If one can live with the consequences, this
+ property can be used to enable interrupt handling.
+
+ Affected PHYs (as far as known) are GPY215B and GPY215C.
+ type: boolean
+
+dependencies:
+ maxlinear,use-broken-interrupts: [ interrupts ]
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@0 {
+ reg = <0>;
+ interrupts-extended = <&intc 0>;
+ maxlinear,use-broken-interrupts;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt b/Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt
deleted file mode 100644
index 3a96cbed9294..000000000000
--- a/Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Properties for the MDIO bus multiplexer/glue of Amlogic G12a SoC family.
-
-This is a special case of a MDIO bus multiplexer. It allows to choose between
-the internal mdio bus leading to the embedded 10/100 PHY or the external
-MDIO bus.
-
-Required properties in addition to the generic multiplexer properties:
-- compatible : amlogic,g12a-mdio-mux
-- reg: physical address and length of the multiplexer/glue registers
-- clocks: list of clock phandle, one for each entry clock-names.
-- clock-names: should contain the following:
- * "pclk" : peripheral clock.
- * "clkin0" : platform crytal
- * "clkin1" : SoC 50MHz MPLL
-
-Example :
-
-mdio_mux: mdio-multiplexer@4c000 {
- compatible = "amlogic,g12a-mdio-mux";
- reg = <0x0 0x4c000 0x0 0xa4>;
- clocks = <&clkc CLKID_ETH_PHY>,
- <&xtal>,
- <&clkc CLKID_MPLL_5OM>;
- clock-names = "pclk", "clkin0", "clkin1";
- mdio-parent-bus = <&mdio0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- ext_mdio: mdio@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- int_mdio: mdio@1 {
- reg = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- internal_ephy: ethernet-phy@8 {
- compatible = "ethernet-phy-id0180.3301",
- "ethernet-phy-ieee802.3-c22";
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- reg = <8>;
- max-speed = <100>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
index df9e844dd6bc..2681168777a1 100644
--- a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
@@ -158,6 +158,7 @@ KSZ9031:
no link will be established.
KSZ9131:
+LAN8841:
All skew control options are specified in picoseconds. The increment
step is 100ps. Unlike KSZ9031, the values represent picoseccond delays.
diff --git a/Documentation/devicetree/bindings/net/motorcomm,yt8xxx.yaml b/Documentation/devicetree/bindings/net/motorcomm,yt8xxx.yaml
new file mode 100644
index 000000000000..157e3bbcaf6f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/motorcomm,yt8xxx.yaml
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/motorcomm,yt8xxx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MotorComm yt8xxx Ethernet PHY
+
+maintainers:
+ - Frank Sae <frank.sae@motor-comm.com>
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ethernet-phy-id4f51.e91a
+ - ethernet-phy-id4f51.e91b
+
+ rx-internal-delay-ps:
+ description: |
+ RGMII RX clock delay, in picoseconds, used only when the PHY operates in
+ RGMII mode with internal delay (phy-mode is 'rgmii-id' or 'rgmii-rxid').
+ enum: [ 0, 150, 300, 450, 600, 750, 900, 1050, 1200, 1350, 1500, 1650,
+ 1800, 1900, 1950, 2050, 2100, 2200, 2250, 2350, 2500, 2650, 2800,
+ 2950, 3100, 3250, 3400, 3550, 3700, 3850, 4000, 4150 ]
+ default: 1950
+
+ tx-internal-delay-ps:
+ description: |
+ RGMII TX clock delay, in picoseconds, used only when the PHY operates in
+ RGMII mode with internal delay (phy-mode is 'rgmii-id' or 'rgmii-txid').
+ enum: [ 0, 150, 300, 450, 600, 750, 900, 1050, 1200, 1350, 1500, 1650, 1800,
+ 1950, 2100, 2250 ]
+ default: 1950
+
+ motorcomm,clk-out-frequency-hz:
+ description: Clock output frequency on the clock output pin.
+ enum: [0, 25000000, 125000000]
+ default: 0
+
+ motorcomm,keep-pll-enabled:
+ description: |
+ If set, keep the PLL enabled even if there is no link. Useful if you
+ want to use the clock output without an ethernet link.
+ type: boolean
+
+ motorcomm,auto-sleep-disabled:
+ description: |
+ If set, the PHY will not enter sleep mode and power down the AFE after
+ the cable has been unplugged for some time.
+ type: boolean
+
+ motorcomm,tx-clk-adj-enabled:
+ description: |
+ This configuration is mainly to adapt to the VF2 board with the JH7110
+ SoC. Useful if you want to use the tx-clk-xxxx-inverted properties to
+ adjust the delay of the TX clock.
+ type: boolean
+
+ motorcomm,tx-clk-10-inverted:
+ description: |
+ Use original or inverted RGMII Transmit PHY Clock to drive the RGMII
+ Transmit PHY Clock delay train configuration when speed is 10Mbps.
+ type: boolean
+
+ motorcomm,tx-clk-100-inverted:
+ description: |
+ Use original or inverted RGMII Transmit PHY Clock to drive the RGMII
+ Transmit PHY Clock delay train configuration when speed is 100Mbps.
+ type: boolean
+
+ motorcomm,tx-clk-1000-inverted:
+ description: |
+ Use original or inverted RGMII Transmit PHY Clock to drive the RGMII
+ Transmit PHY Clock delay train configuration when speed is 1000Mbps.
+ type: boolean
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy-mode = "rgmii-id";
+ ethernet-phy@4 {
+ /* Only needed to make DT lint tools work. Do not copy/paste
+ * into real DTS files.
+ */
+ compatible = "ethernet-phy-id4f51.e91a";
+
+ reg = <4>;
+ rx-internal-delay-ps = <2100>;
+ tx-internal-delay-ps = <150>;
+ motorcomm,clk-out-frequency-hz = <0>;
+ motorcomm,keep-pll-enabled;
+ motorcomm,auto-sleep-disabled;
+ };
+ };
+ - |
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy-mode = "rgmii";
+ ethernet-phy@5 {
+ /* Only needed to make DT lint tools work. Do not copy/paste
+ * into real DTS files.
+ */
+ compatible = "ethernet-phy-id4f51.e91a";
+
+ reg = <5>;
+ motorcomm,clk-out-frequency-hz = <125000000>;
+ motorcomm,keep-pll-enabled;
+ motorcomm,auto-sleep-disabled;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/mscc,vsc7514-switch.yaml b/Documentation/devicetree/bindings/net/mscc,vsc7514-switch.yaml
index ee0a504bdb24..8ee2c7d7ff42 100644
--- a/Documentation/devicetree/bindings/net/mscc,vsc7514-switch.yaml
+++ b/Documentation/devicetree/bindings/net/mscc,vsc7514-switch.yaml
@@ -18,14 +18,52 @@ description: |
packets using CPU. Additionally, PTP is supported as well as FDMA for faster
packet extraction/injection.
-properties:
- $nodename:
- pattern: "^switch@[0-9a-f]+$"
+allOf:
+ - if:
+ properties:
+ compatible:
+ const: mscc,vsc7514-switch
+ then:
+ $ref: ethernet-switch.yaml#
+ required:
+ - interrupts
+ - interrupt-names
+ properties:
+ reg:
+ minItems: 21
+ reg-names:
+ minItems: 21
+ ethernet-ports:
+ patternProperties:
+ "^port@[0-9a-f]+$":
+ $ref: ethernet-switch-port.yaml#
+ unevaluatedProperties: false
+
+ - if:
+ properties:
+ compatible:
+ const: mscc,vsc7512-switch
+ then:
+ $ref: /schemas/net/dsa/dsa.yaml#
+ properties:
+ reg:
+ maxItems: 20
+ reg-names:
+ maxItems: 20
+ ethernet-ports:
+ patternProperties:
+ "^port@[0-9a-f]+$":
+ $ref: /schemas/net/dsa/dsa-port.yaml#
+ unevaluatedProperties: false
+properties:
compatible:
- const: mscc,vsc7514-switch
+ enum:
+ - mscc,vsc7512-switch
+ - mscc,vsc7514-switch
reg:
+ minItems: 20
items:
- description: system target
- description: rewriter target
@@ -50,6 +88,7 @@ properties:
- description: fdma target
reg-names:
+ minItems: 20
items:
- const: sys
- const: rew
@@ -87,59 +126,16 @@ properties:
- const: xtr
- const: fdma
- ethernet-ports:
- type: object
-
- properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
-
- additionalProperties: false
-
- patternProperties:
- "^port@[0-9a-f]+$":
- type: object
- description: Ethernet ports handled by the switch
-
- $ref: ethernet-controller.yaml#
-
- unevaluatedProperties: false
-
- properties:
- reg:
- description: Switch port number
-
- phy-handle: true
-
- phy-mode: true
-
- fixed-link: true
-
- mac-address: true
-
- required:
- - reg
- - phy-mode
-
- oneOf:
- - required:
- - phy-handle
- - required:
- - fixed-link
-
required:
- compatible
- reg
- reg-names
- - interrupts
- - interrupt-names
- ethernet-ports
-additionalProperties: false
+unevaluatedProperties: false
examples:
+ # VSC7514 (Switchdev)
- |
switch@1010000 {
compatible = "mscc,vsc7514-switch";
@@ -187,5 +183,51 @@ examples:
};
};
};
+ # VSC7512 (DSA)
+ - |
+ ethernet-switch@1{
+ compatible = "mscc,vsc7512-switch";
+ reg = <0x71010000 0x10000>,
+ <0x71030000 0x10000>,
+ <0x71080000 0x100>,
+ <0x710e0000 0x10000>,
+ <0x711e0000 0x100>,
+ <0x711f0000 0x100>,
+ <0x71200000 0x100>,
+ <0x71210000 0x100>,
+ <0x71220000 0x100>,
+ <0x71230000 0x100>,
+ <0x71240000 0x100>,
+ <0x71250000 0x100>,
+ <0x71260000 0x100>,
+ <0x71270000 0x100>,
+ <0x71280000 0x100>,
+ <0x71800000 0x80000>,
+ <0x71880000 0x10000>,
+ <0x71040000 0x10000>,
+ <0x71050000 0x10000>,
+ <0x71060000 0x10000>;
+ reg-names = "sys", "rew", "qs", "ptp", "port0", "port1",
+ "port2", "port3", "port4", "port5", "port6",
+ "port7", "port8", "port9", "port10", "qsys",
+ "ana", "s0", "s1", "s2";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ethernet = <&mac_sw>;
+ phy-handle = <&phy0>;
+ phy-mode = "internal";
+ };
+ port@1 {
+ reg = <1>;
+ phy-handle = <&phy1>;
+ phy-mode = "internal";
+ };
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
index 04df496af7e6..63409cbff5ad 100644
--- a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
+++ b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/net/nxp,dwmac-imx.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: NXP i.MX8 DWMAC glue layer
+title: NXP i.MX8/9 DWMAC glue layer
maintainers:
- Clark Wang <xiaoning.wang@nxp.com>
@@ -19,6 +19,7 @@ select:
enum:
- nxp,imx8mp-dwmac-eqos
- nxp,imx8dxl-dwmac-eqos
+ - nxp,imx93-dwmac-eqos
required:
- compatible
@@ -32,6 +33,7 @@ properties:
- enum:
- nxp,imx8mp-dwmac-eqos
- nxp,imx8dxl-dwmac-eqos
+ - nxp,imx93-dwmac-eqos
- const: snps,dwmac-5.10a
clocks:
diff --git a/Documentation/devicetree/bindings/net/rfkill-gpio.yaml b/Documentation/devicetree/bindings/net/rfkill-gpio.yaml
new file mode 100644
index 000000000000..9630c8466fac
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/rfkill-gpio.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/rfkill-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO controlled rfkill switch
+
+maintainers:
+ - Johannes Berg <johannes@sipsolutions.net>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+properties:
+ compatible:
+ const: rfkill-gpio
+
+ label:
+ description: rfkill switch name, defaults to node name
+
+ radio-type:
+ description: rfkill radio type
+ enum:
+ - bluetooth
+ - fm
+ - gps
+ - nfc
+ - ultrawideband
+ - wimax
+ - wlan
+ - wwan
+
+ shutdown-gpios:
+ maxItems: 1
+
+required:
+ - compatible
+ - radio-type
+ - shutdown-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ rfkill {
+ compatible = "rfkill-gpio";
+ label = "rfkill-pcie-wlan";
+ radio-type = "wlan";
+ shutdown-gpios = <&gpio2 25 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
index 42fb72b6909d..04936632fcbb 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
@@ -49,11 +49,11 @@ properties:
- rockchip,rk3368-gmac
- rockchip,rk3399-gmac
- rockchip,rv1108-gmac
- - rockchip,rv1126-gmac
- items:
- enum:
- rockchip,rk3568-gmac
- rockchip,rk3588-gmac
+ - rockchip,rv1126-gmac
- const: snps,dwmac-4.20a
clocks:
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index e88a86623fce..16b7d2904696 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -552,7 +552,7 @@ required:
dependencies:
snps,reset-active-low: ["snps,reset-gpio"]
- snps,reset-delay-us: ["snps,reset-gpio"]
+ snps,reset-delays-us: ["snps,reset-gpio"]
allOf:
- $ref: "ethernet-controller.yaml#"
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index 821974815dec..900063411a20 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -57,6 +57,7 @@ properties:
- ti,am654-cpsw-nuss
- ti,j7200-cpswxg-nuss
- ti,j721e-cpsw-nuss
+ - ti,j721e-cpswxg-nuss
- ti,am642-cpsw-nuss
reg:
@@ -111,7 +112,7 @@ properties:
const: 0
patternProperties:
- "^port@[1-4]$":
+ "^port@[1-8]$":
type: object
description: CPSWxG NUSS external ports
@@ -121,7 +122,7 @@ properties:
properties:
reg:
minimum: 1
- maximum: 4
+ maximum: 8
description: CPSW port number
phys:
@@ -186,12 +187,36 @@ allOf:
properties:
compatible:
contains:
- const: ti,j7200-cpswxg-nuss
+ const: ti,j721e-cpswxg-nuss
then:
properties:
ethernet-ports:
patternProperties:
- "^port@[3-4]$": false
+ "^port@[5-8]$": false
+ "^port@[1-4]$":
+ properties:
+ reg:
+ minimum: 1
+ maximum: 4
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,j721e-cpswxg-nuss
+ - ti,j7200-cpswxg-nuss
+ then:
+ properties:
+ ethernet-ports:
+ patternProperties:
+ "^port@[3-8]$": false
+ "^port@[1-2]$":
+ properties:
+ reg:
+ minimum: 1
+ maximum: 2
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
index 6230f576134b..3e910d3b24a0 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
@@ -93,6 +93,14 @@ properties:
description:
Number of timestamp Generator function outputs (TS_GENFx)
+ ti,pps:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+ description: |
+ The pair of HWx_TS_PUSH input and TS_GENFy output indexes used for
+ PPS events generation. Platform/board specific.
+
refclk-mux:
type: object
additionalProperties: false
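A quick usage sketch for the new ti,pps property; the first cell picks the
HWx_TS_PUSH input and the second the TS_GENFy output, but the pairing is
board-specific, so the node name and indexes below are purely illustrative:

    cpts {
        /* hypothetical fragment: pair one TS push input with one generator */
        ti,pps = <1 0>;  /* HWx_TS_PUSH index 1, TS_GENFy index 0 */
    };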
diff --git a/Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml b/Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml
index 5557676e9d4b..0ea84d6fe73e 100644
--- a/Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml
@@ -29,15 +29,15 @@ additionalProperties: false
examples:
- |
- mmc {
- #address-cells = <1>;
- #size-cells = <0>;
-
- wifi@1 {
- compatible = "esp,esp8089";
- reg = <1>;
- esp,crystal-26M-en = <2>;
- };
- };
+ mmc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wifi@1 {
+ compatible = "esp,esp8089";
+ reg = <1>;
+ esp,crystal-26M-en = <2>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/net/wireless/ieee80211.yaml b/Documentation/devicetree/bindings/net/wireless/ieee80211.yaml
index e68ed9423150..d89f7a3f88a7 100644
--- a/Documentation/devicetree/bindings/net/wireless/ieee80211.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/ieee80211.yaml
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/wireless/ieee80211.yaml#
diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
index 9bf9bbac16e2..cdc303caf5f4 100644
--- a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
+++ b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
@@ -1,4 +1,4 @@
-Marvell 8787/8897/8997 (sd8787/sd8897/sd8997/pcie8997) SDIO/PCIE devices
+Marvell 8787/8897/8978/8997 (sd8787/sd8897/sd8978/sd8997/pcie8997) SDIO/PCIE devices
------
This node provides properties for controlling the Marvell SDIO/PCIE wireless device.
@@ -10,7 +10,9 @@ Required properties:
- compatible : should be one of the following:
* "marvell,sd8787"
* "marvell,sd8897"
+ * "marvell,sd8978"
* "marvell,sd8997"
+ * "nxp,iw416"
* "pci11ab,2b42"
* "pci1b4b,2b42"
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
index f0c78f994491..7d526ff53fb7 100644
--- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/wireless/mediatek,mt76.yaml#
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
index 556eb523606a..7d5f982a3d09 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/wireless/qcom,ath11k.yaml#
@@ -21,6 +20,7 @@ properties:
- qcom,ipq8074-wifi
- qcom,ipq6018-wifi
- qcom,wcn6750-wifi
+ - qcom,ipq5018-wifi
reg:
maxItems: 1
@@ -262,10 +262,10 @@ allOf:
examples:
- |
- q6v5_wcss: q6v5_wcss@CD00000 {
+ q6v5_wcss: remoteproc@cd00000 {
compatible = "qcom,ipq8074-wcss-pil";
- reg = <0xCD00000 0x4040>,
- <0x4AB000 0x20>;
+ reg = <0xcd00000 0x4040>,
+ <0x4ab000 0x20>;
reg-names = "qdsp6",
"rmb";
};
@@ -386,7 +386,7 @@ examples:
#address-cells = <2>;
#size-cells = <2>;
- qcn9074_0: qcn9074_0@51100000 {
+ qcn9074_0: wifi@51100000 {
no-map;
reg = <0x0 0x51100000 0x0 0x03500000>;
};
@@ -463,6 +463,6 @@ examples:
qcom,smem-states = <&wlan_smp2p_out 0>;
qcom,smem-state-names = "wlan-smp2p-out";
wifi-firmware {
- iommus = <&apps_smmu 0x1c02 0x1>;
+ iommus = <&apps_smmu 0x1c02 0x1>;
};
};
diff --git a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
index 583db5d42226..84e5659e50ef 100644
--- a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
@@ -2,7 +2,6 @@
# Copyright (c) 2020, Silicon Laboratories, Inc.
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/net/wireless/silabs,wfx.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml
index e31456730e9f..f799a1e52173 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml
@@ -90,47 +90,47 @@ examples:
// For wl12xx family:
spi1 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- wlcore1: wlcore@1 {
- compatible = "ti,wl1271";
- reg = <1>;
- spi-max-frequency = <48000000>;
- interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
- vwlan-supply = <&vwlan_fixed>;
- clock-xtal;
- ref-clock-frequency = <38400000>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wlcore1: wlcore@1 {
+ compatible = "ti,wl1271";
+ reg = <1>;
+ spi-max-frequency = <48000000>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ vwlan-supply = <&vwlan_fixed>;
+ clock-xtal;
+ ref-clock-frequency = <38400000>;
+ };
};
// For wl18xx family:
spi2 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- wlcore2: wlcore@0 {
- compatible = "ti,wl1835";
- reg = <0>;
- spi-max-frequency = <48000000>;
- interrupts = <27 IRQ_TYPE_EDGE_RISING>;
- vwlan-supply = <&vwlan_fixed>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wlcore2: wlcore@0 {
+ compatible = "ti,wl1835";
+ reg = <0>;
+ spi-max-frequency = <48000000>;
+ interrupts = <27 IRQ_TYPE_EDGE_RISING>;
+ vwlan-supply = <&vwlan_fixed>;
+ };
};
// SDIO example:
mmc3 {
- vmmc-supply = <&wlan_en_reg>;
- bus-width = <4>;
- cap-power-off-card;
- keep-power-in-suspend;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- wlcore3: wlcore@2 {
- compatible = "ti,wl1835";
- reg = <2>;
- interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
- };
+ vmmc-supply = <&wlan_en_reg>;
+ bus-width = <4>;
+ cap-power-off-card;
+ keep-power-in-suspend;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wlcore3: wlcore@2 {
+ compatible = "ti,wl1835";
+ reg = <2>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 852910c0ce30..5c3c6abb983b 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -785,6 +785,8 @@ patternProperties:
description: MaxBotix Inc.
"^maxim,.*":
description: Maxim Integrated Products
+ "^maxlinear,.*":
+ description: MaxLinear Inc.
"^mbvl,.*":
description: Mobiveil Inc.
"^mcube,.*":
@@ -855,6 +857,8 @@ patternProperties:
description: Moortec Semiconductor Ltd.
"^mosaixtech,.*":
description: Mosaix Technologies, Inc.
+ "^motorcomm,.*":
+ description: MotorComm, Inc.
"^motorola,.*":
description: Motorola, Inc.
"^moxa,.*":
diff --git a/Documentation/isdn/interface_capi.rst b/Documentation/isdn/interface_capi.rst
index fe2421444b76..4d63b34b35cf 100644
--- a/Documentation/isdn/interface_capi.rst
+++ b/Documentation/isdn/interface_capi.rst
@@ -323,7 +323,7 @@ If the lowest bit of showcapimsgs is set, kernelcapi logs controller and
application up and down events.
In addition, every registered CAPI controller has an associated traceflag
-parameter controlling how CAPI messages sent from and to tha controller are
+parameter controlling how CAPI messages sent from and to the controller are
logged. The traceflag parameter is initialized with the value of the
showcapimsgs parameter when the controller is registered, but can later be
changed via the MANUFACTURER_REQ command KCAPI_CMD_TRACE.
diff --git a/Documentation/isdn/m_isdn.rst b/Documentation/isdn/m_isdn.rst
index 9957de349e69..5847a164287e 100644
--- a/Documentation/isdn/m_isdn.rst
+++ b/Documentation/isdn/m_isdn.rst
@@ -3,7 +3,7 @@ mISDN Driver
============
mISDN is a new modular ISDN driver, in the long term it should replace
-the old I4L driver architecture for passiv ISDN cards.
+the old I4L driver architecture for passive ISDN cards.
It was designed to allow a broad range of applications and interfaces
but only have the basic function in kernel, the interface to the user
space is based on sockets with a own address family AF_ISDN.
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
new file mode 100644
index 000000000000..bbcfa2472b04
--- /dev/null
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -0,0 +1,331 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://kernel.org/schemas/netlink/genetlink-c.yaml#
+$schema: https://json-schema.org/draft-07/schema
+
+# Common defines
+$defs:
+ uint:
+ type: integer
+ minimum: 0
+ len-or-define:
+ type: [ string, integer ]
+ pattern: ^[0-9A-Za-z_]+( - 1)?$
+ minimum: 0
+
+# Schema for specs
+title: Protocol
+description: Specification of a genetlink protocol
+type: object
+required: [ name, doc, attribute-sets, operations ]
+additionalProperties: False
+properties:
+ name:
+ description: Name of the genetlink family.
+ type: string
+ doc:
+ type: string
+ version:
+ description: Generic Netlink family version. Default is 1.
+ type: integer
+ minimum: 1
+ protocol:
+ description: Schema compatibility level. Default is "genetlink".
+ enum: [ genetlink, genetlink-c ]
+ # Start genetlink-c
+ uapi-header:
+ description: Path to the uAPI header, default is linux/${family-name}.h
+ type: string
+ c-family-name:
+ description: Name of the define for the family name.
+ type: string
+ c-version-name:
+ description: Name of the define for the version of the family.
+ type: string
+ max-by-define:
+ description: Emit the number of attributes and commands as a define, not an enum value.
+ type: boolean
+ # End genetlink-c
+
+ definitions:
+ description: List of type and constant definitions (enums, flags, defines).
+ type: array
+ items:
+ type: object
+ required: [ type, name ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ header:
+ description: For C-compatible languages, header which already defines this value.
+ type: string
+ type:
+ enum: [ const, enum, flags ]
+ doc:
+ type: string
+ # For const
+ value:
+ description: For const - the value.
+ type: [ string, integer ]
+ # For enum and flags
+ value-start:
+ description: For enum or flags the literal initializer for the first value.
+ type: [ string, integer ]
+ entries:
+ description: For enum or flags array of values.
+ type: array
+ items:
+ oneOf:
+ - type: string
+ - type: object
+ required: [ name ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ value:
+ type: integer
+ doc:
+ type: string
+ render-max:
+ description: Render the max members for this enum.
+ type: boolean
+ # Start genetlink-c
+ enum-name:
+ description: Name for enum, if empty no name will be used.
+ type: [ string, "null" ]
+ name-prefix:
+ description: For enum the prefix of the values, optional.
+ type: string
+ # End genetlink-c
+
+ attribute-sets:
+ description: Definition of attribute spaces for this family.
+ type: array
+ items:
+ description: Definition of a single attribute space.
+ type: object
+ required: [ name, attributes ]
+ additionalProperties: False
+ properties:
+ name:
+ description: |
+ Name used when referring to this space in other definitions, not used outside of the spec.
+ type: string
+ name-prefix:
+ description: |
+ Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
+ type: string
+ enum-name:
+ description: Name for the enum type of the attribute.
+ type: string
+ doc:
+ description: Documentation of the space.
+ type: string
+ subset-of:
+ description: |
+ Name of another space which this is a logical part of. Sub-spaces can be used to define
+ a limited group of attributes which are used in a nest.
+ type: string
+ # Start genetlink-c
+ attr-cnt-name:
+ description: The explicit name for the constant holding the count of attributes (last attr + 1).
+ type: string
+ attr-max-name:
+ description: The explicit name for the last member of the attribute enum.
+ type: string
+ # End genetlink-c
+ attributes:
+ description: List of attributes in the space.
+ type: array
+ items:
+ type: object
+ required: [ name, type ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ type: &attr-type
+ enum: [ unused, pad, flag, binary, u8, u16, u32, u64, s32, s64,
+ string, nest, array-nest, nest-type-value ]
+ doc:
+ description: Documentation of the attribute.
+ type: string
+ value:
+ description: Value for the enum item representing this attribute in the uAPI.
+ $ref: '#/$defs/uint'
+ type-value:
+ description: Name of the value extracted from the type of a nest-type-value attribute.
+ type: array
+ items:
+ type: string
+ byte-order:
+ enum: [ little-endian, big-endian ]
+ multi-attr:
+ type: boolean
+ nested-attributes:
+ description: Name of the space (sub-space) used inside the attribute.
+ type: string
+ enum:
+ description: Name of the enum type used for the attribute.
+ type: string
+ enum-as-flags:
+ description: |
+ Treat the enum as flags. In most cases enum is either used as flags or as values.
+ Sometimes, however, both forms are necessary, in which case header contains the enum
+ form while specific attributes may request to convert the values into a bitfield.
+ type: boolean
+ checks:
+ description: Kernel input validation.
+ type: object
+ additionalProperties: False
+ properties:
+ flags-mask:
+ description: Name of the flags constant on which to base mask (unsigned scalar types only).
+ type: string
+ min:
+ description: Min value for an integer attribute.
+ type: integer
+ min-len:
+ description: Min length for a binary attribute.
+ $ref: '#/$defs/len-or-define'
+ max-len:
+ description: Max length for a string or a binary attribute.
+ $ref: '#/$defs/len-or-define'
+ sub-type: *attr-type
+
+ # Make sure name-prefix does not appear in subsets (subsets inherit naming)
+ dependencies:
+ name-prefix:
+ not:
+ required: [ subset-of ]
+ subset-of:
+ not:
+ required: [ name-prefix ]
+
+ operations:
+ description: Operations supported by the protocol.
+ type: object
+ required: [ list ]
+ additionalProperties: False
+ properties:
+ enum-model:
+ description: |
+ The model of assigning values to the operations.
+ "unified" is the recommended model where all message types belong
+ to a single enum.
+ "directional" has the messages sent to the kernel and from the kernel
+ enumerated separately.
+ enum: [ unified ]
+ name-prefix:
+ description: |
+ Prefix for the C enum name of the command. The name is formed by concatenating
+ the prefix with the upper case name of the command, with dashes replaced by underscores.
+ type: string
+ enum-name:
+ description: Name for the enum type with commands.
+ type: string
+ async-prefix:
+ description: Same as name-prefix but used to render notifications and events to separate enum.
+ type: string
+ async-enum:
+ description: Name for the enum type with notifications/events.
+ type: string
+ list:
+ description: List of commands
+ type: array
+ items:
+ type: object
+ additionalProperties: False
+ required: [ name, doc ]
+ properties:
+ name:
+ description: Name of the operation, also defining its C enum value in uAPI.
+ type: string
+ doc:
+ description: Documentation for the command.
+ type: string
+ value:
+ description: Value for the enum in the uAPI.
+ $ref: '#/$defs/uint'
+ attribute-set:
+ description: |
+ Attribute space from which attributes directly in the requests and replies
+ to this command are defined.
+ type: string
+ flags: &cmd_flags
+ description: Command flags.
+ type: array
+ items:
+ enum: [ admin-perm ]
+ dont-validate:
+ description: Kernel attribute validation flags.
+ type: array
+ items:
+ enum: [ strict, dump ]
+ do: &subop-type
+ description: Main command handler.
+ type: object
+ additionalProperties: False
+ properties:
+ request: &subop-attr-list
+ description: Definition of the request message for a given command.
+ type: object
+ additionalProperties: False
+ properties:
+ attributes:
+ description: |
+ Names of attributes from the attribute-set (not full attribute
+ definitions, just names).
+ type: array
+ items:
+ type: string
+ reply: *subop-attr-list
+ pre:
+ description: Hook for a function to run before the main callback (pre_doit or start).
+ type: string
+ post:
+ description: Hook for a function to run after the main callback (post_doit or done).
+ type: string
+ dump: *subop-type
+ notify:
+ description: Name of the command sharing the reply type with this notification.
+ type: string
+ event:
+ type: object
+ additionalProperties: False
+ properties:
+ attributes:
+ description: Explicit list of the attributes for the notification.
+ type: array
+ items:
+ type: string
+ mcgrp:
+ description: Name of the multicast group generating given notification.
+ type: string
+ mcast-groups:
+ description: List of multicast groups.
+ type: object
+ required: [ list ]
+ additionalProperties: False
+ properties:
+ list:
+ description: List of groups.
+ type: array
+ items:
+ type: object
+ required: [ name ]
+ additionalProperties: False
+ properties:
+ name:
+ description: |
+ The name for the group, used to form the define and the value of the define.
+ type: string
+ # Start genetlink-c
+ c-define-name:
+ description: Override for the name of the define in C uAPI.
+ type: string
+ # End genetlink-c
+ flags: *cmd_flags
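To make the C-specific knobs concrete, a hedged spec fragment that the
genetlink-c schema above would accept; the family and define names are made up,
and the mandatory doc/attribute-sets/operations sections are elided:

    name: dummy
    protocol: genetlink-c
    uapi-header: linux/dummy.h         # override linux/${family-name}.h
    c-family-name: dummy-genl-name     # define carrying the family name string
    c-version-name: dummy-genl-version # define carrying the family version
    max-by-define: true                # counts become defines, not enum values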
diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml
new file mode 100644
index 000000000000..5642925c4ceb
--- /dev/null
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@ -0,0 +1,361 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#
+$schema: https://json-schema.org/draft-07/schema
+
+# Common defines
+$defs:
+ uint:
+ type: integer
+ minimum: 0
+ len-or-define:
+ type: [ string, integer ]
+ pattern: ^[0-9A-Za-z_]+( - 1)?$
+ minimum: 0
+
+# Schema for specs
+title: Protocol
+description: Specification of a genetlink protocol
+type: object
+required: [ name, doc, attribute-sets, operations ]
+additionalProperties: False
+properties:
+ name:
+ description: Name of the genetlink family.
+ type: string
+ doc:
+ type: string
+ version:
+ description: Generic Netlink family version. Default is 1.
+ type: integer
+ minimum: 1
+ protocol:
+ description: Schema compatibility level. Default is "genetlink".
+ enum: [ genetlink, genetlink-c, genetlink-legacy ] # Trim
+ # Start genetlink-c
+ uapi-header:
+ description: Path to the uAPI header, default is linux/${family-name}.h
+ type: string
+ c-family-name:
+ description: Name of the define for the family name.
+ type: string
+ c-version-name:
+ description: Name of the define for the version of the family.
+ type: string
+ max-by-define:
+ description: Emit the number of attributes and commands as a define, not an enum value.
+ type: boolean
+ # End genetlink-c
+ # Start genetlink-legacy
+ kernel-policy:
+ description: |
+ Defines if the input policy in the kernel is global, per-operation, or split per operation type.
+ Default is split.
+ enum: [ split, per-op, global ]
+ # End genetlink-legacy
+
+ definitions:
+ description: List of type and constant definitions (enums, flags, defines).
+ type: array
+ items:
+ type: object
+ required: [ type, name ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ header:
+ description: For C-compatible languages, header which already defines this value.
+ type: string
+ type:
+ enum: [ const, enum, flags, struct ] # Trim
+ doc:
+ type: string
+ # For const
+ value:
+ description: For const - the value.
+ type: [ string, integer ]
+ # For enum and flags
+ value-start:
+ description: For enum or flags the literal initializer for the first value.
+ type: [ string, integer ]
+ entries:
+ description: For enum or flags array of values.
+ type: array
+ items:
+ oneOf:
+ - type: string
+ - type: object
+ required: [ name ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ value:
+ type: integer
+ doc:
+ type: string
+ render-max:
+ description: Render the max members for this enum.
+ type: boolean
+ # Start genetlink-c
+ enum-name:
+ description: Name for enum, if empty no name will be used.
+ type: [ string, "null" ]
+ name-prefix:
+ description: For enum the prefix of the values, optional.
+ type: string
+ # End genetlink-c
+ # Start genetlink-legacy
+ members:
+ description: List of struct members. Only scalar and string members are allowed.
+ type: array
+ items:
+ type: object
+ required: [ name, type ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ type:
+ enum: [ u8, u16, u32, u64, s8, s16, s32, s64, string ]
+ len:
+ $ref: '#/$defs/len-or-define'
+ # End genetlink-legacy
+
+ attribute-sets:
+ description: Definition of attribute spaces for this family.
+ type: array
+ items:
+ description: Definition of a single attribute space.
+ type: object
+ required: [ name, attributes ]
+ additionalProperties: False
+ properties:
+ name:
+ description: |
+ Name used when referring to this space in other definitions, not used outside of the spec.
+ type: string
+ name-prefix:
+ description: |
+ Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
+ type: string
+ enum-name:
+ description: Name for the enum type of the attribute.
+ type: string
+ doc:
+ description: Documentation of the space.
+ type: string
+ subset-of:
+ description: |
+ Name of another space which this is a logical part of. Sub-spaces can be used to define
+ a limited group of attributes which are used in a nest.
+ type: string
+ # Start genetlink-c
+ attr-cnt-name:
+ description: The explicit name for the constant holding the count of attributes (last attr + 1).
+ type: string
+ attr-max-name:
+ description: The explicit name for the last member of the attribute enum.
+ type: string
+ # End genetlink-c
+ attributes:
+ description: List of attributes in the space.
+ type: array
+ items:
+ type: object
+ required: [ name, type ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ type: &attr-type
+ enum: [ unused, pad, flag, binary, u8, u16, u32, u64, s32, s64,
+ string, nest, array-nest, nest-type-value ]
+ doc:
+ description: Documentation of the attribute.
+ type: string
+ value:
+ description: Value for the enum item representing this attribute in the uAPI.
+ $ref: '#/$defs/uint'
+ type-value:
+ description: Name of the value extracted from the type of a nest-type-value attribute.
+ type: array
+ items:
+ type: string
+ byte-order:
+ enum: [ little-endian, big-endian ]
+ multi-attr:
+ type: boolean
+ nested-attributes:
+ description: Name of the space (sub-space) used inside the attribute.
+ type: string
+ enum:
+ description: Name of the enum type used for the attribute.
+ type: string
+ enum-as-flags:
+ description: |
+ Treat the enum as flags. In most cases enum is either used as flags or as values.
+ Sometimes, however, both forms are necessary, in which case header contains the enum
+ form while specific attributes may request to convert the values into a bitfield.
+ type: boolean
+ checks:
+ description: Kernel input validation.
+ type: object
+ additionalProperties: False
+ properties:
+ flags-mask:
+ description: Name of the flags constant on which to base mask (unsigned scalar types only).
+ type: string
+ min:
+ description: Min value for an integer attribute.
+ type: integer
+ min-len:
+ description: Min length for a binary attribute.
+ $ref: '#/$defs/len-or-define'
+ max-len:
+ description: Max length for a string or a binary attribute.
+ $ref: '#/$defs/len-or-define'
+ sub-type: *attr-type
+
+ # Make sure name-prefix does not appear in subsets (subsets inherit naming)
+ dependencies:
+ name-prefix:
+ not:
+ required: [ subset-of ]
+ subset-of:
+ not:
+ required: [ name-prefix ]
+
+ operations:
+ description: Operations supported by the protocol.
+ type: object
+ required: [ list ]
+ additionalProperties: False
+ properties:
+ enum-model:
+ description: |
+ The model of assigning values to the operations.
+ "unified" is the recommended model where all message types belong
+ to a single enum.
+ "directional" has the messages sent to the kernel and from the kernel
+ enumerated separately.
+ enum: [ unified, directional ] # Trim
+ name-prefix:
+ description: |
+ Prefix for the C enum name of the command. The name is formed by concatenating
+ the prefix with the upper case name of the command, with dashes replaced by underscores.
+ type: string
+ enum-name:
+ description: Name for the enum type with commands.
+ type: string
+ async-prefix:
+ description: Same as name-prefix but used to render notifications and events to separate enum.
+ type: string
+ async-enum:
+ description: Name for the enum type with notifications/events.
+ type: string
+ list:
+ description: List of commands
+ type: array
+ items:
+ type: object
+ additionalProperties: False
+ required: [ name, doc ]
+ properties:
+ name:
+ description: Name of the operation, also defining its C enum value in uAPI.
+ type: string
+ doc:
+ description: Documentation for the command.
+ type: string
+ value:
+ description: Value for the enum in the uAPI.
+ $ref: '#/$defs/uint'
+ attribute-set:
+ description: |
+ Attribute space from which attributes directly in the requests and replies
+ to this command are defined.
+ type: string
+ flags: &cmd_flags
+ description: Command flags.
+ type: array
+ items:
+ enum: [ admin-perm ]
+ dont-validate:
+ description: Kernel attribute validation flags.
+ type: array
+ items:
+ enum: [ strict, dump ]
+ do: &subop-type
+ description: Main command handler.
+ type: object
+ additionalProperties: False
+ properties:
+ request: &subop-attr-list
+ description: Definition of the request message for a given command.
+ type: object
+ additionalProperties: False
+ properties:
+ attributes:
+ description: |
+ Names of attributes from the attribute-set (not full attribute
+ definitions, just names).
+ type: array
+ items:
+ type: string
+ # Start genetlink-legacy
+ value:
+ description: |
+ ID of this message if the values for request and response differ,
+ i.e. requests and responses have different message enums.
+ $ref: '#/$defs/uint'
+ # End genetlink-legacy
+ reply: *subop-attr-list
+ pre:
+ description: Hook for a function to run before the main callback (pre_doit or start).
+ type: string
+ post:
+ description: Hook for a function to run after the main callback (post_doit or done).
+ type: string
+ dump: *subop-type
+ notify:
+ description: Name of the command sharing the reply type with this notification.
+ type: string
+ event:
+ type: object
+ additionalProperties: False
+ properties:
+ attributes:
+ description: Explicit list of the attributes for the notification.
+ type: array
+ items:
+ type: string
+ mcgrp:
+ description: Name of the multicast group generating given notification.
+ type: string
+ mcast-groups:
+ description: List of multicast groups.
+ type: object
+ required: [ list ]
+ additionalProperties: False
+ properties:
+ list:
+ description: List of groups.
+ type: array
+ items:
+ type: object
+ required: [ name ]
+ additionalProperties: False
+ properties:
+ name:
+ description: |
+ The name for the group, used to form the define and the value of the define.
+ type: string
+ # Start genetlink-c
+ c-define-name:
+ description: Override for the name of the define in C uAPI.
+ type: string
+ # End genetlink-c
+ flags: *cmd_flags
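Relative to genetlink-c, the legacy level mainly adds struct definitions, the
kernel-policy selector, and fixed per-direction message IDs. A fragment
exercising the struct syntax, with invented names, that the members schema
above should accept:

    protocol: genetlink-legacy
    kernel-policy: global        # one input policy shared by all operations
    definitions:
      -
        name: port-info
        type: struct             # struct only exists at this schema level
        members:
          -
            name: port-id
            type: u16
          -
            name: label
            type: string
            len: 16              # fixed-size string member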
diff --git a/Documentation/netlink/genetlink.yaml b/Documentation/netlink/genetlink.yaml
new file mode 100644
index 000000000000..62a922755ce2
--- /dev/null
+++ b/Documentation/netlink/genetlink.yaml
@@ -0,0 +1,296 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://kernel.org/schemas/netlink/genetlink.yaml#
+$schema: https://json-schema.org/draft-07/schema
+
+# Common defines
+$defs:
+ uint:
+ type: integer
+ minimum: 0
+ len-or-define:
+ type: [ string, integer ]
+ pattern: ^[0-9A-Za-z_]+( - 1)?$
+ minimum: 0
+
+# Schema for specs
+title: Protocol
+description: Specification of a genetlink protocol
+type: object
+required: [ name, doc, attribute-sets, operations ]
+additionalProperties: False
+properties:
+ name:
+ description: Name of the genetlink family.
+ type: string
+ doc:
+ type: string
+ version:
+ description: Generic Netlink family version. Default is 1.
+ type: integer
+ minimum: 1
+ protocol:
+ description: Schema compatibility level. Default is "genetlink".
+ enum: [ genetlink ]
+
+ definitions:
+ description: List of type and constant definitions (enums, flags, defines).
+ type: array
+ items:
+ type: object
+ required: [ type, name ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ header:
+ description: For C-compatible languages, header which already defines this value.
+ type: string
+ type:
+ enum: [ const, enum, flags ]
+ doc:
+ type: string
+ # For const
+ value:
+ description: For const - the value.
+ type: [ string, integer ]
+ # For enum and flags
+ value-start:
+ description: For enum or flags the literal initializer for the first value.
+ type: [ string, integer ]
+ entries:
+ description: For enum or flags array of values.
+ type: array
+ items:
+ oneOf:
+ - type: string
+ - type: object
+ required: [ name ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ value:
+ type: integer
+ doc:
+ type: string
+ render-max:
+ description: Render the max members for this enum.
+ type: boolean
+
+ attribute-sets:
+ description: Definition of attribute spaces for this family.
+ type: array
+ items:
+ description: Definition of a single attribute space.
+ type: object
+ required: [ name, attributes ]
+ additionalProperties: False
+ properties:
+ name:
+ description: |
+ Name used when referring to this space in other definitions, not used outside of the spec.
+ type: string
+ name-prefix:
+ description: |
+ Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
+ type: string
+ enum-name:
+ description: Name for the enum type of the attribute.
+ type: string
+ doc:
+ description: Documentation of the space.
+ type: string
+ subset-of:
+ description: |
+ Name of another space which this is a logical part of. Sub-spaces can be used to define
+ a limited group of attributes which are used in a nest.
+ type: string
+ attributes:
+ description: List of attributes in the space.
+ type: array
+ items:
+ type: object
+ required: [ name, type ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ type: &attr-type
+ enum: [ unused, pad, flag, binary, u8, u16, u32, u64, s32, s64,
+ string, nest, array-nest, nest-type-value ]
+ doc:
+ description: Documentation of the attribute.
+ type: string
+ value:
+ description: Value for the enum item representing this attribute in the uAPI.
+ $ref: '#/$defs/uint'
+ type-value:
+ description: Name of the value extracted from the type of a nest-type-value attribute.
+ type: array
+ items:
+ type: string
+ byte-order:
+ enum: [ little-endian, big-endian ]
+ multi-attr:
+ type: boolean
+ nested-attributes:
+ description: Name of the space (sub-space) used inside the attribute.
+ type: string
+ enum:
+ description: Name of the enum type used for the attribute.
+ type: string
+ enum-as-flags:
+ description: |
+ Treat the enum as flags. In most cases enum is either used as flags or as values.
+ Sometimes, however, both forms are necessary, in which case header contains the enum
+ form while specific attributes may request to convert the values into a bitfield.
+ type: boolean
+ checks:
+ description: Kernel input validation.
+ type: object
+ additionalProperties: False
+ properties:
+ flags-mask:
+ description: Name of the flags constant on which to base mask (unsigned scalar types only).
+ type: string
+ min:
+ description: Min value for an integer attribute.
+ type: integer
+ min-len:
+ description: Min length for a binary attribute.
+ $ref: '#/$defs/len-or-define'
+ max-len:
+ description: Max length for a string or a binary attribute.
+ $ref: '#/$defs/len-or-define'
+ sub-type: *attr-type
+
+ # Make sure name-prefix does not appear in subsets (subsets inherit naming)
+ dependencies:
+ name-prefix:
+ not:
+ required: [ subset-of ]
+ subset-of:
+ not:
+ required: [ name-prefix ]
+
+ operations:
+ description: Operations supported by the protocol.
+ type: object
+ required: [ list ]
+ additionalProperties: False
+ properties:
+ enum-model:
+ description: |
+ The model of assigning values to the operations.
+ "unified" is the recommended model where all message types belong
+ to a single enum.
+ "directional" has the messages sent to the kernel and from the kernel
+ enumerated separately.
+ enum: [ unified ]
+ name-prefix:
+ description: |
+ Prefix for the C enum name of the command. The name is formed by concatenating
+ the prefix with the upper case name of the command, with dashes replaced by underscores.
+ type: string
+ enum-name:
+ description: Name for the enum type with commands.
+ type: string
+ async-prefix:
+ description: Same as name-prefix but used to render notifications and events to separate enum.
+ type: string
+ async-enum:
+ description: Name for the enum type with notifications/events.
+ type: string
+ list:
+ description: List of commands
+ type: array
+ items:
+ type: object
+ additionalProperties: False
+ required: [ name, doc ]
+ properties:
+ name:
+ description: Name of the operation, also defining its C enum value in uAPI.
+ type: string
+ doc:
+ description: Documentation for the command.
+ type: string
+ value:
+ description: Value for the enum in the uAPI.
+ $ref: '#/$defs/uint'
+ attribute-set:
+ description: |
+ Attribute space from which attributes directly in the requests and replies
+ to this command are defined.
+ type: string
+ flags: &cmd_flags
+ description: Command flags.
+ type: array
+ items:
+ enum: [ admin-perm ]
+ dont-validate:
+ description: Kernel attribute validation flags.
+ type: array
+ items:
+ enum: [ strict, dump ]
+ do: &subop-type
+ description: Main command handler.
+ type: object
+ additionalProperties: False
+ properties:
+ request: &subop-attr-list
+ description: Definition of the request message for a given command.
+ type: object
+ additionalProperties: False
+ properties:
+ attributes:
+ description: |
+ Names of attributes from the attribute-set (not full attribute
+ definitions, just names).
+ type: array
+ items:
+ type: string
+ reply: *subop-attr-list
+ pre:
+ description: Hook for a function to run before the main callback (pre_doit or start).
+ type: string
+ post:
+ description: Hook for a function to run after the main callback (post_doit or done).
+ type: string
+ dump: *subop-type
+ notify:
+ description: Name of the command sharing the reply type with this notification.
+ type: string
+ event:
+ type: object
+ additionalProperties: False
+ properties:
+ attributes:
+ description: Explicit list of the attributes for the notification.
+ type: array
+ items:
+ type: string
+ mcgrp:
+ description: Name of the multicast group generating given notification.
+ type: string
+ mcast-groups:
+ description: List of multicast groups.
+ type: object
+ required: [ list ]
+ additionalProperties: False
+ properties:
+ list:
+ description: List of groups.
+ type: array
+ items:
+ type: object
+ required: [ name ]
+ additionalProperties: False
+ properties:
+ name:
+ description: |
+ The name for the group, used to form the define and the value of the define.
+ type: string
+ flags: *cmd_flags
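Putting the trimmed base schema to work, a minimal spec that should satisfy
every required clause above; the family, attribute, and operation names are
hypothetical:

    name: sample
    doc: Minimal illustrative family (hypothetical).
    attribute-sets:
      -
        name: main
        attributes:
          -
            name: id
            type: u32
    operations:
      list:
        -
          name: get
          doc: Retrieve the id.
          attribute-set: main
          do:
            request:
              attributes: [ id ]
            reply:
              attributes: [ id ]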
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
new file mode 100644
index 000000000000..08b776908d15
--- /dev/null
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -0,0 +1,397 @@
+name: ethtool
+
+protocol: genetlink-legacy
+
+doc: Partial family for Ethtool Netlink.
+
+attribute-sets:
+ -
+ name: header
+ attributes:
+ -
+ name: dev-index
+ type: u32
+ value: 1
+ -
+ name: dev-name
+ type: string
+ -
+ name: flags
+ type: u32
+
+ -
+ name: bitset-bit
+ attributes:
+ -
+ name: index
+ type: u32
+ value: 1
+ -
+ name: name
+ type: string
+ -
+ name: value
+ type: flag
+ -
+ name: bitset-bits
+ attributes:
+ -
+ name: bit
+ type: nest
+ nested-attributes: bitset-bit
+ value: 1
+ -
+ name: bitset
+ attributes:
+ -
+ name: nomask
+ type: flag
+ value: 1
+ -
+ name: size
+ type: u32
+ -
+ name: bits
+ type: nest
+ nested-attributes: bitset-bits
+
+ -
+ name: string
+ attributes:
+ -
+ name: index
+ type: u32
+ value: 1
+ -
+ name: value
+ type: string
+ -
+ name: strings
+ attributes:
+ -
+ name: string
+ type: nest
+ value: 1
+ multi-attr: true
+ nested-attributes: string
+ -
+ name: stringset
+ attributes:
+ -
+ name: id
+ type: u32
+ value: 1
+ -
+ name: count
+ type: u32
+ -
+ name: strings
+ type: nest
+ multi-attr: true
+ nested-attributes: strings
+ -
+ name: stringsets
+ attributes:
+ -
+ name: stringset
+ type: nest
+ multi-attr: true
+ value: 1
+ nested-attributes: stringset
+ -
+ name: strset
+ attributes:
+ -
+ name: header
+ value: 1
+ type: nest
+ nested-attributes: header
+ -
+ name: stringsets
+ type: nest
+ nested-attributes: stringsets
+ -
+ name: counts-only
+ type: flag
+
+ -
+ name: privflags
+ attributes:
+ -
+ name: header
+ value: 1
+ type: nest
+ nested-attributes: header
+ -
+ name: flags
+ type: nest
+ nested-attributes: bitset
+
+ -
+ name: rings
+ attributes:
+ -
+ name: header
+ value: 1
+ type: nest
+ nested-attributes: header
+ -
+ name: rx-max
+ type: u32
+ -
+ name: rx-mini-max
+ type: u32
+ -
+ name: rx-jumbo-max
+ type: u32
+ -
+ name: tx-max
+ type: u32
+ -
+ name: rx
+ type: u32
+ -
+ name: rx-mini
+ type: u32
+ -
+ name: rx-jumbo
+ type: u32
+ -
+ name: tx
+ type: u32
+ -
+ name: rx-buf-len
+ type: u32
+ -
+ name: tcp-data-split
+ type: u8
+ -
+ name: cqe-size
+ type: u32
+ -
+ name: tx-push
+ type: u8
+ -
+ name: rx-push
+ type: u8
+
+ -
+ name: mm-stat
+ attributes:
+ -
+ name: pad
+ value: 1
+ type: pad
+ -
+ name: reassembly-errors
+ type: u64
+ -
+ name: smd-errors
+ type: u64
+ -
+ name: reassembly-ok
+ type: u64
+ -
+ name: rx-frag-count
+ type: u64
+ -
+ name: tx-frag-count
+ type: u64
+ -
+ name: hold-count
+ type: u64
+ -
+ name: mm
+ attributes:
+ -
+ name: header
+ value: 1
+ type: nest
+ nested-attributes: header
+ -
+ name: pmac-enabled
+ type: u8
+ -
+ name: tx-enabled
+ type: u8
+ -
+ name: tx-active
+ type: u8
+ -
+ name: tx-min-frag-size
+ type: u32
+ -
+ name: rx-min-frag-size
+ type: u32
+ -
+ name: verify-enabled
+ type: u8
+ -
+ name: verify-status
+ type: u8
+ -
+ name: verify-time
+ type: u32
+ -
+ name: max-verify-time
+ type: u32
+ -
+ name: stats
+ type: nest
+ nested-attributes: mm-stat
+
+operations:
+ enum-model: directional
+ list:
+ -
+ name: strset-get
+ doc: Get string set from the kernel.
+
+ attribute-set: strset
+
+ do: &strset-get-op
+ request:
+ value: 1
+ attributes:
+ - header
+ - stringsets
+ - counts-only
+ reply:
+ value: 1
+ attributes:
+ - header
+ - stringsets
+ dump: *strset-get-op
+
+ # TODO: fill in the requests in between
+
+ -
+ name: privflags-get
+ doc: Get device private flags.
+
+ attribute-set: privflags
+
+ do: &privflag-get-op
+ request:
+ value: 13
+ attributes:
+ - header
+ reply:
+ value: 14
+ attributes:
+ - header
+ - flags
+ dump: *privflag-get-op
+ -
+ name: privflags-set
+ doc: Set device private flags.
+
+ attribute-set: privflags
+
+ do:
+ request:
+ attributes:
+ - header
+ - flags
+ -
+ name: privflags-ntf
+ doc: Notification for change in device private flags.
+ notify: privflags-get
+
+ -
+ name: rings-get
+ doc: Get ring params.
+
+ attribute-set: rings
+
+ do: &ring-get-op
+ request:
+ attributes:
+ - header
+ reply:
+ attributes:
+ - header
+ - rx-max
+ - rx-mini-max
+ - rx-jumbo-max
+ - tx-max
+ - rx
+ - rx-mini
+ - rx-jumbo
+ - tx
+ - rx-buf-len
+ - tcp-data-split
+ - cqe-size
+ - tx-push
+ - rx-push
+ dump: *ring-get-op
+ -
+ name: rings-set
+ doc: Set ring params.
+
+ attribute-set: rings
+
+ do:
+ request:
+ attributes:
+ - header
+ - rx
+ - rx-mini
+ - rx-jumbo
+ - tx
+ - rx-buf-len
+ - tcp-data-split
+ - cqe-size
+ - tx-push
+ - rx-push
+ -
+ name: rings-ntf
+ doc: Notification for change in ring params.
+ notify: rings-get
+
+ # TODO: fill in the requests in between
+
+ -
+ name: mm-get
+ doc: Get MAC Merge configuration and state
+
+ attribute-set: mm
+
+ do: &mm-get-op
+ request:
+ value: 42
+ attributes:
+ - header
+ reply:
+ value: 42
+ attributes:
+ - header
+ - pmac-enabled
+ - tx-enabled
+ - tx-active
+ - tx-min-frag-size
+ - rx-min-frag-size
+ - verify-enabled
+ - verify-time
+ - max-verify-time
+ - stats
+ dump: *mm-get-op
+ -
+ name: mm-set
+ doc: Set MAC Merge configuration
+
+ attribute-set: mm
+
+ do:
+ request:
+ attributes:
+ - header
+ - verify-enabled
+ - verify-time
+ - tx-enabled
+ - pmac-enabled
+ - tx-min-frag-size
+ -
+ name: mm-ntf
+ doc: Notification for change in MAC Merge configuration.
+ notify: mm-get
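Two idioms in the spec above deserve a note: a YAML anchor on "do:" lets
"dump:" reuse the same request/reply layout verbatim, and explicit "value:"
fields pin message IDs so the enums stay correct across the stretches still
marked TODO. A sketch with an invented operation (the IDs 20/21 are arbitrary):

    -
      name: example-get          # hypothetical operation
      doc: Illustrative get operation.
      attribute-set: header
      do: &example-get-op
        request:
          value: 20              # pinned because earlier ops are not listed
          attributes:
            - header
        reply:
          value: 21
          attributes:
            - header
      dump: *example-get-op      # dump shares the exact same layout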
diff --git a/Documentation/netlink/specs/fou.yaml b/Documentation/netlink/specs/fou.yaml
new file mode 100644
index 000000000000..266c386eedf3
--- /dev/null
+++ b/Documentation/netlink/specs/fou.yaml
@@ -0,0 +1,128 @@
+name: fou
+
+protocol: genetlink-legacy
+
+doc: |
+ Foo-over-UDP.
+
+c-family-name: fou-genl-name
+c-version-name: fou-genl-version
+max-by-define: true
+kernel-policy: global
+
+definitions:
+ -
+ type: enum
+ name: encap_type
+ name-prefix: fou-encap-
+ enum-name:
+ entries: [ unspec, direct, gue ]
+
+attribute-sets:
+ -
+ name: fou
+ name-prefix: fou-attr-
+ attributes:
+ -
+ name: unspec
+ type: unused
+ -
+ name: port
+ type: u16
+ byte-order: big-endian
+ -
+ name: af
+ type: u8
+ -
+ name: ipproto
+ type: u8
+ -
+ name: type
+ type: u8
+ -
+ name: remcsum_nopartial
+ type: flag
+ -
+ name: local_v4
+ type: u32
+ -
+ name: local_v6
+ type: binary
+ checks:
+ min-len: 16
+ -
+ name: peer_v4
+ type: u32
+ -
+ name: peer_v6
+ type: binary
+ checks:
+ min-len: 16
+ -
+ name: peer_port
+ type: u16
+ byte-order: big-endian
+ -
+ name: ifindex
+ type: s32
+
+operations:
+ list:
+ -
+ name: unspec
+ doc: unused
+
+ -
+ name: add
+ doc: Add port.
+ attribute-set: fou
+
+ dont-validate: [ strict, dump ]
+ flags: [ admin-perm ]
+
+ do:
+ request: &all_attrs
+ attributes:
+ - port
+ - ipproto
+ - type
+ - remcsum_nopartial
+ - local_v4
+ - peer_v4
+ - local_v6
+ - peer_v6
+ - peer_port
+ - ifindex
+
+ -
+ name: del
+ doc: Delete port.
+ attribute-set: fou
+
+ dont-validate: [ strict, dump ]
+ flags: [ admin-perm ]
+
+ do:
+ request: &select_attrs
+ attributes:
+ - af
+ - ifindex
+ - port
+ - peer_port
+ - local_v4
+ - peer_v4
+ - local_v6
+ - peer_v6
+
+ -
+ name: get
+ doc: Get tunnel info.
+ attribute-set: fou
+ dont-validate: [ strict, dump ]
+
+ do:
+ request: *select_attrs
+ reply: *all_attrs
+
+ dump:
+ reply: *all_attrs
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
new file mode 100644
index 000000000000..b4dcdae54ffd
--- /dev/null
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -0,0 +1,100 @@
+name: netdev
+
+doc:
+ netdev configuration over generic netlink.
+
+definitions:
+ -
+ type: flags
+ name: xdp-act
+ entries:
+ -
+ name: basic
+ doc:
+ XDP features set supported by all drivers
+ (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ -
+ name: redirect
+ doc:
+ The netdev supports XDP_REDIRECT
+ -
+ name: ndo-xmit
+ doc:
+ The netdev implements the ndo_xdp_xmit callback.
+ -
+ name: xsk-zerocopy
+ doc:
+ The netdev supports AF_XDP in zero copy mode.
+ -
+ name: hw-offload
+ doc:
+ The netdev supports XDP hw offloading.
+ -
+ name: rx-sg
+ doc:
+ The netdev implements non-linear XDP buffer
+ support in the driver napi callback.
+ -
+ name: ndo-xmit-sg
+ doc:
+ The netdev implements non-linear XDP buffer
+ support in the ndo_xdp_xmit callback.
+
+attribute-sets:
+ -
+ name: dev
+ attributes:
+ -
+ name: ifindex
+ doc: netdev ifindex
+ type: u32
+ value: 1
+ checks:
+ min: 1
+ -
+ name: pad
+ type: pad
+ -
+ name: xdp-features
+ doc: Bitmask of enabled xdp-features.
+ type: u64
+ enum: xdp-act
+ enum-as-flags: true
+
+operations:
+ list:
+ -
+ name: dev-get
+ doc: Get / dump information about a netdev.
+ value: 1
+ attribute-set: dev
+ do:
+ request:
+ attributes:
+ - ifindex
+ reply: &dev-all
+ attributes:
+ - ifindex
+ - xdp-features
+ dump:
+ reply: *dev-all
+ -
+ name: dev-add-ntf
+ doc: Notification about device appearing.
+ notify: dev-get
+ mcgrp: mgmt
+ -
+ name: dev-del-ntf
+ doc: Notification about device disappearing.
+ notify: dev-get
+ mcgrp: mgmt
+ -
+ name: dev-change-ntf
+ doc: Notification about device configuration being changed.
+ notify: dev-get
+ mcgrp: mgmt
+
+mcast-groups:
+ list:
+ -
+ name: mgmt
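Since xdp-act is declared with "type: flags" and the xdp-features attribute
sets "enum-as-flags: true", each entry should occupy one bit of the u64 mask.
The implied bit assignment, inferred from the entry order rather than stated
in the spec:

    # inferred xdp-act bit values (illustrative)
    # basic        -> 1 << 0  (0x01)
    # redirect     -> 1 << 1  (0x02)
    # ndo-xmit     -> 1 << 2  (0x04)
    # xsk-zerocopy -> 1 << 3  (0x08)
    # hw-offload   -> 1 << 4  (0x10)
    # rx-sg        -> 1 << 5  (0x20)
    # ndo-xmit-sg  -> 1 << 6  (0x40)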
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 60b217b436be..247c6c4127e9 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -419,7 +419,7 @@ XDP_UMEM_REG setsockopt
-----------------------
This setsockopt registers a UMEM to a socket. This is the area that
-contain all the buffers that packet can recide in. The call takes a
+contain all the buffers that packet can reside in. The call takes a
pointer to the beginning of this area and the size of it. Moreover, it
also has parameter called chunk_size that is the size that the UMEM is
divided into. It can only be 2K or 4K at the moment. If you have an
@@ -592,7 +592,7 @@ A: When a netdev of a physical NIC is initialized, Linux usually
A number of other ways are possible all up to the capabilities of
the NIC you have.
-Q: Can I use the XSKMAP to implement a switch betwen different umems
+Q: Can I use the XSKMAP to implement a switch between different umems
in copy mode?
A: The short answer is no, that is not supported at the moment. The
diff --git a/Documentation/networking/arcnet-hardware.rst b/Documentation/networking/arcnet-hardware.rst
index ac249ac8fcf2..982215723582 100644
--- a/Documentation/networking/arcnet-hardware.rst
+++ b/Documentation/networking/arcnet-hardware.rst
@@ -1902,7 +1902,7 @@ of 32 possible I/O Base addresses using the following tables::
6 | 10
The I/O address is sum of all switches set to "1". Remember that
-the I/O address space bellow 0x200 is RESERVED for mainboard, so
+the I/O address space below 0x200 is RESERVED for mainboard, so
switch 1 should be ALWAYS SET TO OFF.
diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst
index b85563ea3682..8a0dcb1894b4 100644
--- a/Documentation/networking/batman-adv.rst
+++ b/Documentation/networking/batman-adv.rst
@@ -159,7 +159,7 @@ Please send us comments, experiences, questions, anything :)
IRC:
#batadv on ircs://irc.hackint.org/
Mailing-list:
- b.a.t.m.a.n@open-mesh.org (optional subscription at
+ b.a.t.m.a.n@lists.open-mesh.org (optional subscription at
https://lists.open-mesh.org/mailman3/postorius/lists/b.a.t.m.a.n.lists.open-mesh.org/)
You can also contact the Authors:
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
index 90121deef217..d7e1ada905b2 100644
--- a/Documentation/networking/can.rst
+++ b/Documentation/networking/can.rst
@@ -931,7 +931,7 @@ ival1:
ival2:
Throttle the received message rate down to the value of ival2. This
is useful to reduce messages for the application when the signal inside the
- CAN frame is stateless as state changes within the ival2 periode may get
+ CAN frame is stateless as state changes within the ival2 period may get
lost.
Broadcast Manager Multiplex Message Receive Filter
diff --git a/Documentation/networking/can_ucan_protocol.rst b/Documentation/networking/can_ucan_protocol.rst
index 638ac1ee7914..935d872ae87c 100644
--- a/Documentation/networking/can_ucan_protocol.rst
+++ b/Documentation/networking/can_ucan_protocol.rst
@@ -50,7 +50,7 @@ Setup Packet
``wIndex`` USB Interface Index (0 for device commands)
``wLength`` * Host to Device - Number of bytes to transmit
* Device to Host - Maximum Number of bytes to
- receive. If the device send less. Commom ZLP
+ receive. If the device sends less, common ZLP
semantics are used.
================= =====================================================
diff --git a/Documentation/networking/cdc_mbim.rst b/Documentation/networking/cdc_mbim.rst
index 0048409c06b4..37f968acc473 100644
--- a/Documentation/networking/cdc_mbim.rst
+++ b/Documentation/networking/cdc_mbim.rst
@@ -93,7 +93,7 @@ MBIM function can be looked up using sysfs. For example::
USB configuration descriptors
-----------------------------
The wMaxControlMessage field of the CDC MBIM functional descriptor
-limits the maximum control message size. The managament application is
+limits the maximum control message size. The management application is
responsible for negotiating a control message size complying with the
requirements in section 9.3.1 of [1], taking this descriptor field
into consideration.
diff --git a/Documentation/networking/device_drivers/atm/iphase.rst b/Documentation/networking/device_drivers/atm/iphase.rst
index 92d9b757d75a..388c7101e2cb 100644
--- a/Documentation/networking/device_drivers/atm/iphase.rst
+++ b/Documentation/networking/device_drivers/atm/iphase.rst
@@ -4,7 +4,7 @@
ATM (i)Chip IA Linux Driver Source
==================================
- READ ME FISRT
+ READ ME FIRST
--------------------------------------------------------------------------------
diff --git a/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst b/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst
index 40c92ea272af..1a4fc6607582 100644
--- a/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst
+++ b/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst
@@ -577,7 +577,7 @@ CTU CAN FD IP Core and Driver Development Acknowledgment
* Linux driver development
* continuous integration platform architect and GHDL updates
- * theses `Open-source and Open-hardware CAN FD Protocol Support <https://dspace.cvut.cz/bitstream/handle/10467/80366/F3-DP-2019-Jerabek-Martin-Jerabek-thesis-2019-canfd.pdf>`_
+ * thesis `Open-source and Open-hardware CAN FD Protocol Support <https://dspace.cvut.cz/bitstream/handle/10467/80366/F3-DP-2019-Jerabek-Martin-Jerabek-thesis-2019-canfd.pdf>`_
* Jiri Novak <jnovak@fel.cvut.cz>
@@ -603,7 +603,7 @@ CTU CAN FD IP Core and Driver Development Acknowledgment
* Jan Charvat
* implemented CTU CAN FD functional model for QEMU which has been integrated into QEMU mainline (`docs/system/devices/can.rst <https://www.qemu.org/docs/master/system/devices/can.html>`_)
- * Bachelor theses Model of CAN FD Communication Controller for QEMU Emulator
+ * Bachelor thesis Model of CAN FD Communication Controller for QEMU Emulator
Notes
-----
diff --git a/Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg b/Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg
index b371650788f4..381323423b4c 100644
--- a/Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg
+++ b/Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg
@@ -129,10 +129,10 @@
</g>
</g>
<text transform="matrix(.264583 0 0 .264583 91.8919 139.964)" x="26.959213" y="9.11724" fill="#2aa1ff" filter="url(#filter1204-6-2-9-1-3-1)" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle" style="line-height:1.1" xml:space="preserve"><tspan x="26.959213" y="9.11724" text-align="center">Set</tspan><tspan x="26.959213" y="22.31724" text-align="center">abort</tspan></text>
- <text transform="translate(49.0277 104.823)" x="57.620724" y="16.855087" filter="url(#filter1204)" font-size="3.175px" text-align="center" text-anchor="middle" style="line-height:1.1" xml:space="preserve"><tspan x="57.620724" y="16.855087" text-align="center">Transmission</tspan><tspan x="57.620724" y="20.347588" text-align="center">unsuccesfull</tspan></text>
+ <text transform="translate(49.0277 104.823)" x="57.620724" y="16.855087" filter="url(#filter1204)" font-size="3.175px" text-align="center" text-anchor="middle" style="line-height:1.1" xml:space="preserve"><tspan x="57.620724" y="16.855087" text-align="center">Transmission</tspan><tspan x="57.620724" y="20.347588" text-align="center">unsuccessful</tspan></text>
<g font-size="12px" stroke-width="3.77953" text-anchor="middle">
<text transform="matrix(.264583 0 0 .264583 68.5988 118.913)" x="38.824219" y="9.1171875" filter="url(#filter1204)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824219" y="9.1171875" text-align="center">Transmission</tspan><tspan x="38.824219" y="22.317188" text-align="center">starts</tspan></text>
- <text transform="matrix(.264583 0 0 .264583 106.802 130.509)" x="38.824219" y="9.1171875" filter="url(#filter1204)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824219" y="9.1171875" text-align="center">Transmission</tspan><tspan x="38.824219" y="22.317188" text-align="center">succesfull</tspan></text>
+ <text transform="matrix(.264583 0 0 .264583 106.802 130.509)" x="38.824219" y="9.1171875" filter="url(#filter1204)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824219" y="9.1171875" text-align="center">Transmission</tspan><tspan x="38.824219" y="22.317188" text-align="center">successful</tspan></text>
<text transform="matrix(.264583 0 0 .264583 107.77 145.476)" x="38.824219" y="9.1171875" filter="url(#filter1204)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824219" y="9.1171875" text-align="center">Transmission</tspan><tspan x="38.824219" y="22.317188" text-align="center">sborted</tspan></text>
</g>
<g stroke-width="3.77953" text-anchor="middle">
diff --git a/Documentation/networking/device_drivers/ethernet/3com/vortex.rst b/Documentation/networking/device_drivers/ethernet/3com/vortex.rst
index e89e4192af88..a060f84c4f96 100644
--- a/Documentation/networking/device_drivers/ethernet/3com/vortex.rst
+++ b/Documentation/networking/device_drivers/ethernet/3com/vortex.rst
@@ -254,7 +254,7 @@ Media selection
A number of the older NICs such as the 3c590 and 3c900 series have
10base2 and AUI interfaces.
-Prior to January, 2001 this driver would autoeselect the 10base2 or AUI
+Prior to January, 2001 this driver would autoselect the 10base2 or AUI
port if it didn't detect activity on the 10baseT port. It would then
get stuck on the 10base2 port and a driver reload was necessary to
switch back to 10baseT. This behaviour could not be prevented with a
diff --git a/Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst b/Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst
index 595ddef1c8b3..099280a261be 100644
--- a/Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst
+++ b/Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst
@@ -270,7 +270,7 @@ RX flow rules (ntuple filters)
ethtool -K ethX ntuple <on|off>
- When disabling ntuple filters, all the user programed filters are
+ When disabling ntuple filters, all the user programmed filters are
flushed from the driver cache and hardware. All needed filters must
be re-added when ntuple is re-enabled.
@@ -418,7 +418,7 @@ Default value: 0xFFFF
0 Disable interrupt throttling.
1 Enable interrupt throttling and use specified tx and rx rates.
0xFFFF Auto throttling mode. Driver will choose the best RX and TX
- interrupt throtting settings based on link speed.
+ interrupt throttling settings based on link speed.
====== ==============================================================
aq_itr_tx - TX interrupt throttle rate
@@ -456,7 +456,7 @@ AQ_CFG_RX_PAGEORDER
Default value: 0
-RX page order override. Thats a power of 2 number of RX pages allocated for
+RX page order override. That's a power of 2 number of RX pages allocated for
each descriptor. Received descriptor size is still limited by
AQ_CFG_RX_FRAME_MAX.
diff --git a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
index 1d2f55feca24..e2a36d0d88ef 100644
--- a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
+++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
@@ -11,7 +11,7 @@ Overview
--------
The DPAA2 MAC / PHY support consists of a set of APIs that help DPAA2 network
-drivers (dpaa2-eth, dpaa2-ethsw) interract with the PHY library.
+drivers (dpaa2-eth, dpaa2-ethsw) interact with the PHY library.
DPAA2 Software Architecture
---------------------------
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 5196905582c5..392969ac88ad 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -39,7 +39,7 @@ Contents:
intel/ice
marvell/octeontx2
marvell/octeon_ep
- mellanox/mlx5
+ mellanox/mlx5/index
microsoft/netvsc
neterion/s2io
netronome/nfp
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index b481b81f3be5..5efea4dd1251 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -901,15 +901,17 @@ To enable/disable UDP Segmentation Offload, issue the following command::
# ethtool -K <ethX> tx-udp-segmentation [off|on]
+
GNSS module
-----------
-Allows user to read messages from the GNSS module and write supported commands.
-If the module is physically present, driver creates 2 TTYs for each supported
-device in /dev, ttyGNSS_<device>:<function>_0 and _1. First one (_0) is RW and
-the second one is RO.
-The protocol of write commands is dependent on the GNSS module as the driver
-writes raw bytes from the TTY to the GNSS i2c. Please refer to the module
-documentation for details.
+Requires a kernel compiled with CONFIG_GNSS=y or CONFIG_GNSS=m.
+Allows the user to read messages from the GNSS hardware module and write
+supported commands. If the module is physically present, a GNSS device is
+spawned: ``/dev/gnss<id>``.
+The protocol of the write commands depends on the GNSS hardware module, as the
+driver passes raw bytes from the GNSS object to the receiver over i2c. Please
+refer to the hardware GNSS module documentation for configuration details.
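+
+For example, raw messages from the receiver (commonly NMEA sentences, depending
+on the module) can be read directly from the character device; the device id
+below is illustrative::
+
+ # cat /dev/gnss0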
+
Performance Optimization
========================
diff --git a/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
index dd5cd69467be..5ba9015336e2 100644
--- a/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
+++ b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
@@ -127,7 +127,7 @@ Type1:
Type2:
- RVU PF0 ie admin function creates these VFs and maps them to loopback block's channels.
- A set of two VFs (VF0 & VF1, VF2 & VF3 .. so on) works as a pair ie pkts sent out of
- VF0 will be received by VF1 and viceversa.
+ VF0 will be received by VF1 and vice versa.
- These VFs can be used by applications or virtual machines to communicate between them
without sending traffic outside. There is no switch present in HW, hence the support
for loopback VFs.
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
deleted file mode 100644
index 6969652f593c..000000000000
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
+++ /dev/null
@@ -1,746 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-
-=================================================
-Mellanox ConnectX(R) mlx5 core VPI Network Driver
-=================================================
-
-Copyright (c) 2019, Mellanox Technologies LTD.
-
-Contents
-========
-
-- `Enabling the driver and kconfig options`_
-- `Devlink info`_
-- `Devlink parameters`_
-- `Bridge offload`_
-- `mlx5 subfunction`_
-- `mlx5 function attributes`_
-- `Devlink health reporters`_
-- `mlx5 tracepoints`_
-
-Enabling the driver and kconfig options
-=======================================
-
-| mlx5 core is modular and most of the major mlx5 core driver features can be selected (compiled in/out)
-| at build time via kernel Kconfig flags.
-| Basic features, ethernet net device rx/tx offloads and XDP, are available with the most basic flags
-| CONFIG_MLX5_CORE=y/m and CONFIG_MLX5_CORE_EN=y.
-| For the list of advanced features, please see below.
-
-**CONFIG_MLX5_CORE=(y/m/n)** (module mlx5_core.ko)
-
-| The driver can be enabled by choosing CONFIG_MLX5_CORE=y/m in kernel config.
-| This will provide mlx5 core driver for mlx5 ulps to interface with (mlx5e, mlx5_ib).
-
-
-**CONFIG_MLX5_CORE_EN=(y/n)**
-
-| Choosing this option will allow basic ethernet netdevice support with all of the standard rx/tx offloads.
-| mlx5e is the mlx5 ulp driver which provides netdevice kernel interface, when chosen, mlx5e will be
-| built-in into mlx5_core.ko.
-
-
-**CONFIG_MLX5_EN_ARFS=(y/n)**
-
-| Enables Hardware-accelerated receive flow steering (arfs) support, and ntuple filtering.
-| https://community.mellanox.com/s/article/howto-configure-arfs-on-connectx-4
-
-
-**CONFIG_MLX5_EN_RXNFC=(y/n)**
-
-| Enables ethtool receive network flow classification, which allows user defined
-| flow rules to direct traffic into arbitrary rx queue via ethtool set/get_rxnfc API.
-
-
-**CONFIG_MLX5_CORE_EN_DCB=(y/n)**:
-
-| Enables `Data Center Bridging (DCB) Support <https://community.mellanox.com/s/article/howto-auto-config-pfc-and-ets-on-connectx-4-via-lldp-dcbx>`_.
-
-
-**CONFIG_MLX5_MPFS=(y/n)**
-
-| Ethernet Multi-Physical Function Switch (MPFS) support in ConnectX NIC.
-| MPFs is required for when `Multi-Host <http://www.mellanox.com/page/multihost>`_ configuration is enabled to allow passing
-| user configured unicast MAC addresses to the requesting PF.
-
-
-**CONFIG_MLX5_ESWITCH=(y/n)**
-
-| Ethernet SRIOV E-Switch support in ConnectX NIC. E-Switch provides internal SRIOV packet steering
-| and switching for the enabled VFs and PF in two available modes:
-| 1) `Legacy SRIOV mode (L2 mac vlan steering based) <https://community.mellanox.com/s/article/howto-configure-sr-iov-for-connectx-4-connectx-5-with-kvm--ethernet-x>`_.
-| 2) `Switchdev mode (eswitch offloads) <https://www.mellanox.com/related-docs/prod_software/ASAP2_Hardware_Offloading_for_vSwitches_User_Manual_v4.4.pdf>`_.
-
-
-**CONFIG_MLX5_CORE_IPOIB=(y/n)**
-
-| IPoIB offloads & acceleration support.
-| Requires CONFIG_MLX5_CORE_EN to provide an accelerated interface for the rdma
-| IPoIB ulp netdevice.
-
-
-**CONFIG_MLX5_FPGA=(y/n)**
-
-| Build support for the Innova family of network cards by Mellanox Technologies.
-| Innova network cards are comprised of a ConnectX chip and an FPGA chip on one board.
-| If you select this option, the mlx5_core driver will include the Innova FPGA core and allow
-| building sandbox-specific client drivers.
-
-
-**CONFIG_MLX5_EN_IPSEC=(y/n)**
-
-| Enables `IPSec XFRM cryptography-offload acceleration <http://www.mellanox.com/related-docs/prod_software/Mellanox_Innova_IPsec_Ethernet_Adapter_Card_User_Manual.pdf>`_.
-
-**CONFIG_MLX5_EN_TLS=(y/n)**
-
-| TLS cryptography-offload acceleration.
-
-
-**CONFIG_MLX5_INFINIBAND=(y/n/m)** (module mlx5_ib.ko)
-
-| Provides low-level InfiniBand/RDMA and `RoCE <https://community.mellanox.com/s/article/recommended-network-configuration-examples-for-roce-deployment>`_ support.
-
-**CONFIG_MLX5_SF=(y/n)**
-
-| Build support for subfunction.
-| Subfunctons are more light weight than PCI SRIOV VFs. Choosing this option
-| will enable support for creating subfunction devices.
-
-**External options** ( Choose if the corresponding mlx5 feature is required )
-
-- CONFIG_PTP_1588_CLOCK: When chosen, mlx5 ptp support will be enabled
-- CONFIG_VXLAN: When chosen, mlx5 vxlan support will be enabled.
-- CONFIG_MLXFW: When chosen, mlx5 firmware flashing support will be enabled (via devlink and ethtool).
-
-Devlink info
-============
-
-The devlink info reports the running and stored firmware versions on device.
-It also prints the device PSID which represents the HCA board type ID.
-
-User command example::
-
- $ devlink dev info pci/0000:00:06.0
- pci/0000:00:06.0:
- driver mlx5_core
- versions:
- fixed:
- fw.psid MT_0000000009
- running:
- fw.version 16.26.0100
- stored:
- fw.version 16.26.0100
-
-Devlink parameters
-==================
-
-flow_steering_mode: Device flow steering mode
----------------------------------------------
-The flow steering mode parameter controls the flow steering mode of the driver.
-Two modes are supported:
-1. 'dmfs' - Device managed flow steering.
-2. 'smfs' - Software/Driver managed flow steering.
-
-In DMFS mode, the HW steering entities are created and managed through the
-Firmware.
-In SMFS mode, the HW steering entities are created and managed though by
-the driver directly into hardware without firmware intervention.
-
-SMFS mode is faster and provides better rule insertion rate compared to default DMFS mode.
-
-User command examples:
-
-- Set SMFS flow steering mode::
-
- $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime
-
-- Read device flow steering mode::
-
- $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
- pci/0000:06:00.0:
- name flow_steering_mode type driver-specific
- values:
- cmode runtime value smfs
-
-enable_roce: RoCE enablement state
-----------------------------------
-RoCE enablement state controls driver support for RoCE traffic.
-When RoCE is disabled, there is no gid table, only raw ethernet QPs are supported and traffic on the well-known UDP RoCE port is handled as raw ethernet traffic.
-
-To change RoCE enablement state, a user must change the driverinit cmode value and run devlink reload.
-
-User command examples:
-
-- Disable RoCE::
-
- $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
- $ devlink dev reload pci/0000:06:00.0
-
-- Read RoCE enablement state::
-
- $ devlink dev param show pci/0000:06:00.0 name enable_roce
- pci/0000:06:00.0:
- name enable_roce type generic
- values:
- cmode driverinit value true
-
-esw_port_metadata: Eswitch port metadata state
-----------------------------------------------
-When applicable, disabling eswitch metadata can increase packet rate
-up to 20% depending on the use case and packet sizes.
-
-Eswitch port metadata state controls whether to internally tag packets with
-metadata. Metadata tagging must be enabled for multi-port RoCE, failover
-between representors and stacked devices.
-By default metadata is enabled on the supported devices in E-switch.
-Metadata is applicable only for E-switch in switchdev mode and
-users may disable it when NONE of the below use cases will be in use:
-1. HCA is in Dual/multi-port RoCE mode.
-2. VF/SF representor bonding (Usually used for Live migration)
-3. Stacked devices
-
-When metadata is disabled, the above use cases will fail to initialize if
-users try to enable them.
-
-- Show eswitch port metadata::
-
- $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata
- pci/0000:06:00.0:
- name esw_port_metadata type driver-specific
- values:
- cmode runtime value true
-
-- Disable eswitch port metadata::
-
- $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime
-
-- Change eswitch mode to switchdev mode where after choosing the metadata value::
-
- $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
-
-Bridge offload
-==============
-The mlx5 driver implements support for offloading bridge rules when in switchdev
-mode. Linux bridge FDBs are automatically offloaded when mlx5 switchdev
-representor is attached to bridge.
-
-- Change device to switchdev mode::
-
- $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
-
-- Attach mlx5 switchdev representor 'enp8s0f0' to bridge netdev 'bridge1'::
-
- $ ip link set enp8s0f0 master bridge1
-
-VLANs
------
-Following bridge VLAN functions are supported by mlx5:
-
-- VLAN filtering (including multiple VLANs per port)::
-
- $ ip link set bridge1 type bridge vlan_filtering 1
- $ bridge vlan add dev enp8s0f0 vid 2-3
-
-- VLAN push on bridge ingress::
-
- $ bridge vlan add dev enp8s0f0 vid 3 pvid
-
-- VLAN pop on bridge egress::
-
- $ bridge vlan add dev enp8s0f0 vid 3 untagged
-
-mlx5 subfunction
-================
-mlx5 supports subfunction management using devlink port (see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface.
-
-A subfunction has its own function capabilities and its own resources. This
-means a subfunction has its own dedicated queues (txq, rxq, cq, eq). These
-queues are neither shared nor stolen from the parent PCI function.
-
-When a subfunction is RDMA capable, it has its own QP1, GID table, and RDMA
-resources neither shared nor stolen from the parent PCI function.
-
-A subfunction has a dedicated window in PCI BAR space that is not shared
-with the other subfunctions or the parent PCI function. This ensures that all
-devices (netdev, rdma, vdpa, etc.) of the subfunction accesses only assigned
-PCI BAR space.
-
-A subfunction supports eswitch representation through which it supports tc
-offloads. The user configures eswitch to send/receive packets from/to
-the subfunction port.
-
-Subfunctions share PCI level resources such as PCI MSI-X IRQs with
-other subfunctions and/or with its parent PCI function.
-
-Example mlx5 software, system, and device view::
-
- _______
- | admin |
- | user |----------
- |_______| |
- | |
- ____|____ __|______ _________________
- | | | | | |
- | devlink | | tc tool | | user |
- | tool | |_________| | applications |
- |_________| | |_________________|
- | | | |
- | | | | Userspace
- +---------|-------------|-------------------|----------|--------------------+
- | | +----------+ +----------+ Kernel
- | | | netdev | | rdma dev |
- | | +----------+ +----------+
- (devlink port add/del | ^ ^
- port function set) | | |
- | | +---------------|
- _____|___ | | _______|_______
- | | | | | mlx5 class |
- | devlink | +------------+ | | drivers |
- | kernel | | rep netdev | | |(mlx5_core,ib) |
- |_________| +------------+ | |_______________|
- | | | ^
- (devlink ops) | | (probe/remove)
- _________|________ | | ____|________
- | subfunction | | +---------------+ | subfunction |
- | management driver|----- | subfunction |---| driver |
- | (mlx5_core) | | auxiliary dev | | (mlx5_core) |
- |__________________| +---------------+ |_____________|
- | ^
- (sf add/del, vhca events) |
- | (device add/del)
- _____|____ ____|________
- | | | subfunction |
- | PCI NIC |--- activate/deactivate events--->| host driver |
- |__________| | (mlx5_core) |
- |_____________|
-
-Subfunction is created using devlink port interface.
-
-- Change device to switchdev mode::
-
- $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
-
-- Add a devlink port of subfunction flavour::
-
- $ devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
- pci/0000:06:00.0/32768: type eth netdev eth6 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
- function:
- hw_addr 00:00:00:00:00:00 state inactive opstate detached
-
-- Show a devlink port of the subfunction::
-
- $ devlink port show pci/0000:06:00.0/32768
- pci/0000:06:00.0/32768: type eth netdev enp6s0pf0sf88 flavour pcisf pfnum 0 sfnum 88
- function:
- hw_addr 00:00:00:00:00:00 state inactive opstate detached
-
-- Delete a devlink port of subfunction after use::
-
- $ devlink port del pci/0000:06:00.0/32768
-
-mlx5 function attributes
-========================
-The mlx5 driver provides a mechanism to setup PCI VF/SF function attributes in
-a unified way for SmartNIC and non-SmartNIC.
-
-This is supported only when the eswitch mode is set to switchdev. Port function
-configuration of the PCI VF/SF is supported through devlink eswitch port.
-
-Port function attributes should be set before PCI VF/SF is enumerated by the
-driver.
-
-MAC address setup
------------------
-mlx5 driver support devlink port function attr mechanism to setup MAC
-address. (refer to Documentation/networking/devlink/devlink-port.rst)
-
-RoCE capability setup
----------------------
-Not all mlx5 PCI devices/SFs require RoCE capability.
-
-When RoCE capability is disabled, it saves 1 Mbytes worth of system memory per
-PCI devices/SF.
-
-mlx5 driver support devlink port function attr mechanism to setup RoCE
-capability. (refer to Documentation/networking/devlink/devlink-port.rst)
-
-migratable capability setup
----------------------------
-User who wants mlx5 PCI VFs to be able to perform live migration need to
-explicitly enable the VF migratable capability.
-
-mlx5 driver support devlink port function attr mechanism to setup migratable
-capability. (refer to Documentation/networking/devlink/devlink-port.rst)
-
-SF state setup
---------------
-To use the SF, the user must activate the SF using the SF function state
-attribute.
-
-- Get the state of the SF identified by its unique devlink port index::
-
- $ devlink port show ens2f0npf0sf88
- pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
- function:
- hw_addr 00:00:00:00:88:88 state inactive opstate detached
-
-- Activate the function and verify its state is active::
-
- $ devlink port function set ens2f0npf0sf88 state active
-
- $ devlink port show ens2f0npf0sf88
- pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
- function:
- hw_addr 00:00:00:00:88:88 state active opstate detached
-
-Upon function activation, the PF driver instance gets the event from the device
-that a particular SF was activated. It's the cue to put the device on bus, probe
-it and instantiate the devlink instance and class specific auxiliary devices
-for it.
-
-- Show the auxiliary device and port of the subfunction::
-
- $ devlink dev show
- devlink dev show auxiliary/mlx5_core.sf.4
-
- $ devlink port show auxiliary/mlx5_core.sf.4/1
- auxiliary/mlx5_core.sf.4/1: type eth netdev p0sf88 flavour virtual port 0 splittable false
-
- $ rdma link show mlx5_0/1
- link mlx5_0/1 state ACTIVE physical_state LINK_UP netdev p0sf88
-
- $ rdma dev show
- 8: rocep6s0f1: node_type ca fw 16.29.0550 node_guid 248a:0703:00b3:d113 sys_image_guid 248a:0703:00b3:d112
- 13: mlx5_0: node_type ca fw 16.29.0550 node_guid 0000:00ff:fe00:8888 sys_image_guid 248a:0703:00b3:d112
-
-- Subfunction auxiliary device and class device hierarchy::
-
- mlx5_core.sf.4
- (subfunction auxiliary device)
- /\
- / \
- / \
- / \
- / \
- mlx5_core.eth.4 mlx5_core.rdma.4
- (sf eth aux dev) (sf rdma aux dev)
- | |
- | |
- p0sf88 mlx5_0
- (sf netdev) (sf rdma device)
-
-Additionally, the SF port also gets the event when the driver attaches to the
-auxiliary device of the subfunction. This results in changing the operational
-state of the function. This provides visibility to the user to decide when is it
-safe to delete the SF port for graceful termination of the subfunction.
-
-- Show the SF port operational state::
-
- $ devlink port show ens2f0npf0sf88
- pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
- function:
- hw_addr 00:00:00:00:88:88 state active opstate attached
-
-Devlink health reporters
-========================
-
-tx reporter
------------
-The tx reporter is responsible for reporting and recovering of the following two error scenarios:
-
-- tx timeout
- Report on kernel tx timeout detection.
- Recover by searching lost interrupts.
-- tx error completion
- Report on error tx completion.
- Recover by flushing the tx queue and reset it.
-
-tx reporter also support on demand diagnose callback, on which it provides
-real time information of its send queues status.
-
-User commands examples:
-
-- Diagnose send queues status::
-
- $ devlink health diagnose pci/0000:82:00.0 reporter tx
-
-NOTE: This command has valid output only when interface is up, otherwise the command has empty output.
-
-- Show number of tx errors indicated, number of recover flows ended successfully,
- is autorecover enabled and graceful period from last recover::
-
- $ devlink health show pci/0000:82:00.0 reporter tx
-
-rx reporter
------------
-The rx reporter is responsible for reporting and recovering of the following two error scenarios:
-
-- rx queues' initialization (population) timeout
- Population of rx queues' descriptors on ring initialization is done
- in napi context via triggering an irq. In case of a failure to get
- the minimum amount of descriptors, a timeout would occur, and
- descriptors could be recovered by polling the EQ (Event Queue).
-- rx completions with errors (reported by HW on interrupt context)
- Report on rx completion error.
- Recover (if needed) by flushing the related queue and reset it.
-
-rx reporter also supports on demand diagnose callback, on which it
-provides real time information of its receive queues' status.
-
-- Diagnose rx queues' status and corresponding completion queue::
-
- $ devlink health diagnose pci/0000:82:00.0 reporter rx
-
-NOTE: This command has valid output only when interface is up. Otherwise, the command has empty output.
-
-- Show number of rx errors indicated, number of recover flows ended successfully,
- is autorecover enabled, and graceful period from last recover::
-
- $ devlink health show pci/0000:82:00.0 reporter rx
-
-fw reporter
------------
-The fw reporter implements `diagnose` and `dump` callbacks.
-It follows symptoms of fw error such as fw syndrome by triggering
-fw core dump and storing it into the dump buffer.
-The fw reporter diagnose command can be triggered any time by the user to check
-current fw status.
-
-User commands examples:
-
-- Check fw heath status::
-
- $ devlink health diagnose pci/0000:82:00.0 reporter fw
-
-- Read FW core dump if already stored or trigger new one::
-
- $ devlink health dump show pci/0000:82:00.0 reporter fw
-
-NOTE: This command can run only on the PF which has fw tracer ownership,
-running it on other PF or any VF will return "Operation not permitted".
-
-fw fatal reporter
------------------
-The fw fatal reporter implements `dump` and `recover` callbacks.
-It follows fatal errors indications by CR-space dump and recover flow.
-The CR-space dump uses vsc interface which is valid even if the FW command
-interface is not functional, which is the case in most FW fatal errors.
-The recover function runs recover flow which reloads the driver and triggers fw
-reset if needed.
-On firmware error, the health buffer is dumped into the dmesg. The log
-level is derived from the error's severity (given in health buffer).
-
-User commands examples:
-
-- Run fw recover flow manually::
-
- $ devlink health recover pci/0000:82:00.0 reporter fw_fatal
-
-- Read FW CR-space dump if already stored or trigger new one::
-
- $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
-
-NOTE: This command can run only on PF.
-
-mlx5 tracepoints
-================
-
-mlx5 driver provides internal tracepoints for tracking and debugging using
-kernel tracepoints interfaces (refer to Documentation/trace/ftrace.rst).
-
-For the list of support mlx5 events, check `/sys/kernel/debug/tracing/events/mlx5/`.
-
-tc and eswitch offloads tracepoints:
-
-- mlx5e_configure_flower: trace flower filter actions and cookies offloaded to mlx5::
-
- $ echo mlx5:mlx5e_configure_flower >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- tc-6535 [019] ...1 2672.404466: mlx5e_configure_flower: cookie=0000000067874a55 actions= REDIRECT
-
-- mlx5e_delete_flower: trace flower filter actions and cookies deleted from mlx5::
-
- $ echo mlx5:mlx5e_delete_flower >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- tc-6569 [010] .N.1 2686.379075: mlx5e_delete_flower: cookie=0000000067874a55 actions= NULL
-
-- mlx5e_stats_flower: trace flower stats request::
-
- $ echo mlx5:mlx5e_stats_flower >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- tc-6546 [010] ...1 2679.704889: mlx5e_stats_flower: cookie=0000000060eb3d6a bytes=0 packets=0 lastused=4295560217
-
-- mlx5e_tc_update_neigh_used_value: trace tunnel rule neigh update value offloaded to mlx5::
-
- $ echo mlx5:mlx5e_tc_update_neigh_used_value >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- kworker/u48:4-8806 [009] ...1 55117.882428: mlx5e_tc_update_neigh_used_value: netdev: ens1f0 IPv4: 1.1.1.10 IPv6: ::ffff:1.1.1.10 neigh_used=1
-
-- mlx5e_rep_neigh_update: trace neigh update tasks scheduled due to neigh state change events::
-
- $ echo mlx5:mlx5e_rep_neigh_update >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- kworker/u48:7-2221 [009] ...1 1475.387435: mlx5e_rep_neigh_update: netdev: ens1f0 MAC: 24:8a:07:9a:17:9a IPv4: 1.1.1.10 IPv6: ::ffff:1.1.1.10 neigh_connected=1
-
-Bridge offloads tracepoints:
-
-- mlx5_esw_bridge_fdb_entry_init: trace bridge FDB entry offloaded to mlx5::
-
- $ echo mlx5:mlx5_esw_bridge_fdb_entry_init >> set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- kworker/u20:9-2217 [003] ...1 318.582243: mlx5_esw_bridge_fdb_entry_init: net_device=enp8s0f0_0 addr=e4:fd:05:08:00:02 vid=0 flags=0 used=0
-
-- mlx5_esw_bridge_fdb_entry_cleanup: trace bridge FDB entry deleted from mlx5::
-
- $ echo mlx5:mlx5_esw_bridge_fdb_entry_cleanup >> set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- ip-2581 [005] ...1 318.629871: mlx5_esw_bridge_fdb_entry_cleanup: net_device=enp8s0f0_1 addr=e4:fd:05:08:00:03 vid=0 flags=0 used=16
-
-- mlx5_esw_bridge_fdb_entry_refresh: trace bridge FDB entry offload refreshed in
- mlx5::
-
- $ echo mlx5:mlx5_esw_bridge_fdb_entry_refresh >> set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- kworker/u20:8-3849 [003] ...1 466716: mlx5_esw_bridge_fdb_entry_refresh: net_device=enp8s0f0_0 addr=e4:fd:05:08:00:02 vid=3 flags=0 used=0
-
-- mlx5_esw_bridge_vlan_create: trace bridge VLAN object add on mlx5
- representor::
-
- $ echo mlx5:mlx5_esw_bridge_vlan_create >> set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- ip-2560 [007] ...1 318.460258: mlx5_esw_bridge_vlan_create: vid=1 flags=6
-
-- mlx5_esw_bridge_vlan_cleanup: trace bridge VLAN object delete from mlx5
- representor::
-
- $ echo mlx5:mlx5_esw_bridge_vlan_cleanup >> set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- bridge-2582 [007] ...1 318.653496: mlx5_esw_bridge_vlan_cleanup: vid=2 flags=8
-
-- mlx5_esw_bridge_vport_init: trace mlx5 vport assigned with bridge upper
- device::
-
- $ echo mlx5:mlx5_esw_bridge_vport_init >> set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- ip-2560 [007] ...1 318.458915: mlx5_esw_bridge_vport_init: vport_num=1
-
-- mlx5_esw_bridge_vport_cleanup: trace mlx5 vport removed from bridge upper
- device::
-
- $ echo mlx5:mlx5_esw_bridge_vport_cleanup >> set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- ip-5387 [000] ...1 573713: mlx5_esw_bridge_vport_cleanup: vport_num=1
-
-Eswitch QoS tracepoints:
-
-- mlx5_esw_vport_qos_create: trace creation of transmit scheduler arbiter for vport::
-
- $ echo mlx5:mlx5_esw_vport_qos_create >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- <...>-23496 [018] .... 73136.838831: mlx5_esw_vport_qos_create: (0000:82:00.0) vport=2 tsar_ix=4 bw_share=0, max_rate=0 group=000000007b576bb3
-
-- mlx5_esw_vport_qos_config: trace configuration of transmit scheduler arbiter for vport::
-
- $ echo mlx5:mlx5_esw_vport_qos_config >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- <...>-26548 [023] .... 75754.223823: mlx5_esw_vport_qos_config: (0000:82:00.0) vport=1 tsar_ix=3 bw_share=34, max_rate=10000 group=000000007b576bb3
-
-- mlx5_esw_vport_qos_destroy: trace deletion of transmit scheduler arbiter for vport::
-
- $ echo mlx5:mlx5_esw_vport_qos_destroy >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- <...>-27418 [004] .... 76546.680901: mlx5_esw_vport_qos_destroy: (0000:82:00.0) vport=1 tsar_ix=3
-
-- mlx5_esw_group_qos_create: trace creation of transmit scheduler arbiter for rate group::
-
- $ echo mlx5:mlx5_esw_group_qos_create >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- <...>-26578 [008] .... 75776.022112: mlx5_esw_group_qos_create: (0000:82:00.0) group=000000008dac63ea tsar_ix=5
-
-- mlx5_esw_group_qos_config: trace configuration of transmit scheduler arbiter for rate group::
-
- $ echo mlx5:mlx5_esw_group_qos_config >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- <...>-27303 [020] .... 76461.455356: mlx5_esw_group_qos_config: (0000:82:00.0) group=000000008dac63ea tsar_ix=5 bw_share=100 max_rate=20000
-
-- mlx5_esw_group_qos_destroy: trace deletion of transmit scheduler arbiter for group::
-
- $ echo mlx5:mlx5_esw_group_qos_destroy >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- <...>-27418 [006] .... 76547.187258: mlx5_esw_group_qos_destroy: (0000:82:00.0) group=000000007b576bb3 tsar_ix=1
-
-SF tracepoints:
-
-- mlx5_sf_add: trace addition of the SF port::
-
- $ echo mlx5:mlx5_sf_add >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- devlink-9363 [031] ..... 24610.188722: mlx5_sf_add: (0000:06:00.0) port_index=32768 controller=0 hw_id=0x8000 sfnum=88
-
-- mlx5_sf_free: trace freeing of the SF port::
-
- $ echo mlx5:mlx5_sf_free >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- devlink-9830 [038] ..... 26300.404749: mlx5_sf_free: (0000:06:00.0) port_index=32768 controller=0 hw_id=0x8000
-
-- mlx5_sf_hwc_alloc: trace allocating of the hardware SF context::
-
- $ echo mlx5:mlx5_sf_hwc_alloc >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- devlink-9775 [031] ..... 26296.385259: mlx5_sf_hwc_alloc: (0000:06:00.0) controller=0 hw_id=0x8000 sfnum=88
-
-- mlx5_sf_hwc_free: trace freeing of the hardware SF context::
-
- $ echo mlx5:mlx5_sf_hwc_free >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- kworker/u128:3-9093 [046] ..... 24625.365771: mlx5_sf_hwc_free: (0000:06:00.0) hw_id=0x8000
-
-- mlx5_sf_hwc_deferred_free : trace deferred freeing of the hardware SF context::
-
- $ echo mlx5:mlx5_sf_hwc_deferred_free >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- devlink-9519 [046] ..... 24624.400271: mlx5_sf_hwc_deferred_free: (0000:06:00.0) hw_id=0x8000
-
-- mlx5_sf_vhca_event: trace SF vhca event and state::
-
- $ echo mlx5:mlx5_sf_vhca_event >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- kworker/u128:3-9093 [046] ..... 24625.365525: mlx5_sf_vhca_event: (0000:06:00.0) hw_id=0x8000 sfnum=88 vhca_state=1
-
-- mlx5_sf_dev_add : trace SF device add event::
-
- $ echo mlx5:mlx5_sf_dev_add>> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- kworker/u128:3-9093 [000] ..... 24616.524495: mlx5_sf_dev_add: (0000:06:00.0) sfdev=00000000fc5d96fd aux_id=4 hw_id=0x8000 sfnum=88
-
-- mlx5_sf_dev_del : trace SF device delete event::
-
- $ echo mlx5:mlx5_sf_dev_del >> /sys/kernel/debug/tracing/set_event
- $ cat /sys/kernel/debug/tracing/trace
- ...
- kworker/u128:3-9093 [044] ..... 24624.400749: mlx5_sf_dev_del: (0000:06:00.0) sfdev=00000000fc5d96fd aux_id=4 hw_id=0x8000 sfnum=88
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
new file mode 100644
index 000000000000..4cd8e869762b
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -0,0 +1,1302 @@
+.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+.. include:: <isonum.txt>
+
+================
+Ethtool counters
+================
+
+:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+Contents
+========
+
+- `Overview`_
+- `Groups`_
+- `Types`_
+- `Descriptions`_
+
+Overview
+========
+
+There are several counter groups based on where the counter is being counted. In
+addition, each group of counters may have different counter types.
+
+These counter groups are based on which component in the networking setup,
+illustrated below, they describe::
+
+ ----------------------------------------
+ | |
+ ---------------------------------------- ---------------------------------------- |
+ | Hypervisor | | VM | |
+ | | | | |
+ | ------------------- --------------- | | ------------------- --------------- | |
+ | | Ethernet driver | | RDMA driver | | | | Ethernet driver | | RDMA driver | | |
+ | ------------------- --------------- | | ------------------- --------------- | |
+ | | | | | | | | |
+ | ------------------- | | ------------------- | |
+ | | | | | |--
+ ---------------------------------------- ----------------------------------------
+ | |
+ ------------- -----------------------------
+ | |
+ ------ ------ ------ ------ ------ ------ ------
+ -----| PF |----------------------| VF |-| VF |-| VF |----- --| PF |--- --| PF |--- --| PF |---
+ | ------ ------ ------ ------ | | ------ | | ------ | | ------ |
+ | | | | | | | |
+ | | | | | | | |
+ | | | | | | | |
+ | eSwitch | | eSwitch | | eSwitch | | eSwitch |
+ ---------------------------------------------------------- ----------- ----------- -----------
+ -------------------------------------------------------------------------------
+ | |
+ | |
+ | Uplink (no counters) |
+ -------------------------------------------------------------------------------
+ ---------------------------------------------------------------
+ | |
+ | |
+ | MPFS (no counters) |
+ ---------------------------------------------------------------
+ |
+ |
+ | Port
+
+Groups
+======
+
+Ring
+ Software counters populated by the driver stack.
+
+Netdev
+ An aggregation of software ring counters.
+
+vPort counters
+ Traffic counters and drops due to steering or no buffers. May indicate issues
+ with the NIC. These counters include Ethernet traffic counters (including Raw
+ Ethernet) and RDMA/RoCE traffic counters.
+
+Physical port counters
+ Counters that collect statistics about the PFs and VFs. May indicate issues
+ with the NIC, link, or network. This measuring point holds information on
+ standardized counters like IEEE 802.3, RFC 2863, RFC 2819, and RFC 3635, and
+ additional counters like flow control, FEC, and more. Physical port counters
+ are not exposed to virtual machines.
+
+Priority Port Counters
+ A set of the physical port counters, per priority per port.
+
+Types
+=====
+
+Counters are divided into three types.
+
+Traffic Informative Counters
+ Counters which count traffic. These counters can be used for load estimation
+ or for general debug.
+
+Traffic Acceleration Counters
+ Counters which count traffic that was accelerated by the Mellanox driver or
+ by hardware. The counters are an additional layer to the informative counter
+ set, and the same traffic is counted in both the informative and acceleration
+ counters.
+
+.. [#accel] Traffic acceleration counter.
+
+Error Counters
+ Increment of these counters might indicate a problem. Each of these counters
+ has an explanation and correction action.
+
+Statistics can be fetched via the `ip link` or `ethtool` commands. `ethtool`
+provides more detailed information::
+
+ ip -s link show <if-name>
+ ethtool -S <if-name>
+
+Descriptions
+============
+
+XSK, PTP, and QoS counters that are similar to counters defined previously will
+not be separately listed. For example, `ptp_tx[i]_packets` will not be
+explicitly documented since `tx[i]_packets` describes the behavior of both
+counters, except `ptp_tx[i]_packets` is only counted when precision time
+protocol is used.
+
+Ring / Netdev Counter
+----------------------------
+The following counters are available per ring or software port.
+
+These counters provide information on the amount of traffic that was accelerated
+by the NIC. The accelerated traffic is counted in addition to being counted by
+the standard counters (i.e. accelerated traffic is counted twice).
+
+The counter names in the table below refer to both ring and port counters. The
+notation for ring counters includes the [i] index without the braces. The
+notation for port counters doesn't include the [i]. A counter name
+`rx[i]_packets` will be printed as `rx0_packets` for ring 0 and `rx_packets` for
+the software port.
+
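+For example, the per-ring and the aggregated software-port flavors of the same
+counter can be compared with `ethtool` (interface name and filter are
+illustrative)::
+
+ ethtool -S eth0 | grep -E 'rx0_packets|rx_packets'
+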
+.. flat-table:: Ring / Software Port Counter Table
+ :widths: 2 3 1
+
+ * - Counter
+ - Description
+ - Type
+
+ * - `rx[i]_packets`
+ - The number of packets received on ring i.
+ - Informative
+
+ * - `rx[i]_bytes`
+ - The number of bytes received on ring i.
+ - Informative
+
+ * - `tx[i]_packets`
+ - The number of packets transmitted on ring i.
+ - Informative
+
+ * - `tx[i]_bytes`
+ - The number of bytes transmitted on ring i.
+ - Informative
+
+ * - `tx[i]_recover`
+ - The number of times the SQ was recovered.
+ - Error
+
+ * - `tx[i]_cqes`
+ - Number of CQE events issued on the SQ of ring i.
+ - Informative
+
+ * - `tx[i]_cqe_err`
+ - The number of error CQEs encountered on the SQ for ring i.
+ - Error
+
+ * - `tx[i]_tso_packets`
+ - The number of TSO packets transmitted on ring i [#accel]_.
+ - Acceleration
+
+ * - `tx[i]_tso_bytes`
+ - The number of TSO bytes transmitted on ring i [#accel]_.
+ - Acceleration
+
+ * - `tx[i]_tso_inner_packets`
+ - The number of TSO packets carrying internal encapsulation that were
+ transmitted on ring i [#accel]_.
+ - Acceleration
+
+ * - `tx[i]_tso_inner_bytes`
+ - The number of TSO bytes carrying internal encapsulation that were
+ transmitted on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_gro_packets`
+ - Number of received packets processed using hardware-accelerated GRO. The
+ number of hardware GRO offloaded packets received on ring i.
+ - Acceleration
+
+ * - `rx[i]_gro_bytes`
+ - Number of received bytes processed using hardware-accelerated GRO. The
+ number of hardware GRO offloaded bytes received on ring i.
+ - Acceleration
+
+ * - `rx[i]_gro_skbs`
+ - The number of receive SKBs constructed while performing
+ hardware-accelerated GRO.
+ - Informative
+
+ * - `rx[i]_gro_match_packets`
+ - Number of received packets processed using hardware-accelerated GRO that
+ met the flow table match criteria.
+ - Informative
+
+ * - `rx[i]_gro_large_hds`
+ - Number of received packets processed using hardware-accelerated GRO whose
+ large headers required additional memory to be allocated.
+ - Informative
+
+ * - `rx[i]_lro_packets`
+ - The number of LRO packets received on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_lro_bytes`
+ - The number of LRO bytes received on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_ecn_mark`
+ - The number of received packets where the ECN mark was turned on.
+ - Informative
+
+ * - `rx_oversize_pkts_buffer`
+ - The number of received packets dropped because their length, upon arrival
+ at the RQ, exceeded the software buffer size allocated by the device for
+ incoming traffic. This might imply that the device MTU is larger than the
+ software buffer size.
+ - Error
+
+ * - `rx_oversize_pkts_sw_drop`
+ - Number of received packets dropped in software because the CQE data is
+ larger than the MTU size.
+ - Error
+
+ * - `rx[i]_csum_unnecessary`
+ - Packets received with a `CHECKSUM_UNNECESSARY` on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_csum_unnecessary_inner`
+ - Packets received with inner encapsulation with a `CHECKSUM_UNNECESSARY`
+ on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_csum_none`
+ - Packets received with a `CHECKSUM_NONE` on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_csum_complete`
+ - Packets received with a `CHECKSUM_COMPLETE` on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_csum_complete_tail`
+ - Number of received packets that had checksum calculation computed,
+ potentially needed padding, and were able to do so with
+ `CHECKSUM_PARTIAL`.
+ - Informative
+
+ * - `rx[i]_csum_complete_tail_slow`
+ - Number of received packets that need padding larger than eight bytes for
+ the checksum.
+ - Informative
+
+ * - `tx[i]_csum_partial`
+ - Packets transmitted with a `CHECKSUM_PARTIAL` on ring i [#accel]_.
+ - Acceleration
+
+ * - `tx[i]_csum_partial_inner`
+ - Packets transmitted with inner encapsulation with a `CHECKSUM_PARTIAL` on
+ ring i [#accel]_.
+ - Acceleration
+
+ * - `tx[i]_csum_none`
+ - Packets transmitted with no hardware checksum acceleration on ring i.
+ - Informative
+
+ * - `tx[i]_stopped` / `tx_queue_stopped` [#ring_global]_
+ - Events where the SQ was full on ring i. If this counter increases, check
+ the amount of buffers allocated for transmission.
+ - Informative
+
+ * - `tx[i]_wake` / `tx_queue_wake` [#ring_global]_
+ - Events where the SQ was full and then became non-full on ring i.
+ - Informative
+
+ * - `tx[i]_dropped` / `tx_queue_dropped` [#ring_global]_
+ - Packets transmitted that were dropped due to DMA mapping failure on
+ ring i. If this counter increases, check the amount of buffers
+ allocated for transmission.
+ - Error
+
+ * - `tx[i]_nop`
+ - The number of nop WQEs (empty WQEs) inserted into the SQ (related to
+ ring i) when nearing the end of the cyclic buffer. The driver may add
+ these empty WQEs to avoid handling a WQE that starts at the end of the
+ queue and wraps around to its beginning. This is a normal condition.
+ - Informative
+
+ * - `tx[i]_added_vlan_packets`
+ - The number of packets sent where vlan tag insertion was offloaded to the
+ hardware.
+ - Acceleration
+
+ * - `rx[i]_removed_vlan_packets`
+ - The number of packets received where vlan tag stripping was offloaded to
+ the hardware.
+ - Acceleration
+
+ * - `rx[i]_wqe_err`
+ - The number of wrong opcodes received on ring i.
+ - Error
+
+ * - `rx[i]_mpwqe_frag`
+ - The number of WQEs that failed to allocate a compound page, so
+ fragmented MPWQEs (Multi Packet WQEs) were used on ring i. If this
+ counter rises, it may suggest that there is not enough memory for large
+ pages; the driver then allocates fragmented pages. This is not an
+ abnormal condition.
+ - Informative
+
+ * - `rx[i]_mpwqe_filler_cqes`
+ - The number of filler CQEs events that were issued on ring i.
+ - Informative
+
+ * - `rx[i]_mpwqe_filler_strides`
+ - The number of strides consumed by filler CQEs on ring i.
+ - Informative
+
+ * - `tx[i]_mpwqe_blks`
+ - The number of send blocks processed from Multi-Packet WQEs (mpwqe).
+ - Informative
+
+ * - `tx[i]_mpwqe_pkts`
+ - The number of send packets processed from Multi-Packet WQEs (mpwqe).
+ - Informative
+
+ * - `rx[i]_cqe_compress_blks`
+ - The number of receive blocks with CQE compression on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_cqe_compress_pkts`
+ - The number of receive packets with CQE compression on ring i [#accel]_.
+ - Acceleration
+
+ * - `rx[i]_cache_reuse`
+ - The number of events of successful reuse of a page from a driver's
+ internal page cache.
+ - Acceleration
+
+ * - `rx[i]_cache_full`
+ - The number of events of a full internal page cache where the driver can't
+ put a page back into the cache for recycling (the page will be freed).
+ - Acceleration
+
+ * - `rx[i]_cache_empty`
+ - The number of events where the cache was empty and had no page to give;
+ the driver allocates a new page.
+ - Acceleration
+
+ * - `rx[i]_cache_busy`
+ - The number of events where the cache head was busy and could not be
+ recycled; the driver allocated a new page.
+ - Acceleration
+
+ * - `rx[i]_cache_waive`
+ - The number of cache evacuations. This can occur when a page moves to
+ another NUMA node, or when a page was pfmemalloc-ed and should be freed
+ as soon as possible.
+ - Acceleration
+
+ * - `rx[i]_arfs_err`
+ - Number of flow rules that failed to be added to the flow table.
+ - Error
+
+ * - `rx[i]_recover`
+ - The number of times the RQ was recovered.
+ - Error
+
+ * - `tx[i]_xmit_more`
+ - The number of packets sent with `xmit_more` indication set on the skbuff
+ (no doorbell).
+ - Acceleration
+
+ * - `ch[i]_poll`
+ - The number of invocations of NAPI poll of channel i.
+ - Informative
+
+ * - `ch[i]_arm`
+ - The number of times the NAPI poll function completed and armed the
+ completion queues on channel i.
+ - Informative
+
+ * - `ch[i]_aff_change`
+ - The number of times the NAPI poll function explicitly stopped execution
+ on a CPU due to a change in affinity, on channel i.
+ - Informative
+
+ * - `ch[i]_events`
+ - The number of hard interrupt events on the completion queues of channel i.
+ - Informative
+
+ * - `ch[i]_eq_rearm`
+ - The number of times the EQ was recovered.
+ - Error
+
+ * - `ch[i]_force_irq`
+ - Number of times NAPI is triggered by XSK wakeups by posting a NOP to
+ ICOSQ.
+ - Acceleration
+
+ * - `rx[i]_congst_umr`
+ - The number of times an outstanding UMR request is delayed due to
+ congestion, on ring i.
+ - Informative
+
+ * - `rx_pp_alloc_fast`
+ - Number of successful fast path allocations.
+ - Informative
+
+ * - `rx_pp_alloc_slow`
+ - Number of slow path order-0 allocations.
+ - Informative
+
+ * - `rx_pp_alloc_slow_high_order`
+ - Number of slow path high order allocations.
+ - Informative
+
+ * - `rx_pp_alloc_empty`
+ - Counter is incremented when the ptr ring is empty, forcing a slow path
+ allocation.
+ - Informative
+
+ * - `rx_pp_alloc_refill`
+ - Counter is incremented when an allocation triggered a refill of the
+ cache.
+ - Informative
+
+ * - `rx_pp_alloc_waive`
+ - Counter is incremented when pages obtained from the ptr ring cannot be
+ added to the cache due to a NUMA mismatch.
+ - Informative
+
+ * - `rx_pp_recycle_cached`
+ - Counter is incremented when recycling places a page in the page pool
+ cache.
+ - Informative
+
+ * - `rx_pp_recycle_cache_full`
+ - Counter is incremented when the page pool cache was full.
+ - Informative
+
+ * - `rx_pp_recycle_ring`
+ - Counter is incremented when a page is placed into the ptr ring.
+ - Informative
+
+ * - `rx_pp_recycle_ring_full`
+ - Counter is incremented when a page is released from the page pool
+ because the ptr ring was full.
+ - Informative
+
+ * - `rx_pp_recycle_released_ref`
+ - Counter is incremented when a page is released (and not recycled)
+ because its refcnt > 1.
+ - Informative
+
+ * - `rx[i]_xsk_buff_alloc_err`
+ - The number of times allocating an skb or XSK buffer failed in the XSK RQ
+ context.
+ - Error
+
+ * - `rx[i]_xsk_arfs_err`
+ - aRFS (accelerated Receive Flow Steering) does not occur in the XSK RQ
+ context, so this counter should never increment.
+ - Error
+
+ * - `rx[i]_xdp_tx_xmit`
+ - The number of packets forwarded back to the port due to the XDP program
+ `XDP_TX` action (bouncing). These packets are not counted by other
+ software counters. These packets are counted by physical port and vPort
+ counters.
+ - Informative
+
+ * - `rx[i]_xdp_tx_mpwqe`
+ - Number of multi-packet WQEs transmitted by the netdev and `XDP_TX`-ed by
+ the netdev during the RQ context.
+ - Acceleration
+
+ * - `rx[i]_xdp_tx_inlnw`
+ - Number of WQE data segments transmitted where the data could be inlined
+ in the WQE and then `XDP_TX`-ed during the RQ context.
+ - Acceleration
+
+ * - `rx[i]_xdp_tx_nops`
+ - Number of NOP WQEBBs (WQE building blocks) received posted to the XDP SQ.
+ - Acceleration
+
+ * - `rx[i]_xdp_tx_full`
+ - The number of packets that should have been forwarded back to the port
+ due to the `XDP_TX` action but were dropped due to a full tx queue. These
+ packets are not counted by other software counters. These packets are
+ counted by physical port and vPort counters. You may open more rx queues,
+ spread rx traffic over all queues, and/or increase the rx ring size.
+ - Error
+
+ * - `rx[i]_xdp_tx_err`
+ - The number of times an `XDP_TX` error such as frame too long or frame
+ too short occurred on the `XDP_TX` ring of the RX ring.
+ - Error
+
+ * - `rx[i]_xdp_tx_cqes` / `rx_xdp_tx_cqe` [#ring_global]_
+ - The number of completions received on the CQ of the `XDP_TX` ring.
+ - Informative
+
+ * - `rx[i]_xdp_drop`
+ - The number of packets dropped due to the XDP program `XDP_DROP` action.
+ These packets are not counted by other software counters. These packets
+ are counted by physical port and vPort counters.
+ - Informative
+
+ * - `rx[i]_xdp_redirect`
+ - The number of times an XDP redirect action was triggered on ring i.
+ - Acceleration
+
+ * - `tx[i]_xdp_xmit`
+ - The number of packets redirected to the interface (due to XDP redirect).
+ These packets are not counted by other software counters. These packets
+ are counted by physical port and vPort counters.
+ - Informative
+
+ * - `tx[i]_xdp_full`
+ - The number of packets redirected to the interface (due to XDP redirect)
+ but dropped due to a full tx queue. These packets are not counted by
+ other software counters. You may enlarge the tx queues.
+ - Informative
+
+ * - `tx[i]_xdp_mpwqe`
+ - Number of multi-packet WQEs offloaded onto the NIC that were
+ `XDP_REDIRECT`-ed from other netdevs.
+ - Acceleration
+
+ * - `tx[i]_xdp_inlnw`
+ - Number of WQE data segments where the data could be inlined in the WQE
+ where the data segments were `XDP_REDIRECT`-ed from other netdevs.
+ - Acceleration
+
+ * - `tx[i]_xdp_nops`
+ - Number of NOP WQEBBs (WQE building blocks) posted to the SQ that were
+ `XDP_REDIRECT`-ed from other netdevs.
+ - Acceleration
+
+ * - `tx[i]_xdp_err`
+ - The number of packets redirected to the interface (due to XDP redirect)
+ but dropped due to an error such as frame too long or frame too short.
+ - Error
+
+ * - `tx[i]_xdp_cqes`
+ - The number of completions received on the CQ for packets redirected to
+ the interface (due to XDP redirect).
+ - Informative
+
+ * - `tx[i]_xsk_xmit`
+ - The number of packets transmitted using XSK zerocopy functionality.
+ - Acceleration
+
+ * - `tx[i]_xsk_mpwqe`
+ - Number of multi-packet WQEs offloaded onto the NIC that were
+ `XDP_REDIRECT`-ed from other netdevs.
+ - Acceleration
+
+ * - `tx[i]_xsk_inlnw`
+ - Number of WQE data segments where the data could be inlined in the WQE
+ that are transmitted using XSK zerocopy.
+ - Acceleration
+
+ * - `tx[i]_xsk_full`
+ - Number of times doorbell is rung in XSK zerocopy mode when SQ is full.
+ - Error
+
+ * - `tx[i]_xsk_err`
+ - Number of errors that occurred in XSK zerocopy mode such as if the data
+ size is larger than the MTU size.
+ - Error
+
+ * - `tx[i]_xsk_cqes`
+ - Number of CQEs processed in XSK zerocopy mode.
+ - Acceleration
+
+ * - `tx_tls_ctx`
+ - Number of TLS TX HW offload contexts added to device for encryption.
+ - Acceleration
+
+ * - `tx_tls_del`
+ - Number of TLS TX HW offload contexts removed from device (connection
+ closed).
+ - Acceleration
+
+ * - `tx_tls_pool_alloc`
+ - Number of times a unit of work is successfully allocated in the TLS HW
+ offload pool.
+ - Acceleration
+
+ * - `tx_tls_pool_free`
+ - Number of times a unit of work is freed in the TLS HW offload pool.
+ - Acceleration
+
+ * - `rx_tls_ctx`
+ - Number of TLS RX HW offload contexts added to device for decryption.
+ - Acceleration
+
+ * - `rx_tls_del`
+ - Number of TLS RX HW offload contexts deleted from device (connection has
+ finished).
+ - Acceleration
+
+ * - `rx[i]_tls_decrypted_packets`
+ - Number of successfully decrypted RX packets which were part of a TLS
+ stream.
+ - Acceleration
+
+ * - `rx[i]_tls_decrypted_bytes`
+ - Number of TLS payload bytes in RX packets which were successfully
+ decrypted.
+ - Acceleration
+
+ * - `rx[i]_tls_resync_req_pkt`
+ - Number of received TLS packets with a resync request.
+ - Acceleration
+
+ * - `rx[i]_tls_resync_req_start`
+ - Number of times the TLS async resync request was started.
+ - Acceleration
+
+ * - `rx[i]_tls_resync_req_end`
+ - Number of times the TLS async resync request ended properly, providing
+ the HW-tracked tcp-seq.
+ - Acceleration
+
+ * - `rx[i]_tls_resync_req_skip`
+ - Number of times the TLS async resync request procedure was started but
+ not properly ended.
+ - Error
+
+ * - `rx[i]_tls_resync_res_ok`
+ - Number of times the TLS resync response call to the driver was
+ successfully handled.
+ - Acceleration
+
+ * - `rx[i]_tls_resync_res_retry`
+ - Number of times the TLS resync response call to the driver was
+ reattempted when ICOSQ is full.
+ - Error
+
+ * - `rx[i]_tls_resync_res_skip`
+ - Number of times the TLS resync response call to the driver was terminated
+ unsuccessfully.
+ - Error
+
+ * - `rx[i]_tls_err`
+ - Number of times when CQE TLS offload was problematic.
+ - Error
+
+ * - `tx[i]_tls_encrypted_packets`
+ - The number of send packets that are TLS encrypted by the kernel.
+ - Acceleration
+
+ * - `tx[i]_tls_encrypted_bytes`
+ - The number of send bytes that are TLS encrypted by the kernel.
+ - Acceleration
+
+ * - `tx[i]_tls_ooo`
+ - Number of times out-of-order TLS SQE fragments were handled on ring i.
+ - Acceleration
+
+ * - `tx[i]_tls_dump_packets`
+ - Number of TLS decrypted packets copied from the NIC over DMA.
+ - Acceleration
+
+ * - `tx[i]_tls_dump_bytes`
+ - Number of TLS decrypted bytes copied from the NIC over DMA.
+ - Acceleration
+
+ * - `tx[i]_tls_resync_bytes`
+ - Number of TLS bytes requested to be resynchronized in order to be
+ decrypted.
+ - Acceleration
+
+ * - `tx[i]_tls_skip_no_sync_data`
+ - Number of TLS send data segments that can safely be skipped / do not
+ need to be decrypted.
+ - Acceleration
+
+ * - `tx[i]_tls_drop_no_sync_data`
+ - Number of TLS send data segments that were dropped due to retransmission
+ of TLS data.
+ - Acceleration
+
+ * - `ptp_cq[i]_abort`
+ - Number of times a CQE has to be skipped in precision time protocol due to
+ a skew between the port timestamp and CQE timestamp being greater than
+ 128 seconds.
+ - Error
+
+ * - `ptp_cq[i]_abort_abs_diff_ns`
+ - Accumulation of time differences between the port timestamp and CQE
+ timestamp when the difference is greater than 128 seconds in precision
+ time protocol.
+ - Error
+
+.. [#ring_global] The corresponding ring and global counters do not share the
+ same name (i.e. do not follow the common naming scheme).
+
+vPort Counters
+--------------
+Counters on the NIC port that is connected to an eSwitch.
+
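+A minimal illustrative way to sample these counters with `ethtool -S`; the
+interface name is hypothetical::
+
+ $ ethtool -S eth0 | grep vport
+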
+.. flat-table:: vPort Counter Table
+ :widths: 2 3 1
+
+ * - Counter
+ - Description
+ - Type
+
+ * - `rx_vport_unicast_packets`
+ - Unicast packets received, steered to a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `rx_vport_unicast_bytes`
+ - Unicast bytes received, steered to a port including Raw Ethernet QP/DPDK
+ traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `tx_vport_unicast_packets`
+ - Unicast packets transmitted, steered from a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `tx_vport_unicast_bytes`
+ - Unicast bytes transmitted, steered from a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `rx_vport_multicast_packets`
+ - Multicast packets received, steered to a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `rx_vport_multicast_bytes`
+ - Multicast bytes received, steered to a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `tx_vport_multicast_packets`
+ - Multicast packets transmitted, steered from a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `tx_vport_multicast_bytes`
+ - Multicast bytes transmitted, steered from a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `rx_vport_broadcast_packets`
+ - Broadcast packets received, steered to a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `rx_vport_broadcast_bytes`
+ - Broadcast bytes received, steered to a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `tx_vport_broadcast_packets`
+ - Broadcast packets transmitted, steered from a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `tx_vport_broadcast_bytes`
+ - Broadcast bytes transmitted, steered from a port including Raw Ethernet
+ QP/DPDK traffic, excluding RDMA traffic.
+ - Informative
+
+ * - `rx_vport_rdma_unicast_packets`
+ - RDMA unicast packets received, steered to a port (the counter counts
+ RoCE/UD/RC traffic) [#accel]_.
+ - Acceleration
+
+ * - `rx_vport_rdma_unicast_bytes`
+ - RDMA unicast bytes received, steered to a port (the counter counts
+ RoCE/UD/RC traffic) [#accel]_.
+ - Acceleration
+
+ * - `tx_vport_rdma_unicast_packets`
+ - RDMA unicast packets transmitted, steered from a port (the counter counts
+ RoCE/UD/RC traffic) [#accel]_.
+ - Acceleration
+
+ * - `tx_vport_rdma_unicast_bytes`
+ - RDMA unicast bytes transmitted, steered from a port (the counter counts
+ RoCE/UD/RC traffic) [#accel]_.
+ - Acceleration
+
+ * - `rx_vport_rdma_multicast_packets`
+ - RDMA multicast packets received, steered to a port (the counter counts
+ RoCE/UD/RC traffic) [#accel]_.
+ - Acceleration
+
+ * - `rx_vport_rdma_multicast_bytes`
+ - RDMA multicast bytes received, steered to a port (the counter counts
+ RoCE/UD/RC traffic) [#accel]_.
+ - Acceleration
+
+ * - `tx_vport_rdma_multicast_packets`
+ - RDMA multicast packets transmitted, steered from a port (the counter counts
+ RoCE/UD/RC traffic) [#accel]_.
+ - Acceleration
+
+ * - `tx_vport_rdma_multicast_bytes`
+ - RDMA multicast bytes transmitted, steered from a port (the counter counts
+ RoCE/UD/RC traffic) [#accel]_.
+ - Acceleration
+
+ * - `rx_steer_missed_packets`
+ - Number of packets that were received by the NIC but discarded because
+ they did not match any flow in the NIC flow table.
+ - Error
+
+ * - `rx_packets`
+ - Representor only: packets received, that were handled by the hypervisor.
+ - Informative
+
+ * - `rx_bytes`
+ - Representor only: bytes received, that were handled by the hypervisor.
+ - Informative
+
+ * - `tx_packets`
+ - Representor only: packets transmitted, that were handled by the
+ hypervisor.
+ - Informative
+
+ * - `tx_bytes`
+ - Representor only: bytes transmitted, that were handled by the hypervisor.
+ - Informative
+
+ * - `dev_internal_queue_oob`
+ - The number of dropped packets due to lack of receive WQEs for an internal
+ device RQ.
+ - Error
+
+Physical Port Counters
+----------------------
+The physical port counters are the counters on the external port connecting the
+adapter to the network. This measuring point holds information on standardized
+counters like IEEE 802.3, RFC 2863, RFC 2819 and RFC 3635, and additional
+counters like flow control, FEC and more.
+
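+The link-quality checks suggested below can be done by sampling the related
+counters together; a hypothetical example (the interface name is illustrative)::
+
+ $ ethtool -S eth0 | grep -E 'rx_crc_errors_phy|rx_symbol_err_phy|rx_corrected_bits_phy'
+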
+.. flat-table:: Physical Port Counter Table
+ :widths: 2 3 1
+
+ * - Counter
+ - Description
+ - Type
+
+ * - `rx_packets_phy`
+ - The number of packets received on the physical port. This counter doesn’t
+ include packets that were discarded due to FCS, frame size and similar
+ errors.
+ - Informative
+
+ * - `tx_packets_phy`
+ - The number of packets transmitted on the physical port.
+ - Informative
+
+ * - `rx_bytes_phy`
+ - The number of bytes received on the physical port, including Ethernet
+ header and FCS.
+ - Informative
+
+ * - `tx_bytes_phy`
+ - The number of bytes transmitted on the physical port.
+ - Informative
+
+ * - `rx_multicast_phy`
+ - The number of multicast packets received on the physical port.
+ - Informative
+
+ * - `tx_multicast_phy`
+ - The number of multicast packets transmitted on the physical port.
+ - Informative
+
+ * - `rx_broadcast_phy`
+ - The number of broadcast packets received on the physical port.
+ - Informative
+
+ * - `tx_broadcast_phy`
+ - The number of broadcast packets transmitted on the physical port.
+ - Informative
+
+ * - `rx_crc_errors_phy`
+ - The number of received packets dropped due to FCS (Frame Check Sequence)
+ errors on the physical port. If this counter is increasing at a high
+ rate, check the link quality using the `rx_symbol_err_phy` and
+ `rx_corrected_bits_phy` counters below.
+ - Error
+
+ * - `rx_in_range_len_errors_phy`
+ - The number of received packets dropped due to length/type errors on a
+ physical port.
+ - Error
+
+ * - `rx_out_of_range_len_phy`
+ - The number of received packets dropped due to a length greater than
+ allowed on a physical port. If this counter is increasing, it implies that
+ the peer connected to the adapter has a larger MTU configured. Using the
+ same MTU configuration should resolve this issue.
+ - Error
+
+ * - `rx_oversize_pkts_phy`
+ - The number of received packets dropped due to a length which exceeds the
+ MTU size on a physical port. If this counter is increasing, it implies
+ that the peer connected to the adapter has a larger MTU configured. Using
+ the same MTU configuration should resolve this issue.
+ - Error
+
+ * - `rx_symbol_err_phy`
+ - The number of received packets dropped due to physical coding errors
+ (symbol errors) on a physical port.
+ - Error
+
+ * - `rx_mac_control_phy`
+ - The number of MAC control packets received on the physical port.
+ - Informative
+
+ * - `tx_mac_control_phy`
+ - The number of MAC control packets transmitted on the physical port.
+ - Informative
+
+ * - `rx_pause_ctrl_phy`
+ - The number of link layer pause packets received on a physical port. If
+ this counter is increasing, it implies that the network is congested and
+ cannot absorb the traffic coming from the adapter.
+ - Informative
+
+ * - `tx_pause_ctrl_phy`
+ - The number of link layer pause packets transmitted on a physical port. If
+ this counter is increasing, it implies that the NIC is congested and
+ cannot absorb the traffic coming from the network.
+ - Informative
+
+ * - `rx_unsupported_op_phy`
+ - The number of MAC control packets received with unsupported opcode on a
+ physical port.
+ - Error
+
+ * - `rx_discards_phy`
+ - The number of received packets dropped due to lack of buffers on a
+ physical port. If this counter is increasing, it implies that the adapter
+ is congested and cannot absorb the traffic coming from the network.
+ - Error
+
+ * - `tx_discards_phy`
+ - The number of packets which were discarded on transmission even though no
+ errors were detected. The drop might occur due to the link being in a down
+ state, head-of-line drop, pause from the network, etc.
+ - Error
+
+ * - `tx_errors_phy`
+ - The number of transmitted packets dropped due to a length which exceeds
+ the MTU size on a physical port.
+ - Error
+
+ * - `rx_undersize_pkts_phy`
+ - The number of received packets dropped due to a length shorter than 64
+ bytes on a physical port. If this counter is increasing, it implies that
+ the peer connected to the adapter has a non-standard MTU configured, or
+ that a malformed packet arrived.
+ - Error
+
+ * - `rx_fragments_phy`
+ - The number of received packets dropped due to a length shorter than 64
+ bytes and an FCS error on a physical port. If this counter is increasing,
+ it implies that the peer connected to the adapter has a non-standard MTU
+ configured.
+ - Error
+
+ * - `rx_jabbers_phy`
+ - The number of received packets dropped due to a length longer than 64
+ bytes and an FCS error on a physical port.
+ - Error
+
+ * - `rx_64_bytes_phy`
+ - The number of packets received on the physical port with size of 64 bytes.
+ - Informative
+
+ * - `rx_65_to_127_bytes_phy`
+ - The number of packets received on the physical port with size of 65 to
+ 127 bytes.
+ - Informative
+
+ * - `rx_128_to_255_bytes_phy`
+ - The number of packets received on the physical port with size of 128 to
+ 255 bytes.
+ - Informative
+
+ * - `rx_256_to_511_bytes_phy`
+ - The number of packets received on the physical port with size of 256 to
+ 511 bytes.
+ - Informative
+
+ * - `rx_512_to_1023_bytes_phy`
+ - The number of packets received on the physical port with size of 512 to
+ 1023 bytes.
+ - Informative
+
+ * - `rx_1024_to_1518_bytes_phy`
+ - The number of packets received on the physical port with size of 1024 to
+ 1518 bytes.
+ - Informative
+
+ * - `rx_1519_to_2047_bytes_phy`
+ - The number of packets received on the physical port with size of 1519 to
+ 2047 bytes.
+ - Informative
+
+ * - `rx_2048_to_4095_bytes_phy`
+ - The number of packets received on the physical port with size of 2048 to
+ 4095 bytes.
+ - Informative
+
+ * - `rx_4096_to_8191_bytes_phy`
+ - The number of packets received on the physical port with size of 4096 to
+ 8191 bytes.
+ - Informative
+
+ * - `rx_8192_to_10239_bytes_phy`
+ - The number of packets received on the physical port with size of 8192 to
+ 10239 bytes.
+ - Informative
+
+ * - `link_down_events_phy`
+ - The number of times the link operational state changed to down. If this
+ counter is increasing, it may indicate port flapping. You may need to
+ replace the cable/transceiver.
+ - Error
+
+ * - `rx_out_of_buffer`
+ - Number of times the receive queue had no software buffers allocated for
+ the adapter's incoming traffic.
+ - Error
+
+ * - `module_bus_stuck`
+ - The number of times that a short-wire condition was detected on the
+ module's I\ :sup:`2`\C bus (data or clock). You may need to replace the
+ cable/transceiver.
+ - Error
+
+ * - `module_high_temp`
+ - The number of times that the module temperature was too high. If this
+ issue persists, you may need to check the ambient temperature or replace
+ the cable/transceiver module.
+ - Error
+
+ * - `module_bad_shorted`
+ - The number of times that the module cables were shorted. You may need to
+ replace the cable/transceiver module.
+ - Error
+
+ * - `module_unplug`
+ - The number of times that module was ejected.
+ - Informative
+
+ * - `rx_buffer_passed_thres_phy`
+ - The number of events where the port receive buffer was over 85% full.
+ - Informative
+
+ * - `tx_pause_storm_warning_events`
+ - The number of times the device was sending pauses for a long period of
+ time.
+ - Informative
+
+ * - `tx_pause_storm_error_events`
+ - The number of times the device was sending pauses for a long period of
+ time, reaching the timeout and disabling transmission of pause frames.
+ During the period when pause frames were disabled, drops could have
+ occurred.
+ - Error
+
+ * - `rx[i]_buff_alloc_err`
+ - Failed to allocate a buffer for a received packet (or SKB) on ring i.
+ - Error
+
+ * - `rx_bits_phy`
+ - This counter provides information on the total amount of traffic that
+ could have been received and can be used as a guideline to measure the
+ ratio of errored traffic in `rx_pcs_symbol_err_phy` and
+ `rx_corrected_bits_phy`.
+ - Informative
+
+ * - `rx_pcs_symbol_err_phy`
+ - This counter counts the number of symbol errors that weren't corrected
+ by the FEC correction algorithm, or for which the FEC algorithm was not
+ active on this interface. If this counter is increasing, it implies
+ that the link
+ between the NIC and the network is suffering from high BER, and that
+ traffic is lost. You may need to replace the cable/transceiver. The error
+ rate is the number of `rx_pcs_symbol_err_phy` divided by the number of
+ `rx_bits_phy` on a specific time frame.
+ - Error
+
+ * - `rx_corrected_bits_phy`
+ - The number of corrected bits on this port according to active FEC
+ (RS/FC). If this counter is increasing, it implies that the link between
+ the NIC and the network is suffering from high BER. The corrected bit
+ rate is the number of `rx_corrected_bits_phy` divided by the number of
+ `rx_bits_phy` on a specific time frame.
+ - Error
+
+ * - `rx_err_lane_[l]_phy`
+ - This counter counts the number of physical raw errors per lane l index.
+ The counter counts errors before FEC corrections. If this counter is
+ increasing, it implies that the link between the NIC and the network is
+ suffering from high BER, and that traffic might be lost. You may need to
+ replace the cable/transceiver. Please check it together with
+ `rx_corrected_bits_phy`.
+ - Error
+
+ * - `rx_global_pause`
+ - The number of pause packets received on the physical port. If this
+ counter is increasing, it implies that the network is congested and
+ cannot absorb the traffic coming from the adapter. Note: This counter is
+ only enabled when global pause mode is enabled.
+ - Informative
+
+ * - `rx_global_pause_duration`
+ - The duration of pause received (in microSec) on the physical port. The
+ counter represents the time the port did not send any traffic. If this
+ counter is increasing, it implies that the network is congested and
+ cannot absorb the traffic coming from the adapter. Note: This counter is
+ only enabled when global pause mode is enabled.
+ - Informative
+
+ * - `tx_global_pause`
+ - The number of pause packets transmitted on a physical port. If this
+ counter is increasing, it implies that the adapter is congested and
+ cannot absorb the traffic coming from the network. Note: This counter is
+ only enabled when global pause mode is enabled.
+ - Informative
+
+ * - `tx_global_pause_duration`
+ - The duration of pause transmitted (in microSec) on the physical port.
+ Note: This counter is only enabled when global pause mode is enabled.
+ - Informative
+
+ * - `rx_global_pause_transition`
+ - The number of times a transition from Xoff to Xon on the physical port
+ has occurred. Note: This counter is only enabled when global pause mode
+ is enabled.
+ - Informative
+
+ * - `rx_if_down_packets`
+ - The number of received packets that were dropped due to interface down.
+ - Informative
+
+Priority Port Counters
+----------------------
+The following counters are physical port counters that are counted per L2
+priority (0-7).
+
+**Note:** `p` in the counter name represents the priority.
+
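+For example, to sample the counters of priority 3 (the interface name is
+illustrative)::
+
+ $ ethtool -S eth0 | grep prio3
+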
+.. flat-table:: Priority Port Counter Table
+ :widths: 2 3 1
+
+ * - Counter
+ - Description
+ - Type
+
+ * - `rx_prio[p]_bytes`
+ - The number of bytes received with priority p on the physical port.
+ - Informative
+
+ * - `rx_prio[p]_packets`
+ - The number of packets received with priority p on the physical port.
+ - Informative
+
+ * - `tx_prio[p]_bytes`
+ - The number of bytes transmitted on priority p on the physical port.
+ - Informative
+
+ * - `tx_prio[p]_packets`
+ - The number of packets transmitted on priority p on the physical port.
+ - Informative
+
+ * - `rx_prio[p]_pause`
+ - The number of pause packets received with priority p on a physical port.
+ If this counter is increasing, it implies that the network is congested
+ and cannot absorb the traffic coming from the adapter. Note: This counter
+ is available only if PFC was enabled on priority p.
+ - Informative
+
+ * - `rx_prio[p]_pause_duration`
+ - The duration of pause received (in microSec) on priority p on the
+ physical port. The counter represents the time the port did not send any
+ traffic on this priority. If this counter is increasing, it implies that
+ the network is congested and cannot absorb the traffic coming from the
+ adapter. Note: This counter is available only if PFC was enabled on
+ priority p.
+ - Informative
+
+ * - `rx_prio[p]_pause_transition`
+ - The number of times a transition from Xoff to Xon on priority p on the
+ physical port has occurred. Note: This counter is available only if PFC
+ was enabled on priority p.
+ - Informative
+
+ * - `tx_prio[p]_pause`
+ - The number of pause packets transmitted on priority p on a physical port.
+ If this counter is increasing, it implies that the adapter is congested
+ and cannot absorb the traffic coming from the network. Note: This counter
+ is available only if PFC was enabled on priority p.
+ - Informative
+
+ * - `tx_prio[p]_pause_duration`
+ - The duration of pause transmitted (in microSec) on priority p on the
+ physical port. Note: This counter is available only if PFC was enabled on
+ priority p.
+ - Informative
+
+ * - `rx_prio[p]_buf_discard`
+ - The number of packets discarded by the device due to lack of per-host
+ receive buffers.
+ - Informative
+
+ * - `rx_prio[p]_cong_discard`
+ - The number of packets discarded by the device due to per-host congestion.
+ - Informative
+
+ * - `rx_prio[p]_marked`
+ - The number of packets ECN-marked by the device due to per-host congestion.
+ - Informative
+
+ * - `rx_prio[p]_discards`
+ - The number of packets discarded by the device due to lack of receive
+ buffers.
+ - Informative
+
+Device Counters
+---------------
+.. flat-table:: Device Counter Table
+ :widths: 2 3 1
+
+ * - Counter
+ - Description
+ - Type
+
+ * - `rx_pci_signal_integrity`
+ - Counts physical layer PCIe signal integrity errors, the number of
+ transitions to recovery due to framing errors and CRC (dlp and tlp). If
+ this counter is rising, try moving the adapter card to a different slot
+ to rule out a bad PCI slot. Validate that you are running with the latest
+ firmware available and the latest server BIOS version.
+ - Error
+
+ * - `tx_pci_signal_integrity`
+ - Counts physical layer PCIe signal integrity errors, the number of
+ transitions to recovery initiated by the other side (moving to recovery
+ due to getting TS/EIEOS). If this counter is rising, try moving the
+ adapter card to a different slot to rule out a bad PCI slot. Validate
+ that you are running with the latest firmware available and the latest
+ server BIOS version.
+ - Error
+
+ * - `outbound_pci_buffer_overflow`
+ - The number of packets dropped due to PCI buffer overflow. If this counter
+ is rising at a high rate, it might indicate that the receive traffic rate
+ for a host is larger than the PCIe bus can absorb, and therefore
+ congestion occurs.
+ - Informative
+
+ * - `outbound_pci_stalled_rd`
+ - The percentage (in the range 0...100) of time within the last second that
+ the NIC had outbound non-posted read requests but could not perform the
+ operation due to insufficient posted credits.
+ - Informative
+
+ * - `outbound_pci_stalled_wr`
+ - The percentage (in the range 0...100) of time within the last second that
+ the NIC had outbound posted write requests but could not perform the
+ operation due to insufficient posted credits.
+ - Informative
+
+ * - `outbound_pci_stalled_rd_events`
+ - The number of seconds where `outbound_pci_stalled_rd` was above 30%.
+ - Informative
+
+ * - `outbound_pci_stalled_wr_events`
+ - The number of seconds where `outbound_pci_stalled_wr` was above 30%.
+ - Informative
+
+ * - `dev_out_of_buffer`
+ - The number of times the device-owned queue did not have enough buffers
+ allocated.
+ - Error
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
new file mode 100644
index 000000000000..9b5c40ba7f0d
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
@@ -0,0 +1,224 @@
+.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+.. include:: <isonum.txt>
+
+=======
+Devlink
+=======
+
+:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+Contents
+========
+
+- `Info`_
+- `Parameters`_
+- `Health reporters`_
+
+Info
+====
+
+The devlink info reports the running and stored firmware versions on the
+device. It also prints the device PSID, which represents the HCA board type
+ID.
+
+User command example::
+
+ $ devlink dev info pci/0000:00:06.0
+ pci/0000:00:06.0:
+ driver mlx5_core
+ versions:
+ fixed:
+ fw.psid MT_0000000009
+ running:
+ fw.version 16.26.0100
+ stored:
+ fw.version 16.26.0100
+
+Parameters
+==========
+
+flow_steering_mode: Device flow steering mode
+---------------------------------------------
+The flow steering mode parameter controls the flow steering mode of the driver.
+Two modes are supported:
+
+1. 'dmfs' - Device managed flow steering.
+2. 'smfs' - Software/Driver managed flow steering.
+
+In DMFS mode, the HW steering entities are created and managed through the
+firmware.
+In SMFS mode, the HW steering entities are created and managed by the driver
+directly, programming the hardware without firmware intervention.
+
+SMFS mode is faster and provides a better rule insertion rate than the default
+DMFS mode.
+
+User command examples:
+
+- Set SMFS flow steering mode::
+
+ $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime
+
+- Read device flow steering mode::
+
+ $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
+ pci/0000:06:00.0:
+ name flow_steering_mode type driver-specific
+ values:
+ cmode runtime value smfs
+
+enable_roce: RoCE enablement state
+----------------------------------
+If the device supports RoCE disablement, RoCE enablement state controls device
+support for RoCE capability. Otherwise, the control occurs in the driver stack.
+When RoCE is disabled at the driver level, only raw ethernet QPs are supported.
+
+To change RoCE enablement state, a user must change the driverinit cmode value
+and run devlink reload.
+
+User command examples:
+
+- Disable RoCE::
+
+ $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
+ $ devlink dev reload pci/0000:06:00.0
+
+- Read RoCE enablement state::
+
+ $ devlink dev param show pci/0000:06:00.0 name enable_roce
+ pci/0000:06:00.0:
+ name enable_roce type generic
+ values:
+ cmode driverinit value true
+
+esw_port_metadata: Eswitch port metadata state
+----------------------------------------------
+When applicable, disabling eswitch metadata can increase packet rate by up to
+20%, depending on the use case and packet sizes.
+
+Eswitch port metadata state controls whether to internally tag packets with
+metadata. Metadata tagging must be enabled for multi-port RoCE, failover
+between representors and stacked devices.
+By default, metadata is enabled on the supported devices in E-switch.
+Metadata is applicable only for E-switch in switchdev mode, and users may
+disable it when NONE of the below use cases will be in use:
+
+1. HCA is in Dual/multi-port RoCE mode.
+2. VF/SF representor bonding (usually used for live migration).
+3. Stacked devices.
+
+When metadata is disabled, the above use cases will fail to initialize if
+users try to enable them.
+
+- Show eswitch port metadata::
+
+ $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata
+ pci/0000:06:00.0:
+ name esw_port_metadata type driver-specific
+ values:
+ cmode runtime value true
+
+- Disable eswitch port metadata::
+
+ $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime
+
+- Change eswitch mode to switchdev mode, after choosing the metadata value::
+
+ $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+
+Health reporters
+================
+
+tx reporter
+-----------
+The tx reporter is responsible for reporting and recovering from the following
+two error scenarios:
+
+- tx timeout
+ Report on kernel tx timeout detection.
+ Recover by searching for lost interrupts.
+- tx error completion
+ Report on error tx completion.
+ Recover by flushing the tx queue and resetting it.
+
+The tx reporter also supports an on-demand diagnose callback, through which
+it provides real-time information on the status of its send queues.
+
+User commands examples:
+
+- Diagnose send queues status::
+
+ $ devlink health diagnose pci/0000:82:00.0 reporter tx
+
+NOTE: This command has valid output only when the interface is up; otherwise, the command has empty output.
+
+- Show the number of tx errors indicated, the number of recover flows that
+ ended successfully, whether auto-recovery is enabled, and the grace period
+ since the last recovery::
+
+ $ devlink health show pci/0000:82:00.0 reporter tx
+
+rx reporter
+-----------
+The rx reporter is responsible for reporting and recovering from the following
+two error scenarios:
+
+- rx queues' initialization (population) timeout
+ Population of rx queues' descriptors on ring initialization is done
+ in napi context by triggering an IRQ. If the minimum number of
+ descriptors cannot be obtained, a timeout occurs, and descriptors
+ are recovered by polling the EQ (Event Queue).
+- rx completions with errors (reported by HW on interrupt context)
+ Report on rx completion error.
+ Recover (if needed) by flushing the related queue and resetting it.
+
+The rx reporter also supports an on-demand diagnose callback, through which
+it provides real-time information on the status of its receive queues.
+
+- Diagnose rx queues' status and corresponding completion queue::
+
+ $ devlink health diagnose pci/0000:82:00.0 reporter rx
+
+NOTE: This command has valid output only when interface is up. Otherwise, the command has empty output.
+
+- Show the number of rx errors indicated, the number of recover flows that
+ ended successfully, whether auto-recovery is enabled, and the grace period
+ since the last recovery::
+
+ $ devlink health show pci/0000:82:00.0 reporter rx
+
+fw reporter
+-----------
+The fw reporter implements `diagnose` and `dump` callbacks.
+It follows symptoms of a fw error, such as a fw syndrome, by triggering a fw
+core dump and storing it in the dump buffer.
+The fw reporter diagnose command can be triggered at any time by the user to
+check the current fw status.
+
+User commands examples:
+
+- Check fw health status::
+
+ $ devlink health diagnose pci/0000:82:00.0 reporter fw
+
+- Read FW core dump if already stored or trigger new one::
+
+ $ devlink health dump show pci/0000:82:00.0 reporter fw
+
+NOTE: This command can run only on the PF which has fw tracer ownership;
+running it on another PF or any VF will return "Operation not permitted".
+
+fw fatal reporter
+-----------------
+The fw fatal reporter implements `dump` and `recover` callbacks.
+It follows fatal error indications via a CR-space dump and a recovery flow.
+The CR-space dump uses the vsc interface, which remains valid even if the FW
+command interface is not functional, as is the case in most FW fatal errors.
+The recover function runs a recovery flow which reloads the driver and
+triggers a fw reset if needed.
+On a firmware error, the health buffer is dumped to dmesg. The log level is
+derived from the error's severity (given in the health buffer).
+
+User commands examples:
+
+- Run fw recover flow manually::
+
+ $ devlink health recover pci/0000:82:00.0 reporter fw_fatal
+
+- Read FW CR-space dump if already stored or trigger new one::
+
+ $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
+
+NOTE: This command can run only on PF.
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
new file mode 100644
index 000000000000..3fdcd6b61ccf
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+.. include:: <isonum.txt>
+
+Mellanox ConnectX(R) mlx5 core VPI Network Driver
+=================================================
+
+:Copyright: |copy| 2019, Mellanox Technologies LTD.
+:Copyright: |copy| 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ kconfig
+ devlink
+ switchdev
+ tracepoints
+ counters
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
new file mode 100644
index 000000000000..43b1f7e87ec4
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
@@ -0,0 +1,168 @@
+.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+.. include:: <isonum.txt>
+
+=======================================
+Enabling the driver and kconfig options
+=======================================
+
+:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+| mlx5 core is modular and most of the major mlx5 core driver features can be selected (compiled in/out)
+| at build time via kernel Kconfig flags.
+| Basic features, ethernet net device rx/tx offloads and XDP, are available with the most basic flags
+| CONFIG_MLX5_CORE=y/m and CONFIG_MLX5_CORE_EN=y.
+| For the list of advanced features, please see below.
+
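+For example, a built kernel config can be checked for the basic flags; the
+path and values below are illustrative::
+
+ $ grep -E 'CONFIG_MLX5_CORE(_EN)?=' .config
+ CONFIG_MLX5_CORE=m
+ CONFIG_MLX5_CORE_EN=y
+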
+**CONFIG_MLX5_BRIDGE=(y/n)**
+
+| Enable :ref:`Ethernet Bridging (BRIDGE) offloading support <mlx5_bridge_offload>`.
+| This will provide the ability to add representors of mlx5 uplink and VF
+| ports to a Linux bridge and to offload rules for traffic between such ports.
+| Supports VLANs (trunk and access modes).
+
+
+**CONFIG_MLX5_CORE=(y/m/n)** (module mlx5_core.ko)
+
+| The driver can be enabled by choosing CONFIG_MLX5_CORE=y/m in kernel config.
+| This will provide the mlx5 core driver for mlx5 ulps (mlx5e, mlx5_ib) to interface with.
+
+
+**CONFIG_MLX5_CORE_EN=(y/n)**
+
+| Choosing this option will allow basic ethernet netdevice support with all of the standard rx/tx offloads.
+| mlx5e is the mlx5 ulp driver which provides the netdevice kernel interface; when chosen, mlx5e will be
+| built into mlx5_core.ko.
+
+
+**CONFIG_MLX5_CORE_EN_DCB=(y/n)**:
+
+| Enables `Data Center Bridging (DCB) Support <https://community.mellanox.com/s/article/howto-auto-config-pfc-and-ets-on-connectx-4-via-lldp-dcbx>`_.
+
+
+**CONFIG_MLX5_CORE_IPOIB=(y/n)**
+
+| IPoIB offloads & acceleration support.
+| Requires CONFIG_MLX5_CORE_EN to provide an accelerated interface for the rdma
+| IPoIB ulp netdevice.
+
+
+**CONFIG_MLX5_CLS_ACT=(y/n)**
+
+| Enables offload support for TC classifier action (NET_CLS_ACT).
+| Works in both native NIC mode and Switchdev SRIOV mode.
+| Flow-based classifiers, such as those registered through
+| `tc-flower(8)`, are processed by the device, rather than the
+| host. Actions applied to matching classification results are then
+| effectively instant due to the offload.
+
+
+**CONFIG_MLX5_EN_ARFS=(y/n)**
+
+| Enables hardware-accelerated receive flow steering (aRFS) support and ntuple filtering.
+| https://community.mellanox.com/s/article/howto-configure-arfs-on-connectx-4
+
+
+**CONFIG_MLX5_EN_IPSEC=(y/n)**
+
+| Enables `IPSec XFRM cryptography-offload acceleration <https://support.mellanox.com/s/article/ConnectX-6DX-Bluefield-2-IPsec-HW-Full-Offload-Configuration-Guide>`_.
+
+
+**CONFIG_MLX5_EN_MACSEC=(y/n)**
+
+| Build support for MACsec cryptography-offload acceleration in the NIC.
+
+
+**CONFIG_MLX5_EN_RXNFC=(y/n)**
+
+| Enables ethtool receive network flow classification, which allows user-defined
+| flow rules to direct traffic into an arbitrary rx queue via the ethtool set/get_rxnfc API.
+
+
+**CONFIG_MLX5_EN_TLS=(y/n)**
+
+| TLS cryptography-offload acceleration.
+
+
+**CONFIG_MLX5_ESWITCH=(y/n)**
+
+| Ethernet SRIOV E-Switch support in ConnectX NIC. E-Switch provides internal SRIOV packet steering
+| and switching for the enabled VFs and PF in two available modes:
+| 1) `Legacy SRIOV mode (L2 mac vlan steering based) <https://community.mellanox.com/s/article/howto-configure-sr-iov-for-connectx-4-connectx-5-with-kvm--ethernet-x>`_.
+| 2) `Switchdev mode (eswitch offloads) <https://www.mellanox.com/related-docs/prod_software/ASAP2_Hardware_Offloading_for_vSwitches_User_Manual_v4.4.pdf>`_.
+
+
+**CONFIG_MLX5_FPGA=(y/n)**
+
+| Build support for the Innova family of network cards by Mellanox Technologies.
+| Innova network cards are composed of a ConnectX chip and an FPGA chip on one board.
+| If you select this option, the mlx5_core driver will include the Innova FPGA core and allow
+| building sandbox-specific client drivers.
+
+
+**CONFIG_MLX5_INFINIBAND=(y/n/m)** (module mlx5_ib.ko)
+
+| Provides low-level InfiniBand/RDMA and `RoCE <https://community.mellanox.com/s/article/recommended-network-configuration-examples-for-roce-deployment>`_ support.
+
+
+**CONFIG_MLX5_MPFS=(y/n)**
+
+| Ethernet Multi-Physical Function Switch (MPFS) support in ConnectX NIC.
+| MPFS is required when `Multi-Host <http://www.mellanox.com/page/multihost>`_ configuration is enabled, to allow passing
+| user-configured unicast MAC addresses to the requesting PF.
+
+
+**CONFIG_MLX5_SF=(y/n)**
+
+| Build support for subfunction.
+| Subfunctions are more lightweight than PCI SRIOV VFs. Choosing this option
+| will enable support for creating subfunction devices.
+
+
+**CONFIG_MLX5_SF_MANAGER=(y/n)**
+
+| Build support for the subfunction port in the NIC. A Mellanox subfunction
+| port is managed through devlink. A subfunction supports RDMA, netdevice
+| and vdpa devices. It is similar to a SRIOV VF but it doesn't require
+| SRIOV support.
+
+
+**CONFIG_MLX5_SW_STEERING=(y/n)**
+
+| Build support for software-managed steering in the NIC.
+
+
+**CONFIG_MLX5_TC_CT=(y/n)**
+
+| Support offloading connection tracking rules via tc ct action.
+
+
+**CONFIG_MLX5_TC_SAMPLE=(y/n)**
+
+| Support offloading sample rules via tc sample action.
+
+
+**CONFIG_MLX5_VDPA=(y/n)**
+
+| Support library for Mellanox VDPA drivers. Provides code that is
+| common for all types of VDPA drivers. The following drivers are planned:
+| net, block.
+
+
+**CONFIG_MLX5_VDPA_NET=(y/n)**
+
+| VDPA network driver for ConnectX6 and newer. Provides offloading
+| of virtio net datapath such that descriptors put on the ring will
+| be executed by the hardware. It also supports a variety of stateless
+| offloads depending on the actual device used and firmware version.
+
+
+**CONFIG_MLX5_VFIO_PCI=(y/n)**
+
+| This provides migration support for MLX5 devices using the VFIO framework.
+
+
+**External options** (choose if the corresponding mlx5 feature is required)
+
+- CONFIG_MLXFW: When chosen, mlx5 firmware flashing support will be enabled (via devlink and ethtool).
+- CONFIG_PTP_1588_CLOCK: When chosen, mlx5 ptp support will be enabled.
+- CONFIG_VXLAN: When chosen, mlx5 vxlan support will be enabled.
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst
new file mode 100644
index 000000000000..01deedb71597
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst
@@ -0,0 +1,239 @@
+.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+.. include:: <isonum.txt>
+
+=========
+Switchdev
+=========
+
+:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+.. _mlx5_bridge_offload:
+
+Bridge offload
+==============
+
+The mlx5 driver implements support for offloading bridge rules when in switchdev
+mode. Linux bridge FDBs are automatically offloaded when an mlx5 switchdev
+representor is attached to a bridge.
+
+- Change device to switchdev mode::
+
+ $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+
+- Attach mlx5 switchdev representor 'enp8s0f0' to bridge netdev 'bridge1'::
+
+ $ ip link set enp8s0f0 master bridge1
+
+VLANs
+-----
+
+The following bridge VLAN functions are supported by mlx5:
+
+- VLAN filtering (including multiple VLANs per port)::
+
+ $ ip link set bridge1 type bridge vlan_filtering 1
+ $ bridge vlan add dev enp8s0f0 vid 2-3
+
+- VLAN push on bridge ingress::
+
+ $ bridge vlan add dev enp8s0f0 vid 3 pvid
+
+- VLAN pop on bridge egress::
+
+ $ bridge vlan add dev enp8s0f0 vid 3 untagged
+
+Subfunction
+===========
+
+mlx5 supports subfunction management using devlink port (see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface.
+
+A subfunction has its own function capabilities and its own resources. This
+means a subfunction has its own dedicated queues (txq, rxq, cq, eq). These
+queues are neither shared nor stolen from the parent PCI function.
+
+When a subfunction is RDMA capable, it has its own QP1, GID table, and RDMA
+resources that are neither shared nor stolen from the parent PCI function.
+
+A subfunction has a dedicated window in PCI BAR space that is not shared
+with the other subfunctions or the parent PCI function. This ensures that all
+devices (netdev, rdma, vdpa, etc.) of the subfunction access only the
+assigned PCI BAR space.
+
+A subfunction supports eswitch representation through which it supports tc
+offloads. The user configures the eswitch to send/receive packets from/to
+the subfunction port.
+
+Subfunctions share PCI-level resources such as PCI MSI-X IRQs with other
+subfunctions and/or with their parent PCI function.
+
+Example mlx5 software, system, and device view::
+
+ _______
+ | admin |
+ | user |----------
+ |_______| |
+ | |
+ ____|____ __|______ _________________
+ | | | | | |
+ | devlink | | tc tool | | user |
+ | tool | |_________| | applications |
+ |_________| | |_________________|
+ | | | |
+ | | | | Userspace
+ +---------|-------------|-------------------|----------|--------------------+
+ | | +----------+ +----------+ Kernel
+ | | | netdev | | rdma dev |
+ | | +----------+ +----------+
+ (devlink port add/del | ^ ^
+ port function set) | | |
+ | | +---------------|
+ _____|___ | | _______|_______
+ | | | | | mlx5 class |
+ | devlink | +------------+ | | drivers |
+ | kernel | | rep netdev | | |(mlx5_core,ib) |
+ |_________| +------------+ | |_______________|
+ | | | ^
+ (devlink ops) | | (probe/remove)
+ _________|________ | | ____|________
+ | subfunction | | +---------------+ | subfunction |
+ | management driver|----- | subfunction |---| driver |
+ | (mlx5_core) | | auxiliary dev | | (mlx5_core) |
+ |__________________| +---------------+ |_____________|
+ | ^
+ (sf add/del, vhca events) |
+ | (device add/del)
+ _____|____ ____|________
+ | | | subfunction |
+ | PCI NIC |--- activate/deactivate events--->| host driver |
+ |__________| | (mlx5_core) |
+ |_____________|
+
+A subfunction is created using the devlink port interface.
+
+- Change device to switchdev mode::
+
+ $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+
+- Add a devlink port of subfunction flavour::
+
+ $ devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
+ pci/0000:06:00.0/32768: type eth netdev eth6 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
+ function:
+ hw_addr 00:00:00:00:00:00 state inactive opstate detached
+
+- Show a devlink port of the subfunction::
+
+ $ devlink port show pci/0000:06:00.0/32768
+ pci/0000:06:00.0/32768: type eth netdev enp6s0pf0sf88 flavour pcisf pfnum 0 sfnum 88
+ function:
+ hw_addr 00:00:00:00:00:00 state inactive opstate detached
+
+- Delete a devlink port of subfunction after use::
+
+ $ devlink port del pci/0000:06:00.0/32768
+
+Function attributes
+===================
+
+The mlx5 driver provides a mechanism to set up PCI VF/SF function attributes
+in a unified way for SmartNIC and non-SmartNIC.
+
+This is supported only when the eswitch mode is set to switchdev. Port function
+configuration of the PCI VF/SF is supported through devlink eswitch port.
+
+Port function attributes should be set before the PCI VF/SF is enumerated by
+the driver.
+
+MAC address setup
+-----------------
+
+The mlx5 driver supports the devlink port function attr mechanism to set up
+the MAC address (refer to Documentation/networking/devlink/devlink-port.rst).
+
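+A hypothetical example, reusing the SF port index from the subfunction
+examples above::
+
+ $ devlink port function set pci/0000:06:00.0/32768 hw_addr 00:00:00:00:88:88
+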
+RoCE capability setup
+~~~~~~~~~~~~~~~~~~~~~
+Not all mlx5 PCI devices/SFs require RoCE capability.
+
+When RoCE capability is disabled, it saves 1 MByte of system memory per PCI
+device/SF.
+
+The mlx5 driver supports the devlink port function attr mechanism to set up
+RoCE capability (refer to Documentation/networking/devlink/devlink-port.rst).
+
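+A hypothetical example, assuming a VF port with devlink port index 2 and an
+iproute2 version that supports the RoCE attribute::
+
+ $ devlink port function set pci/0000:06:00.0/2 roce disable
+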
+migratable capability setup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Users who want mlx5 PCI VFs to be able to perform live migration need to
+explicitly enable the VF migratable capability.
+
+The mlx5 driver supports the devlink port function attr mechanism to set up
+the migratable capability (refer to
+Documentation/networking/devlink/devlink-port.rst).
+
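+A hypothetical example, assuming a VF port with devlink port index 2 and an
+iproute2 version that supports the migratable attribute::
+
+ $ devlink port function set pci/0000:06:00.0/2 migratable enable
+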
+SF state setup
+--------------
+
+To use the SF, the user must activate the SF using the SF function state
+attribute.
+
+- Get the state of the SF identified by its unique devlink port index::
+
+ $ devlink port show ens2f0npf0sf88
+ pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
+ function:
+ hw_addr 00:00:00:00:88:88 state inactive opstate detached
+
+- Activate the function and verify its state is active::
+
+ $ devlink port function set ens2f0npf0sf88 state active
+
+ $ devlink port show ens2f0npf0sf88
+ pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
+ function:
+ hw_addr 00:00:00:00:88:88 state active opstate detached
+
+Upon function activation, the PF driver instance gets the event from the
+device that a particular SF was activated. It's the cue to put the device on
+the bus, probe it, and instantiate the devlink instance and class-specific
+auxiliary devices for it.
+
+- Show the auxiliary device and port of the subfunction::
+
+ $ devlink dev show
+ auxiliary/mlx5_core.sf.4
+
+ $ devlink port show auxiliary/mlx5_core.sf.4/1
+ auxiliary/mlx5_core.sf.4/1: type eth netdev p0sf88 flavour virtual port 0 splittable false
+
+ $ rdma link show mlx5_0/1
+ link mlx5_0/1 state ACTIVE physical_state LINK_UP netdev p0sf88
+
+ $ rdma dev show
+ 8: rocep6s0f1: node_type ca fw 16.29.0550 node_guid 248a:0703:00b3:d113 sys_image_guid 248a:0703:00b3:d112
+ 13: mlx5_0: node_type ca fw 16.29.0550 node_guid 0000:00ff:fe00:8888 sys_image_guid 248a:0703:00b3:d112
+
+- Subfunction auxiliary device and class device hierarchy::
+
+ mlx5_core.sf.4
+ (subfunction auxiliary device)
+ /\
+ / \
+ / \
+ / \
+ / \
+ mlx5_core.eth.4 mlx5_core.rdma.4
+ (sf eth aux dev) (sf rdma aux dev)
+ | |
+ | |
+ p0sf88 mlx5_0
+ (sf netdev) (sf rdma device)
+
+Additionally, the SF port also gets an event when the driver attaches to the
+auxiliary device of the subfunction. This results in changing the operational
+state of the function. This provides visibility to the user to decide when it
+is safe to delete the SF port for graceful termination of the subfunction.
+
+- Show the SF port operational state::
+
+ $ devlink port show ens2f0npf0sf88
+ pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
+ function:
+ hw_addr 00:00:00:00:88:88 state active opstate attached
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/tracepoints.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/tracepoints.rst
new file mode 100644
index 000000000000..a9d3e123adc4
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/tracepoints.rst
@@ -0,0 +1,229 @@
+.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+.. include:: <isonum.txt>
+
+===========
+Tracepoints
+===========
+
+:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+The mlx5 driver provides internal tracepoints for tracking and debugging using
+the kernel tracepoint interfaces (refer to Documentation/trace/ftrace.rst).
+
+For the list of supported mlx5 events, check `/sys/kernel/debug/tracing/events/mlx5/`.
+
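+For example, to list them (assuming tracefs is mounted at the default debugfs
+location)::
+
+ $ ls /sys/kernel/debug/tracing/events/mlx5/
+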
+tc and eswitch offloads tracepoints:
+
+- mlx5e_configure_flower: trace flower filter actions and cookies offloaded to mlx5::
+
+ $ echo mlx5:mlx5e_configure_flower >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ tc-6535 [019] ...1 2672.404466: mlx5e_configure_flower: cookie=0000000067874a55 actions= REDIRECT
+
+- mlx5e_delete_flower: trace flower filter actions and cookies deleted from mlx5::
+
+ $ echo mlx5:mlx5e_delete_flower >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ tc-6569 [010] .N.1 2686.379075: mlx5e_delete_flower: cookie=0000000067874a55 actions= NULL
+
+- mlx5e_stats_flower: trace flower stats request::
+
+ $ echo mlx5:mlx5e_stats_flower >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ tc-6546 [010] ...1 2679.704889: mlx5e_stats_flower: cookie=0000000060eb3d6a bytes=0 packets=0 lastused=4295560217
+
+- mlx5e_tc_update_neigh_used_value: trace tunnel rule neigh update value offloaded to mlx5::
+
+ $ echo mlx5:mlx5e_tc_update_neigh_used_value >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u48:4-8806 [009] ...1 55117.882428: mlx5e_tc_update_neigh_used_value: netdev: ens1f0 IPv4: 1.1.1.10 IPv6: ::ffff:1.1.1.10 neigh_used=1
+
+- mlx5e_rep_neigh_update: trace neigh update tasks scheduled due to neigh state change events::
+
+ $ echo mlx5:mlx5e_rep_neigh_update >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u48:7-2221 [009] ...1 1475.387435: mlx5e_rep_neigh_update: netdev: ens1f0 MAC: 24:8a:07:9a:17:9a IPv4: 1.1.1.10 IPv6: ::ffff:1.1.1.10 neigh_connected=1
+
+Bridge offloads tracepoints:
+
+- mlx5_esw_bridge_fdb_entry_init: trace bridge FDB entry offloaded to mlx5::
+
+ $ echo mlx5:mlx5_esw_bridge_fdb_entry_init >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u20:9-2217 [003] ...1 318.582243: mlx5_esw_bridge_fdb_entry_init: net_device=enp8s0f0_0 addr=e4:fd:05:08:00:02 vid=0 flags=0 used=0
+
+- mlx5_esw_bridge_fdb_entry_cleanup: trace bridge FDB entry deleted from mlx5::
+
+ $ echo mlx5:mlx5_esw_bridge_fdb_entry_cleanup >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ ip-2581 [005] ...1 318.629871: mlx5_esw_bridge_fdb_entry_cleanup: net_device=enp8s0f0_1 addr=e4:fd:05:08:00:03 vid=0 flags=0 used=16
+
+- mlx5_esw_bridge_fdb_entry_refresh: trace bridge FDB entry offload refreshed in
+ mlx5::
+
+ $ echo mlx5:mlx5_esw_bridge_fdb_entry_refresh >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u20:8-3849 [003] ...1 466716: mlx5_esw_bridge_fdb_entry_refresh: net_device=enp8s0f0_0 addr=e4:fd:05:08:00:02 vid=3 flags=0 used=0
+
+- mlx5_esw_bridge_vlan_create: trace bridge VLAN object add on mlx5
+ representor::
+
+ $ echo mlx5:mlx5_esw_bridge_vlan_create >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ ip-2560 [007] ...1 318.460258: mlx5_esw_bridge_vlan_create: vid=1 flags=6
+
+- mlx5_esw_bridge_vlan_cleanup: trace bridge VLAN object delete from mlx5
+ representor::
+
+ $ echo mlx5:mlx5_esw_bridge_vlan_cleanup >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ bridge-2582 [007] ...1 318.653496: mlx5_esw_bridge_vlan_cleanup: vid=2 flags=8
+
+- mlx5_esw_bridge_vport_init: trace mlx5 vport assigned with bridge upper
+ device::
+
+ $ echo mlx5:mlx5_esw_bridge_vport_init >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ ip-2560 [007] ...1 318.458915: mlx5_esw_bridge_vport_init: vport_num=1
+
+- mlx5_esw_bridge_vport_cleanup: trace mlx5 vport removed from bridge upper
+ device::
+
+ $ echo mlx5:mlx5_esw_bridge_vport_cleanup >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ ip-5387 [000] ...1 573713: mlx5_esw_bridge_vport_cleanup: vport_num=1
+
+Eswitch QoS tracepoints:
+
+- mlx5_esw_vport_qos_create: trace creation of transmit scheduler arbiter for vport::
+
+ $ echo mlx5:mlx5_esw_vport_qos_create >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ <...>-23496 [018] .... 73136.838831: mlx5_esw_vport_qos_create: (0000:82:00.0) vport=2 tsar_ix=4 bw_share=0, max_rate=0 group=000000007b576bb3
+
+- mlx5_esw_vport_qos_config: trace configuration of transmit scheduler arbiter for vport::
+
+ $ echo mlx5:mlx5_esw_vport_qos_config >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ <...>-26548 [023] .... 75754.223823: mlx5_esw_vport_qos_config: (0000:82:00.0) vport=1 tsar_ix=3 bw_share=34, max_rate=10000 group=000000007b576bb3
+
+- mlx5_esw_vport_qos_destroy: trace deletion of transmit scheduler arbiter for vport::
+
+ $ echo mlx5:mlx5_esw_vport_qos_destroy >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ <...>-27418 [004] .... 76546.680901: mlx5_esw_vport_qos_destroy: (0000:82:00.0) vport=1 tsar_ix=3
+
+- mlx5_esw_group_qos_create: trace creation of transmit scheduler arbiter for rate group::
+
+ $ echo mlx5:mlx5_esw_group_qos_create >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ <...>-26578 [008] .... 75776.022112: mlx5_esw_group_qos_create: (0000:82:00.0) group=000000008dac63ea tsar_ix=5
+
+- mlx5_esw_group_qos_config: trace configuration of transmit scheduler arbiter for rate group::
+
+ $ echo mlx5:mlx5_esw_group_qos_config >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ <...>-27303 [020] .... 76461.455356: mlx5_esw_group_qos_config: (0000:82:00.0) group=000000008dac63ea tsar_ix=5 bw_share=100 max_rate=20000
+
+- mlx5_esw_group_qos_destroy: trace deletion of transmit scheduler arbiter for group::
+
+ $ echo mlx5:mlx5_esw_group_qos_destroy >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ <...>-27418 [006] .... 76547.187258: mlx5_esw_group_qos_destroy: (0000:82:00.0) group=000000007b576bb3 tsar_ix=1
+
+SF tracepoints:
+
+- mlx5_sf_add: trace addition of the SF port::
+
+ $ echo mlx5:mlx5_sf_add >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ devlink-9363 [031] ..... 24610.188722: mlx5_sf_add: (0000:06:00.0) port_index=32768 controller=0 hw_id=0x8000 sfnum=88
+
+- mlx5_sf_free: trace freeing of the SF port::
+
+ $ echo mlx5:mlx5_sf_free >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ devlink-9830 [038] ..... 26300.404749: mlx5_sf_free: (0000:06:00.0) port_index=32768 controller=0 hw_id=0x8000
+
+- mlx5_sf_activate: trace activation of the SF port::
+
+ $ echo mlx5:mlx5_sf_activate >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ devlink-29841 [008] ..... 3669.635095: mlx5_sf_activate: (0000:08:00.0) port_index=32768 controller=0 hw_id=0x8000
+
+- mlx5_sf_deactivate: trace deactivation of the SF port::
+
+ $ echo mlx5:mlx5_sf_deactivate >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ devlink-29994 [008] ..... 4015.969467: mlx5_sf_deactivate: (0000:08:00.0) port_index=32768 controller=0 hw_id=0x8000
+
+- mlx5_sf_hwc_alloc: trace allocating of the hardware SF context::
+
+ $ echo mlx5:mlx5_sf_hwc_alloc >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ devlink-9775 [031] ..... 26296.385259: mlx5_sf_hwc_alloc: (0000:06:00.0) controller=0 hw_id=0x8000 sfnum=88
+
+- mlx5_sf_hwc_free: trace freeing of the hardware SF context::
+
+ $ echo mlx5:mlx5_sf_hwc_free >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u128:3-9093 [046] ..... 24625.365771: mlx5_sf_hwc_free: (0000:06:00.0) hw_id=0x8000
+
+- mlx5_sf_hwc_deferred_free: trace deferred freeing of the hardware SF context::
+
+ $ echo mlx5:mlx5_sf_hwc_deferred_free >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ devlink-9519 [046] ..... 24624.400271: mlx5_sf_hwc_deferred_free: (0000:06:00.0) hw_id=0x8000
+
+- mlx5_sf_update_state: trace state updates for SF contexts::
+
+ $ echo mlx5:mlx5_sf_update_state >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u20:3-29490 [009] ..... 4141.453530: mlx5_sf_update_state: (0000:08:00.0) port_index=32768 controller=0 hw_id=0x8000 state=2
+
+- mlx5_sf_vhca_event: trace SF vhca event and state::
+
+ $ echo mlx5:mlx5_sf_vhca_event >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u128:3-9093 [046] ..... 24625.365525: mlx5_sf_vhca_event: (0000:06:00.0) hw_id=0x8000 sfnum=88 vhca_state=1
+
+- mlx5_sf_dev_add: trace SF device add event::
+
+ $ echo mlx5:mlx5_sf_dev_add >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u128:3-9093 [000] ..... 24616.524495: mlx5_sf_dev_add: (0000:06:00.0) sfdev=00000000fc5d96fd aux_id=4 hw_id=0x8000 sfnum=88
+
+- mlx5_sf_dev_del: trace SF device delete event::
+
+ $ echo mlx5:mlx5_sf_dev_del >> /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u128:3-9093 [044] ..... 24624.400749: mlx5_sf_dev_del: (0000:06:00.0) sfdev=00000000fc5d96fd aux_id=4 hw_id=0x8000 sfnum=88
diff --git a/Documentation/networking/device_drivers/ethernet/pensando/ionic.rst b/Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
index 0eabbc347d6c..6ec7d686efab 100644
--- a/Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
+++ b/Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
@@ -83,7 +83,7 @@ Configuring the Driver
MTU
---
-Jumbo frame support is available with a maximim size of 9194 bytes.
+Jumbo frame support is available with a maximum size of 9194 bytes.
Interrupt coalescing
--------------------
diff --git a/Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst b/Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst
index f24adfab6a1b..25fd9aa284e2 100644
--- a/Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst
+++ b/Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst
@@ -124,7 +124,7 @@ Multicast flooding
==================
CPU port mcast_flooding is always on
-Turning flooding on/off on swithch ports:
+Turning flooding on/off on switch ports:
bridge link set dev sw0p1 mcast_flood on/off
Access and Trunk port
diff --git a/Documentation/networking/device_drivers/ethernet/ti/cpsw_switchdev.rst b/Documentation/networking/device_drivers/ethernet/ti/cpsw_switchdev.rst
index 1241ecac73bd..464dce938ed1 100644
--- a/Documentation/networking/device_drivers/ethernet/ti/cpsw_switchdev.rst
+++ b/Documentation/networking/device_drivers/ethernet/ti/cpsw_switchdev.rst
@@ -174,7 +174,7 @@ Multicast flooding
==================
CPU port mcast_flooding is always on
-Turning flooding on/off on swithch ports:
+Turning flooding on/off on switch ports:
bridge link set dev sw0p1 mcast_flood on/off
Access and Trunk port
diff --git a/Documentation/networking/device_drivers/wwan/iosm.rst b/Documentation/networking/device_drivers/wwan/iosm.rst
index aceb0223eb46..6f9e955af984 100644
--- a/Documentation/networking/device_drivers/wwan/iosm.rst
+++ b/Documentation/networking/device_drivers/wwan/iosm.rst
@@ -69,7 +69,7 @@ wwan0-X network device
The IOSM driver exposes IP link interface "wwan0-X" of type "wwan" for IP
traffic. Iproute network utility is used for creating "wwan0-X" network
interface and for associating it with MBIM IP session. The Driver supports
-upto 8 IP sessions for simultaneous IP communication.
+up to 8 IP sessions for simultaneous IP communication.
The userspace management application is responsible for creating new IP link
prior to establishing MBIM IP session where the SessionId is greater than 0.
diff --git a/Documentation/networking/devlink/devlink-health.rst b/Documentation/networking/devlink/devlink-health.rst
index e37f77734b5b..e0b8cfed610a 100644
--- a/Documentation/networking/devlink/devlink-health.rst
+++ b/Documentation/networking/devlink/devlink-health.rst
@@ -33,7 +33,7 @@ Device driver can provide specific callbacks for each "health reporter", e.g.:
* Recovery procedures
* Diagnostics procedures
* Object dump procedures
- * OOB initial parameters
+ * Out-of-box initial parameters
Different parts of the driver can register different types of health reporters
with different handlers.
@@ -46,12 +46,31 @@ Once an error is reported, devlink health will perform the following actions:
* A log is being send to the kernel trace events buffer
* Health status and statistics are being updated for the reporter instance
* Object dump is being taken and saved at the reporter instance (as long as
- there is no other dump which is already stored)
+ auto-dump is set and no other dump is already stored)
* Auto recovery attempt is being done. Depends on:
- Auto-recovery configuration
- Grace period vs. time passed since last recover
+Devlink formatted message
+=========================
+
+To handle devlink health diagnose and health dump requests, devlink creates a
+formatted message structure ``devlink_fmsg`` and sends it to the driver's
+callback, which fills in the data using the devlink fmsg API.
+
+Devlink fmsg is a mechanism for passing descriptors between drivers and
+devlink in a JSON-like format. The API allows the driver to add nested
+attributes such as objects, object pairs and value arrays, in addition to
+attributes such as names and values.
+
+The driver should use this API to fill the fmsg context in a format which
+devlink later translates into the netlink message. When devlink needs to send
+the data to the netlink layer in SKBs, it fragments the data between
+different SKBs. To do this fragmentation it uses virtual nest attributes,
+avoiding real nesting, which could not be divided between different SKBs.
+
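+For illustration, the diagnose callback of a hypothetical ``foo`` driver
+could fill the fmsg as follows (a minimal sketch; the ``foo`` names and
+counters are made up, and each fmsg call returns an error code that must
+be checked)::
+
+  static int
+  foo_reporter_diagnose(struct devlink_health_reporter *reporter,
+                        struct devlink_fmsg *fmsg,
+                        struct netlink_ext_ack *extack)
+  {
+          struct foo_dev *fdev = devlink_health_reporter_priv(reporter);
+          int err;
+
+          /* Produces a JSON-like object:
+           * { "tx_timeouts": N, "link_up": true/false }
+           */
+          err = devlink_fmsg_obj_nest_start(fmsg);
+          if (err)
+                  return err;
+          err = devlink_fmsg_u32_pair_put(fmsg, "tx_timeouts",
+                                          fdev->tx_timeouts);
+          if (err)
+                  return err;
+          err = devlink_fmsg_bool_pair_put(fmsg, "link_up", fdev->link_up);
+          if (err)
+                  return err;
+          return devlink_fmsg_obj_nest_end(fmsg);
+  }
+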
User Interface
==============
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index 625efb3777d5..10f282c2117c 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -285,7 +285,7 @@ features are enabled after the hierarchy is exported, but before any
changes are made.
This feature is also dependent on switchdev being enabled in the system.
-It's required bacause devlink-rate requires devlink-port objects to be
+It's required because devlink-rate requires devlink-port objects to be
present, and those objects are only created in switchdev mode.
If the driver is set to the switchdev mode, it will export internal
@@ -320,7 +320,7 @@ nodes and nodes with children also can't be deleted.
* - ``tx_weight``
- allows for usage of Weighted Fair Queuing arbitration scheme among
siblings. This arbitration scheme can be used simultaneously with
- the strict priority. Range 1-200. Only relative values mater for
+ the strict priority. Range 1-200. Only relative values matter for
arbitration.
``tx_priority`` and ``tx_weight`` can be used simultaneously. In that case
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index fee4d3968309..b49749e2b9a6 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -66,3 +66,4 @@ parameters, info versions, and other features it supports.
prestera
iosm
octeontx2
+ sfc
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 29ad304e6fba..3321117cf605 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -54,6 +54,24 @@ parameters.
- Control the number of large groups (size > 1) in the FDB table.
* The default value is 15, and the range is between 1 and 1024.
+ * - ``esw_multiport``
+ - Boolean
+ - runtime
+ - Control MultiPort E-Switch shared fdb mode.
+
+ An experimental mode where a single E-Switch is used and all the vports
+ and physical ports on the NIC are connected to it.
+
+ An example is to send traffic from a VF that is created on PF0 to the
+ uplink that is natively associated with PF1.
+
+ Note: Future devices, ConnectX-8 and onward, will eventually have this
+ as the default to allow forwarding between all NIC ports in a single
+ E-switch environment, and the dual E-switch mode will likely be
+ deprecated.
+
+ Default: disabled
+
The ``mlx5`` driver supports reloading via ``DEVLINK_CMD_RELOAD``
diff --git a/Documentation/networking/devlink/netdevsim.rst b/Documentation/networking/devlink/netdevsim.rst
index ec5e6d79b2e2..88482725422c 100644
--- a/Documentation/networking/devlink/netdevsim.rst
+++ b/Documentation/networking/devlink/netdevsim.rst
@@ -95,5 +95,5 @@ Driver-specific Traps
* - ``fid_miss``
- ``exception``
- When a packet enters the device it is classified to a filtering
- indentifier (FID) based on the ingress port and VLAN. This trap is used
+ identifier (FID) based on the ingress port and VLAN. This trap is used
to trap packets for which a FID could not be found
diff --git a/Documentation/networking/devlink/prestera.rst b/Documentation/networking/devlink/prestera.rst
index 49409d1d3081..96b1124e614b 100644
--- a/Documentation/networking/devlink/prestera.rst
+++ b/Documentation/networking/devlink/prestera.rst
@@ -138,4 +138,4 @@ Driver-specific Traps
- Drops packets with zero (0) IPV4 source address.
* - ``met_red``
- ``drop``
- - Drops non-conforming packets (dropped by Ingress policer, metering drop), e.g. packet rate exceeded configured bandwith.
+ - Drops non-conforming packets (dropped by Ingress policer, metering drop), e.g. packet rate exceeded configured bandwidth.
diff --git a/Documentation/networking/devlink/sfc.rst b/Documentation/networking/devlink/sfc.rst
new file mode 100644
index 000000000000..db64a1bd9733
--- /dev/null
+++ b/Documentation/networking/devlink/sfc.rst
@@ -0,0 +1,57 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+sfc devlink support
+===================
+
+This document describes the devlink features implemented by the ``sfc``
+device driver for the ef100 device.
+
+Info versions
+=============
+
+The ``sfc`` driver reports the following versions
+
+.. list-table:: devlink info versions implemented
+ :widths: 5 5 90
+
+ * - Name
+ - Type
+ - Description
+ * - ``fw.mgmt.suc``
+ - running
+ - For boards where the management function is split between multiple
+ control units, this is the SUC control unit's firmware version.
+ * - ``fw.mgmt.cmc``
+ - running
+ - For boards where the management function is split between multiple
+ control units, this is the CMC control unit's firmware version.
+ * - ``fpga.rev``
+ - running
+ - FPGA design revision.
+ * - ``fpga.app``
+ - running
+ - Datapath programmable logic version.
+ * - ``fw.app``
+ - running
+ - Datapath software/microcode/firmware version.
+ * - ``coproc.boot``
+ - running
+ - SmartNIC application co-processor (APU) first stage boot loader version.
+ * - ``coproc.uboot``
+ - running
+ - SmartNIC application co-processor (APU) co-operating system loader version.
+ * - ``coproc.main``
+ - running
+ - SmartNIC application co-processor (APU) main operating system version.
+ * - ``coproc.recovery``
+ - running
+ - SmartNIC application co-processor (APU) recovery operating system version.
+ * - ``fw.exprom``
+ - running
+ - Expansion ROM version. For boards where the expansion ROM is split between
+ multiple images (e.g. PXE and UEFI), this is specifically the PXE boot
+ ROM version.
+ * - ``fw.uefi``
+ - running
+ - UEFI driver version (No UNDI support).
diff --git a/Documentation/networking/dsa/configuration.rst b/Documentation/networking/dsa/configuration.rst
index 827701f8cbfe..d2934c40f0f1 100644
--- a/Documentation/networking/dsa/configuration.rst
+++ b/Documentation/networking/dsa/configuration.rst
@@ -5,7 +5,7 @@ DSA switch configuration from userspace
=======================================
The DSA switch configuration is not integrated into the main userspace
-network configuration suites by now and has to be performed manualy.
+network configuration suites by now and has to be performed manually.
.. _dsa-config-showcases:
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index f10f8eb44255..e1bc6186d7ea 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -106,7 +106,7 @@ modifying a bitmap, the former changes the bit set in mask to values set in
value and preserves the rest; the latter sets the bits set in the bitmap and
clears the rest.
-Compact form: nested (bitset) atrribute contents:
+Compact form: nested (bitset) attribute contents:
============================ ====== ============================
``ETHTOOL_A_BITSET_NOMASK`` flag no mask, only a list
@@ -223,6 +223,8 @@ Userspace to kernel:
``ETHTOOL_MSG_PSE_SET`` set PSE parameters
``ETHTOOL_MSG_PSE_GET`` get PSE parameters
``ETHTOOL_MSG_RSS_GET`` get RSS settings
+ ``ETHTOOL_MSG_MM_GET`` get MAC merge layer state
+ ``ETHTOOL_MSG_MM_SET`` set MAC merge layer parameters
===================================== =================================
Kernel to userspace:
@@ -265,6 +267,7 @@ Kernel to userspace:
``ETHTOOL_MSG_MODULE_GET_REPLY`` transceiver module parameters
``ETHTOOL_MSG_PSE_GET_REPLY`` PSE parameters
``ETHTOOL_MSG_RSS_GET_REPLY`` RSS settings
+ ``ETHTOOL_MSG_MM_GET_REPLY`` MAC merge layer status
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -780,7 +783,7 @@ Kernel response contents:
``ETHTOOL_A_FEATURES_ACTIVE`` bitset diff old vs. new active
==================================== ====== ==========================
-Request constains only one bitset which can be either value/mask pair (request
+Request contains only one bitset which can be either value/mask pair (request
to change specific feature bits and leave the rest) or only a value (request
to set all features to specified set).
@@ -871,6 +874,7 @@ Kernel response contents:
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split
``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE
``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode
+ ``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode
==================================== ====== ===========================
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -880,8 +884,8 @@ separate buffers. The device configuration must make it possible to receive
full memory pages of data, for example because MTU is high enough or through
HW-GRO.
-``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable descriptor fast
-path to send packets. In ordinary path, driver fills descriptors in DRAM and
+``ETHTOOL_A_RINGS_[RX|TX]_PUSH`` flag is used to enable descriptor fast
+path to send or receive packets. In the ordinary path, the driver fills descriptors in DRAM and
notifies NIC hardware. In fast path, driver pushes descriptors to the device
through MMIO writes, thus reducing the latency. However, enabling this feature
may increase the CPU cost. Drivers may enforce additional per-packet
@@ -903,6 +907,7 @@ Request contents:
``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring
``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE
``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode
+ ``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode
==================================== ====== ===========================
Kernel checks that requested ring sizes do not exceed limits reported by
@@ -1004,6 +1009,9 @@ Kernel response contents:
``ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL`` u32 rate sampling interval
``ETHTOOL_A_COALESCE_USE_CQE_TX`` bool timer reset mode, Tx
``ETHTOOL_A_COALESCE_USE_CQE_RX`` bool timer reset mode, Rx
+ ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES`` u32 max aggr size, Tx
+ ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES`` u32 max aggr packets, Tx
+ ``ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS`` u32 time (us), aggr, Tx
=========================================== ====== =======================
Attributes are only included in reply if their value is not zero or the
@@ -1022,6 +1030,17 @@ each packet event resets the timer. In this mode timer is used to force
the interrupt if queue goes idle, while busy queues depend on the packet
limit to trigger interrupts.
+Tx aggregation consists of copying frames into a contiguous buffer so that they
+can be submitted as a single IO operation. ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES``
+describes the maximum size in bytes for the submitted buffer.
+``ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES`` describes the maximum number of frames
+that can be aggregated into a single buffer.
+``ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS`` describes the amount of time in usecs,
+counted since the first packet arrival in an aggregated block, after which the
+block should be sent.
+This feature is mainly of interest for specific USB devices which do not cope
+well with frequent transmissions of small URBs.
+
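+On the driver side the aggregation limits are exchanged through
+``struct kernel_ethtool_coalesce``. A minimal sketch for a hypothetical
+``foo`` driver (which would also need to declare the corresponding
+``ETHTOOL_COALESCE_TX_AGGR_*`` bits in its ``supported_coalesce_params``)::
+
+  static int foo_get_coalesce(struct net_device *dev,
+                              struct ethtool_coalesce *ec,
+                              struct kernel_ethtool_coalesce *kec,
+                              struct netlink_ext_ack *extack)
+  {
+          struct foo_priv *priv = netdev_priv(dev);
+
+          /* Report the currently configured TX aggregation limits. */
+          kec->tx_aggr_max_bytes = priv->tx_aggr_max_bytes;
+          kec->tx_aggr_max_frames = priv->tx_aggr_max_frames;
+          kec->tx_aggr_time_usecs = priv->tx_aggr_time_usecs;
+          return 0;
+  }
+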
COALESCE_SET
============
@@ -1055,6 +1074,9 @@ Request contents:
``ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL`` u32 rate sampling interval
``ETHTOOL_A_COALESCE_USE_CQE_TX`` bool timer reset mode, Tx
``ETHTOOL_A_COALESCE_USE_CQE_RX`` bool timer reset mode, Rx
+ ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES`` u32 max aggr size, Tx
+ ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES`` u32 max aggr packets, Tx
+ ``ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS`` u32 time (us), aggr, Tx
=========================================== ====== =======================
Request is rejected if it attributes declared as unsupported by driver (i.e.
@@ -1072,8 +1094,18 @@ Request contents:
===================================== ====== ==========================
``ETHTOOL_A_PAUSE_HEADER`` nested request header
+ ``ETHTOOL_A_PAUSE_STATS_SRC`` u32 source of statistics
===================================== ====== ==========================
+``ETHTOOL_A_PAUSE_STATS_SRC`` is optional. It takes values from:
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_mac_stats_src
+
+If absent from the request, stats will be provided with
+an ``ETHTOOL_A_PAUSE_STATS_SRC`` attribute in the response equal to
+``ETHTOOL_MAC_STATS_SRC_AGGREGATE``.
+
Kernel response contents:
===================================== ====== ==========================
@@ -1488,6 +1520,7 @@ Request contents:
======================================= ====== ==========================
``ETHTOOL_A_STATS_HEADER`` nested request header
+ ``ETHTOOL_A_STATS_SRC`` u32 source of statistics
``ETHTOOL_A_STATS_GROUPS`` bitset requested groups of stats
======================================= ====== ==========================
@@ -1496,6 +1529,8 @@ Kernel response contents:
+-----------------------------------+--------+--------------------------------+
| ``ETHTOOL_A_STATS_HEADER`` | nested | reply header |
+-----------------------------------+--------+--------------------------------+
+ | ``ETHTOOL_A_STATS_SRC`` | u32 | source of statistics |
+ +-----------------------------------+--------+--------------------------------+
| ``ETHTOOL_A_STATS_GRP`` | nested | one or more group of stats |
+-+---------------------------------+--------+--------------------------------+
| | ``ETHTOOL_A_STATS_GRP_ID`` | u32 | group ID - ``ETHTOOL_STATS_*`` |
@@ -1557,6 +1592,11 @@ Low and high bounds are inclusive, for example:
etherStatsPkts512to1023Octets 512 1023
============================= ==== ====
+``ETHTOOL_A_STATS_SRC`` is optional. Similar to ``PAUSE_GET``, it takes values
+from ``enum ethtool_mac_stats_src``. If absent from the request, stats will be
+provided with an ``ETHTOOL_A_STATS_SRC`` attribute in the response equal to
+``ETHTOOL_MAC_STATS_SRC_AGGREGATE``.
+
PHC_VCLOCKS_GET
===============
@@ -1716,6 +1756,225 @@ being used. Current supported options are toeplitz, xor or crc32.
ETHTOOL_A_RSS_INDIR attribute returns RSS indrection table where each byte
indicates queue number.
+PLCA_GET_CFG
+============
+
+Gets the IEEE 802.3cg-2019 Clause 148 Physical Layer Collision Avoidance
+(PLCA) Reconciliation Sublayer (RS) attributes.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_PLCA_HEADER`` nested request header
+ ===================================== ====== ==========================
+
+Kernel response contents:
+
+ ====================================== ====== =============================
+ ``ETHTOOL_A_PLCA_HEADER`` nested reply header
+ ``ETHTOOL_A_PLCA_VERSION`` u16 Supported PLCA management
+ interface standard/version
+ ``ETHTOOL_A_PLCA_ENABLED`` u8 PLCA Admin State
+ ``ETHTOOL_A_PLCA_NODE_ID`` u32 PLCA unique local node ID
+ ``ETHTOOL_A_PLCA_NODE_CNT`` u32 Number of PLCA nodes on the
+ network, including the
+ coordinator
+ ``ETHTOOL_A_PLCA_TO_TMR`` u32 Transmit Opportunity Timer
+ value in bit-times (BT)
+ ``ETHTOOL_A_PLCA_BURST_CNT`` u32 Number of additional packets
+ the node is allowed to send
+ within a single TO
+ ``ETHTOOL_A_PLCA_BURST_TMR`` u32 Time to wait for the MAC to
+ transmit a new frame before
+ terminating the burst
+ ====================================== ====== =============================
+
+When set, the optional ``ETHTOOL_A_PLCA_VERSION`` attribute indicates which
+standard and version the PLCA management interface complies with. When not set,
+the interface is vendor-specific and (possibly) supplied by the driver.
+The OPEN Alliance SIG specifies a standard register map for 10BASE-T1S PHYs
+embedding the PLCA Reconciliation Sublayer. See "10BASE-T1S PLCA Management
+Registers" at https://www.opensig.org/about/specifications/.
+
+When set, the optional ``ETHTOOL_A_PLCA_ENABLED`` attribute indicates the
+administrative state of the PLCA RS. When not set, the node operates in "plain"
+CSMA/CD mode. This option corresponds to ``IEEE 802.3cg-2019`` 30.16.1.1.1
+aPLCAAdminState / 30.16.1.2.1 acPLCAAdminControl.
+
+When set, the optional ``ETHTOOL_A_PLCA_NODE_ID`` attribute indicates the
+configured local node ID of the PHY. This ID determines which transmit
+opportunity (TO) is reserved for the node to transmit into. This option
+corresponds to ``IEEE 802.3cg-2019`` 30.16.1.1.4 aPLCALocalNodeID. The valid
+range for this attribute is [0 .. 255] where 255 means "not configured".
+
+When set, the optional ``ETHTOOL_A_PLCA_NODE_CNT`` attribute indicates the
+configured maximum number of PLCA nodes on the mixing-segment. This number
+determines the total number of transmit opportunities generated during a
+PLCA cycle. This attribute is relevant only for the PLCA coordinator, which is
+the node with aPLCALocalNodeID set to 0. Follower nodes ignore this setting.
+This option corresponds to ``IEEE 802.3cg-2019`` 30.16.1.1.3
+aPLCANodeCount. The valid range for this attribute is [1 .. 255].
+
+When set, the optional ``ETHTOOL_A_PLCA_TO_TMR`` attribute indicates the
+configured value of the transmit opportunity timer in bit-times. This value
+must be set equal across all nodes sharing the medium for PLCA to work
+correctly. This option corresponds to ``IEEE 802.3cg-2019`` 30.16.1.1.5
+aPLCATransmitOpportunityTimer. The valid range for this attribute is
+[0 .. 255].
+
+When set, the optional ``ETHTOOL_A_PLCA_BURST_CNT`` attribute indicates the
+configured number of extra packets that the node is allowed to send during a
+single transmit opportunity. By default, this attribute is 0, meaning that
+the node can only send a single frame per TO. When greater than 0, the PLCA RS
+keeps the TO after any transmission, waiting for the MAC to send a new frame
+for up to aPLCABurstTimer BTs. This can only happen a number of times per PLCA
+cycle up to the value of this parameter. After that, the burst is over and the
+normal counting of TOs resumes. This option corresponds to
+``IEEE 802.3cg-2019`` 30.16.1.1.6 aPLCAMaxBurstCount. The valid range for this
+attribute is [0 .. 255].
+
+When set, the optional ``ETHTOOL_A_PLCA_BURST_TMR`` attribute indicates how
+many bit-times the PLCA RS waits for the MAC to initiate a new transmission
+when aPLCAMaxBurstCount is greater than 0. If the MAC fails to send a new
+frame within this time, the burst ends and the counting of TOs resumes.
+Otherwise, the new frame is sent as part of the current burst. This option
+corresponds to ``IEEE 802.3cg-2019`` 30.16.1.1.7 aPLCABurstTimer. The
+valid range for this attribute is [0 .. 255]. However, the value should be
+set greater than the Inter-Frame-Gap (IFG) time of the MAC (plus some margin)
+for PLCA burst mode to work as intended.
+
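+On the driver side these attributes map to ``struct phy_plca_cfg``, which a
+PHY driver fills in from its ``get_plca_cfg`` callback (PHYs compliant with
+the OPEN Alliance register map may be able to use the generic
+``genphy_c45_plca_get_cfg`` helper instead). A minimal sketch with
+hypothetical ``foo_read_*`` register accessors, where -1 marks fields the
+PHY does not report::
+
+  static int foo_get_plca_cfg(struct phy_device *phydev,
+                              struct phy_plca_cfg *plca_cfg)
+  {
+          plca_cfg->version = -1;        /* vendor-specific interface */
+          plca_cfg->enabled = foo_read_plca_en(phydev);
+          plca_cfg->node_id = foo_read_plca_id(phydev);
+          plca_cfg->node_cnt = foo_read_plca_node_cnt(phydev);
+          plca_cfg->to_tmr = foo_read_plca_to_tmr(phydev);
+          plca_cfg->burst_cnt = -1;      /* burst mode not supported */
+          plca_cfg->burst_tmr = -1;
+          return 0;
+  }
+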
+PLCA_SET_CFG
+============
+
+Sets PLCA RS parameters.
+
+Request contents:
+
+ ====================================== ====== =============================
+ ``ETHTOOL_A_PLCA_HEADER`` nested request header
+ ``ETHTOOL_A_PLCA_ENABLED`` u8 PLCA Admin State
+ ``ETHTOOL_A_PLCA_NODE_ID``             u32     PLCA unique local node ID
+ ``ETHTOOL_A_PLCA_NODE_CNT``            u32     Number of PLCA nodes on the
+                                                network, including the
+                                                coordinator
+ ``ETHTOOL_A_PLCA_TO_TMR``              u32     Transmit Opportunity Timer
+                                                value in bit-times (BT)
+ ``ETHTOOL_A_PLCA_BURST_CNT``           u32     Number of additional packets
+                                                the node is allowed to send
+                                                within a single TO
+ ``ETHTOOL_A_PLCA_BURST_TMR``           u32     Time to wait for the MAC to
+                                                transmit a new frame before
+                                                terminating the burst
+ ====================================== ====== =============================
+
+For a description of each attribute, see ``PLCA_GET_CFG``.
+
+PLCA_GET_STATUS
+===============
+
+Gets PLCA RS status information.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_PLCA_HEADER`` nested request header
+ ===================================== ====== ==========================
+
+Kernel response contents:
+
+ ====================================== ====== =============================
+ ``ETHTOOL_A_PLCA_HEADER`` nested reply header
+ ``ETHTOOL_A_PLCA_STATUS`` u8 PLCA RS operational status
+ ====================================== ====== =============================
+
+When set, the ``ETHTOOL_A_PLCA_STATUS`` attribute indicates whether the node is
+detecting the presence of the BEACON on the network. This flag corresponds
+to ``IEEE 802.3cg-2019`` 30.16.1.1.2 aPLCAStatus.
+
+MM_GET
+======
+
+Retrieve 802.3 MAC Merge parameters.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_MM_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ================================= ====== ===================================
+ ``ETHTOOL_A_MM_HEADER``           nested  reply header
+ ``ETHTOOL_A_MM_PMAC_ENABLED`` bool set if RX of preemptible and SMD-V
+ frames is enabled
+ ``ETHTOOL_A_MM_TX_ENABLED`` bool set if TX of preemptible frames is
+ administratively enabled (might be
+ inactive if verification failed)
+ ``ETHTOOL_A_MM_TX_ACTIVE`` bool set if TX of preemptible frames is
+ operationally enabled
+ ``ETHTOOL_A_MM_TX_MIN_FRAG_SIZE`` u32 minimum size of transmitted
+ non-final fragments, in octets
+ ``ETHTOOL_A_MM_RX_MIN_FRAG_SIZE`` u32 minimum size of received non-final
+ fragments, in octets
+ ``ETHTOOL_A_MM_VERIFY_ENABLED`` bool set if TX of SMD-V frames is
+ administratively enabled
+ ``ETHTOOL_A_MM_VERIFY_STATUS`` u8 state of the verification function
+ ``ETHTOOL_A_MM_VERIFY_TIME`` u32 delay between verification attempts
+ ``ETHTOOL_A_MM_MAX_VERIFY_TIME``  u32     maximum verification interval
+ supported by device
+ ``ETHTOOL_A_MM_STATS`` nested IEEE 802.3-2018 subclause 30.14.1
+ oMACMergeEntity statistics counters
+ ================================= ====== ===================================
+
+The attributes are populated by the device driver through the following
+structure:
+
+.. kernel-doc:: include/linux/ethtool.h
+ :identifiers: ethtool_mm_state
+
+The ``ETHTOOL_A_MM_VERIFY_STATUS`` will report one of the values from
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_mm_verify_status
+
+If ``ETHTOOL_A_MM_VERIFY_ENABLED`` was passed as false in the ``MM_SET``
+command, ``ETHTOOL_A_MM_VERIFY_STATUS`` will report either
+``ETHTOOL_MM_VERIFY_STATUS_INITIAL`` or ``ETHTOOL_MM_VERIFY_STATUS_DISABLED``,
+otherwise it should report one of the other states.
+
+It is recommended that drivers start with the pMAC disabled, and enable it upon
+user space request. It is also recommended that user space does not depend upon
+the default values from ``ETHTOOL_MSG_MM_GET`` requests.
+
+``ETHTOOL_A_MM_STATS`` are reported if ``ETHTOOL_FLAG_STATS`` was set in
+``ETHTOOL_A_HEADER_FLAGS``. The attribute will be empty if the driver did not
+report any statistics. Drivers fill in the statistics in the following
+structure:
+
+.. kernel-doc:: include/linux/ethtool.h
+ :identifiers: ethtool_mm_stats
+
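+For illustration, the ``get_mm`` callback of a hypothetical ``foo`` driver
+might fill the state as below (a sketch only; the ``priv`` fields and the
+constants are made up)::
+
+  static int foo_get_mm(struct net_device *dev,
+                        struct ethtool_mm_state *state)
+  {
+          struct foo_priv *priv = netdev_priv(dev);
+
+          state->pmac_enabled = priv->pmac_enabled;
+          state->tx_enabled = priv->tx_preemption_enabled;
+          /* TX is only active once verification, if enabled, succeeded. */
+          state->tx_active = priv->tx_preemption_enabled &&
+                             (!priv->verify_enabled ||
+                              priv->verify_status ==
+                              ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED);
+          state->tx_min_frag_size = 64;
+          state->rx_min_frag_size = 64;
+          state->verify_enabled = priv->verify_enabled;
+          state->verify_status = priv->verify_status;
+          state->verify_time = priv->verify_time;
+          state->max_verify_time = 128;
+          return 0;
+  }
+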
+MM_SET
+======
+
+Modifies the configuration of the 802.3 MAC Merge layer.
+
+Request contents:
+
+ ================================= ====== ==========================
+ ``ETHTOOL_A_MM_VERIFY_TIME`` u32 see MM_GET description
+ ``ETHTOOL_A_MM_VERIFY_ENABLED`` bool see MM_GET description
+ ``ETHTOOL_A_MM_TX_ENABLED`` bool see MM_GET description
+ ``ETHTOOL_A_MM_PMAC_ENABLED`` bool see MM_GET description
+ ``ETHTOOL_A_MM_TX_MIN_FRAG_SIZE`` u32 see MM_GET description
+ ================================= ====== ==========================
+
+The attributes are propagated to the driver through the following structure:
+
+.. kernel-doc:: include/linux/ethtool.h
+ :identifiers: ethtool_mm_cfg
+
Request translation
===================
@@ -1817,4 +2076,9 @@ are netlink only.
n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET``
n/a ``ETHTOOL_MSG_MODULE_GET``
n/a ``ETHTOOL_MSG_MODULE_SET``
+ n/a ``ETHTOOL_MSG_PLCA_GET_CFG``
+ n/a ``ETHTOOL_MSG_PLCA_SET_CFG``
+ n/a ``ETHTOOL_MSG_PLCA_GET_STATUS``
+ n/a ``ETHTOOL_MSG_MM_GET``
+ n/a ``ETHTOOL_MSG_MM_SET``
=================================== =====================================
diff --git a/Documentation/networking/gtp.rst b/Documentation/networking/gtp.rst
index 1563fb94b289..9a7835cc1437 100644
--- a/Documentation/networking/gtp.rst
+++ b/Documentation/networking/gtp.rst
@@ -162,7 +162,7 @@ Local GTP-U entity and tunnel identification
GTP-U uses UDP for transporting PDU's. The receiving UDP port is 2152
for GTPv1-U and 3386 for GTPv0-U.
-There is only one GTP-U entity (and therefor SGSN/GGSN/S-GW/PDN-GW
+There is only one GTP-U entity (and therefore SGSN/GGSN/S-GW/PDN-GW
instance) per IP address. Tunnel Endpoint Identifier (TEID) are unique
per GTP-U entity.
diff --git a/Documentation/networking/ieee802154.rst b/Documentation/networking/ieee802154.rst
index f27856d77c8b..c652d383fe10 100644
--- a/Documentation/networking/ieee802154.rst
+++ b/Documentation/networking/ieee802154.rst
@@ -70,7 +70,7 @@ Like with WiFi, there are several types of devices implementing IEEE 802.15.4.
exports a management (e.g. MLME) and data API.
2) 'SoftMAC' or just radio. These types of devices are just radio transceivers
possibly with some kinds of acceleration like automatic CRC computation and
-comparation, automagic ACK handling, address matching, etc.
+comparison, automagic ACK handling, address matching, etc.
Those types of devices require different approach to be hooked into Linux kernel.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 4f2d1f682a18..4ddcae33c336 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -120,6 +120,7 @@ Contents:
xfrm_proc
xfrm_sync
xfrm_sysctl
+ xdp-rx-metadata
.. only:: subproject and html
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 7fbd060d6047..87dd1c5283e6 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -50,7 +50,7 @@ ip_no_pmtu_disc - INTEGER
Default: FALSE
min_pmtu - INTEGER
- default 552 - minimum Path MTU. Unless this is changed mannually,
+ default 552 - minimum Path MTU. Unless this is changed manually,
each cached pmtu will never be lower than this setting.
ip_forward_use_pmtu - BOOLEAN
@@ -156,6 +156,9 @@ route/max_size - INTEGER
From linux kernel 3.6 onwards, this is deprecated for ipv4
as route cache is no longer used.
+ From linux kernel 6.3 onwards, this is deprecated for ipv6
+ as garbage collection manages cached route entries.
+
neigh/default/gc_thresh1 - INTEGER
Minimum number of entries to keep. Garbage collector will not
purge entries if there are fewer than this number.
@@ -1589,6 +1592,14 @@ proxy_arp_pvlan - BOOLEAN
Hewlett-Packard call it Source-Port filtering or port-isolation.
Ericsson call it MAC-Forced Forwarding (RFC Draft).
+proxy_delay - INTEGER
+ Delay proxy response.
+
+ Delay response to a neighbor solicitation when proxy_arp
+ or proxy_ndp is enabled. A random value between [0, proxy_delay)
+ will be chosen; setting this to zero means replying with no delay.
+ Value in jiffies. Defaults to 80.
+
shared_media - BOOLEAN
Send(router) or accept(host) RFC1620 shared media redirects.
Overrides secure_redirects.
@@ -2075,7 +2086,7 @@ skip_notify_on_dev_down - BOOLEAN
nexthop_compat_mode - BOOLEAN
New nexthop API provides a means for managing nexthops independent of
- prefixes. Backwards compatibilty with old route format is enabled by
+ prefixes. Backwards compatibility with old route format is enabled by
default which means route dumps and notifications contain the new
nexthop attribute but also the full, expanded nexthop definition.
Further, updates or deletes of a nexthop configuration generate route
@@ -2808,7 +2819,7 @@ pf_expose - INTEGER
can be got via SCTP_GET_PEER_ADDR_INFO sockopt; When it's enabled,
a SCTP_PEER_ADDR_CHANGE event will be sent for a transport becoming
SCTP_PF state and a SCTP_PF-state transport info can be got via
- SCTP_GET_PEER_ADDR_INFO sockopt; When it's diabled, no
+ SCTP_GET_PEER_ADDR_INFO sockopt; When it's disabled, no
SCTP_PEER_ADDR_CHANGE event will be sent and it returns -EACCES when
trying to get a SCTP_PF-state transport info via SCTP_GET_PEER_ADDR_INFO
sockopt.
diff --git a/Documentation/networking/ipvlan.rst b/Documentation/networking/ipvlan.rst
index 0000c1d383bc..895d0ccfd596 100644
--- a/Documentation/networking/ipvlan.rst
+++ b/Documentation/networking/ipvlan.rst
@@ -61,7 +61,7 @@ e.g.
IPvlan has two modes of operation - L2 and L3. For a given master device,
you can select one of these two modes and all slaves on that master will
operate in the same (selected) mode. The RX mode is almost identical except
-that in L3 mode the slaves wont receive any multicast / broadcast traffic.
+that in L3 mode the slaves won't receive any multicast / broadcast traffic.
L3 mode is more restrictive since routing is controlled from the other (mostly)
default namespace.
diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst
index b705d2801e9c..e4bd7aa1f5aa 100644
--- a/Documentation/networking/j1939.rst
+++ b/Documentation/networking/j1939.rst
@@ -116,7 +116,7 @@ format, the Group Extension is set in the PS-field.
----------------------------------------
23 ... 16 15 ... 8
============== ========================
- F0h ... FFh GE (Group Extenstion)
+ F0h ... FFh GE (Group Extension)
============== ========================
On the other hand, when using PDU1 format, the PS-field contains a so-called
diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst
index 3a662f2b4d6e..f4e1b4e07adc 100644
--- a/Documentation/networking/net_failover.rst
+++ b/Documentation/networking/net_failover.rst
@@ -90,7 +90,7 @@ virtio-net interface, and ens11 is the slave 'primary' VF passthrough interface.
One point to note here is that some user space network configuration daemons
like systemd-networkd, ifupdown, etc, do not understand the 'net_failover'
device; and on the first boot, the VM might end up with both 'failover' device
-and VF accquiring IP addresses (either same or different) from the DHCP server.
+and VF acquiring IP addresses (either same or different) from the DHCP server.
This will result in lack of connectivity to the VM. So some tweaks might be
needed to these network configuration daemons to make sure that an IP is
received only on the 'failover' device.
diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst
index 1f5c4a04027c..dd0518e002f6 100644
--- a/Documentation/networking/netconsole.rst
+++ b/Documentation/networking/netconsole.rst
@@ -167,7 +167,7 @@ following format which is the same as /dev/kmsg::
Non printable characters in <message text> are escaped using "\xff"
notation. If the message contains optional dictionary, verbatim
-newline is used as the delimeter.
+newline is used as the delimiter.
If a message doesn't fit in certain number of bytes (currently 1000),
the message is split into multiple fragments by netconsole. These
diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst
index 5db8c263b0c6..30f1344e7cca 100644
--- a/Documentation/networking/page_pool.rst
+++ b/Documentation/networking/page_pool.rst
@@ -11,7 +11,7 @@ Basic use involves replacing alloc_pages() calls with the
page_pool_alloc_pages() call. Drivers should use page_pool_dev_alloc_pages()
replacing dev_alloc_pages().
-API keeps track of inflight pages, in order to let API user know
+API keeps track of in-flight pages, in order to let API user know
when it is safe to free a page_pool object. Thus, API users
must run page_pool_release_page() when a page is leaving the page_pool or
call page_pool_put_page() where appropriate in order to maintain correct
@@ -19,7 +19,7 @@ accounting.
API user must call page_pool_put_page() once on a page, as it
will either recycle the page, or in case of refcnt > 1, it will
-release the DMA mapping and inflight state accounting.
+release the DMA mapping and in-flight state accounting.
Architecture overview
=====================
@@ -88,7 +88,7 @@ a page will cause no race conditions is enough.
directly into the pool fast cache.
* page_pool_release_page(): Unmap the page (if mapped) and account for it on
- inflight counters.
+ in-flight counters.
* page_pool_dev_alloc_pages(): Get a page from the page allocator or page_pool
caches.
diff --git a/Documentation/networking/phonet.rst b/Documentation/networking/phonet.rst
index 8668dcbc5e6a..d705cc5b09fc 100644
--- a/Documentation/networking/phonet.rst
+++ b/Documentation/networking/phonet.rst
@@ -131,7 +131,7 @@ Phonet resources, as follow::
Subscription is similarly cancelled using the SIOCPNDELRESOURCE I/O
control request, or when the socket is closed.
-Note that no more than one socket can be subcribed to any given
+Note that no more than one socket can be subscribed to any given
resource at a time. If not, ioctl() will return EBUSY.
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index d11329a08984..b7ac4c64cf67 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -315,7 +315,7 @@ Some of the interface modes are described below:
only the port id, but also so-called "extensions". The only documented
extension so-far in the specification is the inclusion of timestamps, for
PTP-enabled PHYs. This mode isn't compatible with QSGMII, but offers the
- same capabilities in terms of link speed and negociation.
+ same capabilities in terms of link speed and negotiation.
``PHY_INTERFACE_MODE_1000BASEKX``
This is 1000BASE-X as defined by IEEE 802.3 Clause 36 with Clause 73
diff --git a/Documentation/networking/regulatory.rst b/Documentation/networking/regulatory.rst
index 16782a95b74a..5e42c8a175c3 100644
--- a/Documentation/networking/regulatory.rst
+++ b/Documentation/networking/regulatory.rst
@@ -66,7 +66,7 @@ An example::
iw reg set CR
This will request the kernel to set the regulatory domain to
-the specificied alpha2. The kernel in turn will then ask userspace
+the specified alpha2. The kernel in turn will then ask userspace
to provide a regulatory domain for the alpha2 specified by the user
by sending a uevent.
@@ -158,7 +158,7 @@ kmalloc() a structure big enough to hold your regulatory domain
structure and you should then fill it with your data. Finally you simply
call regulatory_hint() with the regulatory domain structure in it.
-Bellow is a simple example, with a regulatory domain cached using the stack.
+Below is a simple example, with a regulatory domain cached using the stack.
Your implementation may vary (read EEPROM cache instead, for example).
Example cache of some regulatory domain::
diff --git a/Documentation/networking/rxrpc.rst b/Documentation/networking/rxrpc.rst
index e1af54424192..ec1323d92c96 100644
--- a/Documentation/networking/rxrpc.rst
+++ b/Documentation/networking/rxrpc.rst
@@ -1069,7 +1069,7 @@ The kernel interface functions are as follows:
This value can be used to determine if the remote client has been
restarted as it shouldn't change otherwise.
- (#) Set the maxmimum lifespan on a call::
+ (#) Set the maximum lifespan on a call::
void rxrpc_kernel_set_max_life(struct socket *sock,
struct rxrpc_call *call,
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index 423d138b5ff3..213637474478 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -980,7 +980,7 @@ How many reply packets of the SYN cookies the TCP stack receives.
The MSS decoded from the SYN cookie is invalid. When this counter is
updated, the received packet won't be treated as a SYN cookie and the
-TcpExtSyncookiesRecv counter wont be updated.
+TcpExtSyncookiesRecv counter won't be updated.
Challenge ACK
=============
@@ -1681,7 +1681,7 @@ RST to nstat-b::
nstatuser@nstat-a:~$ sudo iptables -A INPUT -p tcp --sport 9000 -j DROP
-Send 3 SYN repeatly to nstat-b::
+Send 3 SYN repeatedly to nstat-b::
nstatuser@nstat-a:~$ for i in {1..3}; do sudo tcpreplay -i ens3 /tmp/syn_fixcsum.pcap; done
diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst
index c9aeb70dafa2..551b3cc29a41 100644
--- a/Documentation/networking/statistics.rst
+++ b/Documentation/networking/statistics.rst
@@ -171,6 +171,7 @@ statistics are supported in the following commands:
- `ETHTOOL_MSG_PAUSE_GET`
- `ETHTOOL_MSG_FEC_GET`
+ - `ETHTOOL_MSG_MM_GET`
debugfs
-------
diff --git a/Documentation/networking/sysfs-tagging.rst b/Documentation/networking/sysfs-tagging.rst
index 83647e10c207..65307130ab63 100644
--- a/Documentation/networking/sysfs-tagging.rst
+++ b/Documentation/networking/sysfs-tagging.rst
@@ -43,6 +43,6 @@ Users of this interface:
- current_ns() which returns current's namespace
- netlink_ns() which returns a socket's namespace
- - initial_ns() which returns the initial namesapce
+ - initial_ns() which returns the initial namespace
- call kobj_ns_exit() when an individual tag is no longer valid
diff --git a/Documentation/networking/xdp-rx-metadata.rst b/Documentation/networking/xdp-rx-metadata.rst
new file mode 100644
index 000000000000..aac63fc2d08b
--- /dev/null
+++ b/Documentation/networking/xdp-rx-metadata.rst
@@ -0,0 +1,110 @@
+===============
+XDP RX Metadata
+===============
+
+This document describes how an eXpress Data Path (XDP) program can access
+hardware metadata related to a packet using a set of helper functions,
+and how it can pass that metadata on to other consumers.
+
+General Design
+==============
+
+XDP has access to a set of kfuncs to manipulate the metadata in an XDP frame.
+Every device driver that wishes to expose additional packet metadata can
+implement these kfuncs. The set of kfuncs is declared in ``include/net/xdp.h``
+via ``XDP_METADATA_KFUNC_xxx``.
+
+Currently, the following kfuncs are supported. In the future, as more
+metadata is supported, this set will grow:
+
+.. kernel-doc:: net/core/xdp.c
+ :identifiers: bpf_xdp_metadata_rx_timestamp bpf_xdp_metadata_rx_hash
+
+An XDP program can use these kfuncs to read the metadata into stack
+variables for its own consumption. Or, to pass the metadata on to other
+consumers, an XDP program can store it into the metadata area carried
+ahead of the packet.
+
+Not all kfuncs have to be implemented by the device driver; when not
+implemented, the default ones that return ``-EOPNOTSUPP`` will be used.
+
+Within an XDP frame, the metadata layout (accessed via ``xdp_buff``) is
+as follows::
+
+ +----------+-----------------+------+
+ | headroom | custom metadata | data |
+ +----------+-----------------+------+
+ ^ ^
+ | |
+ xdp_buff->data_meta xdp_buff->data
+
+An XDP program can store individual metadata items into this ``data_meta``
+area in whichever format it chooses. Later consumers of the metadata
+will have to agree on the format by some out of band contract (like for
+the AF_XDP use case, see below).
+
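+For example, an XDP program can read both values and stash them in front of
+the packet for later consumers (a minimal sketch, assuming a driver which
+implements the kfuncs; ``struct meta`` is an application-defined layout)::
+
+  #include <linux/bpf.h>
+  #include <bpf/bpf_helpers.h>
+
+  extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
+                                           __u64 *timestamp) __ksym;
+  extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
+                                      __u32 *hash) __ksym;
+
+  struct meta {
+          __u64 rx_timestamp;
+          __u32 rx_hash;
+  };
+
+  SEC("xdp")
+  int rx(struct xdp_md *ctx)
+  {
+          void *data, *data_meta;
+          struct meta *meta;
+
+          /* Reserve space in front of the packet for custom metadata. */
+          if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
+                  return XDP_ABORTED;
+
+          data = (void *)(long)ctx->data;
+          data_meta = (void *)(long)ctx->data_meta;
+          meta = data_meta;
+          if ((void *)(meta + 1) > data) /* bounds check for the verifier */
+                  return XDP_ABORTED;
+
+          /* Unimplemented kfuncs return an error; fall back to zero. */
+          if (bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
+                  meta->rx_timestamp = 0;
+          if (bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash))
+                  meta->rx_hash = 0;
+
+          return XDP_PASS;
+  }
+
+  char _license[] SEC("license") = "GPL";
+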
+AF_XDP
+======
+
+The :doc:`af_xdp` use case implies that there is a contract between the BPF
+program that redirects XDP frames into the ``AF_XDP`` socket (``XSK``) and
+the final consumer. Thus the BPF program manually allocates a fixed number of
+bytes out of metadata via ``bpf_xdp_adjust_meta`` and calls a subset
+of kfuncs to populate it. The userspace ``XSK`` consumer computes
+``xsk_umem__get_data() - METADATA_SIZE`` to locate that metadata.
+Note, ``xsk_umem__get_data`` is defined in ``libxdp`` and
+``METADATA_SIZE`` is an application-specific constant (``AF_XDP`` receive
+descriptor does *not* explicitly carry the size of the metadata).
+
+Here is the ``AF_XDP`` consumer layout (note missing ``data_meta`` pointer)::
+
+ +----------+-----------------+------+
+ | headroom | custom metadata | data |
+ +----------+-----------------+------+
+ ^
+ |
+ rx_desc->address
+
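+Continuing the sketch from the previous section, the userspace consumer can
+locate the metadata as follows (assuming the same hypothetical
+``struct meta`` and the ``xsk`` helpers shipped with ``libxdp``)::
+
+  #include <stdio.h>
+  #include <linux/types.h>
+  #include <xdp/xsk.h>
+
+  struct meta {
+          __u64 rx_timestamp;
+          __u32 rx_hash;
+  };
+
+  static void dump_meta(struct xsk_ring_cons *rx, void *umem_area, __u32 idx)
+  {
+          const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx);
+          char *pkt = xsk_umem__get_data(umem_area, desc->addr);
+          /* METADATA_SIZE == sizeof(struct meta) by application contract. */
+          struct meta *meta = (struct meta *)(pkt - sizeof(*meta));
+
+          printf("hash 0x%x timestamp %llu\n", meta->rx_hash,
+                 (unsigned long long)meta->rx_timestamp);
+  }
+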
+XDP_PASS
+========
+
+This is the path where the packets processed by the XDP program are passed
+into the kernel. The kernel creates the ``skb`` out of the ``xdp_buff``
+contents. Currently, every driver has custom kernel code to parse
+the descriptors and populate ``skb`` metadata when doing this ``xdp_buff->skb``
+conversion, and the XDP metadata is not used by the kernel when building
+``skbs``. However, TC-BPF programs can access the XDP metadata area using
+the ``data_meta`` pointer.
+
+In the future, we'd like to support a case where an XDP program
+can override some of the metadata used for building ``skbs``.
+
+bpf_redirect_map
+================
+
+``bpf_redirect_map`` can redirect the frame to a different device.
+Some devices (like virtual ethernet links) support running a second XDP
+program after the redirect. However, the final consumer doesn't have
+access to the original hardware descriptor and can't access any of
+the original metadata. The same applies to XDP programs installed
+into devmaps and cpumaps.
+
+This means that for redirected packets only custom metadata is
+currently supported, and it has to be prepared by the initial XDP program
+before the redirect. If the frame is eventually passed to the kernel, the
+``skb`` created from such a frame won't have any hardware metadata populated.
+If such a packet is later redirected into an ``XSK``, the consumer will
+likewise only have access to the custom metadata.
+
+bpf_tail_call
+=============
+
+Adding programs that access metadata kfuncs to the ``BPF_MAP_TYPE_PROG_ARRAY``
+is currently not supported.
+
+Example
+=======
+
+See ``tools/testing/selftests/bpf/progs/xdp_metadata.c`` and
+``tools/testing/selftests/bpf/prog_tests/xdp_metadata.c`` for an example of
+a BPF program that handles XDP metadata.
diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst
index c43ace79e320..83abdfef4ec3 100644
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm_device.rst
@@ -64,7 +64,7 @@ Callbacks to implement
/* from include/linux/netdevice.h */
struct xfrmdev_ops {
/* Crypto and Packet offload callbacks */
- int (*xdo_dev_state_add) (struct xfrm_state *x);
+ int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
void (*xdo_dev_state_delete) (struct xfrm_state *x);
void (*xdo_dev_state_free) (struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
@@ -73,7 +73,7 @@ Callbacks to implement
/* Solely packet offload callbacks */
void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
- int (*xdo_dev_policy_add) (struct xfrm_policy *x);
+ int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
diff --git a/Documentation/userspace-api/netlink/c-code-gen.rst b/Documentation/userspace-api/netlink/c-code-gen.rst
new file mode 100644
index 000000000000..89de42c13350
--- /dev/null
+++ b/Documentation/userspace-api/netlink/c-code-gen.rst
@@ -0,0 +1,107 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+
+==============================
+Netlink spec C code generation
+==============================
+
+This document describes how Netlink specifications are used to render
+C code (uAPI, policies etc.). It also defines the additional properties
+allowed in older families by the ``genetlink-c`` protocol level,
+to control the naming.
+
+For brevity this document refers to ``name`` properties of various
+objects by the object type. For example ``$attr`` is the value
+of ``name`` in an attribute, and ``$family`` is the name of the
+family (the global ``name`` property).
+
+Upper case is used to denote literal values, e.g. ``$family-CMD``
+means the concatenation of ``$family``, a dash character, and the literal
+``CMD``.
+
+The names of ``#defines`` and enum values are always converted to upper case,
+and with dashes (``-``) replaced by underscores (``_``).
+
+If the constructed name is a C keyword, an extra underscore is
+appended (``do`` -> ``do_``).
+
+Globals
+=======
+
+``c-family-name`` controls the name of the ``#define`` for the family
+name, default is ``$family-FAMILY-NAME``.
+
+``c-version-name`` controls the name of the ``#define`` for the version
+of the family, default is ``$family-FAMILY-VERSION``.
+
+``max-by-define`` selects whether max values for enums are defined as a
+``#define`` rather than inside the enum.
+
+Definitions
+===========
+
+Constants
+---------
+
+Every constant is rendered as a ``#define``.
+The name of the constant is ``$family-$constant`` and the value
+is rendered as a string or integer according to its type in the spec.
+
+Enums and flags
+---------------
+
+Enums are named ``$family-$enum``. The full name can be set directly
+or suppressed by specifying the ``enum-name`` property.
+Default entry name is ``$family-$enum-$entry``.
+If ``name-prefix`` is specified it replaces the ``$family-$enum``
+portion of the entry name.
+
+Boolean ``render-max`` controls creation of the max values
+(which are enabled by default for attribute enums).
+
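+For example, a hypothetical family ``foobar`` defining an enum ``op-flags``
+with entries ``create`` and ``destroy`` would by default render roughly as::
+
+  enum foobar_op_flags {
+          FOOBAR_OP_FLAGS_CREATE,
+          FOOBAR_OP_FLAGS_DESTROY,
+  };
+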
+Attributes
+==========
+
+Each attribute set (excluding fractional sets) is rendered as an enum.
+
+Attribute enums are traditionally unnamed in netlink headers.
+If naming is desired ``enum-name`` can be used to specify the name.
+
+The default attribute name prefix is ``$family-A`` if the name of the set
+is the same as the name of the family and ``$family-A-$set`` if the names
+differ. The prefix can be overridden by the ``name-prefix`` property of a set.
+The rest of the section will refer to the prefix as ``$pfx``.
+
+Attributes are named ``$pfx-$attribute``.
+
+Attribute enums end with two special values ``__$pfx-MAX`` and ``$pfx-MAX``
+which are used for sizing attribute tables.
+These two names can be specified directly with the ``attr-cnt-name``
+and ``attr-max-name`` properties respectively.
+
+If ``max-by-define`` is set to ``true`` at the global level ``attr-max-name``
+will be specified as a ``#define`` rather than an enum value.
+
+Operations
+==========
+
+Operations are named ``$family-CMD-$operation``.
+If ``name-prefix`` is specified it replaces the ``$family-CMD``
+portion of the name.
+
+Similarly to attribute enums, operation enums end with special count and max
+attributes. For operations those attributes can be renamed with
+``cmd-cnt-name`` and ``cmd-max-name``. Max will be a define if ``max-by-define``
+is ``true``.
+
+Multicast groups
+================
+
+Each multicast group gets a define rendered into the kernel uAPI header.
+The name of the define is ``$family-MCGRP-$group``, and can be overwritten
+with the ``c-define-name`` property.
+
+Code generation
+===============
+
+The uAPI header is assumed to come from ``<linux/$family.h>`` in the default header
+search path. It can be changed using the ``uapi-header`` global property.
diff --git a/Documentation/userspace-api/netlink/genetlink-legacy.rst b/Documentation/userspace-api/netlink/genetlink-legacy.rst
new file mode 100644
index 000000000000..3bf0bcdf21d8
--- /dev/null
+++ b/Documentation/userspace-api/netlink/genetlink-legacy.rst
@@ -0,0 +1,178 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+
+=================================================================
+Netlink specification support for legacy Generic Netlink families
+=================================================================
+
+This document describes the many additional quirks and properties
+required to describe older Generic Netlink families which form
+the ``genetlink-legacy`` protocol level.
+
+The spec is a work in progress, some of the quirks are just documented
+for future reference.
+
+Specification (defined)
+=======================
+
+Attribute type nests
+--------------------
+
+New Netlink families should use ``multi-attr`` to define arrays.
+Older families (e.g. the ``genetlink`` control family) attempted to
+define array types by reusing the attribute type to carry information.
+
+For reference the ``multi-attr`` array may look like this::
+
+ [ARRAY-ATTR]
+ [INDEX (optionally)]
+ [MEMBER1]
+ [MEMBER2]
+ [SOME-OTHER-ATTR]
+ [ARRAY-ATTR]
+ [INDEX (optionally)]
+ [MEMBER1]
+ [MEMBER2]
+
+where ``ARRAY-ATTR`` is the array entry type.
+
+array-nest
+~~~~~~~~~~
+
+``array-nest`` creates the following structure::
+
+ [SOME-OTHER-ATTR]
+ [ARRAY-ATTR]
+ [ENTRY]
+ [MEMBER1]
+ [MEMBER2]
+ [ENTRY]
+ [MEMBER1]
+ [MEMBER2]
+
+It wraps the entire array in an extra attribute (hence limiting its size
+to 64kB). The ``ENTRY`` nests are special and have the index of the entry
+as their type instead of a normal attribute type.
+
+type-value
+~~~~~~~~~~
+
+``type-value`` is a construct which uses attribute types to carry
+information about a single object (often used when an array is dumped
+entry-by-entry).
+
+``type-value`` can have multiple levels of nesting, for example
+genetlink's policy dumps create the following structures::
+
+ [POLICY-IDX]
+ [ATTR-IDX]
+ [POLICY-INFO-ATTR1]
+ [POLICY-INFO-ATTR2]
+
+The first level of nesting has the policy index as its attribute
+type; it contains a single nest which has the attribute index as its
+type. Inside the attr-index nest are the policy attributes. Modern
+Netlink families should instead have defined this as a flat structure;
+the nesting serves no good purpose here.
+
+Operations
+==========
+
+Enum (message ID) model
+-----------------------
+
+unified
+~~~~~~~
+
+Modern families use the ``unified`` message ID model, which uses
+a single enumeration for all messages within the family. Requests and
+responses share the same message ID. Notifications have separate
+IDs from the same space. For example given the following list
+of operations:
+
+.. code-block:: yaml
+
+ -
+ name: a
+ value: 1
+ do: ...
+ -
+ name: b
+ do: ...
+ -
+ name: c
+ value: 4
+ notify: a
+ -
+ name: d
+ do: ...
+
+Requests and responses for operation ``a`` will have the ID of 1,
+and those of ``b`` the ID of 2 (since there is no explicit
+``value`` it is the previous operation's ID ``+ 1``). Notification ``c`` will
+use the ID of 4, operation ``d`` the ID of 5, and so on.
+
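+Assuming the family is called ``foo``, the example above would render
+roughly into the following uAPI enum::
+
+  enum {
+          FOO_CMD_A = 1,
+          FOO_CMD_B,      /* 2 - implicit, previous value + 1 */
+          FOO_CMD_C = 4,  /* explicit value, notification */
+          FOO_CMD_D,      /* 5 */
+  };
+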
+directional
+~~~~~~~~~~~
+
+The ``directional`` model splits the ID assignment by the direction of
+the message. Messages from and to the kernel can't be confused with
+each other so this conserves the ID space (at the cost of making
+the programming more cumbersome).
+
+In this case the ``value`` attribute should be specified in the ``request``
+and ``reply`` sections of the operations (if an operation has both ``do``
+and ``dump`` the IDs are shared; ``value`` should be set in ``do``).
+For notifications the ``value`` is provided at the op level but it
+only allocates a ``reply`` (i.e. a "from-kernel" ID). Let's look
+at an example:
+
+.. code-block:: yaml
+
+ -
+ name: a
+ do:
+ request:
+ value: 2
+ attributes: ...
+ reply:
+ value: 1
+ attributes: ...
+ -
+ name: b
+ notify: a
+ -
+ name: c
+ notify: a
+ value: 7
+ -
+ name: d
+ do: ...
+
+In this case ``a`` will use 2 when sending the message to the kernel
+and expects message with ID 1 in response. Notification ``b`` allocates
+a "from-kernel" ID which is 2. ``c`` allocates "from-kernel" ID of 7.
+If operation ``d`` does not set ``value`` explicitly in the spec
+it will be allocated 3 for the request (``a`` is the previous operation
+with a request section and the value of 2) and 8 for response (``c`` is
+the previous operation in the "from-kernel" direction).
+
+Other quirks (todo)
+===================
+
+Structures
+----------
+
+Legacy families can define C structures both to be used as the contents
+of an attribute and as a fixed message header. The plan is to define
+the structs in ``definitions`` and link the appropriate attrs.
+
+Multi-message DO
+----------------
+
+New Netlink families should never respond to a DO operation with multiple
+replies, with ``NLM_F_MULTI`` set. Use a filtered dump instead.
+
+At the spec level we can define a ``dumps`` property for the ``do``,
+perhaps with values of ``combine`` and ``multi-object`` depending
+on how the parsing should be implemented (parse into a single reply
+vs list of objects i.e. pretty much a dump).
diff --git a/Documentation/userspace-api/netlink/index.rst b/Documentation/userspace-api/netlink/index.rst
index b0c21538d97d..26f3720cb3be 100644
--- a/Documentation/userspace-api/netlink/index.rst
+++ b/Documentation/userspace-api/netlink/index.rst
@@ -10,3 +10,9 @@ Netlink documentation for users.
:maxdepth: 2
intro
+ intro-specs
+ specs
+ c-code-gen
+ genetlink-legacy
+
+See also :ref:`Documentation/core-api/netlink.rst <kernel_netlink>`.
diff --git a/Documentation/userspace-api/netlink/intro-specs.rst b/Documentation/userspace-api/netlink/intro-specs.rst
new file mode 100644
index 000000000000..a3b847eafff7
--- /dev/null
+++ b/Documentation/userspace-api/netlink/intro-specs.rst
@@ -0,0 +1,80 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+
+=====================================
+Using Netlink protocol specifications
+=====================================
+
+This document is a quick starting guide for using Netlink protocol
+specifications. For more detailed description of the specs see :doc:`specs`.
+
+Simple CLI
+==========
+
+The kernel comes with a simple CLI tool which should be useful when
+developing Netlink-related code. The tool is implemented in Python
+and can use a YAML specification to issue Netlink requests
+to the kernel. Only Generic Netlink is supported.
+
+The tool is located at ``tools/net/ynl/cli.py``. It accepts
+a handful of arguments, the most important ones being:
+
+ - ``--spec`` - point to the spec file
+ - ``--do $name`` / ``--dump $name`` - issue request ``$name``
+ - ``--json $attrs`` - provide attributes for the request
+ - ``--subscribe $group`` - receive notifications from ``$group``
+
+YAML specs can be found under ``Documentation/netlink/specs/``.
+
+Example use::
+
+  $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
+        --do rings-get \
+        --json '{"header":{"dev-index": 18}}'
+  {'header': {'dev-index': 18, 'dev-name': 'eni1np1'},
+   'rx': 0,
+   'rx-jumbo': 0,
+   'rx-jumbo-max': 4096,
+   'rx-max': 4096,
+   'rx-mini': 0,
+   'rx-mini-max': 4096,
+   'tx': 0,
+   'tx-max': 4096,
+   'tx-push': 0}
+
+The input arguments are parsed as JSON, while the output is only
+Python-pretty-printed. This is because some Netlink types can't
+be expressed as JSON directly. If such attributes are needed in
+the input, some hacking of the script will be necessary.
+
+The spec and Netlink internals are factored out as a standalone
+library - it should be easy to write Python tools / tests reusing
+code from ``cli.py``.
+
+Generating kernel code
+======================
+
+``tools/net/ynl/ynl-regen.sh`` scans the kernel tree in search of
+auto-generated files which need to be updated. Using this tool is the easiest
+way to generate / update auto-generated code.
+
+By default, code is regenerated only if the spec is newer than the source;
+to force regeneration use ``-f``.
+
+``ynl-regen.sh`` searches for ``YNL-GEN`` in the contents of files
+(note that it only scans files in the git index, that is, only files
+tracked by git!). For instance, the ``fou_nl.c`` kernel source contains::
+
+ /* Documentation/netlink/specs/fou.yaml */
+ /* YNL-GEN kernel source */
+
+``ynl-regen.sh`` will find this marker and replace the file with
+kernel source based on ``fou.yaml``.
+
+The simplest way to generate a new file based on a spec is to add
+the two marker lines like above to a file, add that file to git,
+and run the regeneration tool. Grep the tree for ``YNL-GEN``
+to see other examples.
+
+The code generation itself is performed by ``tools/net/ynl/ynl-gen-c.py``
+but it takes a few arguments so calling it directly for each file
+quickly becomes tedious.
diff --git a/Documentation/userspace-api/netlink/specs.rst b/Documentation/userspace-api/netlink/specs.rst
new file mode 100644
index 000000000000..6ffe8137cd90
--- /dev/null
+++ b/Documentation/userspace-api/netlink/specs.rst
@@ -0,0 +1,425 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+
+=========================================
+Netlink protocol specifications (in YAML)
+=========================================
+
+Netlink protocol specifications are complete, machine readable descriptions of
+Netlink protocols written in YAML. The goal of the specifications is to allow
+separating Netlink parsing from user space logic and minimize the amount
+of hand-written Netlink code for each new family, command, and attribute.
+Netlink specs should be complete and not depend on any other spec
+or C header file, making it easy to use in languages which can't include
+kernel headers directly.
+
+Internally, the kernel uses the YAML specs to generate:
+
+ - the C uAPI header
+ - documentation of the protocol as a ReST file
+ - policy tables for input attribute validation
+ - operation tables
+
+YAML specifications can be found under ``Documentation/netlink/specs/``.
+
+This document describes details of the schema.
+See :doc:`intro-specs` for a practical starting guide.
+
+Compatibility levels
+====================
+
+There are four schema levels for Netlink specs, from the simplest used
+by new families to the most complex covering all the quirks of the old ones.
+Each next level inherits the attributes of the previous level, meaning that
+a parser capable of handling the more complex schemas is also able to
+handle the simpler ones. The levels are:
+
+ - ``genetlink`` - most streamlined, should be used by all new families
+ - ``genetlink-c`` - superset of ``genetlink`` with extra attributes allowing
+   customization of define and enum type and value names; this schema should
+   be equivalent to ``genetlink`` for all implementations which don't interact
+   directly with C uAPI headers
+ - ``genetlink-legacy`` - Generic Netlink catch-all schema supporting quirks of
+   all old genetlink families, strange attribute formats, binary structures etc.
+ - ``netlink-raw`` - catch-all schema supporting pre-Generic Netlink protocols
+   such as ``NETLINK_ROUTE``
+
+The definition of the schemas (in ``jsonschema``) can be found
+under ``Documentation/netlink/``.
+
+Schema structure
+================
+
+The YAML schema has the following conceptual sections:
+
+ - globals
+ - definitions
+ - attributes
+ - operations
+ - multicast groups
+
+Most properties in the schema accept (or in fact require) a ``doc``
+sub-property documenting the defined object.
+
+The following sections describe the properties of the most modern ``genetlink``
+schema. See the documentation of :doc:`genetlink-c <c-code-gen>`
+for information on how C names are derived from name properties.
+
+genetlink
+=========
+
+Globals
+-------
+
+Attributes listed directly at the root level of the spec file.
+
+name
+~~~~
+
+Name of the family. The name identifies the family in a unique way, since
+Family IDs are allocated dynamically.
+
+version
+~~~~~~~
+
+Generic Netlink family version, default is 1.
+
+protocol
+~~~~~~~~
+
+The schema level, default is ``genetlink``, which is the only value
+allowed for new ``genetlink`` families.
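+
+A minimal sketch of the globals for a hypothetical family (the family
+name is made up for illustration):
+
+.. code-block:: yaml
+
+  name: dummy
+  version: 1
+  protocol: genetlink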
+
+definitions
+-----------
+
+Array of type and constant definitions.
+
+name
+~~~~
+
+Name of the type / constant.
+
+type
+~~~~
+
+One of the following types:
+
+ - const - a single, standalone constant
+ - enum - defines an integer enumeration, with values for each entry
+   incrementing by 1 (e.g. 0, 1, 2, 3)
+ - flags - defines an integer enumeration, with values for each entry
+   occupying a bit, starting from bit 0 (e.g. 1, 2, 4, 8)
+
+value
+~~~~~
+
+The value for the ``const``.
+
+value-start
+~~~~~~~~~~~
+
+The first value for ``enum`` and ``flags``, allows overriding the default
+start value of ``0`` (for ``enum``) and starting bit (for ``flags``).
+For ``flags`` ``value-start`` selects the starting bit, not the shifted value.
+
+Sparse enumerations are not supported.
+
+entries
+~~~~~~~
+
+Array of names of the entries for ``enum`` and ``flags``.
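+
+A sketch of a ``definitions`` section exercising all three types
+(all names and values are made up for illustration):
+
+.. code-block:: yaml
+
+  definitions:
+    -
+      name: max-queues
+      type: const
+      value: 16
+    -
+      name: op-mode
+      type: enum
+      entries: [ none, manual, auto ]
+    -
+      name: caps
+      type: flags
+      value-start: 1
+      entries: [ rx-csum, tx-csum ]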
+
+header
+~~~~~~
+
+For C-compatible languages, the header which already defines this value.
+In case the definition is shared by multiple families (e.g. ``IFNAMSIZ``)
+code generators for C-compatible languages may prefer to add an appropriate
+include instead of rendering a new definition.
+
+attribute-sets
+--------------
+
+This property contains information about netlink attributes of the family.
+All families have at least one attribute set, most have multiple.
+``attribute-sets`` is an array, with each entry describing a single set.
+
+Note that the spec is "flattened" and is not meant to visually resemble
+the format of the netlink messages (unlike certain ad-hoc documentation
+formats seen in kernel comments). In the spec subordinate attribute sets
+are not defined inline as a nest, but defined in a separate attribute set
+referred to with a ``nested-attributes`` property of the container.
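+
+For example, a nest is expressed as two sets tied together by the
+``nested-attributes`` property (a hypothetical family, all names are
+made up for illustration):
+
+.. code-block:: yaml
+
+  attribute-sets:
+    -
+      name: queue
+      attributes:
+        -
+          name: id
+          type: u32
+        -
+          name: size
+          type: u32
+    -
+      name: main
+      attributes:
+        -
+          name: queue-info
+          type: nest
+          nested-attributes: queue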
+
+A spec may also contain fractional sets - sets which contain a ``subset-of``
+property. Such sets describe a section of a full set, allowing narrowing down
+which attributes are allowed in a nest or refining the validation criteria.
+Fractional sets can only be used in nests. They are not rendered to the uAPI
+in any fashion.
+
+name
+~~~~
+
+Uniquely identifies the attribute set; operations and nested attributes
+refer to the sets by the ``name``.
+
+subset-of
+~~~~~~~~~
+
+Re-defines a portion of another set (a fractional set).
+Allows narrowing down fields and changing validation criteria
+or even types of attributes depending on the nest in which they
+are contained. The ``value`` of each attribute in the fractional
+set is implicitly the same as in the main set.
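+
+A sketch of a fractional set, reusing the hypothetical ``queue`` set
+from the example above:
+
+.. code-block:: yaml
+
+  -
+    name: queue-id-only
+    subset-of: queue
+    attributes:
+      -
+        name: id
+        type: u32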
+
+attributes
+~~~~~~~~~~
+
+List of attributes in the set.
+
+Attribute properties
+--------------------
+
+name
+~~~~
+
+Identifies the attribute, unique within the set.
+
+type
+~~~~
+
+Netlink attribute type, see :ref:`attr_types`.
+
+.. _assign_val:
+
+value
+~~~~~
+
+Numerical attribute ID, used in serialized Netlink messages.
+The ``value`` property can be skipped, in which case the attribute ID
+will be the value of the previous attribute plus one (recursively)
+and ``0`` for the first attribute in the attribute set.
+
+Note that the ``value`` of an attribute is defined only in its main set.
+
+enum
+~~~~
+
+For integer types specifies that values in the attribute belong
+to an ``enum`` or ``flags`` from the ``definitions`` section.
+
+enum-as-flags
+~~~~~~~~~~~~~
+
+Treat ``enum`` as ``flags`` regardless of its type in ``definitions``.
+When both ``enum`` and ``flags`` forms are needed, ``definitions`` should
+contain an ``enum`` and attributes which need the ``flags`` form should
+use this property.
+
+nested-attributes
+~~~~~~~~~~~~~~~~~
+
+Identifies the attribute space for attributes nested within a given attribute.
+Only valid for complex attributes which may have sub-attributes.
+
+multi-attr (arrays)
+~~~~~~~~~~~~~~~~~~~
+
+Boolean property signifying that the attribute may be present multiple times.
+Allowing an attribute to repeat is the recommended way of implementing arrays
+(no extra nesting).
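+
+For instance, an array of 32 bit values could be declared as follows
+(the attribute name is illustrative):
+
+.. code-block:: yaml
+
+  -
+    name: port-list
+    type: u32
+    multi-attr: true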
+
+byte-order
+~~~~~~~~~~
+
+For integer types specifies attribute byte order - ``little-endian``
+or ``big-endian``.
+
+checks
+~~~~~~
+
+Input validation constraints used by the kernel. User space should query
+the policy of the running kernel using Generic Netlink introspection,
+rather than depend on what is specified in the spec file.
+
+The validation policy in the kernel is formed by combining the type
+definition (``type`` and ``nested-attributes``) and the ``checks``.
+
+operations
+----------
+
+This section describes messages passed between the kernel and the user space.
+There are three types of entries in this section - operations, notifications
+and events.
+
+Operations describe the most common request - response communication. The
+user sends a request and the kernel replies. Each operation may contain any
+combination of the two modes familiar to netlink users - ``do`` and ``dump``.
+``do`` and ``dump`` in turn contain a combination of ``request`` and
+``reply`` properties. If no explicit message with attributes is passed
+in a given direction (e.g. a ``dump`` which does not accept a filter, or
+a ``do`` of a SET operation to which the kernel responds with just the
+netlink error code) the ``request`` or ``reply`` section can be skipped.
+``request`` and ``reply`` sections list the attributes allowed in a message.
+The list contains only the names of attributes from a set referred
+to by the ``attribute-set`` property.
+
+Notifications and events both refer to the asynchronous messages sent by
+the kernel to members of a multicast group. The difference between the
+two is that a notification shares its contents with a GET operation
+(the name of the GET operation is specified in the ``notify`` property).
+This arrangement is commonly used for notifications about
+objects where the notification carries the full object definition.
+
+Events are more focused and carry only a subset of information rather than full
+object state (a made-up example would be a link state change event with just
+the interface name and the new link state). Events contain the ``event``
+property. Events are considered less idiomatic for netlink and notifications
+should be preferred.
+
+list
+~~~~
+
+The only property of ``operations`` for ``genetlink``, holds the list of
+operations, notifications etc.
+
+Operation properties
+--------------------
+
+name
+~~~~
+
+Identifies the operation.
+
+value
+~~~~~
+
+Numerical message ID, used in serialized Netlink messages.
+The same enumeration rules are applied as to
+:ref:`attribute values<assign_val>`.
+
+attribute-set
+~~~~~~~~~~~~~
+
+Specifies the attribute set contained within the message.
+
+do
+~~~
+
+Specification for the ``doit`` request. Should contain ``request``, ``reply``
+or both of these properties, each holding a :ref:`attr_list`.
+
+dump
+~~~~
+
+Specification for the ``dumpit`` request. Should contain ``request``, ``reply``
+or both of these properties, each holding a :ref:`attr_list`.
+
+notify
+~~~~~~
+
+Designates the message as a notification. Contains the name of the operation
+(possibly the same operation which holds this property) whose ``do`` reply
+shares its contents with the notification.
+
+event
+~~~~~
+
+Specification of attributes in the event, holds a :ref:`attr_list`.
+The ``event`` property is mutually exclusive with ``notify``.
+
+mcgrp
+~~~~~
+
+Used with ``event`` and ``notify``, specifies which multicast group
+the message belongs to.
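+
+A sketch tying the operation properties together, reusing the
+hypothetical ``queue`` attribute set from the earlier example and the
+``monitor`` group from the ``mcast-groups`` example below (all names
+are made up for illustration):
+
+.. code-block:: yaml
+
+  operations:
+    list:
+      -
+        name: queue-get
+        attribute-set: queue
+        do:
+          request:
+            attributes:
+              - id
+          reply:
+            attributes:
+              - id
+              - size
+        dump:
+          reply:
+            attributes:
+              - id
+              - size
+      -
+        name: queue-ntf
+        notify: queue-get
+        mcgrp: monitor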
+
+.. _attr_list:
+
+Message attribute list
+----------------------
+
+``request``, ``reply`` and ``event`` properties have a single ``attributes``
+property which holds the list of attribute names.
+
+Messages can also define ``pre`` and ``post`` properties which will be rendered
+as ``pre_doit`` and ``post_doit`` calls in the kernel (these properties should
+be ignored by user space).
+
+mcast-groups
+------------
+
+This section lists the multicast groups of the family.
+
+list
+~~~~
+
+The only property of ``mcast-groups`` for ``genetlink``, holds the list
+of groups.
+
+Multicast group properties
+--------------------------
+
+name
+~~~~
+
+Uniquely identifies the multicast group in the family. Similarly to
+the Family ID, the Multicast Group ID needs to be resolved at runtime,
+based on the name.
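+
+A sketch of the section (the group name is made up):
+
+.. code-block:: yaml
+
+  mcast-groups:
+    list:
+      -
+        name: monitor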
+
+.. _attr_types:
+
+Attribute types
+===============
+
+This section describes the attribute types supported by the ``genetlink``
+compatibility level. Refer to documentation of different levels for additional
+attribute types.
+
+Scalar integer types
+--------------------
+
+Fixed-width integer types:
+``u8``, ``u16``, ``u32``, ``u64``, ``s8``, ``s16``, ``s32``, ``s64``.
+
+Note that types smaller than 32 bit should be avoided as using them
+does not save any memory in Netlink messages (due to alignment).
+See :ref:`pad_type` for padding of 64 bit attributes.
+
+The payload of the attribute is the integer in host order unless ``byte-order``
+specifies otherwise.
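+
+For example, a 32 bit attribute carried in big endian could be
+declared as follows (the attribute name is illustrative):
+
+.. code-block:: yaml
+
+  -
+    name: peer-addr
+    type: u32
+    byte-order: big-endian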
+
+.. _pad_type:
+
+pad
+---
+
+Special attribute type used for padding attributes which require alignment
+bigger than the standard 4-byte alignment required by netlink (e.g. 64 bit
+integers).
+There can only be a single attribute of the ``pad`` type in any attribute set
+and it should be automatically used for padding when needed.
+
+flag
+----
+
+Attribute with no payload; its presence alone carries the information.
+
+binary
+------
+
+Raw binary data attribute; the contents are opaque to generic code.
+
+string
+------
+
+Character string. Unless ``checks`` has ``unterminated-ok`` set to ``true``
+the string is required to be null terminated.
+``max-len`` in ``checks`` indicates the longest possible string; if not
+present, the length of the string is unbounded.
+
+Note that ``max-len`` does not count the terminating character.
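+
+A sketch of a string attribute with ``checks`` (the name and length are
+chosen arbitrarily for illustration):
+
+.. code-block:: yaml
+
+  -
+    name: dev-name
+    type: string
+    checks:
+      max-len: 15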
+
+nest
+----
+
+Attribute containing other (nested) attributes.
+``nested-attributes`` specifies which attribute set is used inside.
diff --git a/MAINTAINERS b/MAINTAINERS
index b4d198cca5aa..fbff490379ad 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3858,6 +3858,13 @@ L: bpf@vger.kernel.org
S: Maintained
F: tools/testing/selftests/bpf/
+BPF [DOCUMENTATION] (Related to Standardization)
+R: David Vernet <void@manifault.com>
+L: bpf@vger.kernel.org
+L: bpf@ietf.org
+S: Maintained
+F: Documentation/bpf/instruction-set.rst
+
BPF [MISC]
L: bpf@vger.kernel.org
S: Odd Fixes
@@ -4642,12 +4649,11 @@ F: net/sched/sch_etf.c
F: net/sched/sch_taprio.c
CC2520 IEEE-802.15.4 RADIO DRIVER
-M: Varka Bhadram <varkabhadram@gmail.com>
+M: Stefan Schmidt <stefan@datenfreihafen.org>
L: linux-wpan@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
F: drivers/net/ieee802154/cc2520.c
-F: include/linux/spi/cc2520.h
CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER
M: Gilad Ben-Yossef <gilad@benyossef.com>
@@ -5944,7 +5950,7 @@ S: Supported
F: Documentation/networking/devlink
F: include/net/devlink.h
F: include/uapi/linux/devlink.h
-F: net/core/devlink.c
+F: net/devlink/
DH ELECTRONICS IMX6 DHCOM/DHCOR BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
@@ -9851,6 +9857,7 @@ F: drivers/clk/clk-versaclock5.c
IEEE 802.15.4 SUBSYSTEM
M: Alexander Aring <alex.aring@gmail.com>
M: Stefan Schmidt <stefan@datenfreihafen.org>
+M: Miquel Raynal <miquel.raynal@bootlin.com>
L: linux-wpan@vger.kernel.org
S: Maintained
W: https://linux-wpan.org/
@@ -12675,9 +12682,9 @@ F: drivers/iio/potentiometer/mcp4018.c
F: drivers/iio/potentiometer/mcp4531.c
MCR20A IEEE-802.15.4 RADIO DRIVER
-M: Xue Liu <liuxuenetmail@gmail.com>
+M: Stefan Schmidt <stefan@datenfreihafen.org>
L: linux-wpan@vger.kernel.org
-S: Maintained
+S: Odd Fixes
W: https://github.com/xueliu/mcr20a-linux
F: Documentation/devicetree/bindings/net/ieee802154/mcr20a.txt
F: drivers/net/ieee802154/mcr20a.c
@@ -13570,6 +13577,7 @@ S: Maintained
F: Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
F: Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml
F: drivers/net/dsa/microchip/*
+F: include/linux/dsa/ksz_common.h
F: include/linux/platform_data/microchip-ksz.h
F: net/dsa/tag_ksz.c
@@ -14022,6 +14030,7 @@ M: Peter Geis <pgwipeout@gmail.com>
M: Frank <Frank.Sae@motor-comm.com>
L: netdev@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/net/motorcomm,yt8xxx.yaml
F: drivers/net/phy/motorcomm.c
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
@@ -14038,9 +14047,9 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/radio/radio-mr800.c
MRF24J40 IEEE 802.15.4 RADIO DRIVER
-M: Alan Ott <alan@signal11.us>
+M: Stefan Schmidt <stefan@datenfreihafen.org>
L: linux-wpan@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
F: drivers/net/ieee802154/mrf24j40.c
@@ -14402,6 +14411,8 @@ M: Florian Fainelli <f.fainelli@gmail.com>
M: Vladimir Oltean <olteanv@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/net/dsa/
+F: Documentation/devicetree/bindings/net/ethernet-switch-port.yaml
+F: Documentation/devicetree/bindings/net/ethernet-switch.yaml
F: drivers/net/dsa/
F: include/linux/dsa/
F: include/linux/platform_data/dsa.h
@@ -14420,8 +14431,10 @@ Q: https://patchwork.kernel.org/project/netdevbpf/list/
B: mailto:netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
+F: Documentation/core-api/netlink.rst
F: Documentation/networking/
F: Documentation/process/maintainer-netdev.rst
+F: Documentation/userspace-api/netlink/
F: include/linux/in.h
F: include/linux/net.h
F: include/linux/netdevice.h
@@ -14433,6 +14446,7 @@ F: include/uapi/linux/netdevice.h
F: lib/net_utils.c
F: lib/random32.c
F: net/
+F: tools/net/
F: tools/testing/selftests/net/
NETWORKING [IPSEC]
@@ -15010,6 +15024,7 @@ M: Colin Foster <colin.foster@in-advantage.com>
S: Supported
F: Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
F: drivers/mfd/ocelot*
+F: drivers/net/dsa/ocelot/ocelot_ext.c
F: include/linux/mfd/ocelot.h
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
@@ -15441,6 +15456,13 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/boot/dts/ralink/omega2p.dts
+ONSEMI ETHERNET PHY DRIVERS
+M: Piergiorgio Beruto <piergiorgio.beruto@gmail.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.onsemi.com
+F: drivers/net/phy/ncn*
+
OP-TEE DRIVER
M: Jens Wiklander <jens.wiklander@linaro.org>
L: op-tee@lists.trustedfirmware.org
@@ -16486,6 +16508,13 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
F: drivers/iio/chemical/pms7003.c
+PLCA RECONCILIATION SUBLAYER (IEEE802.3 Clause 148)
+M: Piergiorgio Beruto <piergiorgio.beruto@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/mdio-open-alliance.h
+F: net/ethtool/plca.c
+
PLDMFW LIBRARY
M: Jacob Keller <jacob.e.keller@intel.com>
S: Maintained
@@ -17083,6 +17112,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
F: drivers/net/wireless/ath/ath11k/
+QUALCOMM ATH12K WIRELESS DRIVER
+M: Kalle Valo <kvalo@kernel.org>
+L: ath12k@lists.infradead.org
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
+F: drivers/net/wireless/ath/ath12k/
+
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
M: Toke Høiland-Jørgensen <toke@toke.dk>
L: linux-wireless@vger.kernel.org
@@ -18759,6 +18795,7 @@ M: Edward Cree <ecree.xilinx@gmail.com>
M: Martin Habets <habetsm.xilinx@gmail.com>
L: netdev@vger.kernel.org
S: Supported
+F: Documentation/networking/devlink/sfc.rst
F: drivers/net/ethernet/sfc/
SFF/SFP/SFP+ MODULE SUPPORT
@@ -20639,7 +20676,7 @@ M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/thunderbolt.c
+F: drivers/net/thunderbolt/
THUNDERX GPIO DRIVER
M: Robert Richter <rric@kernel.org>
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index f0f54aef3724..d8a13959bff0 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -11,6 +11,7 @@
#define __ASM_ARM_CHECKSUM_H
#include <linux/in6.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
index c928b6824e41..fefb93487291 100644
--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
@@ -47,6 +47,46 @@
status = "okay";
};
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy1>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <5000000>;
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ eee-broken-1000t;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy2>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <5000000>;
+
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
+ eee-broken-1000t;
+ };
+ };
+};
+
&lpuart1 { /* console */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -77,6 +117,44 @@
};
&iomuxc {
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <
+ MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x57e
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x57e
+ MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e
+ MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e
+ MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e
+ MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e
+ MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x5fe
+ MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e
+ MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x57e
+ MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x57e
+ MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x57e
+ MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x57e
+ MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x5fe
+ MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x57e
+ >;
+ };
+
+ pinctrl_fec: fecgrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_MDC__ENET1_MDC 0x57e
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x57e
+ MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e
+ MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e
+ MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e
+ MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e
+ MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x5fe
+ MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e
+ MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x57e
+ MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x57e
+ MX93_PAD_ENET2_TD2__ENET1_RGMII_TD2 0x57e
+ MX93_PAD_ENET2_TD3__ENET1_RGMII_TD3 0x57e
+ MX93_PAD_ENET2_TXC__ENET1_RGMII_TXC 0x5fe
+ MX93_PAD_ENET2_TX_CTL__ENET1_RGMII_TX_CTL 0x57e
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
index abb3fbe4ba22..2076f9c9983a 100644
--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
@@ -577,6 +577,54 @@
status = "disabled";
};
+ eqos: ethernet@428a0000 {
+ compatible = "nxp,imx93-dwmac-eqos", "snps,dwmac-5.10a";
+ reg = <0x428a0000 0x10000>;
+ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eth_wake_irq", "macirq";
+ clocks = <&clk IMX93_CLK_ENET_QOS_GATE>,
+ <&clk IMX93_CLK_ENET_QOS_GATE>,
+ <&clk IMX93_CLK_ENET_TIMER2>,
+ <&clk IMX93_CLK_ENET>,
+ <&clk IMX93_CLK_ENET_QOS_GATE>;
+ clock-names = "stmmaceth", "pclk", "ptp_ref", "tx", "mem";
+ assigned-clocks = <&clk IMX93_CLK_ENET_TIMER2>,
+ <&clk IMX93_CLK_ENET>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>,
+ <&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
+ assigned-clock-rates = <100000000>, <250000000>;
+ intf_mode = <&wakeupmix_gpr 0x28>;
+ clk_csr = <0>;
+ status = "disabled";
+ };
+
+ fec: ethernet@42890000 {
+ compatible = "fsl,imx93-fec", "fsl,imx8mq-fec", "fsl,imx6sx-fec";
+ reg = <0x42890000 0x10000>;
+ interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX93_CLK_ENET1_GATE>,
+ <&clk IMX93_CLK_ENET1_GATE>,
+ <&clk IMX93_CLK_ENET_TIMER1>,
+ <&clk IMX93_CLK_ENET_REF>,
+ <&clk IMX93_CLK_ENET_REF_PHY>;
+ clock-names = "ipg", "ahb", "ptp",
+ "enet_clk_ref", "enet_out";
+ assigned-clocks = <&clk IMX93_CLK_ENET_TIMER1>,
+ <&clk IMX93_CLK_ENET_REF>,
+ <&clk IMX93_CLK_ENET_REF_PHY>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>,
+ <&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>,
+ <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
+ assigned-clock-rates = <100000000>, <250000000>, <50000000>;
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ status = "disabled";
+ };
+
usdhc3: mmc@428b0000 {
compatible = "fsl,imx93-usdhc", "fsl,imx8mm-usdhc";
reg = <0x428b0000 0x10000>;
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index c4b1947ebf76..288003a9f0ca 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -841,7 +841,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
if (ret < 0)
return ret;
- move_imm(ctx, t1, func_addr, is32);
+ move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
break;
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
index ca708024fdd3..c335dc4eed37 100644
--- a/arch/loongarch/net/bpf_jit.h
+++ b/arch/loongarch/net/bpf_jit.h
@@ -82,6 +82,27 @@ static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, boo
emit_insn(ctx, addiw, reg, reg, 0);
}
+static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
+{
+ u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;
+
+ /* lu12iw rd, imm_31_12 */
+ imm_31_12 = (addr >> 12) & 0xfffff;
+ emit_insn(ctx, lu12iw, rd, imm_31_12);
+
+ /* ori rd, rd, imm_11_0 */
+ imm_11_0 = addr & 0xfff;
+ emit_insn(ctx, ori, rd, rd, imm_11_0);
+
+ /* lu32id rd, imm_51_32 */
+ imm_51_32 = (addr >> 32) & 0xfffff;
+ emit_insn(ctx, lu32id, rd, imm_51_32);
+
+ /* lu52id rd, rd, imm_63_52 */
+ imm_63_52 = (addr >> 52) & 0xfff;
+ emit_insn(ctx, lu52id, rd, rd, imm_63_52);
+}
+
static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index 9a7d7346001e..f433121774c0 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -7,6 +7,6 @@
#define _ASM_RISCV_PATCH_H
int patch_text_nosync(void *addr, const void *insns, size_t len);
-int patch_text(void *addr, u32 insn);
+int patch_text(void *addr, u32 *insns, int ninsns);
#endif /* _ASM_RISCV_PATCH_H */
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 765004b60513..8086d1a281cd 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -15,7 +15,8 @@
struct patch_insn {
void *addr;
- u32 insn;
+ u32 *insns;
+ int ninsns;
atomic_t cpu_count;
};
@@ -102,12 +103,15 @@ NOKPROBE_SYMBOL(patch_text_nosync);
static int patch_text_cb(void *data)
{
struct patch_insn *patch = data;
- int ret = 0;
+ unsigned long len;
+ int i, ret = 0;
if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
- ret =
- patch_text_nosync(patch->addr, &patch->insn,
- GET_INSN_LENGTH(patch->insn));
+ for (i = 0; ret == 0 && i < patch->ninsns; i++) {
+ len = GET_INSN_LENGTH(patch->insns[i]);
+ ret = patch_text_nosync(patch->addr + i * len,
+ &patch->insns[i], len);
+ }
atomic_inc(&patch->cpu_count);
} else {
while (atomic_read(&patch->cpu_count) <= num_online_cpus())
@@ -119,11 +123,12 @@ static int patch_text_cb(void *data)
}
NOKPROBE_SYMBOL(patch_text_cb);
-int patch_text(void *addr, u32 insn)
+int patch_text(void *addr, u32 *insns, int ninsns)
{
struct patch_insn patch = {
.addr = addr,
- .insn = insn,
+ .insns = insns,
+ .ninsns = ninsns,
.cpu_count = ATOMIC_INIT(0),
};
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 2bedec37d092..2f08c14a933d 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -23,13 +23,14 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
+ u32 insn = __BUG_INSN_32;
unsigned long offset = GET_INSN_LENGTH(p->opcode);
p->ainsn.api.restore = (unsigned long)p->addr + offset;
- patch_text(p->ainsn.api.insn, p->opcode);
+ patch_text(p->ainsn.api.insn, &p->opcode, 1);
patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
- __BUG_INSN_32);
+ &insn, 1);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
@@ -116,16 +117,16 @@ void *alloc_insn_page(void)
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
- if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
- patch_text(p->addr, __BUG_INSN_32);
- else
- patch_text(p->addr, __BUG_INSN_16);
+ u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ?
+ __BUG_INSN_32 : __BUG_INSN_16;
+
+ patch_text(p->addr, &insn, 1);
}
/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- patch_text(p->addr, p->opcode);
+ patch_text(p->addr, &p->opcode, 1);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index d926e0f7ef57..bf9802a63061 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -573,6 +573,11 @@ static inline u32 rv_fence(u8 pred, u8 succ)
return rv_i_insn(imm11_0, 0, 0, 0, 0xf);
}
+static inline u32 rv_nop(void)
+{
+ return rv_i_insn(0, 0, 0, 0, 0x13);
+}
+
/* RVC instrutions. */
static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index f2417ac54edd..f5a668736c79 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -8,6 +8,8 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/memory.h>
+#include <linux/stop_machine.h>
#include "bpf_jit.h"
#define RV_REG_TCC RV_REG_A6
@@ -238,7 +240,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
if (!is_tail_call)
emit_mv(RV_REG_A0, RV_REG_A5, ctx);
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
- is_tail_call ? 4 : 0, /* skip TCC init */
+ is_tail_call ? 20 : 0, /* skip reserved nops and TCC init */
ctx);
}
@@ -428,12 +430,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
*rd = RV_REG_T2;
}
-static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
+static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
struct rv_jit_context *ctx)
{
s64 upper, lower;
- if (rvoff && is_21b_int(rvoff) && !force_jalr) {
+ if (rvoff && fixed_addr && is_21b_int(rvoff)) {
emit(rv_jal(rd, rvoff >> 1), ctx);
return 0;
} else if (in_auipc_jalr_range(rvoff)) {
@@ -454,24 +456,17 @@ static bool is_signed_bpf_cond(u8 cond)
cond == BPF_JSGE || cond == BPF_JSLE;
}
-static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
+static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
{
s64 off = 0;
u64 ip;
- u8 rd;
- int ret;
if (addr && ctx->insns) {
ip = (u64)(long)(ctx->insns + ctx->ninsns);
off = addr - ip;
}
- ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
- if (ret)
- return ret;
- rd = bpf_to_rv_reg(BPF_REG_0, ctx);
- emit_mv(rd, RV_REG_A0, ctx);
- return 0;
+ return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}
static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
@@ -622,6 +617,401 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;
}
+static int gen_call_or_nops(void *target, void *ip, u32 *insns)
+{
+ s64 rvoff;
+ int i, ret;
+ struct rv_jit_context ctx;
+
+ ctx.ninsns = 0;
+ ctx.insns = (u16 *)insns;
+
+ if (!target) {
+ for (i = 0; i < 4; i++)
+ emit(rv_nop(), &ctx);
+ return 0;
+ }
+
+ rvoff = (s64)(target - (ip + 4));
+ emit(rv_sd(RV_REG_SP, -8, RV_REG_RA), &ctx);
+ ret = emit_jump_and_link(RV_REG_RA, rvoff, false, &ctx);
+ if (ret)
+ return ret;
+ emit(rv_ld(RV_REG_RA, -8, RV_REG_SP), &ctx);
+
+ return 0;
+}
+
+static int gen_jump_or_nops(void *target, void *ip, u32 *insns)
+{
+ s64 rvoff;
+ struct rv_jit_context ctx;
+
+ ctx.ninsns = 0;
+ ctx.insns = (u16 *)insns;
+
+ if (!target) {
+ emit(rv_nop(), &ctx);
+ emit(rv_nop(), &ctx);
+ return 0;
+ }
+
+ rvoff = (s64)(target - ip);
+ return emit_jump_and_link(RV_REG_ZERO, rvoff, false, &ctx);
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
+ void *old_addr, void *new_addr)
+{
+ u32 old_insns[4], new_insns[4];
+ bool is_call = poke_type == BPF_MOD_CALL;
+ int (*gen_insns)(void *target, void *ip, u32 *insns);
+ int ninsns = is_call ? 4 : 2;
+ int ret;
+
+ if (!is_bpf_text_address((unsigned long)ip))
+ return -ENOTSUPP;
+
+ gen_insns = is_call ? gen_call_or_nops : gen_jump_or_nops;
+
+ ret = gen_insns(old_addr, ip, old_insns);
+ if (ret)
+ return ret;
+
+ if (memcmp(ip, old_insns, ninsns * 4))
+ return -EFAULT;
+
+ ret = gen_insns(new_addr, ip, new_insns);
+ if (ret)
+ return ret;
+
+ cpus_read_lock();
+ mutex_lock(&text_mutex);
+ if (memcmp(ip, new_insns, ninsns * 4))
+ ret = patch_text(ip, new_insns, ninsns);
+ mutex_unlock(&text_mutex);
+ cpus_read_unlock();
+
+ return ret;
+}
+
+static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < nregs; i++) {
+ emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
+ args_off -= 8;
+ }
+}
+
+static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < nregs; i++) {
+ emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
+ args_off -= 8;
+ }
+}
+
+static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
+ int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
+{
+ int ret, branch_off;
+ struct bpf_prog *p = l->link.prog;
+ int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+
+ if (l->cookie) {
+ emit_imm(RV_REG_T1, l->cookie, ctx);
+ emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx);
+ } else {
+ emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx);
+ }
+
+ /* arg1: prog */
+ emit_imm(RV_REG_A0, (const s64)p, ctx);
+ /* arg2: &run_ctx */
+ emit_addi(RV_REG_A1, RV_REG_FP, -run_ctx_off, ctx);
+ ret = emit_call((const u64)bpf_trampoline_enter(p), true, ctx);
+ if (ret)
+ return ret;
+
+ /* if (__bpf_prog_enter(prog) == 0)
+ * goto skip_exec_of_prog;
+ */
+ branch_off = ctx->ninsns;
+ /* nop reserved for conditional jump */
+ emit(rv_nop(), ctx);
+
+ /* store prog start time */
+ emit_mv(RV_REG_S1, RV_REG_A0, ctx);
+
+ /* arg1: &args_off */
+ emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
+ if (!p->jited)
+ /* arg2: progs[i]->insnsi for interpreter */
+ emit_imm(RV_REG_A1, (const s64)p->insnsi, ctx);
+ ret = emit_call((const u64)p->bpf_func, true, ctx);
+ if (ret)
+ return ret;
+
+ if (save_ret)
+ emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
+
+ /* update branch with beqz */
+ if (ctx->insns) {
+ int offset = ninsns_rvoff(ctx->ninsns - branch_off);
+ u32 insn = rv_beq(RV_REG_A0, RV_REG_ZERO, offset >> 1);
+ *(u32 *)(ctx->insns + branch_off) = insn;
+ }
+
+ /* arg1: prog */
+ emit_imm(RV_REG_A0, (const s64)p, ctx);
+ /* arg2: prog start time */
+ emit_mv(RV_REG_A1, RV_REG_S1, ctx);
+ /* arg3: &run_ctx */
+ emit_addi(RV_REG_A2, RV_REG_FP, -run_ctx_off, ctx);
+ ret = emit_call((const u64)bpf_trampoline_exit(p), true, ctx);
+
+ return ret;
+}
+
+static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ const struct btf_func_model *m,
+ struct bpf_tramp_links *tlinks,
+ void *func_addr, u32 flags,
+ struct rv_jit_context *ctx)
+{
+ int i, ret, offset;
+ int *branches_off = NULL;
+ int stack_size = 0, nregs = m->nr_args;
+ int retaddr_off, fp_off, retval_off, args_off;
+ int nregs_off, ip_off, run_ctx_off, sreg_off;
+ struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ void *orig_call = func_addr;
+ bool save_ret;
+ u32 insn;
+
+ /* Generated trampoline stack layout:
+ *
+ * FP - 8 [ RA of parent func ] return address of parent
+ * function
+ * FP - retaddr_off [ RA of traced func ] return address of traced
+ * function
+ * FP - fp_off [ FP of parent func ]
+ *
+ * FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
+ * BPF_TRAMP_F_RET_FENTRY_RET
+ * [ argN ]
+ * [ ... ]
+ * FP - args_off [ arg1 ]
+ *
+ * FP - nregs_off [ regs count ]
+ *
+ * FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
+ *
+ * FP - run_ctx_off [ bpf_tramp_run_ctx ]
+ *
+ * FP - sreg_off [ callee saved reg ]
+ *
+ * [ pads ] pads for 16 bytes alignment
+ */
+
+ if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
+ return -ENOTSUPP;
+
+ /* extra registers for struct arguments */
+ for (i = 0; i < m->nr_args; i++)
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ nregs += round_up(m->arg_size[i], 8) / 8 - 1;
+
+ /* 8 arguments passed by registers */
+ if (nregs > 8)
+ return -ENOTSUPP;
+
+ /* room for parent function return address */
+ stack_size += 8;
+
+ stack_size += 8;
+ retaddr_off = stack_size;
+
+ stack_size += 8;
+ fp_off = stack_size;
+
+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ if (save_ret) {
+ stack_size += 8;
+ retval_off = stack_size;
+ }
+
+ stack_size += nregs * 8;
+ args_off = stack_size;
+
+ stack_size += 8;
+ nregs_off = stack_size;
+
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ stack_size += 8;
+ ip_off = stack_size;
+ }
+
+ stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
+ run_ctx_off = stack_size;
+
+ stack_size += 8;
+ sreg_off = stack_size;
+
+ stack_size = round_up(stack_size, 16);
+
+ emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx);
+
+ emit_sd(RV_REG_SP, stack_size - retaddr_off, RV_REG_RA, ctx);
+ emit_sd(RV_REG_SP, stack_size - fp_off, RV_REG_FP, ctx);
+
+ emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
+
+ /* callee saved register S1 to pass start time */
+ emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx);
+
+ /* store ip address of the traced function */
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ emit_imm(RV_REG_T1, (const s64)func_addr, ctx);
+ emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
+ }
+
+ emit_li(RV_REG_T1, nregs, ctx);
+ emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
+
+ store_args(nregs, args_off, ctx);
+
+ /* skip to actual body of traced function */
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ orig_call += 16;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ emit_imm(RV_REG_A0, (const s64)im, ctx);
+ ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < fentry->nr_links; i++) {
+ ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off,
+ flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (fmod_ret->nr_links) {
+ branches_off = kcalloc(fmod_ret->nr_links, sizeof(int), GFP_KERNEL);
+ if (!branches_off)
+ return -ENOMEM;
+
+ /* cleanup to avoid garbage return value confusion */
+ emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx);
+ for (i = 0; i < fmod_ret->nr_links; i++) {
+ ret = invoke_bpf_prog(fmod_ret->links[i], args_off, retval_off,
+ run_ctx_off, true, ctx);
+ if (ret)
+ goto out;
+ emit_ld(RV_REG_T1, -retval_off, RV_REG_FP, ctx);
+ branches_off[i] = ctx->ninsns;
+ /* nop reserved for conditional jump */
+ emit(rv_nop(), ctx);
+ }
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ restore_args(nregs, args_off, ctx);
+ ret = emit_call((const u64)orig_call, true, ctx);
+ if (ret)
+ goto out;
+ emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+ im->ip_after_call = ctx->insns + ctx->ninsns;
+ /* 2 nops reserved for auipc+jalr pair */
+ emit(rv_nop(), ctx);
+ emit(rv_nop(), ctx);
+ }
+
+ /* update branches saved in invoke_bpf_mod_ret with bnez */
+ for (i = 0; ctx->insns && i < fmod_ret->nr_links; i++) {
+ offset = ninsns_rvoff(ctx->ninsns - branches_off[i]);
+ insn = rv_bne(RV_REG_T1, RV_REG_ZERO, offset >> 1);
+ *(u32 *)(ctx->insns + branches_off[i]) = insn;
+ }
+
+ for (i = 0; i < fexit->nr_links; i++) {
+ ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off,
+ run_ctx_off, false, ctx);
+ if (ret)
+ goto out;
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ im->ip_epilogue = ctx->insns + ctx->ninsns;
+ emit_imm(RV_REG_A0, (const s64)im, ctx);
+ ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
+ if (ret)
+ goto out;
+ }
+
+ if (flags & BPF_TRAMP_F_RESTORE_REGS)
+ restore_args(nregs, args_off, ctx);
+
+ if (save_ret)
+ emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+
+ emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
+
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* return address of parent function */
+ emit_ld(RV_REG_RA, stack_size - 8, RV_REG_SP, ctx);
+ else
+ /* return address of traced function */
+ emit_ld(RV_REG_RA, stack_size - retaddr_off, RV_REG_SP, ctx);
+
+ emit_ld(RV_REG_FP, stack_size - fp_off, RV_REG_SP, ctx);
+ emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx);
+
+ emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx);
+
+ ret = ctx->ninsns;
+out:
+ kfree(branches_off);
+ return ret;
+}
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+ void *image_end, const struct btf_func_model *m,
+ u32 flags, struct bpf_tramp_links *tlinks,
+ void *func_addr)
+{
+ int ret;
+ struct rv_jit_context ctx;
+
+ ctx.ninsns = 0;
+ ctx.insns = NULL;
+ ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
+ if (ret < 0)
+ return ret;
+
+ if (ninsns_rvoff(ret) > (long)image_end - (long)image)
+ return -EFBIG;
+
+ ctx.ninsns = 0;
+ ctx.insns = image;
+ ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
+ if (ret < 0)
+ return ret;
+
+ bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns);
+
+ return ninsns_rvoff(ret);
+}
+
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
bool extra_pass)
{
@@ -913,7 +1303,7 @@ out_be:
/* JUMP off */
case BPF_JMP | BPF_JA:
rvoff = rv_offset(i, off, ctx);
- ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
@@ -1032,17 +1422,20 @@ out_be:
/* function call */
case BPF_JMP | BPF_CALL:
{
- bool fixed;
+ bool fixed_addr;
u64 addr;
mark_call(ctx);
- ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
- &fixed);
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+ &addr, &fixed_addr);
if (ret < 0)
return ret;
- ret = emit_call(fixed, addr, ctx);
+
+ ret = emit_call(addr, fixed_addr, ctx);
if (ret)
return ret;
+
+ emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
break;
}
/* tail call */
@@ -1057,7 +1450,7 @@ out_be:
break;
rvoff = epilogue_offset(ctx);
- ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
@@ -1270,7 +1663,7 @@ out_be:
void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
- int stack_adjust = 0, store_offset, bpf_stack_adjust;
+ int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
if (bpf_stack_adjust)
@@ -1297,6 +1690,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
store_offset = stack_adjust - 8;
+ /* reserve 4 nop insns */
+ for (i = 0; i < 4; i++)
+ emit(rv_nop(), ctx);
+
/* First instruction is always setting the tail-call-counter
* (TCC) register. This instruction is skipped for tail calls.
* Force using a 4-byte (non-compressed) instruction.
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index af35052d06ed..d0846ba818ee 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -30,6 +30,7 @@
#include <asm/facility.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
+#include <asm/text-patching.h>
#include "bpf_jit.h"
struct bpf_jit {
@@ -50,12 +51,13 @@ struct bpf_jit {
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int excnt; /* Number of exception table entries */
+ int prologue_plt_ret; /* Return address for prologue hotpatch PLT */
+ int prologue_plt; /* Start of prologue hotpatch PLT */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
#define SEEN_LITERAL BIT(1) /* code uses literals */
#define SEEN_FUNC BIT(2) /* calls C functions */
-#define SEEN_TAIL_CALL BIT(3) /* code uses tail calls */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM)
/*
@@ -68,6 +70,10 @@ struct bpf_jit {
#define REG_0 REG_W0 /* Register 0 */
#define REG_1 REG_W1 /* Register 1 */
#define REG_2 BPF_REG_1 /* Register 2 */
+#define REG_3 BPF_REG_2 /* Register 3 */
+#define REG_4 BPF_REG_3 /* Register 4 */
+#define REG_7 BPF_REG_6 /* Register 7 */
+#define REG_8 BPF_REG_7 /* Register 8 */
#define REG_14 BPF_REG_0 /* Register 14 */
/*
@@ -507,20 +513,58 @@ static void bpf_skip(struct bpf_jit *jit, int size)
}
/*
+ * PLT for hotpatchable calls. The calling convention is the same as for the
+ * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
+ */
+extern const char bpf_plt[];
+extern const char bpf_plt_ret[];
+extern const char bpf_plt_target[];
+extern const char bpf_plt_end[];
+#define BPF_PLT_SIZE 32
+asm(
+ ".pushsection .rodata\n"
+ " .align 8\n"
+ "bpf_plt:\n"
+ " lgrl %r0,bpf_plt_ret\n"
+ " lgrl %r1,bpf_plt_target\n"
+ " br %r1\n"
+ " .align 8\n"
+ "bpf_plt_ret: .quad 0\n"
+ "bpf_plt_target: .quad 0\n"
+ "bpf_plt_end:\n"
+ " .popsection\n"
+);
+
+static void bpf_jit_plt(void *plt, void *ret, void *target)
+{
+ memcpy(plt, bpf_plt, BPF_PLT_SIZE);
+ *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
+ *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target;
+}
+
+/*
* Emit function prologue
*
* Save registers and create stack frame if necessary.
- * See stack frame layout desription in "bpf_jit.h"!
+ * See stack frame layout description in "bpf_jit.h"!
*/
-static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
+static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ u32 stack_depth)
{
- if (jit->seen & SEEN_TAIL_CALL) {
+ /* No-op for hotpatching */
+ /* brcl 0,prologue_plt */
+ EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
+ jit->prologue_plt_ret = jit->prg;
+
+ if (fp->aux->func_idx == 0) {
+ /* Initialize the tail call counter in the main program. */
/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
} else {
/*
- * There are no tail calls. Insert nops in order to have
- * tail_call_start at a predictable offset.
+ * Skip the tail call counter initialization in subprograms.
+ * Insert nops in order to have tail_call_start at a
+ * predictable offset.
*/
bpf_skip(jit, 6);
}
@@ -558,6 +602,43 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
}
/*
+ * Emit an expoline for a jump that follows
+ */
+static void emit_expoline(struct bpf_jit *jit)
+{
+ /* exrl %r0,.+10 */
+ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+ /* j . */
+ EMIT4_PCREL(0xa7f40000, 0);
+}
+
+/*
+ * Emit __s390_indirect_jump_r1 thunk if necessary
+ */
+static void emit_r1_thunk(struct bpf_jit *jit)
+{
+ if (nospec_uses_trampoline()) {
+ jit->r1_thunk_ip = jit->prg;
+ emit_expoline(jit);
+ /* br %r1 */
+ _EMIT2(0x07f1);
+ }
+}
+
+/*
+ * Call r1 either directly or via __s390_indirect_jump_r1 thunk
+ */
+static void call_r1(struct bpf_jit *jit)
+{
+ if (nospec_uses_trampoline())
+ /* brasl %r14,__s390_indirect_jump_r1 */
+ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+ else
+ /* basr %r14,%r1 */
+ EMIT2(0x0d00, REG_14, REG_1);
+}
+
+/*
* Function epilogue
*/
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
@@ -570,25 +651,20 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
if (nospec_uses_trampoline()) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
- /* exrl %r0,.+10 */
- EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
- /* j . */
- EMIT4_PCREL(0xa7f40000, 0);
+ emit_expoline(jit);
}
/* br %r14 */
_EMIT2(0x07fe);
- if ((nospec_uses_trampoline()) &&
- (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
- jit->r1_thunk_ip = jit->prg;
- /* Generate __s390_indirect_jump_r1 thunk */
- /* exrl %r0,.+10 */
- EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
- /* j . */
- EMIT4_PCREL(0xa7f40000, 0);
- /* br %r1 */
- _EMIT2(0x07f1);
- }
+ if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
+ emit_r1_thunk(jit);
+
+ jit->prg = ALIGN(jit->prg, 8);
+ jit->prologue_plt = jit->prg;
+ if (jit->prg_buf)
+ bpf_jit_plt(jit->prg_buf + jit->prg,
+ jit->prg_buf + jit->prologue_plt_ret, NULL);
+ jit->prg += BPF_PLT_SIZE;
}
static int get_probe_mem_regno(const u8 *insn)
@@ -663,6 +739,34 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
}
/*
+ * Sign-extend the register if necessary
+ */
+static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
+{
+ if (!(flags & BTF_FMODEL_SIGNED_ARG))
+ return 0;
+
+ switch (size) {
+ case 1:
+ /* lgbr %r,%r */
+ EMIT4(0xb9060000, r, r);
+ return 0;
+ case 2:
+ /* lghr %r,%r */
+ EMIT4(0xb9070000, r, r);
+ return 0;
+ case 4:
+ /* lgfr %r,%r */
+ EMIT4(0xb9140000, r, r);
+ return 0;
+ case 8:
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/*
* Compile one eBPF instruction into s390x code
*
* NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
@@ -1297,9 +1401,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_JMP | BPF_CALL:
{
- u64 func;
+ const struct btf_func_model *m;
bool func_addr_fixed;
- int ret;
+ int j, ret;
+ u64 func;
ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
&func, &func_addr_fixed);
@@ -1308,15 +1413,38 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
REG_SET_SEEN(BPF_REG_5);
jit->seen |= SEEN_FUNC;
+ /*
+ * Copy the tail call counter to where the callee expects it.
+ *
+ * Note 1: The callee can increment the tail call counter, but
+ * we do not load it back, since the x86 JIT does not do this
+ * either.
+ *
+ * Note 2: We assume that the verifier does not let us call the
+ * main program, which clears the tail call counter on entry.
+ */
+ /* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
+ _EMIT6(0xd203f000 | STK_OFF_TCCNT,
+ 0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
+
+ /* Sign-extend the kfunc arguments. */
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+ m = bpf_jit_find_kfunc_model(fp, insn);
+ if (!m)
+ return -1;
+
+ for (j = 0; j < m->nr_args; j++) {
+ if (sign_extend(jit, BPF_REG_1 + j,
+ m->arg_size[j],
+ m->arg_flags[j]))
+ return -1;
+ }
+ }
+
/* lgrl %w1,func */
EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
- if (nospec_uses_trampoline()) {
- /* brasl %r14,__s390_indirect_jump_r1 */
- EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
- } else {
- /* basr %r14,%w1 */
- EMIT2(0x0d00, REG_14, REG_W1);
- }
+ /* %r1() */
+ call_r1(jit);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
break;
@@ -1329,10 +1457,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
* B1: pointer to ctx
* B2: pointer to bpf_array
* B3: index in bpf_array
- */
- jit->seen |= SEEN_TAIL_CALL;
-
- /*
+ *
* if (index >= array->map.max_entries)
* goto out;
*/
@@ -1393,8 +1518,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* lg %r1,bpf_func(%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
offsetof(struct bpf_prog, bpf_func));
- /* bc 0xf,tail_call_start(%r1) */
- _EMIT4(0x47f01000 + jit->tail_call_start);
+ if (nospec_uses_trampoline()) {
+ jit->seen |= SEEN_FUNC;
+ /* aghi %r1,tail_call_start */
+ EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
+ /* brcl 0xf,__s390_indirect_jump_r1 */
+ EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
+ } else {
+ /* bc 0xf,tail_call_start(%r1) */
+ _EMIT4(0x47f01000 + jit->tail_call_start);
+ }
/* out: */
if (jit->prg_buf) {
*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
@@ -1688,7 +1821,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
jit->prg = 0;
jit->excnt = 0;
- bpf_jit_prologue(jit, stack_depth);
+ bpf_jit_prologue(jit, fp, stack_depth);
if (bpf_set_addr(jit, 0) < 0)
return -1;
for (i = 0; i < fp->len; i += insn_count) {
@@ -1768,6 +1901,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_jit jit;
int pass;
+ if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
+ return orig_fp;
+
if (!fp->jit_requested)
return orig_fp;
@@ -1859,3 +1995,508 @@ out:
tmp : orig_fp);
return fp;
}
+
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *old_addr, void *new_addr)
+{
+ struct {
+ u16 opc;
+ s32 disp;
+ } __packed insn;
+ char expected_plt[BPF_PLT_SIZE];
+ char current_plt[BPF_PLT_SIZE];
+ char *plt;
+ int err;
+
+ /* Verify the branch to be patched. */
+ err = copy_from_kernel_nofault(&insn, ip, sizeof(insn));
+ if (err < 0)
+ return err;
+ if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
+ return -EINVAL;
+
+ if (t == BPF_MOD_JUMP &&
+ insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
+ /*
+ * The branch already points to the destination,
+ * there is no PLT.
+ */
+ } else {
+ /* Verify the PLT. */
+ plt = (char *)ip + (insn.disp << 1);
+ err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+ if (err < 0)
+ return err;
+ bpf_jit_plt(expected_plt, (char *)ip + 6, old_addr);
+ if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+ return -EINVAL;
+ /* Adjust the call address. */
+ s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
+ &new_addr, sizeof(void *));
+ }
+
+ /* Adjust the mask of the branch. */
+ insn.opc = 0xc004 | (new_addr ? 0xf0 : 0);
+ s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1);
+
+ /* Make the new code visible to the other CPUs. */
+ text_poke_sync_lock();
+
+ return 0;
+}
+
+struct bpf_tramp_jit {
+ struct bpf_jit common;
+ int orig_stack_args_off;/* Offset of arguments placed on stack by the
+ * func_addr's original caller
+ */
+ int stack_size; /* Trampoline stack size */
+ int stack_args_off; /* Offset of stack arguments for calling
+ * func_addr, has to be at the top
+ */
+ int reg_args_off; /* Offset of register arguments for calling
+ * func_addr
+ */
+ int ip_off; /* For bpf_get_func_ip(), has to be at
+ * (ctx - 16)
+ */
+ int arg_cnt_off; /* For bpf_get_func_arg_cnt(), has to be at
+ * (ctx - 8)
+ */
+ int bpf_args_off; /* Offset of BPF_PROG context, which consists
+ * of BPF arguments followed by return value
+ */
+ int retval_off; /* Offset of return value (see above) */
+ int r7_r8_off; /* Offset of saved %r7 and %r8, which are used
+ * for __bpf_prog_enter() return value and
+ * func_addr respectively
+ */
+ int r14_off; /* Offset of saved %r14 */
+ int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */
+ int do_fexit; /* do_fexit: label */
+};
+
+static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val)
+{
+ /* llihf %dst_reg,val_hi */
+ EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32));
+ /* oilf %rdst_reg,val_lo */
+ EMIT6_IMM(0xc00d0000, dst_reg, val);
+}
+
+static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
+ const struct btf_func_model *m,
+ struct bpf_tramp_link *tlink, bool save_ret)
+{
+ struct bpf_jit *jit = &tjit->common;
+ int cookie_off = tjit->run_ctx_off +
+ offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+ struct bpf_prog *p = tlink->link.prog;
+ int patch;
+
+ /*
+ * run_ctx.cookie = tlink->cookie;
+ */
+
+ /* %r0 = tlink->cookie */
+ load_imm64(jit, REG_W0, tlink->cookie);
+ /* stg %r0,cookie_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off);
+
+ /*
+ * if ((start = __bpf_prog_enter(p, &run_ctx)) == 0)
+ * goto skip;
+ */
+
+ /* %r1 = __bpf_prog_enter */
+ load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p));
+ /* %r2 = p */
+ load_imm64(jit, REG_2, (u64)p);
+ /* la %r3,run_ctx_off(%r15) */
+ EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
+ /* %r1() */
+ call_r1(jit);
+ /* ltgr %r7,%r2 */
+ EMIT4(0xb9020000, REG_7, REG_2);
+ /* brcl 8,skip */
+ patch = jit->prg;
+ EMIT6_PCREL_RILC(0xc0040000, 8, 0);
+
+ /*
+ * retval = bpf_func(args, p->insnsi);
+ */
+
+ /* %r1 = p->bpf_func */
+ load_imm64(jit, REG_1, (u64)p->bpf_func);
+ /* la %r2,bpf_args_off(%r15) */
+ EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
+ /* %r3 = p->insnsi */
+ if (!p->jited)
+ load_imm64(jit, REG_3, (u64)p->insnsi);
+ /* %r1() */
+ call_r1(jit);
+ /* stg %r2,retval_off(%r15) */
+ if (save_ret) {
+ if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
+ return -1;
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
+ tjit->retval_off);
+ }
+
+ /* skip: */
+ if (jit->prg_buf)
+ *(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1;
+
+ /*
+ * __bpf_prog_exit(p, start, &run_ctx);
+ */
+
+ /* %r1 = __bpf_prog_exit */
+ load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p));
+ /* %r2 = p */
+ load_imm64(jit, REG_2, (u64)p);
+ /* lgr %r3,%r7 */
+ EMIT4(0xb9040000, REG_3, REG_7);
+ /* la %r4,run_ctx_off(%r15) */
+ EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
+ /* %r1() */
+ call_r1(jit);
+
+ return 0;
+}
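
The emitted sequence follows the generic trampoline contract: call the enter helper, skip the program if it returns 0, and always call the exit helper with the returned start value. In simplified pseudo-C (helper names and signatures are condensed for illustration; they are not the exact kernel prototypes):

/* Pseudo-C for the emitted instruction sequence; __bpf_prog_enter()
 * stands in for whatever bpf_trampoline_enter(p) resolves to.
 */
static void invoke_one_prog(struct bpf_prog *p, void *ctx, u64 *retval,
                            struct bpf_tramp_run_ctx *run_ctx, bool save_ret)
{
        u64 start;

        start = __bpf_prog_enter(p, run_ctx);   /* 0 means "do not run" */
        if (start) {
                u64 ret = p->bpf_func(ctx, p->insnsi);

                if (save_ret)
                        *retval = ret;          /* stg %r2,retval_off(%r15) */
        }
        __bpf_prog_exit(p, start, run_ctx);     /* always executed */
}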
+
+static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size)
+{
+ int stack_offset = tjit->stack_size;
+
+ tjit->stack_size += size;
+ return stack_offset;
+}
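
alloc_stack() is a bump allocator: each call hands back the current frame size as an offset and grows the frame, so the trampoline's stack layout is simply the order of the calls made later in __arch_prepare_bpf_trampoline(). A user-space sketch, assuming the s390x value of 160 for STACK_FRAME_OVERHEAD and illustrative field sizes:

#include <stddef.h>
#include <stdio.h>

struct layout { size_t stack_size; };

static size_t alloc_stack(struct layout *l, size_t size)
{
        size_t off = l->stack_size;

        l->stack_size += size;
        return off;
}

int main(void)
{
        struct layout l = { .stack_size = 160 }; /* STACK_FRAME_OVERHEAD */
        size_t reg_args_off = alloc_stack(&l, 5 * 8); /* up to 5 reg args */
        size_t ip_off = alloc_stack(&l, 8);
        size_t retval_off = alloc_stack(&l, 8);

        printf("reg_args=%zu ip=%zu retval=%zu total=%zu\n",
               reg_args_off, ip_off, retval_off, l.stack_size);
        return 0;
}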
+
+/* ABI uses %r2 - %r6 for parameter passing. */
+#define MAX_NR_REG_ARGS 5
+
+/* The "L" field of the "mvc" instruction is 8 bits. */
+#define MAX_MVC_SIZE 256
+#define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64))
+
+/* -mfentry generates a 6-byte nop on s390x. */
+#define S390X_PATCH_SIZE 6
+
+static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ struct bpf_tramp_jit *tjit,
+ const struct btf_func_model *m,
+ u32 flags,
+ struct bpf_tramp_links *tlinks,
+ void *func_addr)
+{
+ struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ int nr_bpf_args, nr_reg_args, nr_stack_args;
+ struct bpf_jit *jit = &tjit->common;
+ int arg, bpf_arg_off;
+ int i, j;
+
+ /* Support as many stack arguments as "mvc" instruction can handle. */
+ nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS);
+ nr_stack_args = m->nr_args - nr_reg_args;
+ if (nr_stack_args > MAX_NR_STACK_ARGS)
+ return -ENOTSUPP;
+
+ /* Return to %r14, since func_addr and %r0 are not available. */
+ if (!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK))
+ flags |= BPF_TRAMP_F_SKIP_FRAME;
+
+ /*
+ * Compute how many arguments we need to pass to BPF programs.
+ * BPF ABI mirrors that of x86_64: arguments that are 16 bytes or
+ * smaller are packed into 1 or 2 registers; larger arguments are
+ * passed via pointers.
+ * In s390x ABI, arguments that are 8 bytes or smaller are packed into
+ * a register; larger arguments are passed via pointers.
+ * We need to deal with this difference.
+ */
+ nr_bpf_args = 0;
+ for (i = 0; i < m->nr_args; i++) {
+ if (m->arg_size[i] <= 8)
+ nr_bpf_args += 1;
+ else if (m->arg_size[i] <= 16)
+ nr_bpf_args += 2;
+ else
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Calculate the stack layout.
+ */
+
+ /* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
+ tjit->stack_size = STACK_FRAME_OVERHEAD;
+ tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
+ tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
+ tjit->ip_off = alloc_stack(tjit, sizeof(u64));
+ tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
+ tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
+ tjit->retval_off = alloc_stack(tjit, sizeof(u64));
+ tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
+ tjit->r14_off = alloc_stack(tjit, sizeof(u64));
+ tjit->run_ctx_off = alloc_stack(tjit,
+ sizeof(struct bpf_tramp_run_ctx));
+ /* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
+ tjit->stack_size -= STACK_FRAME_OVERHEAD;
+ tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
+
+ /* aghi %r15,-stack_size */
+ EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
+ /* stmg %r2,%rN,reg_args_off(%r15) */
+ if (nr_reg_args)
+ EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
+ REG_2 + (nr_reg_args - 1), REG_15,
+ tjit->reg_args_off);
+ for (i = 0, j = 0; i < m->nr_args; i++) {
+ if (i < MAX_NR_REG_ARGS)
+ arg = REG_2 + i;
+ else
+ arg = tjit->orig_stack_args_off +
+ (i - MAX_NR_REG_ARGS) * sizeof(u64);
+ bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64);
+ if (m->arg_size[i] <= 8) {
+ if (i < MAX_NR_REG_ARGS)
+ /* stg %arg,bpf_arg_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, arg,
+ REG_0, REG_15, bpf_arg_off);
+ else
+ /* mvc bpf_arg_off(8,%r15),arg(%r15) */
+ _EMIT6(0xd207f000 | bpf_arg_off,
+ 0xf000 | arg);
+ j += 1;
+ } else {
+ if (i < MAX_NR_REG_ARGS) {
+ /* mvc bpf_arg_off(16,%r15),0(%arg) */
+ _EMIT6(0xd20ff000 | bpf_arg_off,
+ reg2hex[arg] << 12);
+ } else {
+ /* lg %r1,arg(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0,
+ REG_15, arg);
+ /* mvc bpf_arg_off(16,%r15),0(%r1) */
+ _EMIT6(0xd20ff000 | bpf_arg_off, 0x1000);
+ }
+ j += 2;
+ }
+ }
+ /* stmg %r7,%r8,r7_r8_off(%r15) */
+ EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15,
+ tjit->r7_r8_off);
+ /* stg %r14,r14_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off);
+
+ if (flags & BPF_TRAMP_F_ORIG_STACK) {
+ /*
+ * The ftrace trampoline puts the return address (which is the
+ * address of the original function + S390X_PATCH_SIZE) into
+ * %r0; see ftrace_shared_hotpatch_trampoline_br and
+ * ftrace_init_nop() for details.
+ */
+
+ /* lgr %r8,%r0 */
+ EMIT4(0xb9040000, REG_8, REG_0);
+ } else {
+ /* %r8 = func_addr + S390X_PATCH_SIZE */
+ load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE);
+ }
+
+ /*
+ * ip = func_addr;
+ * arg_cnt = m->nr_args;
+ */
+
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ /* %r0 = func_addr */
+ load_imm64(jit, REG_0, (u64)func_addr);
+ /* stg %r0,ip_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
+ tjit->ip_off);
+ }
+ /* lghi %r0,nr_bpf_args */
+ EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args);
+ /* stg %r0,arg_cnt_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
+ tjit->arg_cnt_off);
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ /*
+ * __bpf_tramp_enter(im);
+ */
+
+ /* %r1 = __bpf_tramp_enter */
+ load_imm64(jit, REG_1, (u64)__bpf_tramp_enter);
+ /* %r2 = im */
+ load_imm64(jit, REG_2, (u64)im);
+ /* %r1() */
+ call_r1(jit);
+ }
+
+ for (i = 0; i < fentry->nr_links; i++)
+ if (invoke_bpf_prog(tjit, m, fentry->links[i],
+ flags & BPF_TRAMP_F_RET_FENTRY_RET))
+ return -EINVAL;
+
+ if (fmod_ret->nr_links) {
+ /*
+ * retval = 0;
+ */
+
+ /* xc retval_off(8,%r15),retval_off(%r15) */
+ _EMIT6(0xd707f000 | tjit->retval_off,
+ 0xf000 | tjit->retval_off);
+
+ for (i = 0; i < fmod_ret->nr_links; i++) {
+ if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true))
+ return -EINVAL;
+
+ /*
+ * if (retval)
+ * goto do_fexit;
+ */
+
+ /* ltg %r0,retval_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15,
+ tjit->retval_off);
+ /* brcl 7,do_fexit */
+ EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit);
+ }
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ /*
+ * retval = func_addr(args);
+ */
+
+ /* lmg %r2,%rN,reg_args_off(%r15) */
+ if (nr_reg_args)
+ EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
+ REG_2 + (nr_reg_args - 1), REG_15,
+ tjit->reg_args_off);
+ /* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */
+ if (nr_stack_args)
+ _EMIT6(0xd200f000 |
+ (nr_stack_args * sizeof(u64) - 1) << 16 |
+ tjit->stack_args_off,
+ 0xf000 | tjit->orig_stack_args_off);
+ /* lgr %r1,%r8 */
+ EMIT4(0xb9040000, REG_1, REG_8);
+ /* %r1() */
+ call_r1(jit);
+ /* stg %r2,retval_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
+ tjit->retval_off);
+
+ im->ip_after_call = jit->prg_buf + jit->prg;
+
+ /*
+ * The following nop will be patched by bpf_tramp_image_put().
+ */
+
+ /* brcl 0,im->ip_epilogue */
+ EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue);
+ }
+
+ /* do_fexit: */
+ tjit->do_fexit = jit->prg;
+ for (i = 0; i < fexit->nr_links; i++)
+ if (invoke_bpf_prog(tjit, m, fexit->links[i], false))
+ return -EINVAL;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ im->ip_epilogue = jit->prg_buf + jit->prg;
+
+ /*
+ * __bpf_tramp_exit(im);
+ */
+
+ /* %r1 = __bpf_tramp_exit */
+ load_imm64(jit, REG_1, (u64)__bpf_tramp_exit);
+ /* %r2 = im */
+ load_imm64(jit, REG_2, (u64)im);
+ /* %r1() */
+ call_r1(jit);
+ }
+
+ /* lmg %r2,%rN,reg_args_off(%r15) */
+ if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args)
+ EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
+ REG_2 + (nr_reg_args - 1), REG_15,
+ tjit->reg_args_off);
+ /* lgr %r1,%r8 */
+ if (!(flags & BPF_TRAMP_F_SKIP_FRAME))
+ EMIT4(0xb9040000, REG_1, REG_8);
+ /* lmg %r7,%r8,r7_r8_off(%r15) */
+ EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15,
+ tjit->r7_r8_off);
+ /* lg %r14,r14_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off);
+ /* lg %r2,retval_off(%r15) */
+ if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
+ tjit->retval_off);
+ /* aghi %r15,stack_size */
+ EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
+ /* Emit an expoline for the following indirect jump. */
+ if (nospec_uses_trampoline())
+ emit_expoline(jit);
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* br %r14 */
+ _EMIT2(0x07fe);
+ else
+ /* br %r1 */
+ _EMIT2(0x07f1);
+
+ emit_r1_thunk(jit);
+
+ return 0;
+}
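
The nr_bpf_args loop near the top of this function is worth a worked example: each argument of up to 8 bytes takes one 8-byte ctx slot, a 9..16-byte struct passed by value takes two, and anything larger is rejected. A minimal user-space model of that computation:

#include <stdio.h>

/* Illustrative model of the nr_bpf_args computation above. */
static int count_bpf_args(const int *arg_size, int nr_args)
{
        int i, n = 0;

        for (i = 0; i < nr_args; i++) {
                if (arg_size[i] <= 8)
                        n += 1;
                else if (arg_size[i] <= 16)
                        n += 2;
                else
                        return -1;      /* -ENOTSUPP in the kernel */
        }
        return n;
}

int main(void)
{
        int sizes[] = { 8, 4, 16 };     /* two scalars, one 16-byte struct */

        printf("%d\n", count_bpf_args(sizes, 3));       /* prints 4 */
        return 0;
}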
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+ void *image_end, const struct btf_func_model *m,
+ u32 flags, struct bpf_tramp_links *tlinks,
+ void *func_addr)
+{
+ struct bpf_tramp_jit tjit;
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ /* Compute offsets, check whether the code fits. */
+ memset(&tjit, 0, sizeof(tjit));
+ } else {
+ /* Generate the code. */
+ tjit.common.prg = 0;
+ tjit.common.prg_buf = image;
+ }
+ ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
+ tlinks, func_addr);
+ if (ret < 0)
+ return ret;
+ if (tjit.common.prg > (char *)image_end - (char *)image)
+ /*
+ * Use the same error code as for exceeding
+ * BPF_MAX_TRAMP_LINKS.
+ */
+ return -E2BIG;
+ }
+
+ return ret;
+}
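
The wrapper runs the generator twice: first with no output buffer to measure the code, then into the real image. Any emitter that checks for the buffer before writing, as this trampoline code does via prg_buf, supports the pattern. A stripped-down sketch of the idea:

#include <stdio.h>
#include <string.h>

struct jit { unsigned char *buf; int prg; };

/* Dry-run-capable emit: always advances the size counter, writes only
 * when a buffer is attached (the second pass).
 */
static void emit(struct jit *j, const void *code, int len)
{
        if (j->buf)
                memcpy(j->buf + j->prg, code, len);
        j->prg += len;
}

int main(void)
{
        unsigned char image[16];
        struct jit j = { 0 };

        emit(&j, "\x07\xfe", 2);        /* first pass: sizing only */
        printf("needs %d bytes\n", j.prg);

        j.buf = image;                  /* second pass: really emit */
        j.prg = 0;
        emit(&j, "\x07\xfe", 2);
        return 0;
}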
+
+bool bpf_jit_supports_subprog_tailcalls(void)
+{
+ return true;
+}
diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h
index a6501b856f3e..2b5fa75b4651 100644
--- a/arch/sh/include/asm/checksum_32.h
+++ b/arch/sh/include/asm/checksum_32.h
@@ -7,6 +7,7 @@
*/
#include <linux/in6.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index 407beebadaf4..4d4a47a3a8ab 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -9,7 +9,6 @@
*/
#include <linux/compiler.h>
-#include <linux/uaccess.h>
#include <asm/byteorder.h>
/**
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index b808be77635e..1056bbf55b17 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1003,6 +1003,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
u8 b2 = 0, b3 = 0;
u8 *start_of_ldx;
s64 jmp_offset;
+ s16 insn_off;
u8 jmp_cond;
u8 *func;
int nops;
@@ -1369,57 +1370,52 @@ st: if (is_imm8(insn->off))
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ insn_off = insn->off;
+
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
- /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
- * add abs(insn->off) to the limit to make sure that negative
- * offset won't be an issue.
- * insn->off is s16, so it won't affect valid pointers.
+ /* Conservatively check that src_reg + insn->off is a kernel address:
+ * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
+ * src_reg is used as scratch for src_reg += insn->off and restored
+ * after emit_ldx if necessary
*/
- u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
- u8 *end_of_jmp1, *end_of_jmp2;
- /* Conservatively check that src_reg + insn->off is a kernel address:
- * 1. src_reg + insn->off >= limit
- * 2. src_reg + insn->off doesn't become small positive.
- * Cannot do src_reg + insn->off >= limit in one branch,
- * since it needs two spare registers, but JIT has only one.
+ u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
+ u8 *end_of_jmp;
+
+ /* At the end of these emitted checks, insn->off will have been added
+ * to src_reg, so there is no need to do a relative load with insn->off
+ */
+ insn_off = 0;
/* movabsq r11, limit */
EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
EMIT((u32)limit, 4);
EMIT(limit >> 32, 4);
+
+ if (insn->off) {
+ /* add src_reg, insn->off */
+ maybe_emit_1mod(&prog, src_reg, true);
+ EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
+ }
+
/* cmp src_reg, r11 */
maybe_emit_mod(&prog, src_reg, AUX_REG, true);
EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
- /* if unsigned '<' goto end_of_jmp2 */
- EMIT2(X86_JB, 0);
- end_of_jmp1 = prog;
-
- /* mov r11, src_reg */
- emit_mov_reg(&prog, true, AUX_REG, src_reg);
- /* add r11, insn->off */
- maybe_emit_1mod(&prog, AUX_REG, true);
- EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
- /* jmp if not carry to start_of_ldx
- * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
- * that has to be rejected.
- */
- EMIT2(0x73 /* JNC */, 0);
- end_of_jmp2 = prog;
+
+ /* if unsigned '>=', goto load */
+ EMIT2(X86_JAE, 0);
+ end_of_jmp = prog;
/* xor dst_reg, dst_reg */
emit_mov_imm32(&prog, false, dst_reg, 0);
/* jmp byte_after_ldx */
EMIT2(0xEB, 0);
- /* populate jmp_offset for JB above to jump to xor dst_reg */
- end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
- /* populate jmp_offset for JNC above to jump to start_of_ldx */
+ /* populate jmp_offset for JAE above to jump to start_of_ldx */
start_of_ldx = prog;
- end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
+ end_of_jmp[-1] = start_of_ldx - end_of_jmp;
}
- emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+ emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
struct exception_table_entry *ex;
u8 *_insn = image + proglen + (start_of_ldx - temp);
@@ -1428,6 +1424,18 @@ st: if (is_imm8(insn->off))
/* populate jmp_offset for JMP above */
start_of_ldx[-1] = prog - start_of_ldx;
+ if (insn->off && src_reg != dst_reg) {
+ /* sub src_reg, insn->off
+ * Restore src_reg after "add src_reg, insn->off" in prev
+ * if statement. But if src_reg == dst_reg, emit_ldx
+ * above already clobbered src_reg, so no need to restore.
+ * If add src_reg, insn->off was unnecessary, no need to
+ * restore either.
+ */
+ maybe_emit_1mod(&prog, src_reg, true);
+ EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
+ }
+
if (!bpf_prog->aux->extable)
break;
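
The rewritten guard adds insn->off into src_reg up front, does a single unsigned compare against TASK_SIZE_MAX + PAGE_SIZE, and restores src_reg afterwards, removing one branch and one register move from the old two-branch scheme. A user-space model of the check (the boundary constant below is a stand-in for the kernel value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for TASK_SIZE_MAX + PAGE_SIZE on x86-64. */
#define KERNEL_BOUNDARY 0x7ffffffff000ULL

static bool probe_mem_ok(uint64_t src_reg, int16_t off)
{
        src_reg += off;                 /* add src_reg, insn->off */
        return src_reg >= KERNEL_BOUNDARY;      /* cmp r11; jae load */
}

int main(void)
{
        printf("%d\n", probe_mem_ok(0xffff888000000000ULL, -8)); /* 1 */
        printf("%d\n", probe_mem_ok(0x00007f0000000000ULL, 16)); /* 0 */
        return 0;
}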
@@ -1849,62 +1857,59 @@ emit_jmp:
return proglen;
}
-static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
+static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
int stack_size)
{
- int i, j, arg_size, nr_regs;
+ int i, j, arg_size;
+ bool next_same_struct = false;
+
/* Store function arguments to stack.
* For a function that accepts two pointers the sequence will be:
* mov QWORD PTR [rbp-0x10],rdi
* mov QWORD PTR [rbp-0x8],rsi
*/
- for (i = 0, j = 0; i < min(nr_args, 6); i++) {
- if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
- nr_regs = (m->arg_size[i] + 7) / 8;
+ for (i = 0, j = 0; i < min(nr_regs, 6); i++) {
+ /* The arg_size is at most 16 bytes, enforced by the verifier. */
+ arg_size = m->arg_size[j];
+ if (arg_size > 8) {
arg_size = 8;
- } else {
- nr_regs = 1;
- arg_size = m->arg_size[i];
+ next_same_struct = !next_same_struct;
}
- while (nr_regs) {
- emit_stx(prog, bytes_to_bpf_size(arg_size),
- BPF_REG_FP,
- j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
- -(stack_size - j * 8));
- nr_regs--;
- j++;
- }
+ emit_stx(prog, bytes_to_bpf_size(arg_size),
+ BPF_REG_FP,
+ i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
+ -(stack_size - i * 8));
+
+ j = next_same_struct ? j : j + 1;
}
}
-static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
+static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
int stack_size)
{
- int i, j, arg_size, nr_regs;
+ int i, j, arg_size;
+ bool next_same_struct = false;
/* Restore function arguments from stack.
* For a function that accepts two pointers the sequence will be:
* EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
* EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
*/
- for (i = 0, j = 0; i < min(nr_args, 6); i++) {
- if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
- nr_regs = (m->arg_size[i] + 7) / 8;
+ for (i = 0, j = 0; i < min(nr_regs, 6); i++) {
+ /* The arg_size is at most 16 bytes, enforced by the verifier. */
+ arg_size = m->arg_size[j];
+ if (arg_size > 8) {
arg_size = 8;
- } else {
- nr_regs = 1;
- arg_size = m->arg_size[i];
+ next_same_struct = !next_same_struct;
}
- while (nr_regs) {
- emit_ldx(prog, bytes_to_bpf_size(arg_size),
- j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
- BPF_REG_FP,
- -(stack_size - j * 8));
- nr_regs--;
- j++;
- }
+ emit_ldx(prog, bytes_to_bpf_size(arg_size),
+ i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
+ BPF_REG_FP,
+ -(stack_size - i * 8));
+
+ j = next_same_struct ? j : j + 1;
}
}
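
In the reworked save_regs()/restore_regs(), i walks the up-to-six argument registers while j walks the BTF arguments; a 9..16-byte struct argument owns two consecutive registers, and next_same_struct keeps j parked on that argument for the second register. A small sketch of the resulting mapping:

#include <stdio.h>

/* Illustrative walk of the register-to-argument mapping used by the
 * reworked save_regs()/restore_regs().
 */
int main(void)
{
        int arg_size[] = { 16, 8 };     /* one struct-by-value, one scalar */
        int nr_regs = 3, i, j;
        int next_same_struct = 0;

        for (i = 0, j = 0; i < nr_regs && i < 6; i++) {
                int size = arg_size[j];

                if (size > 8) {
                        size = 8;
                        next_same_struct = !next_same_struct;
                }
                printf("reg %d <- arg %d (%d bytes)\n", i, j, size);
                j = next_same_struct ? j : j + 1;
        }
        return 0;
}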
@@ -2130,8 +2135,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
struct bpf_tramp_links *tlinks,
void *func_addr)
{
- int ret, i, nr_args = m->nr_args, extra_nregs = 0;
- int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
+ int i, ret, nr_regs = m->nr_args, stack_size = 0;
+ int regs_off, nregs_off, ip_off, run_ctx_off;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
@@ -2140,17 +2145,14 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
u8 *prog;
bool save_ret;
- /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
- if (nr_args > 6)
- return -ENOTSUPP;
-
- for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
+ /* extra registers for struct arguments */
+ for (i = 0; i < m->nr_args; i++)
if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
- extra_nregs += (m->arg_size[i] + 7) / 8 - 1;
- }
- if (nr_args + extra_nregs > 6)
+ nr_regs += (m->arg_size[i] + 7) / 8 - 1;
+
+ /* x86-64 supports up to 6 register arguments. 7+ can be added in the future */
+ if (nr_regs > 6)
return -ENOTSUPP;
- stack_size += extra_nregs * 8;
/* Generated trampoline stack layout:
*
@@ -2164,7 +2166,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
* [ ... ]
* RBP - regs_off [ reg_arg1 ] program's ctx pointer
*
- * RBP - args_off [ arg regs count ] always
+ * RBP - nregs_off [ regs count ] always
*
* RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
*
@@ -2176,11 +2178,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
if (save_ret)
stack_size += 8;
+ stack_size += nr_regs * 8;
regs_off = stack_size;
- /* args count */
+ /* regs count */
stack_size += 8;
- args_off = stack_size;
+ nregs_off = stack_size;
if (flags & BPF_TRAMP_F_IP_ARG)
stack_size += 8; /* room for IP address argument */
@@ -2213,11 +2216,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
EMIT1(0x53); /* push rbx */
/* Store number of argument registers of the traced function:
- * mov rax, nr_args + extra_nregs
- * mov QWORD PTR [rbp - args_off], rax
+ * mov rax, nr_regs
+ * mov QWORD PTR [rbp - nregs_off], rax
*/
- emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs);
- emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
+ emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
if (flags & BPF_TRAMP_F_IP_ARG) {
/* Store IP address of the traced function:
@@ -2228,7 +2231,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
}
- save_regs(m, &prog, nr_args, regs_off);
+ save_regs(m, &prog, nr_regs, regs_off);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */
@@ -2258,7 +2261,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- restore_regs(m, &prog, nr_args, regs_off);
+ restore_regs(m, &prog, nr_regs, regs_off);
if (flags & BPF_TRAMP_F_ORIG_STACK) {
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
@@ -2299,7 +2302,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
}
if (flags & BPF_TRAMP_F_RESTORE_REGS)
- restore_regs(m, &prog, nr_args, regs_off);
+ restore_regs(m, &prog, nr_regs, regs_off);
/* This needs to be done regardless. If there were fmod_ret programs,
* the return value is only updated on the stack and still needs to be
diff --git a/crypto/asymmetric_keys/x509_loader.c b/crypto/asymmetric_keys/x509_loader.c
index 1bc169dee22e..a41741326998 100644
--- a/crypto/asymmetric_keys/x509_loader.c
+++ b/crypto/asymmetric_keys/x509_loader.c
@@ -55,3 +55,4 @@ dodgy_cert:
pr_err("Problem parsing in-kernel X.509 certificate list\n");
return 0;
}
+EXPORT_SYMBOL_GPL(x509_load_certificate_list);
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index f7293040a2b1..6aa6a2409478 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -10,31 +10,21 @@
/* Clause-45 mask includes the device type (5 bit) and actual register number (16 bit) */
#define REGNUM_C45_MASK GENMASK(20, 0)
-static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int *val)
+static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
{
+ struct mdio_device *mdio_dev = context;
int ret;
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
ret = mdiodev_read(mdio_dev, reg);
if (ret < 0)
return ret;
*val = ret & REGVAL_MASK;
- return 0;
-}
-
-static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val)
-{
- return mdiodev_write(mdio_dev, reg, val);
-}
-
-static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
-{
- struct mdio_device *mdio_dev = context;
-
- if (unlikely(reg & ~REGNUM_C22_MASK))
- return -ENXIO;
- return regmap_mdio_read(mdio_dev, reg, val);
+ return 0;
}
static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int val)
@@ -55,21 +45,36 @@ static const struct regmap_bus regmap_mdio_c22_bus = {
static int regmap_mdio_c45_read(void *context, unsigned int reg, unsigned int *val)
{
struct mdio_device *mdio_dev = context;
+ unsigned int devad;
+ int ret;
if (unlikely(reg & ~REGNUM_C45_MASK))
return -ENXIO;
- return regmap_mdio_read(mdio_dev, MII_ADDR_C45 | reg, val);
+ devad = reg >> REGMAP_MDIO_C45_DEVAD_SHIFT;
+ reg = reg & REGMAP_MDIO_C45_REGNUM_MASK;
+
+ ret = mdiodev_c45_read(mdio_dev, devad, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & REGVAL_MASK;
+
+ return 0;
}
static int regmap_mdio_c45_write(void *context, unsigned int reg, unsigned int val)
{
struct mdio_device *mdio_dev = context;
+ unsigned int devad;
if (unlikely(reg & ~REGNUM_C45_MASK))
return -ENXIO;
- return regmap_mdio_write(mdio_dev, MII_ADDR_C45 | reg, val);
+ devad = reg >> REGMAP_MDIO_C45_DEVAD_SHIFT;
+ reg = reg & REGMAP_MDIO_C45_REGNUM_MASK;
+
+ return mdiodev_c45_write(mdio_dev, devad, reg, val);
}
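
With MII_ADDR_C45 gone, the regmap register value itself now carries the full Clause 45 address: the 5-bit device type above REGMAP_MDIO_C45_DEVAD_SHIFT and the 16-bit register number below it, matching the 21-bit REGNUM_C45_MASK above. A sketch of the packing, assuming the shift and mask match the definitions this series adds:

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the new regmap definitions: a 5-bit device type
 * above a 16-bit register number.
 */
#define REGMAP_MDIO_C45_DEVAD_SHIFT     16
#define REGMAP_MDIO_C45_REGNUM_MASK     0xffff

static uint32_t c45_pack(unsigned int devad, unsigned int regnum)
{
        return (devad << REGMAP_MDIO_C45_DEVAD_SHIFT) |
               (regnum & REGMAP_MDIO_C45_REGNUM_MASK);
}

int main(void)
{
        uint32_t reg = c45_pack(7, 0x8000);     /* devad 7, vendor register */

        printf("devad=%u regnum=%#x\n",
               reg >> REGMAP_MDIO_C45_DEVAD_SHIFT,
               reg & REGMAP_MDIO_C45_REGNUM_MASK);
        return 0;
}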
static const struct regmap_bus regmap_mdio_c45_bus = {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d4e2cb9a4eb4..bede8b005594 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <linux/acpi.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -24,6 +25,9 @@
#define ECDSA_OFFSET 644
#define ECDSA_HEADER_LEN 320
+#define BTINTEL_PPAG_NAME "PPAG"
+#define BTINTEL_PPAG_PREFIX "\\_SB_.PCI0.XHCI.RHUB"
+
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
__le32 boot_addr;
@@ -1278,6 +1282,63 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
return 0;
}
+static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data,
+ void **ret)
+{
+ acpi_status status;
+ size_t len;
+ struct btintel_ppag *ppag = data;
+ union acpi_object *p, *elements;
+ struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct hci_dev *hdev = ppag->hdev;
+
+ status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+ if (ACPI_FAILURE(status)) {
+ bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+ return status;
+ }
+
+ if (strncmp(BTINTEL_PPAG_PREFIX, string.pointer,
+ strlen(BTINTEL_PPAG_PREFIX))) {
+ kfree(string.pointer);
+ return AE_OK;
+ }
+
+ len = strlen(string.pointer);
+ if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
+ kfree(string.pointer);
+ return AE_OK;
+ }
+ kfree(string.pointer);
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+ return status;
+ }
+
+ p = buffer.pointer;
+ ppag = (struct btintel_ppag *)data;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
+ kfree(buffer.pointer);
+ bt_dev_warn(hdev, "Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ return AE_ERROR;
+ }
+
+ elements = p->package.elements;
+
+ /* PPAG table is located at element[1] */
+ p = &elements[1];
+
+ ppag->domain = (u32)p->package.elements[0].integer.value;
+ ppag->mode = (u32)p->package.elements[1].integer.value;
+ kfree(buffer.pointer);
+ return AE_CTRL_TERMINATE;
+}
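
The callback accepts a namespace node only if its full ACPI path starts with the XHCI root-hub prefix and ends in "PPAG". A user-space sketch of that filter (the length guard is added here for standalone safety):

#include <stdio.h>
#include <string.h>

/* Illustrative version of the path filter in btintel_ppag_callback(). */
static int is_ppag_node(const char *path)
{
        static const char prefix[] = "\\_SB_.PCI0.XHCI.RHUB";
        size_t len = strlen(path);

        if (strncmp(path, prefix, strlen(prefix)))
                return 0;
        return len >= 4 && !strncmp(path + len - 4, "PPAG", 4);
}

int main(void)
{
        printf("%d\n", is_ppag_node("\\_SB_.PCI0.XHCI.RHUB.PPAG")); /* 1 */
        printf("%d\n", is_ppag_node("\\_SB_.PCI0.XHCI.RHUB.GPLD")); /* 0 */
        return 0;
}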
+
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
@@ -2251,6 +2312,58 @@ error:
return err;
}
+static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ acpi_status status;
+ struct btintel_ppag ppag;
+ struct sk_buff *skb;
+ struct btintel_loc_aware_reg ppag_cmd;
+
+ /* PPAG is not supported if CRF is HrP2, JfP2, JfP1 */
+ switch (ver->cnvr_top & 0xFFF) {
+ case 0x504: /* HrP2 */
+ case 0x202: /* JfP2 */
+ case 0x201: /* JfP1 */
+ return;
+ }
+
+ memset(&ppag, 0, sizeof(ppag));
+
+ ppag.hdev = hdev;
+ status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, NULL,
+ btintel_ppag_callback, &ppag, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ /* Do not log warning message if ACPI entry is not found */
+ if (status == AE_NOT_FOUND)
+ return;
+ bt_dev_warn(hdev, "PPAG: ACPI Failure: %s", acpi_format_exception(status));
+ return;
+ }
+
+ if (ppag.domain != 0x12) {
+ bt_dev_warn(hdev, "PPAG-BT Domain disabled");
+ return;
+ }
+
+ /* PPAG mode, BIT0 = 0 Disabled, BIT0 = 1 Enabled */
+ if (!(ppag.mode & BIT(0))) {
+ bt_dev_dbg(hdev, "PPAG disabled");
+ return;
+ }
+
+ ppag_cmd.mcc = cpu_to_le32(0);
+ ppag_cmd.sel = cpu_to_le32(0); /* 0 - Enable, 1 - Disable, 2 - Testing mode */
+ ppag_cmd.delta = cpu_to_le32(0);
+ skb = __hci_cmd_sync(hdev, 0xfe19, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb));
+ return;
+ }
+ kfree_skb(skb);
+}
+
static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2297,6 +2410,9 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
+ /* Set PPAG feature */
+ btintel_set_ppag(hdev, ver);
+
/* Read the Intel version information after loading the FW */
err = btintel_read_version_tlv(hdev, &new_ver);
if (err)
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index e0060e58573c..8e7da877efae 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -137,6 +137,19 @@ struct intel_offload_use_cases {
__u8 preset[8];
} __packed;
+/* structure to store the PPAG data read from ACPI table */
+struct btintel_ppag {
+ u32 domain;
+ u32 mode;
+ struct hci_dev *hdev;
+};
+
+struct btintel_loc_aware_reg {
+ __le32 mcc;
+ __le32 sel;
+ __le32 delta;
+} __packed;
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2ad4efdd9e40..18bc94718711 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -64,6 +64,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24)
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
+#define BTUSB_ACTIONS_SEMI BIT(27)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -492,6 +493,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_IGNORE },
+ /* Realtek 8821CE Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -566,6 +571,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -677,6 +685,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Actions Semiconductor ATS2851 based devices */
+ { USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
+
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -4098,6 +4109,11 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags);
}
+ if (id->driver_info & BTUSB_ACTIONS_SEMI) {
+ /* Support is advertised, but not implemented */
+ set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
+ }
+
if (!reset)
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index bbe9cf1cae27..3df8c3606e93 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -128,13 +128,13 @@ struct qca_memdump_event_hdr {
__u8 evt;
__u8 plen;
__u16 opcode;
- __u16 seq_no;
+ __le16 seq_no;
__u8 reserved;
} __packed;
struct qca_dump_size {
- u32 dump_size;
+ __le32 dump_size;
} __packed;
struct qca_data {
@@ -1588,10 +1588,11 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
- /* UART driver handles the interrupt from BT SoC.So we need to use
- * device handle of UART driver to get the status of device may wakeup.
+ /* A BT SoC attached through the serial bus is handled by the serdev driver.
+ * So we need to use the device handle of the serdev driver to check
+ * whether the device may wake up.
*/
- wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent);
+ wakeup = device_may_wakeup(&hu->serdev->ctrl->dev);
bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup);
return wakeup;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 4dd777cc0c89..d6037a328669 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -442,18 +442,7 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
- struct device *dev;
- struct i2c_client *client;
-
- dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
- if (!dev)
- return NULL;
-
- client = i2c_verify_client(dev);
- if (!client)
- put_device(dev);
-
- return client;
+ return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev));
}
static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 087e480b624c..b47e255fc6f1 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1012,6 +1012,35 @@ void i2c_unregister_device(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
+/**
+ * i2c_find_device_by_fwnode() - find an i2c_client for the fwnode
+ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_client
+ *
+ * Look up and return the &struct i2c_client corresponding to the @fwnode.
+ * If no client can be found, or @fwnode is NULL, this returns NULL.
+ *
+ * The user must call put_device(&client->dev) once done with the i2c client.
+ */
+struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct i2c_client *client;
+ struct device *dev;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = bus_find_device_by_fwnode(&i2c_bus_type, fwnode);
+ if (!dev)
+ return NULL;
+
+ client = i2c_verify_client(dev);
+ if (!client)
+ put_device(dev);
+
+ return client;
+}
+EXPORT_SYMBOL(i2c_find_device_by_fwnode);
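
A hedged caller-side sketch of the new lookup; the surrounding driver function is hypothetical, but the reference-counting rule follows the kernel-doc above:

#include <linux/i2c.h>

/* Hypothetical caller: resolve an i2c_client from a firmware node and
 * drop the device reference once done, as required above.
 */
static int example_use_client(struct fwnode_handle *fwnode)
{
        struct i2c_client *client;

        client = i2c_find_device_by_fwnode(fwnode);
        if (!client)
                return -ENODEV;

        dev_info(&client->dev, "found client at address %#x\n", client->addr);
        put_device(&client->dev);
        return 0;
}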
+
static const struct i2c_device_id dummy_id[] = {
{ "dummy", 0 },
@@ -1761,6 +1790,75 @@ int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter)
}
EXPORT_SYMBOL_GPL(devm_i2c_add_adapter);
+static int i2c_dev_or_parent_fwnode_match(struct device *dev, const void *data)
+{
+ if (dev_fwnode(dev) == data)
+ return 1;
+
+ if (dev->parent && dev_fwnode(dev->parent) == data)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * i2c_find_adapter_by_fwnode() - find an i2c_adapter for the fwnode
+ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter
+ *
+ * Look up and return the &struct i2c_adapter corresponding to the @fwnode.
+ * If no adapter can be found, or @fwnode is NULL, this returns NULL.
+ *
+ * The user must call put_device(&adapter->dev) once done with the i2c adapter.
+ */
+struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct i2c_adapter *adapter;
+ struct device *dev;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, fwnode,
+ i2c_dev_or_parent_fwnode_match);
+ if (!dev)
+ return NULL;
+
+ adapter = i2c_verify_adapter(dev);
+ if (!adapter)
+ put_device(dev);
+
+ return adapter;
+}
+EXPORT_SYMBOL(i2c_find_adapter_by_fwnode);
+
+/**
+ * i2c_get_adapter_by_fwnode() - find an i2c_adapter for the fwnode
+ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter
+ *
+ * Look up and return the &struct i2c_adapter corresponding to the @fwnode,
+ * and increment the adapter module's use count. If no adapter can be found,
+ * or @fwnode is NULL, this returns NULL.
+ *
+ * The user must call i2c_put_adapter(adapter) once done with the i2c adapter.
+ * Note that this is different from i2c_find_adapter_by_fwnode().
+ */
+struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_find_adapter_by_fwnode(fwnode);
+ if (!adapter)
+ return NULL;
+
+ if (!try_module_get(adapter->owner)) {
+ put_device(&adapter->dev);
+ adapter = NULL;
+ }
+
+ return adapter;
+}
+EXPORT_SYMBOL(i2c_get_adapter_by_fwnode);
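
The _get_ variant additionally pins the adapter driver's module, so a caller pairs it with i2c_put_adapter(), which drops both the module and the device reference. A hypothetical caller for illustration:

#include <linux/i2c.h>

/* Hypothetical caller: pin an adapter by fwnode for the duration of an
 * operation; i2c_put_adapter() undoes both try_module_get() and the
 * device reference taken by the lookup.
 */
static int example_use_adapter(struct fwnode_handle *fwnode)
{
        struct i2c_adapter *adap;

        adap = i2c_get_adapter_by_fwnode(fwnode);
        if (!adap)
                return -EPROBE_DEFER;

        dev_info(&adap->dev, "using adapter %s\n", adap->name);
        i2c_put_adapter(adap);
        return 0;
}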
+
static void i2c_parse_timing(struct device *dev, char *prop_name, u32 *cur_val_p,
u32 def_val, bool use_def)
{
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 3ed74aa4b44b..bce6b796e04c 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -113,72 +113,6 @@ void of_i2c_register_devices(struct i2c_adapter *adap)
of_node_put(bus);
}
-static int of_dev_or_parent_node_match(struct device *dev, const void *data)
-{
- if (dev->of_node == data)
- return 1;
-
- if (dev->parent)
- return dev->parent->of_node == data;
-
- return 0;
-}
-
-/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
-{
- struct device *dev;
- struct i2c_client *client;
-
- dev = bus_find_device_by_of_node(&i2c_bus_type, node);
- if (!dev)
- return NULL;
-
- client = i2c_verify_client(dev);
- if (!client)
- put_device(dev);
-
- return client;
-}
-EXPORT_SYMBOL(of_find_i2c_device_by_node);
-
-/* must call put_device() when done with returned i2c_adapter device */
-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
-{
- struct device *dev;
- struct i2c_adapter *adapter;
-
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_or_parent_node_match);
- if (!dev)
- return NULL;
-
- adapter = i2c_verify_adapter(dev);
- if (!adapter)
- put_device(dev);
-
- return adapter;
-}
-EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
-
-/* must call i2c_put_adapter() when done with returned i2c_adapter device */
-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
-{
- struct i2c_adapter *adapter;
-
- adapter = of_find_i2c_adapter_by_node(node);
- if (!adapter)
- return NULL;
-
- if (!try_module_get(adapter->owner)) {
- put_device(&adapter->dev);
- adapter = NULL;
- }
-
- return adapter;
-}
-EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
-
static const struct of_device_id*
i2c_of_match_device_sysfs(const struct of_device_id *matches,
struct i2c_client *client)
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 785c37cae3c0..5a2baf49ecaa 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -89,13 +89,6 @@ struct bnxt_re_ring_attr {
u8 mode;
};
-struct bnxt_re_work {
- struct work_struct work;
- unsigned long event;
- struct bnxt_re_dev *rdev;
- struct net_device *vlan_dev;
-};
-
struct bnxt_re_sqp_entries {
struct bnxt_qplib_sge sge;
u64 wrid;
@@ -132,10 +125,10 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
+ struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
int num_msix;
int id;
@@ -194,5 +187,4 @@ static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
return &rdev->ibdev.dev;
return NULL;
}
-
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 8c0c80a8d338..c5867e78f231 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -48,6 +48,7 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>
+#include <linux/auxiliary_bus.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -74,14 +75,14 @@ MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");
/* globals */
-static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
-/* Mutex to protect the list of bnxt_re devices added */
-static DEFINE_MUTEX(bnxt_re_dev_lock);
-static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
-static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
+static DEFINE_MUTEX(bnxt_re_mutex);
+
static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
+static int bnxt_re_netdev_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr);
+static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
@@ -111,16 +112,14 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- struct bnxt *bp;
en_dev = rdev->en_dev;
- bp = netdev_priv(en_dev->net);
chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
if (!chip_ctx)
return -ENOMEM;
- chip_ctx->chip_num = bp->chip_num;
- chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
+ chip_ctx->chip_num = en_dev->chip_num;
+ chip_ctx->hw_stats_size = en_dev->hw_ring_stats_size;
rdev->chip_ctx = chip_ctx;
/* remaining members to follow eventually */
@@ -128,7 +127,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
rdev->qplib_res.dattr = &rdev->dev_attr;
- rdev->qplib_res.is_vf = BNXT_VF(bp);
+ rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
bnxt_re_set_drv_mode(rdev, wqe_mode);
if (bnxt_qplib_determine_atomics(en_dev->pdev))
@@ -141,10 +140,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
- struct bnxt *bp;
-
- bp = netdev_priv(rdev->en_dev->net);
- if (BNXT_VF(bp))
+ if (BNXT_EN_VF(rdev->en_dev))
rdev->is_virtfn = 1;
}
@@ -225,56 +221,12 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
-/* for handling bnxt_en callbacks later */
-static void bnxt_re_stop(void *p)
-{
- struct bnxt_re_dev *rdev = p;
- struct bnxt *bp;
-
- if (!rdev)
- return;
- ASSERT_RTNL();
-
- /* L2 driver invokes this callback during device error/crash or device
- * reset. Current RoCE driver doesn't recover the device in case of
- * error. Handle the error by dispatching fatal events to all qps
- * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
- * L2 driver want to modify the MSIx table.
- */
- bp = netdev_priv(rdev->netdev);
-
- ibdev_info(&rdev->ibdev, "Handle device stop call from L2 driver");
- /* Check the current device state from L2 structure and move the
- * device to detached state if FW_FATAL_COND is set.
- * This prevents more commands to HW during clean-up,
- * in case the device is already in error.
- */
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
- set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
-
- bnxt_re_dev_stop(rdev);
- bnxt_re_stop_irq(rdev);
- /* Move the device states to detached and avoid sending any more
- * commands to HW
- */
- set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
- set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
-}
-
-static void bnxt_re_start(void *p)
-{
-}
-
-static void bnxt_re_sriov_config(void *p, int num_vfs)
+static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
- struct bnxt_re_dev *rdev = p;
-
- if (!rdev)
- return;
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
return;
- rdev->num_vfs = num_vfs;
+ rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
@@ -282,16 +234,14 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
}
}
-static void bnxt_re_shutdown(void *p)
+static void bnxt_re_shutdown(struct auxiliary_device *adev)
{
- struct bnxt_re_dev *rdev = p;
+ struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
if (!rdev)
return;
- ASSERT_RTNL();
- /* Release the MSIx vectors before queuing unregister */
- bnxt_re_stop_irq(rdev);
- ib_unregister_device_queued(&rdev->ibdev);
+ ib_unregister_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev);
}
static void bnxt_re_stop_irq(void *handle)
@@ -312,7 +262,7 @@ static void bnxt_re_stop_irq(void *handle)
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+ struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
struct bnxt_qplib_nq *nq;
int indx, rc;
@@ -331,7 +281,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
* in device structure.
*/
for (indx = 0; indx < rdev->num_msix; indx++)
- rdev->msix_entries[indx].vector = ent[indx].vector;
+ rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
false);
@@ -346,93 +296,22 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
- .ulp_async_notifier = NULL,
- .ulp_stop = bnxt_re_stop,
- .ulp_start = bnxt_re_start,
- .ulp_sriov_config = bnxt_re_sriov_config,
- .ulp_shutdown = bnxt_re_shutdown,
.ulp_irq_stop = bnxt_re_stop_irq,
.ulp_irq_restart = bnxt_re_start_irq
};
/* RoCE -> Net driver */
-/* Driver registration routines used to let the networking driver (bnxt_en)
- * to know that the RoCE driver is now installed
- */
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
-{
- struct bnxt_en_dev *en_dev;
- int rc;
-
- if (!rdev)
- return -EINVAL;
-
- en_dev = rdev->en_dev;
-
- rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
- BNXT_ROCE_ULP);
- return rc;
-}
-
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
int rc = 0;
- if (!rdev)
- return -EINVAL;
-
en_dev = rdev->en_dev;
- rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
- &bnxt_re_ulp_ops, rdev);
- rdev->qplib_res.pdev = rdev->en_dev->pdev;
- return rc;
-}
-
-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
-{
- struct bnxt_en_dev *en_dev;
- int rc;
-
- if (!rdev)
- return -EINVAL;
-
- en_dev = rdev->en_dev;
-
-
- rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
-
- return rc;
-}
-
-static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
-{
- int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
- struct bnxt_en_dev *en_dev;
-
- if (!rdev)
- return -EINVAL;
-
- en_dev = rdev->en_dev;
-
- num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
-
- num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
- rdev->msix_entries,
- num_msix_want);
- if (num_msix_got < BNXT_RE_MIN_MSIX) {
- rc = -EINVAL;
- goto done;
- }
- if (num_msix_got != num_msix_want) {
- ibdev_warn(&rdev->ibdev,
- "Requested %d MSI-X vectors, got %d\n",
- num_msix_want, num_msix_got);
- }
- rdev->num_msix = num_msix_got;
-done:
+ rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev);
+ if (!rc)
+ rdev->qplib_res.pdev = rdev->en_dev->pdev;
return rc;
}
@@ -458,12 +337,17 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
u16 fw_ring_id, int type)
{
- struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_en_dev *en_dev;
struct hwrm_ring_free_input req = {0};
struct hwrm_ring_free_output resp;
struct bnxt_fw_msg fw_msg;
int rc = -EINVAL;
+ if (!rdev)
+ return rc;
+
+ en_dev = rdev->en_dev;
+
if (!en_dev)
return rc;
@@ -477,7 +361,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
req.ring_id = cpu_to_le16(fw_ring_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
req.ring_id, rc);
@@ -514,7 +398,7 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
req.int_mode = ring_attr->mode;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
*fw_ring_id = le16_to_cpu(resp.ring_id);
@@ -542,7 +426,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
rc);
@@ -575,7 +459,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
@@ -584,21 +468,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
/* Device */
-static bool is_bnxt_re_dev(struct net_device *netdev)
-{
- struct ethtool_drvinfo drvinfo;
-
- if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
- memset(&drvinfo, 0, sizeof(drvinfo));
- netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);
-
- if (strcmp(drvinfo.driver, "bnxt_en"))
- return false;
- return true;
- }
- return false;
-}
-
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
struct ib_device *ibdev =
@@ -609,31 +478,6 @@ static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
return container_of(ibdev, struct bnxt_re_dev, ibdev);
}
-static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
-{
- struct bnxt_en_dev *en_dev;
- struct pci_dev *pdev;
-
- en_dev = bnxt_ulp_probe(netdev);
- if (IS_ERR(en_dev))
- return en_dev;
-
- pdev = en_dev->pdev;
- if (!pdev)
- return ERR_PTR(-EINVAL);
-
- if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
- dev_info(&pdev->dev,
- "%s: probe error: RoCE is not supported on this device",
- ROCE_DRV_MODULE_NAME);
- return ERR_PTR(-ENODEV);
- }
-
- dev_hold(netdev);
-
- return en_dev;
-}
-
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -679,7 +523,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.create_qp = bnxt_re_create_qp,
.create_srq = bnxt_re_create_srq,
.create_user_ah = bnxt_re_create_ah,
- .dealloc_driver = bnxt_re_dealloc_driver,
.dealloc_pd = bnxt_re_dealloc_pd,
.dealloc_ucontext = bnxt_re_dealloc_ucontext,
.del_gid = bnxt_re_del_gid,
@@ -744,18 +587,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}
-static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
-{
- dev_put(rdev->netdev);
- rdev->netdev = NULL;
- mutex_lock(&bnxt_re_dev_lock);
- list_del_rcu(&rdev->list);
- mutex_unlock(&bnxt_re_dev_lock);
-
- synchronize_rcu();
-}
-
-static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
+static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
struct bnxt_en_dev *en_dev)
{
struct bnxt_re_dev *rdev;
@@ -768,8 +600,8 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
return NULL;
}
/* Default values */
- rdev->netdev = netdev;
- dev_hold(rdev->netdev);
+ rdev->nb.notifier_call = NULL;
+ rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
rdev->id = rdev->en_dev->pdev->devfn;
INIT_LIST_HEAD(&rdev->qp_list);
@@ -784,9 +616,6 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
rdev->cosq[0] = 0xFFFF;
rdev->cosq[1] = 0xFFFF;
- mutex_lock(&bnxt_re_dev_lock);
- list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
- mutex_unlock(&bnxt_re_dev_lock);
return rdev;
}
@@ -930,7 +759,7 @@ static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
BNXT_RE_GEN_P5_PF_NQ_DB) :
- rdev->msix_entries[indx].db_offset;
+ rdev->en_dev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -955,7 +784,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
for (i = 1; i < rdev->num_msix ; i++) {
db_offt = bnxt_re_get_nqdb_offset(rdev, i);
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
- i - 1, rdev->msix_entries[i].vector,
+ i - 1, rdev->en_dev->msix_entries[i].vector,
db_offt, &bnxt_re_cqn_handler,
&bnxt_re_srqn_handler);
if (rc) {
@@ -1042,7 +871,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
rattr.type = type;
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
- rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+ rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
if (rc) {
ibdev_err(&rdev->ibdev,
@@ -1095,7 +924,6 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
u64 *cid_map)
{
struct hwrm_queue_pri2cos_qcfg_input req = {0};
- struct bnxt *bp = netdev_priv(rdev->netdev);
struct hwrm_queue_pri2cos_qcfg_output resp;
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct bnxt_fw_msg fw_msg;
@@ -1112,11 +940,11 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
flags |= (dir & 0x01);
flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
req.flags = cpu_to_le32(flags);
- req.port_id = bp->pf.port_id;
+ req.port_id = en_dev->pf_port_id;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
return rc;
@@ -1299,7 +1127,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
rc);
@@ -1323,7 +1151,7 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
pr_err("Failed to register with IB: %#x\n", rc);
return rc;
}
- dev_info(rdev_to_dev(rdev), "Device registered successfully");
+ dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1362,20 +1190,12 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
- if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
- rc = bnxt_re_free_msix(rdev);
- if (rc)
- ibdev_warn(&rdev->ibdev,
- "Failed to free MSI-X vectors: %#x", rc);
- }
+ if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
+ rdev->num_msix = 0;
bnxt_re_destroy_chip_ctx(rdev);
- if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
- rc = bnxt_re_unregister_netdev(rdev);
- if (rc)
- ibdev_warn(&rdev->ibdev,
- "Failed to unregister with netdev: %#x", rc);
- }
+ if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+ bnxt_unregister_dev(rdev->en_dev);
}
/* Worker thread for polling periodic events. Now used for QoS programming. */
@@ -1416,13 +1236,15 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
- rc = bnxt_re_request_msix(rdev);
- if (rc) {
+ if (!rdev->en_dev->ulp_tbl->msix_requested) {
ibdev_err(&rdev->ibdev,
"Failed to get MSI-X vectors: %#x\n", rc);
rc = -EINVAL;
goto fail;
}
+ ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+ rdev->en_dev->ulp_tbl->msix_requested);
+ rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
bnxt_re_query_hwrm_intf_version(rdev);
@@ -1446,14 +1268,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
rattr.type = type;
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
- rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
goto free_rcfw;
}
db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
- vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+ vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
vid, db_offt, rdev->is_virtfn,
&bnxt_re_aeq_handler);
@@ -1521,6 +1343,11 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ /*
+ * Use the total VF count since the actual VF count may not be
+ * available at this point.
+ */
+ bnxt_re_vf_res_config(rdev);
}
return 0;
@@ -1541,135 +1368,43 @@ fail:
return rc;
}
-static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
-{
- struct net_device *netdev = rdev->netdev;
-
- bnxt_re_dev_remove(rdev);
-
- if (netdev)
- dev_put(netdev);
-}
-
-static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
+static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
{
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
int rc = 0;
- if (!is_bnxt_re_dev(netdev))
- return -ENODEV;
+ /* en_dev should never be NULL as long as adev and aux_dev are valid. */
+ en_dev = aux_priv->edev;
- en_dev = bnxt_re_dev_probe(netdev);
- if (IS_ERR(en_dev)) {
- if (en_dev != ERR_PTR(-ENODEV))
- ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
- ROCE_DRV_MODULE_NAME);
- rc = PTR_ERR(en_dev);
- goto exit;
- }
- *rdev = bnxt_re_dev_add(netdev, en_dev);
- if (!*rdev) {
+ rdev = bnxt_re_dev_add(aux_priv, en_dev);
+ if (!rdev || !rdev_to_dev(rdev)) {
rc = -ENOMEM;
- dev_put(netdev);
goto exit;
}
-exit:
- return rc;
-}
-static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
-{
- bnxt_re_dev_uninit(rdev);
- pci_dev_put(rdev->en_dev->pdev);
- bnxt_re_dev_unreg(rdev);
-}
-
-static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
- struct net_device *netdev, u8 wqe_mode)
-{
- int rc;
-
- rc = bnxt_re_dev_reg(rdev, netdev);
- if (rc == -ENODEV)
- return rc;
- if (rc) {
- pr_err("Failed to register with the device %s: %#x\n",
- netdev->name, rc);
- return rc;
- }
+ rc = bnxt_re_dev_init(rdev, wqe_mode);
+ if (rc)
+ goto re_dev_dealloc;
- pci_dev_get((*rdev)->en_dev->pdev);
- rc = bnxt_re_dev_init(*rdev, wqe_mode);
+ rc = bnxt_re_ib_init(rdev);
if (rc) {
- pci_dev_put((*rdev)->en_dev->pdev);
- bnxt_re_dev_unreg(*rdev);
+ pr_err("Failed to register with IB: %s",
+ aux_priv->aux_dev.name);
+ goto re_dev_uninit;
}
+ auxiliary_set_drvdata(adev, rdev);
- return rc;
-}
-
-static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
-{
- struct bnxt_re_dev *rdev =
- container_of(ib_dev, struct bnxt_re_dev, ibdev);
-
- dev_info(rdev_to_dev(rdev), "Unregistering Device");
-
- rtnl_lock();
- bnxt_re_remove_device(rdev);
- rtnl_unlock();
-}
-
-/* Handle all deferred netevents tasks */
-static void bnxt_re_task(struct work_struct *work)
-{
- struct bnxt_re_work *re_work;
- struct bnxt_re_dev *rdev;
- int rc = 0;
-
- re_work = container_of(work, struct bnxt_re_work, work);
- rdev = re_work->rdev;
-
- if (re_work->event == NETDEV_REGISTER) {
- rc = bnxt_re_ib_init(rdev);
- if (rc) {
- ibdev_err(&rdev->ibdev,
- "Failed to register with IB: %#x", rc);
- rtnl_lock();
- bnxt_re_remove_device(rdev);
- rtnl_unlock();
- goto exit;
- }
- goto exit;
- }
-
- if (!ib_device_try_get(&rdev->ibdev))
- goto exit;
+ return 0;
- switch (re_work->event) {
- case NETDEV_UP:
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
- IB_EVENT_PORT_ACTIVE);
- break;
- case NETDEV_DOWN:
- bnxt_re_dev_stop(rdev);
- break;
- case NETDEV_CHANGE:
- if (!netif_carrier_ok(rdev->netdev))
- bnxt_re_dev_stop(rdev);
- else if (netif_carrier_ok(rdev->netdev))
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
- IB_EVENT_PORT_ACTIVE);
- ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
- &rdev->active_width);
- break;
- default:
- break;
- }
- ib_device_put(&rdev->ibdev);
+re_dev_uninit:
+ bnxt_re_dev_uninit(rdev);
+re_dev_dealloc:
+ ib_dealloc_device(&rdev->ibdev);
exit:
- put_device(&rdev->ibdev.dev);
- kfree(re_work);
+ return rc;
}
/*
@@ -1690,109 +1425,189 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
- struct bnxt_re_work *re_work;
struct bnxt_re_dev *rdev;
- int rc = 0;
- bool sch_work = false;
- bool release = true;
real_dev = rdma_vlan_dev_real_dev(netdev);
if (!real_dev)
real_dev = netdev;
- rdev = bnxt_re_from_netdev(real_dev);
- if (!rdev && event != NETDEV_REGISTER)
- return NOTIFY_OK;
-
if (real_dev != netdev)
goto exit;
- switch (event) {
- case NETDEV_REGISTER:
- if (rdev)
- break;
- rc = bnxt_re_add_device(&rdev, real_dev,
- BNXT_QPLIB_WQE_MODE_STATIC);
- if (!rc)
- sch_work = true;
- release = false;
- break;
+ rdev = bnxt_re_from_netdev(real_dev);
+ if (!rdev)
+ return NOTIFY_DONE;
- case NETDEV_UNREGISTER:
- ib_unregister_device_queued(&rdev->ibdev);
- break;
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ netif_carrier_ok(real_dev) ?
+ IB_EVENT_PORT_ACTIVE :
+ IB_EVENT_PORT_ERR);
+ break;
default:
- sch_work = true;
break;
}
- if (sch_work) {
- /* Allocate for the deferred task */
- re_work = kzalloc(sizeof(*re_work), GFP_KERNEL);
- if (re_work) {
- get_device(&rdev->ibdev.dev);
- re_work->rdev = rdev;
- re_work->event = event;
- re_work->vlan_dev = (real_dev == netdev ?
- NULL : netdev);
- INIT_WORK(&re_work->work, bnxt_re_task);
- queue_work(bnxt_re_wq, &re_work->work);
- }
- }
-
+ ib_device_put(&rdev->ibdev);
exit:
- if (rdev && release)
- ib_device_put(&rdev->ibdev);
return NOTIFY_DONE;
}
-static struct notifier_block bnxt_re_netdev_notifier = {
- .notifier_call = bnxt_re_netdev_event
-};
+#define BNXT_ADEV_NAME "bnxt_en"
-static int __init bnxt_re_mod_init(void)
+static void bnxt_re_remove(struct auxiliary_device *adev)
{
- int rc = 0;
+ struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
- pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
+ if (!rdev)
+ return;
- bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
- if (!bnxt_re_wq)
- return -ENOMEM;
+ mutex_lock(&bnxt_re_mutex);
+ if (rdev->nb.notifier_call) {
+ unregister_netdevice_notifier(&rdev->nb);
+ rdev->nb.notifier_call = NULL;
+ } else {
+ /* If the notifier is NULL, cleanup has already been
+ * done before coming here.
+ */
+ goto skip_remove;
+ }
+
+ ib_unregister_device(&rdev->ibdev);
+ ib_dealloc_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev);
+skip_remove:
+ mutex_unlock(&bnxt_re_mutex);
+}
+
+static int bnxt_re_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct bnxt_re_dev *rdev;
+ int rc;
- INIT_LIST_HEAD(&bnxt_re_dev_list);
+ mutex_lock(&bnxt_re_mutex);
+ rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
+ if (rc) {
+ mutex_unlock(&bnxt_re_mutex);
+ return rc;
+ }
+
+ rdev = auxiliary_get_drvdata(adev);
- rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
+ rdev->nb.notifier_call = bnxt_re_netdev_event;
+ rc = register_netdevice_notifier(&rdev->nb);
if (rc) {
+ rdev->nb.notifier_call = NULL;
pr_err("%s: Cannot register to netdevice_notifier",
ROCE_DRV_MODULE_NAME);
- goto err_netdev;
+ goto err;
}
+
+ mutex_unlock(&bnxt_re_mutex);
return 0;
-err_netdev:
- destroy_workqueue(bnxt_re_wq);
+err:
+ mutex_unlock(&bnxt_re_mutex);
+ bnxt_re_remove(adev);
return rc;
}
-static void __exit bnxt_re_mod_exit(void)
+static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
- unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
- if (bnxt_re_wq)
- destroy_workqueue(bnxt_re_wq);
- list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
- /* VF device removal should be called before the removal
- * of PF device. Queue VFs unregister first, so that VFs
- * shall be removed before the PF during the call of
- * ib_unregister_driver.
- */
- if (rdev->is_virtfn)
- ib_unregister_device(&rdev->ibdev);
+ if (!rdev)
+ return 0;
+
+ mutex_lock(&bnxt_re_mutex);
+ /* The L2 driver may invoke this callback during a device error/crash
+ * or a device reset. The RoCE driver does not recover the device; it
+ * handles the error by dispatching fatal events to all QPs (i.e. by
+ * calling bnxt_re_dev_stop) and releases the MSI-X vectors, since the
+ * L2 driver wants to modify the MSI-X table.
+ */
+
+ ibdev_info(&rdev->ibdev, "Handle device suspend call");
+ /* Check the current device state from bnxt_en_dev and move the
+ * device to detached state if FW_FATAL_COND is set.
+ * This prevents more commands to HW during clean-up,
+ * in case the device is already in error.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state))
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+
+ bnxt_re_dev_stop(rdev);
+ bnxt_re_stop_irq(rdev);
+ /* Move the device states to detached and avoid sending any more
+ * commands to HW
+ */
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ mutex_unlock(&bnxt_re_mutex);
+
+ return 0;
+}
+
+static int bnxt_re_resume(struct auxiliary_device *adev)
+{
+ struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+
+ if (!rdev)
+ return 0;
+
+ mutex_lock(&bnxt_re_mutex);
+ /* The L2 driver may invoke this callback during device recovery or
+ * resume. The RoCE driver does not recover the device, so there is
+ * nothing to restore here beyond logging the event.
+ */
+
+ ibdev_info(&rdev->ibdev, "Handle device resume call");
+ mutex_unlock(&bnxt_re_mutex);
+
+ return 0;
+}
+
+static const struct auxiliary_device_id bnxt_re_id_table[] = {
+ { .name = BNXT_ADEV_NAME ".rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, bnxt_re_id_table);
+
+static struct auxiliary_driver bnxt_re_driver = {
+ .name = "rdma",
+ .probe = bnxt_re_probe,
+ .remove = bnxt_re_remove,
+ .shutdown = bnxt_re_shutdown,
+ .suspend = bnxt_re_suspend,
+ .resume = bnxt_re_resume,
+ .id_table = bnxt_re_id_table,
+};
+
+static int __init bnxt_re_mod_init(void)
+{
+ int rc = 0;
+
+ pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
+ rc = auxiliary_driver_register(&bnxt_re_driver);
+ if (rc) {
+ pr_err("%s: Failed to register auxiliary driver\n",
+ ROCE_DRV_MODULE_NAME);
+ return rc;
}
- ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
+ return 0;
+}
+
+static void __exit bnxt_re_mod_exit(void)
+{
+ auxiliary_driver_unregister(&bnxt_re_driver);
}
module_init(bnxt_re_mod_init);
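
The bnxt_re changes above complete a conversion from netdev-notifier-driven probing to the auxiliary bus: the L2 driver creates a "bnxt_en.rdma" auxiliary device, and the RoCE driver binds to it through probe/remove/suspend/resume callbacks instead of scanning netdevs. For reference, a minimal sketch of this registration pattern follows; the demo_* names are hypothetical, only the auxiliary_bus calls and the "<module>.<device>" matching convention are real.

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int demo_probe(struct auxiliary_device *adev,
                      const struct auxiliary_device_id *id)
{
        /* Stash driver state for the remove/suspend/resume callbacks. */
        auxiliary_set_drvdata(adev, NULL);
        return 0;
}

static void demo_remove(struct auxiliary_device *adev)
{
        void *priv = auxiliary_get_drvdata(adev);

        /* Undo whatever probe() set up. */
        (void)priv;
}

/* Matched against "<parent module>.<device name>", cf. "bnxt_en.rdma". */
static const struct auxiliary_device_id demo_id_table[] = {
        { .name = "bnxt_en.rdma", },
        {},
};
MODULE_DEVICE_TABLE(auxiliary, demo_id_table);

static struct auxiliary_driver demo_driver = {
        .name = "rdma",
        .probe = demo_probe,
        .remove = demo_remove,
        .id_table = demo_id_table,
};
module_auxiliary_driver(demo_driver);

MODULE_LICENSE("GPL");
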
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index 74f6348f240a..771059a8eb7d 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -11,6 +11,7 @@
/* Copyright (c) 2017, Open Grid Computing, Inc. */
#include <linux/workqueue.h>
+#include <trace/events/sock.h>
#include "erdma.h"
#include "erdma_cm.h"
@@ -925,6 +926,8 @@ static void erdma_cm_llp_data_ready(struct sock *sk)
{
struct erdma_cep *cep;
+ trace_sk_data_ready(sk);
+
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
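
The erdma hunk above, and the siw hunks below, are part of a tree-wide change instrumenting ->sk_data_ready() callbacks with a tracepoint. The pattern is small enough to show whole; my_data_ready() is a hypothetical stand-in for the per-protocol callback.

#include <net/sock.h>
#include <trace/events/sock.h>

static void my_data_ready(struct sock *sk)
{
        /* Fire the sock:sk_data_ready tracepoint before any protocol work. */
        trace_sk_data_ready(sk);

        read_lock(&sk->sk_callback_lock);
        /* ... recover per-connection state from sk->sk_user_data ... */
        read_unlock(&sk->sk_callback_lock);
}
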
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 52821485371a..ddcfc116b19a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -37,6 +37,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
const struct mlx5_ib_profile *profile;
struct mlx5_core_dev *peer_dev;
struct mlx5_ib_dev *ibdev;
+ bool second_uplink = false;
u32 peer_num_ports;
int vport_index;
int ret;
@@ -47,17 +48,24 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
peer_dev = mlx5_lag_get_peer_mdev(dev);
peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
if (mlx5_lag_is_master(dev)) {
- /* Only 1 ib port is the representor for both uplinks */
- num_ports += peer_num_ports - 1;
+ if (mlx5_lag_is_mpesw(dev))
+ num_ports += peer_num_ports;
+ else
+ num_ports += peer_num_ports - 1;
+
} else {
- if (rep->vport == MLX5_VPORT_UPLINK)
- return 0;
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ if (!mlx5_lag_is_mpesw(dev))
+ return 0;
+ second_uplink = true;
+ }
+
vport_index += peer_num_ports;
dev = peer_dev;
}
}
- if (rep->vport == MLX5_VPORT_UPLINK)
+ if (rep->vport == MLX5_VPORT_UPLINK && !second_uplink)
profile = &raw_eth_profile;
else
return mlx5_ib_set_vport_rep(dev, rep, vport_index);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c669ef6e47e7..dc32e4518a28 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3012,26 +3012,63 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
+static void mlx5_netdev_notifier_register(struct mlx5_roce *roce,
+ struct net_device *netdev)
{
int err;
- dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
- if (err) {
- dev->port[port_num].roce.nb.notifier_call = NULL;
- return err;
- }
+ if (roce->tracking_netdev)
+ return;
+ roce->tracking_netdev = netdev;
+ roce->nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier_dev_net(netdev, &roce->nb, &roce->nn);
+ WARN_ON(err);
+}
- return 0;
+static void mlx5_netdev_notifier_unregister(struct mlx5_roce *roce)
+{
+ if (!roce->tracking_netdev)
+ return;
+ unregister_netdevice_notifier_dev_net(roce->tracking_netdev, &roce->nb,
+ &roce->nn);
+ roce->tracking_netdev = NULL;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
+static int mlx5e_mdev_notifier_event(struct notifier_block *nb,
+ unsigned long event, void *data)
{
- if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
- dev->port[port_num].roce.nb.notifier_call = NULL;
+ struct mlx5_roce *roce = container_of(nb, struct mlx5_roce, mdev_nb);
+ struct net_device *netdev = data;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
+ if (netdev)
+ mlx5_netdev_notifier_register(roce, netdev);
+ else
+ mlx5_netdev_notifier_unregister(roce);
+ break;
+ default:
+ return NOTIFY_DONE;
}
+
+ return NOTIFY_OK;
+}
+
+static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ struct mlx5_roce *roce = &dev->port[port_num].roce;
+
+ roce->mdev_nb.notifier_call = mlx5e_mdev_notifier_event;
+ mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
+ mlx5_core_uplink_netdev_event_replay(dev->mdev);
+}
+
+static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ struct mlx5_roce *roce = &dev->port[port_num].roce;
+
+ mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
+ mlx5_netdev_notifier_unregister(roce);
}
static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
@@ -3138,7 +3175,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
if (mpi->mdev_events.notifier_call)
mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
mpi->mdev_events.notifier_call = NULL;
- mlx5_remove_netdev_notifier(ibdev, port_num);
+ mlx5_mdev_netdev_untrack(ibdev, port_num);
spin_lock(&port->mp.mpi_lock);
comps = mpi->mdev_refcnt;
@@ -3196,12 +3233,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
if (err)
goto unbind;
- err = mlx5_add_netdev_notifier(ibdev, port_num);
- if (err) {
- mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
- port_num + 1);
- goto unbind;
- }
+ mlx5_mdev_netdev_track(ibdev, port_num);
mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
@@ -3909,9 +3941,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
/* Register only for native ports */
- err = mlx5_add_netdev_notifier(dev, port_num);
- if (err)
- return err;
+ mlx5_mdev_netdev_track(dev, port_num);
err = mlx5_enable_eth(dev);
if (err)
@@ -3920,7 +3950,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
return 0;
cleanup:
- mlx5_remove_netdev_notifier(dev, port_num);
+ mlx5_mdev_netdev_untrack(dev, port_num);
return err;
}
@@ -3938,7 +3968,7 @@ static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
mlx5_disable_eth(dev);
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
- mlx5_remove_netdev_notifier(dev, port_num);
+ mlx5_mdev_netdev_untrack(dev, port_num);
}
}
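
The mlx5 rework above replaces a globally registered netdevice notifier with one tied to the uplink netdev via register_netdevice_notifier_dev_net(); the struct netdev_net_notifier keeps the registration in the correct netns if the device moves. A sketch of the register/unregister pairing, with hypothetical my_* names:

#include <linux/netdevice.h>
#include <linux/notifier.h>

struct my_tracker {
        struct notifier_block nb;
        struct netdev_net_notifier nn;
        struct net_device *tracking_netdev;
};

static int my_netdev_event(struct notifier_block *nb,
                           unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct my_tracker *t = container_of(nb, struct my_tracker, nb);

        /* Events arrive for the whole netns; filter for the tracked dev. */
        if (netdev != t->tracking_netdev)
                return NOTIFY_DONE;
        return NOTIFY_OK;
}

static int my_track(struct my_tracker *t, struct net_device *netdev)
{
        if (t->tracking_netdev)
                return 0;
        t->nb.notifier_call = my_netdev_event;
        t->tracking_netdev = netdev;
        return register_netdevice_notifier_dev_net(netdev, &t->nb, &t->nn);
}

static void my_untrack(struct my_tracker *t)
{
        if (!t->tracking_netdev)
                return;
        unregister_netdevice_notifier_dev_net(t->tracking_netdev,
                                              &t->nb, &t->nn);
        t->tracking_netdev = NULL;
}
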
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 8b91babdd4c0..7394e7f36ba7 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -832,6 +832,9 @@ struct mlx5_roce {
rwlock_t netdev_lock;
struct net_device *netdev;
struct notifier_block nb;
+ struct netdev_net_notifier nn;
+ struct notifier_block mdev_nb;
+ struct net_device *tracking_netdev;
atomic_t tx_port_affinity;
enum ib_port_state last_port_state;
struct mlx5_ib_dev *dev;
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index f88d2971c2c6..da530c0404da 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -16,6 +16,7 @@
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/tcp.h>
+#include <trace/events/sock.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
@@ -109,6 +110,8 @@ static void siw_rtr_data_ready(struct sock *sk)
struct siw_qp *qp = NULL;
read_descriptor_t rd_desc;
+ trace_sk_data_ready(sk);
+
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
@@ -1216,6 +1219,8 @@ static void siw_cm_llp_data_ready(struct sock *sk)
{
struct siw_cep *cep;
+ trace_sk_data_ready(sk);
+
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index e6f634971228..81e9bbd9ebda 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -10,6 +10,7 @@
#include <linux/llist.h>
#include <asm/barrier.h>
#include <net/tcp.h>
+#include <trace/events/sock.h>
#include "siw.h"
#include "siw_verbs.h"
@@ -94,6 +95,8 @@ void siw_qp_llp_data_ready(struct sock *sk)
{
struct siw_qp *qp;
+ trace_sk_data_ready(sk);
+
read_lock(&sk->sk_callback_lock);
if (unlikely(!sk->sk_user_data || !sk_to_qp(sk)))
diff --git a/drivers/mfd/ocelot-core.c b/drivers/mfd/ocelot-core.c
index 1816d52c65c5..b0ff05c1759f 100644
--- a/drivers/mfd/ocelot-core.c
+++ b/drivers/mfd/ocelot-core.c
@@ -34,16 +34,49 @@
#define VSC7512_MIIM0_RES_START 0x7107009c
#define VSC7512_MIIM1_RES_START 0x710700c0
-#define VSC7512_MIIM_RES_SIZE 0x024
+#define VSC7512_MIIM_RES_SIZE 0x00000024
#define VSC7512_PHY_RES_START 0x710700f0
-#define VSC7512_PHY_RES_SIZE 0x004
+#define VSC7512_PHY_RES_SIZE 0x00000004
#define VSC7512_GPIO_RES_START 0x71070034
-#define VSC7512_GPIO_RES_SIZE 0x06c
+#define VSC7512_GPIO_RES_SIZE 0x0000006c
#define VSC7512_SIO_CTRL_RES_START 0x710700f8
-#define VSC7512_SIO_CTRL_RES_SIZE 0x100
+#define VSC7512_SIO_CTRL_RES_SIZE 0x00000100
+
+#define VSC7512_ANA_RES_START 0x71880000
+#define VSC7512_ANA_RES_SIZE 0x00010000
+
+#define VSC7512_QS_RES_START 0x71080000
+#define VSC7512_QS_RES_SIZE 0x00000100
+
+#define VSC7512_QSYS_RES_START 0x71800000
+#define VSC7512_QSYS_RES_SIZE 0x00200000
+
+#define VSC7512_REW_RES_START 0x71030000
+#define VSC7512_REW_RES_SIZE 0x00010000
+
+#define VSC7512_SYS_RES_START 0x71010000
+#define VSC7512_SYS_RES_SIZE 0x00010000
+
+#define VSC7512_S0_RES_START 0x71040000
+#define VSC7512_S1_RES_START 0x71050000
+#define VSC7512_S2_RES_START 0x71060000
+#define VCAP_RES_SIZE 0x00000400
+
+#define VSC7512_PORT_0_RES_START 0x711e0000
+#define VSC7512_PORT_1_RES_START 0x711f0000
+#define VSC7512_PORT_2_RES_START 0x71200000
+#define VSC7512_PORT_3_RES_START 0x71210000
+#define VSC7512_PORT_4_RES_START 0x71220000
+#define VSC7512_PORT_5_RES_START 0x71230000
+#define VSC7512_PORT_6_RES_START 0x71240000
+#define VSC7512_PORT_7_RES_START 0x71250000
+#define VSC7512_PORT_8_RES_START 0x71260000
+#define VSC7512_PORT_9_RES_START 0x71270000
+#define VSC7512_PORT_10_RES_START 0x71280000
+#define VSC7512_PORT_RES_SIZE 0x00010000
#define VSC7512_GCB_RST_SLEEP_US 100
#define VSC7512_GCB_RST_TIMEOUT_US 100000
@@ -96,6 +129,28 @@ static const struct resource vsc7512_sgpio_resources[] = {
DEFINE_RES_REG_NAMED(VSC7512_SIO_CTRL_RES_START, VSC7512_SIO_CTRL_RES_SIZE, "gcb_sio"),
};
+static const struct resource vsc7512_switch_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_ANA_RES_START, VSC7512_ANA_RES_SIZE, "ana"),
+ DEFINE_RES_REG_NAMED(VSC7512_QS_RES_START, VSC7512_QS_RES_SIZE, "qs"),
+ DEFINE_RES_REG_NAMED(VSC7512_QSYS_RES_START, VSC7512_QSYS_RES_SIZE, "qsys"),
+ DEFINE_RES_REG_NAMED(VSC7512_REW_RES_START, VSC7512_REW_RES_SIZE, "rew"),
+ DEFINE_RES_REG_NAMED(VSC7512_SYS_RES_START, VSC7512_SYS_RES_SIZE, "sys"),
+ DEFINE_RES_REG_NAMED(VSC7512_S0_RES_START, VCAP_RES_SIZE, "s0"),
+ DEFINE_RES_REG_NAMED(VSC7512_S1_RES_START, VCAP_RES_SIZE, "s1"),
+ DEFINE_RES_REG_NAMED(VSC7512_S2_RES_START, VCAP_RES_SIZE, "s2"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_0_RES_START, VSC7512_PORT_RES_SIZE, "port0"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_1_RES_START, VSC7512_PORT_RES_SIZE, "port1"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_2_RES_START, VSC7512_PORT_RES_SIZE, "port2"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_3_RES_START, VSC7512_PORT_RES_SIZE, "port3"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_4_RES_START, VSC7512_PORT_RES_SIZE, "port4"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_5_RES_START, VSC7512_PORT_RES_SIZE, "port5"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_6_RES_START, VSC7512_PORT_RES_SIZE, "port6"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_7_RES_START, VSC7512_PORT_RES_SIZE, "port7"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_8_RES_START, VSC7512_PORT_RES_SIZE, "port8"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_9_RES_START, VSC7512_PORT_RES_SIZE, "port9"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_10_RES_START, VSC7512_PORT_RES_SIZE, "port10")
+};
+
static const struct mfd_cell vsc7512_devs[] = {
{
.name = "ocelot-pinctrl",
@@ -121,6 +176,11 @@ static const struct mfd_cell vsc7512_devs[] = {
.use_of_reg = true,
.num_resources = ARRAY_SIZE(vsc7512_miim1_resources),
.resources = vsc7512_miim1_resources,
+ }, {
+ .name = "ocelot-switch",
+ .of_compatible = "mscc,vsc7512-switch",
+ .num_resources = ARRAY_SIZE(vsc7512_switch_resources),
+ .resources = vsc7512_switch_resources,
},
};
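
The vsc7512_switch_resources above are IORESOURCE_REG ranges that the MFD core hands to the new "ocelot-switch" cell. A sketch of how a child platform driver could look one of them up by name (the probe function here is hypothetical; the lookup call is standard platform API):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static int demo_child_probe(struct platform_device *pdev)
{
        struct resource *res;

        res = platform_get_resource_byname(pdev, IORESOURCE_REG, "sys");
        if (!res)
                return -ENODEV;

        dev_info(&pdev->dev, "sys block: start 0x%llx, size 0x%llx\n",
                 (unsigned long long)res->start,
                 (unsigned long long)resource_size(res));
        return 0;
}
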
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 12910338ea1a..c34bd432da27 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -582,18 +582,7 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
-config USB4_NET
- tristate "Networking over USB4 and Thunderbolt cables"
- depends on USB4 && INET
- help
- Select this if you want to create network between two computers
- over a USB4 and Thunderbolt cables. The driver supports Apple
- ThunderboltIP protocol and allows communication with any host
- supporting the same protocol including Windows and macOS.
-
- To compile this driver a module, choose M here. The module will be
- called thunderbolt-net.
-
+source "drivers/net/thunderbolt/Kconfig"
source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6ce076462dbf..e26f98f897c5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -84,8 +84,6 @@ obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
-
-thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt/
obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0363ce597661..00646aa315c3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -419,8 +419,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
/**
* bond_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: extack pointer to fill in the failure reason
**/
-static int bond_ipsec_add_sa(struct xfrm_state *xs)
+static int bond_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *bond_dev = xs->xso.dev;
struct bond_ipsec *ipsec;
@@ -442,7 +444,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
rcu_read_unlock();
return -EINVAL;
}
@@ -454,7 +456,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
}
xs->xso.real_dev = slave->dev;
- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
if (!err) {
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
@@ -494,7 +496,7 @@ static void bond_ipsec_add_sa_all(struct bonding *bond)
spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
ipsec->xs->xso.real_dev = slave->dev;
- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
+ if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
ipsec->xs->xso.real_dev = NULL;
}
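
The bonding hunks track an xfrm API change that threads a netlink extack through xdo_dev_state_add(), so offload rejections reach user space in the netlink ACK rather than only the kernel log. Below is a sketch of a driver-side callback under the new signature; the transport-mode check is purely illustrative.

#include <linux/netlink.h>
#include <net/xfrm.h>

static int demo_xdo_dev_state_add(struct xfrm_state *xs,
                                  struct netlink_ext_ack *extack)
{
        if (xs->props.mode != XFRM_MODE_TRANSPORT) {
                /* NL_SET_ERR_MSG_MOD tolerates a NULL extack, as passed by
                 * bond_ipsec_add_sa_all() above.
                 */
                NL_SET_ERR_MSG_MOD(extack, "Only transport mode is offloaded");
                return -EINVAL;
        }
        return 0;
}
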
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
index f83684f006ea..a17561d97192 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_platform.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -47,7 +47,6 @@ static void ctucan_platform_set_drvdata(struct device *dev,
*/
static int ctucan_platform_probe(struct platform_device *pdev)
{
- struct resource *res; /* IO mem resources */
struct device *dev = &pdev->dev;
void __iomem *addr;
int ret;
@@ -55,8 +54,7 @@ static int ctucan_platform_probe(struct platform_device *pdev)
int irq;
/* Get the virtual base address for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err;
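
The ctucanfd change is a common cleanup: devm_platform_ioremap_resource() folds the platform_get_resource() + devm_ioremap_resource() pair into a single call. Sketched for a generic platform driver:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *demo_map(struct platform_device *pdev)
{
        /* Equivalent to:
         *   struct resource *res =
         *           platform_get_resource(pdev, IORESOURCE_MEM, 0);
         *   return devm_ioremap_resource(&pdev->dev, res);
         */
        return devm_platform_ioremap_resource(pdev, 0);
}
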
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 7ae80763c960..0b93900b1dfa 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -6,25 +6,81 @@
#include <linux/can/dev.h>
+void can_sjw_set_default(struct can_bittiming *bt)
+{
+ if (bt->sjw)
+ return;
+
+ /* If user space provides no sjw, default to
+ * min(phase_seg1, phase_seg2 / 2), but at least 1.
+ */
+ bt->sjw = max(1U, min(bt->phase_seg1, bt->phase_seg2 / 2));
+}
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ if (bt->sjw > btc->sjw_max) {
+ NL_SET_ERR_MSG_FMT(extack, "sjw: %u greater than max sjw: %u",
+ bt->sjw, btc->sjw_max);
+ return -EINVAL;
+ }
+
+ if (bt->sjw > bt->phase_seg1) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg1: %u",
+ bt->sjw, bt->phase_seg1);
+ return -EINVAL;
+ }
+
+ if (bt->sjw > bt->phase_seg2) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg2: %u",
+ bt->sjw, bt->phase_seg2);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc,
+ struct netlink_ext_ack *extack)
{
+ const unsigned int tseg1 = bt->prop_seg + bt->phase_seg1;
const struct can_priv *priv = netdev_priv(dev);
- unsigned int tseg1, alltseg;
u64 brp64;
+ int err;
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
+ if (tseg1 < btc->tseg1_min) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u less than tseg1-min: %u",
+ tseg1, btc->tseg1_min);
+ return -EINVAL;
+ }
+ if (tseg1 > btc->tseg1_max) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u greater than tseg1-max: %u",
+ tseg1, btc->tseg1_max);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 < btc->tseg2_min) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u less than tseg2-min: %u",
+ bt->phase_seg2, btc->tseg2_min);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 > btc->tseg2_max) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u greater than tseg2-max: %u",
+ bt->phase_seg2, btc->tseg2_max);
+ return -EINVAL;
+ }
+
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
brp64 = (u64)priv->clock.freq * (u64)bt->tq;
if (btc->brp_inc > 1)
@@ -35,12 +91,21 @@ static int can_fixup_bittiming(const struct net_device *dev, struct can_bittimin
brp64 *= btc->brp_inc;
bt->brp = (u32)brp64;
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ if (bt->brp < btc->brp_min) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u less than brp-min: %u",
+ bt->brp, btc->brp_min);
+ return -EINVAL;
+ }
+ if (bt->brp > btc->brp_max) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u greater than brp-max: %u",
+ bt->brp, btc->brp_max);
return -EINVAL;
+ }
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+ bt->bitrate = priv->clock.freq / (bt->brp * can_bit_time(bt));
+ bt->sample_point = ((CAN_SYNC_SEG + tseg1) * 1000) / can_bit_time(bt);
+ bt->tq = DIV_U64_ROUND_CLOSEST(mul_u32_u32(bt->brp, NSEC_PER_SEC),
+ priv->clock.freq);
return 0;
}
@@ -49,7 +114,8 @@ static int can_fixup_bittiming(const struct net_device *dev, struct can_bittimin
static int
can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
unsigned int i;
@@ -58,30 +124,30 @@ can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *b
return 0;
}
+ NL_SET_ERR_MSG_FMT(extack, "bitrate %u bps not supported",
+ bt->brp);
+
return -EINVAL;
}
int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
- int err;
-
/* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
-
- return err;
+ return can_calc_bittiming(dev, bt, btc, extack);
+ if (bt->tq && !bt->bitrate && btc)
+ return can_fixup_bittiming(dev, bt, btc, extack);
+ if (!bt->tq && bt->bitrate && bitrate_const)
+ return can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt, extack);
+
+ return -EINVAL;
}
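
As a numeric check on the reworked arithmetic above, where can_bit_time() is CAN_SYNC_SEG + prop_seg + phase_seg1 + phase_seg2: the example below uses illustrative values (80 MHz clock, brp = 4, a 20-quanta bit time) and is plain user-space C.

#include <stdio.h>

#define CAN_SYNC_SEG 1

int main(void)
{
        unsigned int freq = 80000000, brp = 4;
        unsigned int prop_seg = 7, phase_seg1 = 8, phase_seg2 = 4;
        unsigned int tseg1 = prop_seg + phase_seg1;
        unsigned int bit_time = CAN_SYNC_SEG + tseg1 + phase_seg2; /* 20 tq */

        /* bitrate = freq / (brp * can_bit_time(bt)) -> 1000000 bps */
        printf("bitrate = %u bps\n", freq / (brp * bit_time));
        /* sample point in tenths of a percent -> 800, i.e. 80.0% */
        printf("sample point = %u\n",
               ((CAN_SYNC_SEG + tseg1) * 1000) / bit_time);
        return 0;
}
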
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index d3caa040614d..3809c148fb88 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -63,7 +63,7 @@ can_update_sample_point(const struct can_bittiming_const *btc,
}
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
unsigned int bitrate; /* current bitrate */
@@ -76,6 +76,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
unsigned int best_brp = 0; /* current best value for brp */
unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
+ int err;
/* Use CiA recommended sample points */
if (bt->sample_point) {
@@ -133,13 +134,14 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
do_div(v64, bt->bitrate);
bitrate_error = (u32)v64;
if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%u%% too high",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EINVAL;
}
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%u%%",
+ bitrate_error / 10, bitrate_error % 10);
}
/* real sample point */
@@ -154,23 +156,17 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
bt->phase_seg1 = tseg1 - bt->prop_seg;
bt->phase_seg2 = tseg2;
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
bt->brp = best_brp;
/* real bitrate */
bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+ (bt->brp * can_bit_time(bt));
return 0;
}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index c1956b1e9faf..7f9334a8af50 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -498,6 +498,18 @@ static int can_get_termination(struct net_device *ndev)
return 0;
}
+static bool
+can_bittiming_const_valid(const struct can_bittiming_const *btc)
+{
+ if (!btc)
+ return true;
+
+ if (!btc->sjw_max)
+ return false;
+
+ return true;
+}
+
/* Register the CAN network device */
int register_candev(struct net_device *dev)
{
@@ -518,6 +530,15 @@ int register_candev(struct net_device *dev)
if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
return -EINVAL;
+ /* We only support either fixed bit rates or bit timing const. */
+ if ((priv->bitrate_const || priv->data_bitrate_const) &&
+ (priv->bittiming_const || priv->data_bittiming_const))
+ return -EINVAL;
+
+ if (!can_bittiming_const_valid(priv->bittiming_const) ||
+ !can_bittiming_const_valid(priv->data_bittiming_const))
+ return -EINVAL;
+
if (!priv->termination_const) {
err = can_get_termination(dev);
if (err)
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 8efa22d9f214..036d85ef07f5 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -36,10 +36,24 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
+static int can_validate_bittiming(const struct can_bittiming *bt,
+ struct netlink_ext_ack *extack)
+{
+ /* sample point is in one-tenth of a percent */
+ if (bt->sample_point >= 1000) {
+ NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
bool is_can_fd = false;
+ int err;
/* Make sure that valid CAN FD configurations always consist of
* - nominal/arbitration bittiming
@@ -51,6 +65,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (!data)
return 0;
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
@@ -71,7 +94,6 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
*/
if (data[IFLA_CAN_TDC]) {
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
- int err;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
data[IFLA_CAN_TDC],
@@ -102,6 +124,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return -EOPNOTSUPP;
}
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -184,13 +215,15 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_get_bittiming(dev, &bt,
priv->bittiming_const,
priv->bitrate_const,
- priv->bitrate_const_cnt);
+ priv->bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+ bt.bitrate, priv->bitrate_max);
return -EINVAL;
}
@@ -288,13 +321,15 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_get_bittiming(dev, &dbt,
priv->data_bittiming_const,
priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
+ priv->data_bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
return -EINVAL;
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index f6fa7157b99b..ef4e1b9a9e1e 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,23 +21,23 @@
* wherever it is modified to a readable name.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/can/dev.h>
-#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/iopoll.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/types.h>
#define RCANFD_DRV_NAME "rcar_canfd"
@@ -82,8 +82,8 @@
#define RCANFD_GERFL_DEF BIT(0)
#define RCANFD_GERFL_ERR(gpriv, x) \
- ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \
- RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
+ ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \
+ RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
RCANFD_GERFL_MES | \
((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
@@ -91,16 +91,16 @@
/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
- (((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
- (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
+ (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \
+ (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8)))
#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
- (((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
- reg_v3u(gpriv, 0x1ff, 0xff))
+ (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \
+ reg_gen4(gpriv, 0x1ff, 0xff))
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_v3u(gpriv, 0x7f, 0x1f))
+#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f))
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -118,13 +118,13 @@
/* RSCFDnCFDCmNCFG - CAN FD only */
#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 25, 24))
+ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24))
#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0xff, 0x7f)) << reg_v3u(gpriv, 17, 16))
+ (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16))
#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 10, 11))
+ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11))
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
@@ -186,19 +186,19 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
+#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24)
#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x0f, 0x7)) << reg_v3u(gpriv, 16, 20))
+ (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20))
#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x1f, 0xf)) << reg_v3u(gpriv, 8, 16))
+ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16))
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCmFDCFG */
-#define RCANFD_FDCFG_CLOE BIT(30)
-#define RCANFD_FDCFG_FDOE BIT(28)
+#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
+#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
@@ -233,10 +233,11 @@
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(gpriv, x) (((x) & 0xf) << reg_v3u(gpriv, 16, 20))
-#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_v3u(gpriv, 8, 16))
+#define RCANFD_CFCC_CFTML(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16))
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_v3u(gpriv, 21, 8))
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8))
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -304,7 +305,7 @@
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(gpriv, x) (reg_v3u(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
@@ -314,13 +315,13 @@
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
#define RCANFD_CFCC(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
#define RCANFD_CFSTS(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
#define RCANFD_CFPCTR(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -428,16 +429,15 @@
/* RSCFDnRPGACCr */
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
-/* R-Car V3U Classical and CAN FD mode specific register map */
-#define RCANFD_V3U_CFDCFG (0x1314)
-#define RCANFD_V3U_DCFG(m) (0x1400 + (0x20 * (m)))
+/* R-Car Gen4 Classical and CAN FD mode specific register map */
+#define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m)))
-#define RCANFD_V3U_GAFL_OFFSET (0x1800)
+#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
+#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m)))
#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
@@ -453,7 +453,7 @@
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET(gpriv) reg_v3u(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000)
#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
@@ -461,7 +461,7 @@
(RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET(gpriv) reg_v3u(gpriv, 0x6400, 0x3400)
+#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400)
#define RCANFD_F_CFID(gpriv, ch, idx) \
(RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
@@ -597,28 +597,28 @@ static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
.shared_global_irqs = 1,
};
+static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .max_channels = 8,
+ .postdiv = 2,
+ .shared_global_irqs = 1,
+};
+
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
.max_channels = 2,
.postdiv = 1,
.multi_channel_irqs = 1,
};
-static const struct rcar_canfd_hw_info r8a779a0_hw_info = {
- .max_channels = 8,
- .postdiv = 2,
- .shared_global_irqs = 1,
-};
-
/* Helper functions */
-static inline bool is_v3u(struct rcar_canfd_global *gpriv)
+static inline bool is_gen4(struct rcar_canfd_global *gpriv)
{
- return gpriv->info == &r8a779a0_hw_info;
+ return gpriv->info == &rcar_gen4_hw_info;
}
-static inline u32 reg_v3u(struct rcar_canfd_global *gpriv,
- u32 v3u, u32 not_v3u)
+static inline u32 reg_gen4(struct rcar_canfd_global *gpriv,
+ u32 gen4, u32 not_gen4)
{
- return is_v3u(gpriv) ? v3u : not_v3u;
+ return is_gen4(gpriv) ? gen4 : not_gen4;
}
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
@@ -688,13 +688,14 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
{
- if (is_v3u(gpriv)) {
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
- RCANFD_FDCFG_FDOE);
- else
- rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
- RCANFD_FDCFG_CLOE);
+ if (is_gen4(gpriv)) {
+ u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
+ : RCANFD_GEN4_FDCFG_CLOE;
+
+ for_each_set_bit(ch, &gpriv->channels_mask,
+ gpriv->info->max_channels)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
+ val);
} else {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
@@ -814,8 +815,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
/* Write number of rules for channel */
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
- if (is_v3u(gpriv))
- offset = RCANFD_V3U_GAFL_OFFSET;
+ if (is_gen4(gpriv))
+ offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
else
@@ -1343,17 +1344,14 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg2 = dbt->phase_seg2 - 1;
cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+ RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
- if (is_v3u(gpriv))
- rcar_canfd_write(priv->base, RCANFD_V3U_DCFG(ch), cfg);
- else
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- if (is_v3u(gpriv)) {
+ if (is_gen4(gpriv)) {
cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
RCANFD_NCFG_NBRP(brp) |
RCANFD_NCFG_NSJW(gpriv, sjw) |
@@ -1510,7 +1508,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
rcar_canfd_write(priv->base,
RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
@@ -1569,7 +1567,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
@@ -1620,7 +1618,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
- else if (is_v3u(gpriv))
+ else if (is_gen4(gpriv))
rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
@@ -1717,13 +1715,14 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
{
const struct rcar_canfd_hw_info *info = gpriv->info;
struct platform_device *pdev = gpriv->pdev;
+ struct device *dev = &pdev->dev;
struct rcar_canfd_channel *priv;
struct net_device *ndev;
int err = -ENODEV;
ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
+ dev_err(dev, "alloc_candev() failed\n");
return -ENOMEM;
}
priv = netdev_priv(ndev);
@@ -1736,7 +1735,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->channel = ch;
priv->gpriv = gpriv;
priv->can.clock.freq = fcan_freq;
- dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
+ dev_info(dev, "can_clk rate is %u\n", priv->can.clock.freq);
if (info->multi_channel_irqs) {
char *irq_name;
@@ -1755,31 +1754,31 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_err", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_err",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, err_irq,
+ err = devm_request_irq(dev, err_irq,
rcar_canfd_channel_err_interrupt, 0,
irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq CH Err(%d) failed, error %d\n",
err_irq, err);
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_trx", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_trx",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, tx_irq,
+ err = devm_request_irq(dev, tx_irq,
rcar_canfd_channel_tx_interrupt, 0,
irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq Tx (%d) failed, error %d\n",
tx_irq, err);
goto fail;
}
@@ -1803,7 +1802,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->can.do_set_mode = rcar_canfd_do_set_mode;
priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
RCANFD_NAPI_WEIGHT);
@@ -1811,11 +1810,10 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev,
- "register_candev() failed, error %d\n", err);
+ dev_err(dev, "register_candev() failed, error %d\n", err);
goto fail_candev;
}
- dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
+ dev_info(dev, "device registered (channel %u)\n", priv->channel);
return 0;
fail_candev:
@@ -1839,6 +1837,7 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
static int rcar_canfd_probe(struct platform_device *pdev)
{
const struct rcar_canfd_hw_info *info;
+ struct device *dev = &pdev->dev;
void __iomem *addr;
u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
@@ -1850,14 +1849,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
char name[9] = "channelX";
int i;
- info = of_device_get_match_data(&pdev->dev);
+ info = of_device_get_match_data(dev);
- if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
+ if (of_property_read_bool(dev->of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
for (i = 0; i < info->max_channels; ++i) {
name[7] = '0' + i;
- of_child = of_get_child_by_name(pdev->dev.of_node, name);
+ of_child = of_get_child_by_name(dev->of_node, name);
if (of_child && of_device_is_available(of_child))
channels_mask |= BIT(i);
of_node_put(of_child);
@@ -1890,7 +1889,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
/* Global controller context */
- gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
+ gpriv = devm_kzalloc(dev, sizeof(*gpriv), GFP_KERNEL);
if (!gpriv)
return -ENOMEM;
@@ -1899,32 +1898,30 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->fdmode = fdmode;
gpriv->info = info;
- gpriv->rstc1 = devm_reset_control_get_optional_exclusive(&pdev->dev,
- "rstp_n");
+ gpriv->rstc1 = devm_reset_control_get_optional_exclusive(dev, "rstp_n");
if (IS_ERR(gpriv->rstc1))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1),
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc1),
"failed to get rstp_n\n");
- gpriv->rstc2 = devm_reset_control_get_optional_exclusive(&pdev->dev,
- "rstc_n");
+ gpriv->rstc2 = devm_reset_control_get_optional_exclusive(dev, "rstc_n");
if (IS_ERR(gpriv->rstc2))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2),
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc2),
"failed to get rstc_n\n");
/* Peripheral clock */
- gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
+ gpriv->clkp = devm_clk_get(dev, "fck");
if (IS_ERR(gpriv->clkp))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->clkp),
+ return dev_err_probe(dev, PTR_ERR(gpriv->clkp),
"cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
*/
- gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ gpriv->can_clk = devm_clk_get(dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
- gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
+ gpriv->can_clk = devm_clk_get(dev, "canfd");
if (IS_ERR(gpriv->can_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->can_clk),
+ return dev_err_probe(dev, PTR_ERR(gpriv->can_clk),
"cannot get canfd clock\n");
gpriv->fcan = RCANFD_CANFDCLK;
@@ -1947,39 +1944,38 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Request IRQ that's common for both channels */
if (info->shared_global_irqs) {
- err = devm_request_irq(&pdev->dev, ch_irq,
+ err = devm_request_irq(dev, ch_irq,
rcar_canfd_channel_interrupt, 0,
"canfd.ch_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
ch_irq, err);
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_irq,
- rcar_canfd_global_interrupt, 0,
- "canfd.g_int", gpriv);
+ err = devm_request_irq(dev, g_irq, rcar_canfd_global_interrupt,
+ 0, "canfd.g_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_irq, err);
goto fail_dev;
}
} else {
- err = devm_request_irq(&pdev->dev, g_recc_irq,
+ err = devm_request_irq(dev, g_recc_irq,
rcar_canfd_global_receive_fifo_interrupt, 0,
"canfd.g_recc", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_recc_irq, err);
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_err_irq,
+ err = devm_request_irq(dev, g_err_irq,
rcar_canfd_global_err_interrupt, 0,
"canfd.g_err", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_err_irq, err);
goto fail_dev;
}
@@ -1997,14 +1993,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Enable peripheral clock for register access */
err = clk_prepare_enable(gpriv->clkp);
if (err) {
- dev_err(&pdev->dev,
- "failed to enable peripheral clock, error %d\n", err);
+ dev_err(dev, "failed to enable peripheral clock, error %d\n",
+ err);
goto fail_reset;
}
err = rcar_canfd_reset_controller(gpriv);
if (err) {
- dev_err(&pdev->dev, "reset controller failed\n");
+ dev_err(dev, "reset controller failed\n");
goto fail_clk;
}
@@ -2034,7 +2030,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GNOPM), 2, 500000);
if (err) {
- dev_err(&pdev->dev, "global operational mode failed\n");
+ dev_err(dev, "global operational mode failed\n");
goto fail_mode;
}
@@ -2045,7 +2041,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, gpriv);
- dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n",
+ dev_info(dev, "global operational state (clk %d, fdmode %d)\n",
gpriv->fcan, gpriv->fdmode);
return 0;
@@ -2099,9 +2095,10 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
rcar_canfd_resume);
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
+ { .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
+ { .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
- { .compatible = "renesas,r8a779a0-canfd", .data = &r8a779a0_hw_info },
{ }
};
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 4ab91759a5c6..c56e27223e5f 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -3,6 +3,7 @@
* Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
* Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ * Copyright (C) 2023 EMS Dr. Thomas Wuensche
*/
#include <linux/kernel.h>
@@ -19,12 +20,14 @@
#define DRV_NAME "ems_pci"
-MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
+MODULE_AUTHOR("Sebastian Haas <support@ems-wuensche.com>");
+MODULE_AUTHOR("Gerhard Uttenthaler <uttenthaler@ems-wuensche.com>");
MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");
MODULE_LICENSE("GPL v2");
#define EMS_PCI_V1_MAX_CHAN 2
#define EMS_PCI_V2_MAX_CHAN 4
+#define EMS_PCI_V3_MAX_CHAN 4
#define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN
struct ems_pci_card {
@@ -40,8 +43,7 @@ struct ems_pci_card {
#define EMS_PCI_CAN_CLOCK (16000000 / 2)
-/*
- * Register definitions and descriptions are from LinCAN 0.3.3.
+/* Register definitions and descriptions are from LinCAN 0.3.3.
*
* PSB4610 PITA-2 bridge control registers
*/
@@ -52,8 +54,7 @@ struct ems_pci_card {
#define PITA2_MISC 0x1c /* Miscellaneous Register */
#define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */
-/*
- * Register definitions for the PLX 9030
+/* Register definitions for the PLX 9030
*/
#define PLX_ICSR 0x4c /* Interrupt Control/Status register */
#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */
@@ -62,8 +63,16 @@ struct ems_pci_card {
#define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \
PLX_ICSR_LINTI1_CLR)
-/*
- * The board configuration is probably following:
+/* Register definitions for the ASIX99100
+ */
+#define ASIX_LINTSR 0x28 /* Interrupt Control/Status register */
+#define ASIX_LINTSR_INT0AC BIT(0) /* Writing 1 enables or clears interrupt */
+
+#define ASIX_LIEMR 0x24 /* Local Interrupt Enable / Miscellaneous Register */
+#define ASIX_LIEMR_L0EINTEN BIT(16) /* Local INT0 input assertion enable */
+#define ASIX_LIEMR_LRST BIT(14) /* Local Reset assert */
+
+/* The board configuration is probably the following:
* RX1 is connected to ground.
* TX1 is not connected.
* CLKO is not connected.
@@ -72,23 +81,40 @@ struct ems_pci_card {
*/
#define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
-/*
- * In the CDR register, you should set CBP to 1.
+/* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
* (meaning direct oscillator output) because the second SJA1000 chip
* is driven by the first one CLKOUT output.
*/
#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
-#define EMS_PCI_V1_BASE_BAR 1
-#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
-#define EMS_PCI_V2_BASE_BAR 2
-#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
-#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
-#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+#define EMS_PCI_V1_BASE_BAR 1
+#define EMS_PCI_V1_CONF_BAR 0
+#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
+#define EMS_PCI_V1_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V1_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V2_BASE_BAR 2
+#define EMS_PCI_V2_CONF_BAR 0
+#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
+#define EMS_PCI_V2_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V2_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V3_BASE_BAR 0
+#define EMS_PCI_V3_CONF_BAR 5
+#define EMS_PCI_V3_CONF_SIZE 128 /* size of ASIX control area */
+#define EMS_PCI_V3_CAN_BASE_OFFSET 0x00 /* offset where the controllers start */
+#define EMS_PCI_V3_CAN_CTRL_SIZE 0x100 /* memory size for each controller */
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
+#ifndef PCI_VENDOR_ID_ASIX
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_9110 0x9110
+#define PCI_SUBVENDOR_ID_ASIX 0xa000
+#endif
+#define PCI_SUBDEVICE_ID_EMS 0x4010
+
static const struct pci_device_id ems_pci_tbl[] = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
@@ -96,12 +122,13 @@ static const struct pci_device_id ems_pci_tbl[] = {
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000},
/* CPC-104P v2 */
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
+ /* CPC-PCIe v3 */
+ {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_9110, PCI_SUBVENDOR_ID_ASIX, PCI_SUBDEVICE_ID_EMS},
{0,}
};
MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
-/*
- * Helper to read internal registers from card logic (not CAN)
+/* Helper to read internal registers from card logic (not CAN)
*/
static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port)
{
@@ -146,8 +173,25 @@ static void ems_pci_v2_post_irq(const struct sja1000_priv *priv)
writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
}
-/*
- * Check if a CAN controller is present at the specified location
+static u8 ems_pci_v3_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void ems_pci_v3_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val)
+{
+ writeb(val, priv->reg_base + port);
+}
+
+static void ems_pci_v3_post_irq(const struct sja1000_priv *priv)
+{
+ struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+}
+
+/* Check if a CAN controller is present at the specified location
* by trying to set 'em into the PeliCAN mode
*/
static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
@@ -185,10 +229,10 @@ static void ems_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- if (card->base_addr != NULL)
+ if (card->base_addr)
pci_iounmap(card->pci_dev, card->base_addr);
- if (card->conf_addr != NULL)
+ if (card->conf_addr)
pci_iounmap(card->pci_dev, card->conf_addr);
kfree(card);
@@ -202,8 +246,7 @@ static void ems_pci_card_reset(struct ems_pci_card *card)
writeb(0, card->base_addr);
}
-/*
- * Probe PCI device for EMS CAN signature and register each available
+/* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
static int ems_pci_add_card(struct pci_dev *pdev,
@@ -212,7 +255,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pci_card *card;
- int max_chan, conf_size, base_bar;
+ int max_chan, conf_size, base_bar, conf_bar;
int err, i;
/* Enabling PCI device */
@@ -222,8 +265,8 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
/* Allocating card structures to hold addresses, ... */
- card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
- if (card == NULL) {
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
pci_disable_device(pdev);
return -ENOMEM;
}
@@ -234,27 +277,35 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels = 0;
- if (pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (pdev->vendor == PCI_VENDOR_ID_ASIX) {
+ card->version = 3; /* CPC-PCIe v3 */
+ max_chan = EMS_PCI_V3_MAX_CHAN;
+ base_bar = EMS_PCI_V3_BASE_BAR;
+ conf_bar = EMS_PCI_V3_CONF_BAR;
+ conf_size = EMS_PCI_V3_CONF_SIZE;
+ } else if (pdev->vendor == PCI_VENDOR_ID_PLX) {
card->version = 2; /* CPC-PCI v2 */
max_chan = EMS_PCI_V2_MAX_CHAN;
base_bar = EMS_PCI_V2_BASE_BAR;
+ conf_bar = EMS_PCI_V2_CONF_BAR;
conf_size = EMS_PCI_V2_CONF_SIZE;
} else {
card->version = 1; /* CPC-PCI v1 */
max_chan = EMS_PCI_V1_MAX_CHAN;
base_bar = EMS_PCI_V1_BASE_BAR;
+ conf_bar = EMS_PCI_V1_CONF_BAR;
conf_size = EMS_PCI_V1_CONF_SIZE;
}
/* Remap configuration space and controller memory area */
- card->conf_addr = pci_iomap(pdev, 0, conf_size);
- if (card->conf_addr == NULL) {
+ card->conf_addr = pci_iomap(pdev, conf_bar, conf_size);
+ if (!card->conf_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
- if (card->base_addr == NULL) {
+ if (!card->base_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -276,12 +327,20 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
}
+ if (card->version == 3) {
+ /* ASIX chip asserts local reset to CAN controllers
+ * after bootup until it is deasserted
+ */
+ writel(readl(card->conf_addr + ASIX_LIEMR) & ~ASIX_LIEMR_LRST,
+ card->conf_addr + ASIX_LIEMR);
+ }
+
ems_pci_card_reset(card);
/* Detect available channels */
for (i = 0; i < max_chan; i++) {
dev = alloc_sja1000dev(0);
- if (dev == NULL) {
+ if (!dev) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -292,16 +351,25 @@ static int ems_pci_add_card(struct pci_dev *pdev,
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
- priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
- + (i * EMS_PCI_CAN_CTRL_SIZE);
+
if (card->version == 1) {
priv->read_reg = ems_pci_v1_read_reg;
priv->write_reg = ems_pci_v1_write_reg;
priv->post_irq = ems_pci_v1_post_irq;
- } else {
+ priv->reg_base = card->base_addr + EMS_PCI_V1_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V1_CAN_CTRL_SIZE);
+ } else if (card->version == 2) {
priv->read_reg = ems_pci_v2_read_reg;
priv->write_reg = ems_pci_v2_write_reg;
priv->post_irq = ems_pci_v2_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V2_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V2_CAN_CTRL_SIZE);
+ } else {
+ priv->read_reg = ems_pci_v3_read_reg;
+ priv->write_reg = ems_pci_v3_write_reg;
+ priv->post_irq = ems_pci_v3_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V3_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V3_CAN_CTRL_SIZE);
}
/* Check if channel is present */
@@ -313,20 +381,28 @@ static int ems_pci_add_card(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->dev_id = i;
- if (card->version == 1)
+ if (card->version == 1) {
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
card->conf_addr + PITA2_ICR);
- else
+ } else if (card->version == 2) {
/* enable IRQ in PLX 9030 */
writel(PLX_ICSR_ENA_CLR,
card->conf_addr + PLX_ICSR);
+ } else {
+ /* Enable IRQ in AX99100 */
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+ /* Enable local INT0 input enable */
+ writel(readl(card->conf_addr + ASIX_LIEMR) | ASIX_LIEMR_L0EINTEN,
+ card->conf_addr + ASIX_LIEMR);
+ }
/* Register SJA1000 device */
err = register_sja1000dev(dev);
if (err) {
- dev_err(&pdev->dev, "Registering device failed "
- "(err=%d)\n", err);
+ dev_err(&pdev->dev,
+ "Registering device failed: %pe\n",
+ ERR_PTR(err));
free_sja1000dev(dev);
goto failure_cleanup;
}
@@ -334,7 +410,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels++;
dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
- i + 1, priv->reg_base, dev->irq);
+ i + 1, priv->reg_base, dev->irq);
} else {
free_sja1000dev(dev);
}
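The probe path above selects the per-generation parameters (channel count, register BAR, config BAR, config-area size) with an if/else chain keyed on the PCI vendor ID. As a reading aid, the same v1/v2/v3 mapping in table form; this is a sketch only, struct ems_pci_cfg is a hypothetical helper and not part of the driver:

struct ems_pci_cfg {
	int max_chan;
	int base_bar;
	int conf_bar;
	int conf_size;
};

/* Indexed by card->version, values from the EMS_PCI_V*_ defines above. */
static const struct ems_pci_cfg ems_pci_cfgs[] = {
	[1] = { EMS_PCI_V1_MAX_CHAN, EMS_PCI_V1_BASE_BAR,
		EMS_PCI_V1_CONF_BAR, EMS_PCI_V1_CONF_SIZE },
	[2] = { EMS_PCI_V2_MAX_CHAN, EMS_PCI_V2_BASE_BAR,
		EMS_PCI_V2_CONF_BAR, EMS_PCI_V2_CONF_SIZE },
	[3] = { EMS_PCI_V3_MAX_CHAN, EMS_PCI_V3_BASE_BAR,
		EMS_PCI_V3_CONF_BAR, EMS_PCI_V3_CONF_SIZE },
};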
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index bf3f0f150199..bfe4caa0c99d 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -30,11 +30,23 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
- data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
+ data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
memcpy(data, &val_le32, len);
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ } else if (len == 1) {
+ u16 crc;
+
+ /* CRC */
+ len += sizeof(write_reg_buf->safe.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->safe.crc);
+ } else {
u16 crc;
mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
@@ -46,8 +58,6 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
/* Total length */
len += sizeof(write_reg_buf->crc.crc);
- } else {
- len += sizeof(write_reg_buf->nocrc.cmd);
}
return len;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 2b0309fedfac..7024ff0cc2c0 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -504,6 +504,11 @@ union mcp251xfd_write_reg_buf {
u8 data[4];
__be16 crc;
} crc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[1];
+ __be16 crc;
+ } safe;
} ____cacheline_aligned;
struct mcp251xfd_tx_obj {
@@ -759,6 +764,13 @@ mcp251xfd_spi_cmd_write_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd,
}
static inline void
+mcp251xfd_spi_cmd_write_safe_set_addr(struct mcp251xfd_buf_cmd *cmd,
+ u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE | addr);
+}
+
+static inline void
mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
u16 addr, u16 len)
{
@@ -769,14 +781,20 @@ mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
static inline u8 *
mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
union mcp251xfd_write_reg_buf *write_reg_buf,
- u16 addr)
+ u16 addr, u8 len)
{
u8 *data;
if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
- mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
- addr);
- data = write_reg_buf->crc.data;
+ if (len == 1) {
+ mcp251xfd_spi_cmd_write_safe_set_addr(&write_reg_buf->safe.cmd,
+ addr);
+ data = write_reg_buf->safe.data;
+ } else {
+ mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
+ addr);
+ data = write_reg_buf->crc.data;
+ }
} else {
mcp251xfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd,
addr);
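The write path now knows three framings, selected by the CRC quirk and the transfer length. A sketch of the resulting SPI buffers, assuming struct mcp251xfd_buf_cmd is the 2-byte command/address word and the crc variant carries an extra length byte:

/* nocrc: | 2-byte cmd/addr | data (1..4 bytes)              |
 * crc:   | 2-byte cmd/addr | 1-byte len | data | 2-byte CRC |
 * safe:  | 2-byte cmd/addr | 1 data byte | 2-byte CRC       |
 *
 * For the new single-byte "safe" write the total is therefore
 * 2 + 1 + 2 = 5 bytes, and the CRC is computed over the command
 * plus the data byte, matching the length arithmetic in
 * mcp251xfd_cmd_prepare_write_reg() above.
 */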
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 42323f5e6f3a..55b36973952d 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -127,7 +127,15 @@ struct rx_msg {
u8 dlc;
__le32 ts;
__le32 id; /* upper 3 bits contain flags */
- u8 data[8];
+ union {
+ u8 data[8];
+ struct {
+ u8 status; /* CAN Controller Status */
+ u8 ecc; /* Error Capture Register */
+ u8 rec; /* RX Error Counter */
+ u8 tec; /* TX Error Counter */
+ } ev_can_err_ext; /* For ESD_EV_CAN_ERROR_EXT */
+ };
};
struct tx_msg {
@@ -229,51 +237,52 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK;
if (id == ESD_EV_CAN_ERROR_EXT) {
- u8 state = msg->msg.rx.data[0];
- u8 ecc = msg->msg.rx.data[1];
- u8 rxerr = msg->msg.rx.data[2];
- u8 txerr = msg->msg.rx.data[3];
+ u8 state = msg->msg.rx.ev_can_err_ext.status;
+ u8 ecc = msg->msg.rx.ev_can_err_ext.ecc;
+ u8 rxerr = msg->msg.rx.ev_can_err_ext.rec;
+ u8 txerr = msg->msg.rx.ev_can_err_ext.tec;
netdev_dbg(priv->netdev,
"CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
msg->msg.rx.dlc, state, ecc, rxerr, txerr);
skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb == NULL) {
- stats->rx_dropped++;
- return;
- }
if (state != priv->old_state) {
+ enum can_state tx_state, rx_state;
+ enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+
priv->old_state = state;
switch (state & ESD_BUSSTATE_MASK) {
case ESD_BUSSTATE_BUSOFF:
- priv->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- priv->can.can_stats.bus_off++;
+ new_state = CAN_STATE_BUS_OFF;
can_bus_off(priv->netdev);
break;
case ESD_BUSSTATE_WARN:
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
+ new_state = CAN_STATE_ERROR_WARNING;
break;
case ESD_BUSSTATE_ERRPASSIVE:
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
+ new_state = CAN_STATE_ERROR_PASSIVE;
break;
default:
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ new_state = CAN_STATE_ERROR_ACTIVE;
txerr = 0;
rxerr = 0;
break;
}
- } else {
+
+ if (new_state != priv->can.state) {
+ tx_state = (txerr >= rxerr) ? new_state : 0;
+ rx_state = (txerr <= rxerr) ? new_state : 0;
+ can_change_state(priv->netdev, cf,
+ tx_state, rx_state);
+ }
+ } else if (skb) {
priv->can.can_stats.bus_error++;
stats->rx_errors++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR |
- CAN_ERR_CNT;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & SJA1000_ECC_MASK) {
case SJA1000_ECC_BIT:
@@ -286,7 +295,6 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
break;
}
@@ -294,20 +302,22 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
if (!(ecc & SJA1000_ECC_DIR))
cf->data[2] |= CAN_ERR_PROT_TX;
- if (priv->can.state == CAN_STATE_ERROR_WARNING ||
- priv->can.state == CAN_STATE_ERROR_PASSIVE) {
- cf->data[1] = (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ /* Bit stream position in the CAN frame at which the error was detected */
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
}
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- netif_rx(skb);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ }
}
}
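With this rework, bus-state transitions are reported through the CAN core instead of being open-coded per state. A minimal standalone sketch of the mapping; esd_usb_report_state() is a hypothetical wrapper, not driver code:

static void esd_usb_report_state(struct net_device *netdev,
				 struct can_frame *cf,
				 enum can_state new_state,
				 u8 txerr, u8 rxerr)
{
	/* The direction whose error counter dominates carries the new
	 * state; the other direction is reported as 0, which is
	 * CAN_STATE_ERROR_ACTIVE. can_change_state() takes the worse
	 * of the two as the device state, bumps the matching can_stats
	 * counter and fills the error frame in one place.
	 */
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	can_change_state(netdev, cf, tx_state, rx_state);
}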
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 687dd542f7f6..b211b6e283a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -9,10 +9,11 @@
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
#include <asm/unaligned.h>
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -381,23 +382,42 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
}
/*
- * read device id from device
+ * read can channel id from device
*/
-static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
+static int pcan_usb_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id)
{
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
- netdev_err(dev->netdev, "getting device id failure: %d\n", err);
+ netdev_err(dev->netdev, "getting can channel id failure: %d\n", err);
else
- *device_id = args[0];
+ *can_ch_id = args[0];
return err;
}
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN];
+
+ /* this kind of device supports 8-bit values only */
+ if (can_ch_id > U8_MAX)
+ return -EINVAL;
+
+ /* during the flash process the device disconnects for ~1.25 s:
+ * prohibit access while the interface is UP
+ */
+ if (dev->netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ args[0] = can_ch_id;
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_DEVID, PCAN_USB_SET, args);
+}
+
/*
* update current time ref with received timestamp
*/
@@ -963,9 +983,18 @@ static int pcan_usb_set_phys_id(struct net_device *netdev,
return err;
}
+/* This device only handles an 8-bit CAN channel id. */
+static int pcan_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u8);
+}
+
static const struct ethtool_ops pcan_usb_ethtool_ops = {
.set_phys_id = pcan_usb_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = pcan_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1017,7 +1046,8 @@ const struct peak_usb_adapter pcan_usb = {
.dev_init = pcan_usb_init,
.dev_set_bus = pcan_usb_write_mode,
.dev_set_bittiming = pcan_usb_set_bittiming,
- .dev_get_device_id = pcan_usb_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_set_can_channel_id,
.dev_decode_buf = pcan_usb_decode_buf,
.dev_encode_msg = pcan_usb_encode_msg,
.dev_start = pcan_usb_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1d996d3320fe..d881e1d30183 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -8,13 +8,15 @@
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
+#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/usb.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -53,6 +55,26 @@ static const struct usb_device_id peak_usb_table[] = {
MODULE_DEVICE_TABLE(usb, peak_usb_table);
+static ssize_t can_channel_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct peak_usb_device *peak_dev = netdev_priv(netdev);
+
+ return sysfs_emit(buf, "%08X\n", peak_dev->can_channel_id);
+}
+static DEVICE_ATTR_RO(can_channel_id);
+
+/* mutable to avoid cast in attribute_group */
+static struct attribute *peak_usb_sysfs_attrs[] = {
+ &dev_attr_can_channel_id.attr,
+ NULL,
+};
+
+static const struct attribute_group peak_usb_sysfs_group = {
+ .name = "peak_usb",
+ .attrs = peak_usb_sysfs_attrs,
+};
+
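Once peak_usb_create_dev() (below) hooks the group into the netdev's sysfs_groups, the cached ID becomes readable from user space. Given the group and attribute names, the expected path is (an assumption derived from the names above, not stated in the patch):

	/sys/class/net/<iface>/peak_usb/can_channel_id

and the value prints as eight hex digits per the "%08X" format.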
/*
* dump memory
*/
@@ -808,6 +830,86 @@ static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+/* CAN-USB devices generally handle 32-bit CAN channel IDs.
+ * If one doesn't, it has to override this function.
+ */
+int peak_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u32);
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id() operation. It is used
+ * here to fill the data buffer with the user-defined CAN channel ID.
+ */
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err)
+ return err;
+
+ /* ethtool operates on individual bytes. The byte order of the CAN
+ * channel id in memory depends on the kernel architecture. We
+ * convert the CAN channel id back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy(data, (u8 *)&ch_id_le + eeprom->offset, eeprom->len);
+
+ /* update cached value */
+ dev->can_channel_id = ch_id;
+ return err;
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id()/dev_set_can_channel_id()
+ * operations. They are used here to set the new user-defined CAN channel ID.
+ */
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ /* first, read the current user defined CAN channel ID */
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to init CAN channel id (err %d)\n", err);
+ return err;
+ }
+
+ /* Update the value with the user-given bytes.
+ * ethtool operates on individual bytes. The byte order of the CAN
+ * channel ID in memory depends on the kernel architecture. We
+ * convert the CAN channel ID back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy((u8 *)&ch_id_le + eeprom->offset, data, eeprom->len);
+ ch_id = le32_to_cpu(ch_id_le);
+
+ /* flash the new value now */
+ err = dev->adapter->dev_set_can_channel_id(dev, ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to write new CAN channel id (err %d)\n",
+ err);
+ return err;
+ }
+
+ /* update cached value with the new one */
+ dev->can_channel_id = ch_id;
+
+ return 0;
+}
+
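Both helpers funnel the 32-bit ID through a little-endian scratch word, so the byte offsets ethtool operates on are independent of the host architecture. A small worked example with hypothetical values, reading 2 bytes at offset 1:

	u32 ch_id = 0x12345678;
	__le32 ch_id_le = cpu_to_le32(ch_id);
	u8 buf[2];

	/* LE layout in memory is 78 56 34 12, so offset 1 yields
	 * 0x56 followed by 0x34 on every architecture.
	 */
	memcpy(buf, (u8 *)&ch_id_le + 1, sizeof(buf));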
int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
info->so_timestamping =
@@ -881,6 +983,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
/* add ethtool support */
netdev->ethtool_ops = peak_usb_adapter->ethtool_ops;
+ /* register peak_usb sysfs files */
+ netdev->sysfs_groups[0] = &peak_usb_sysfs_group;
+
init_usb_anchor(&dev->rx_submitted);
init_usb_anchor(&dev->tx_submitted);
@@ -921,12 +1026,11 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
goto adap_dev_free;
}
- /* get device number early */
- if (dev->adapter->dev_get_device_id)
- dev->adapter->dev_get_device_id(dev, &dev->device_number);
+ /* get CAN channel id early */
+ dev->adapter->dev_get_can_channel_id(dev, &dev->can_channel_id);
- netdev_info(netdev, "attached to %s channel %u (device %u)\n",
- peak_usb_adapter->name, ctrl_idx, dev->device_number);
+ netdev_info(netdev, "attached to %s channel %u (device 0x%08X)\n",
+ peak_usb_adapter->name, ctrl_idx, dev->can_channel_id);
return 0;
@@ -964,7 +1068,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev->state &= ~PCAN_USB_STATE_CONNECTED;
strscpy(name, netdev->name, IFNAMSIZ);
- unregister_netdev(netdev);
+ unregister_candev(netdev);
kfree(dev->cmd_buf);
dev->next_siblings = NULL;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index f6bdd8b3f290..980e315186cf 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -60,7 +60,8 @@ struct peak_usb_adapter {
int (*dev_set_data_bittiming)(struct peak_usb_device *dev,
struct can_bittiming *bt);
int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
- int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
+ int (*dev_get_can_channel_id)(struct peak_usb_device *dev, u32 *can_ch_id);
+ int (*dev_set_can_channel_id)(struct peak_usb_device *dev, u32 can_ch_id);
int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
int (*dev_encode_msg)(struct peak_usb_device *dev, struct sk_buff *skb,
u8 *obuf, size_t *size);
@@ -122,7 +123,8 @@ struct peak_usb_device {
u8 *cmd_buf;
struct usb_anchor rx_submitted;
- u32 device_number;
+ /* equivalent to the device ID in the Windows API */
+ u32 can_channel_id;
u8 device_rev;
u8 ep_msg_in;
@@ -147,4 +149,10 @@ void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+/* common 32-bit CAN channel ID ethtool management */
+int peak_usb_get_eeprom_len(struct net_device *netdev);
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 2ea1500df393..4d85b29a17b7 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -4,10 +4,10 @@
*
* Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -147,6 +147,15 @@ struct __packed pcan_ufd_ovr_msg {
u8 unused[3];
};
+#define PCAN_UFD_CMD_DEVID_SET 0x81
+
+struct __packed pcan_ufd_device_id {
+ __le16 opcode_channel;
+
+ u16 unused;
+ __le32 device_id;
+};
+
static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om)
{
return om->channel & 0xf;
@@ -234,6 +243,15 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
return err;
}
+static int pcan_usb_fd_read_fwinfo(struct peak_usb_device *dev,
+ struct pcan_ufd_fw_info *fw_info)
+{
+ return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+ PCAN_USBPRO_INFO_FW,
+ fw_info,
+ sizeof(*fw_info));
+}
+
/* build the commands list in the given buffer, to enter operational mode */
static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
{
@@ -434,6 +452,34 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
return pcan_usb_fd_send_cmd(dev, ++cmd);
}
+/* read user CAN channel id from device */
+static int pcan_usb_fd_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
+{
+ int err;
+ struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev);
+
+ err = pcan_usb_fd_read_fwinfo(dev, &usb_if->fw_info);
+ if (err)
+ return err;
+
+ *can_ch_id = le32_to_cpu(usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ return err;
+}
+
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_fd_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ struct pcan_ufd_device_id *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
+ PCAN_UFD_CMD_DEVID_SET);
+ cmd->device_id = cpu_to_le32(can_ch_id);
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
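Note the ++cmd idiom here and in the bittiming helpers above: pcan_usb_fd_send_cmd() takes a tail pointer (its cmd_tail parameter), so the increment marks the end of the command buffer:

	/* cmd points at the record just filled in; ++cmd is one element
	 * past it, i.e. the current end of the buffer, so exactly this
	 * one command record gets transmitted.
	 */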
/* handle restart but in an asynchronous way
* (uses PCAN-USB Pro code to complete asynchronous request)
*/
@@ -907,10 +953,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
fw_info = &pdev->usb_if->fw_info;
- err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
- PCAN_USBPRO_INFO_FW,
- fw_info,
- sizeof(*fw_info));
+ err = pcan_usb_fd_read_fwinfo(dev, fw_info);
if (err) {
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
@@ -972,7 +1015,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
- dev->device_number =
+ dev->can_channel_id =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
/* if vendor rsp is of type 2, then it contains EP numbers to
@@ -1081,6 +1124,9 @@ static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
.set_phys_id = pcan_usb_fd_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/* describes the PCAN-USB FD adapter */
@@ -1148,6 +1194,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1222,6 +1270,8 @@ const struct peak_usb_adapter pcan_usb_chip = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1296,6 +1346,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1370,6 +1422,8 @@ const struct peak_usb_adapter pcan_usb_x6 = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 5d8f6a40bb2c..f736196383ac 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -6,10 +6,10 @@
* Copyright (C) 2003-2011 PEAK System-Technik GmbH
* Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -76,6 +76,7 @@ static u16 pcan_usb_pro_sizeof_rec[256] = {
[PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter),
[PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts),
[PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid),
+ [PCAN_USBPRO_SETDEVID] = sizeof(struct pcan_usb_pro_devid),
[PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled),
[PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg),
[PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4,
@@ -149,6 +150,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
case PCAN_USBPRO_SETBTR:
case PCAN_USBPRO_GETDEVID:
+ case PCAN_USBPRO_SETDEVID:
*pc++ = va_arg(ap, int);
pc += 2;
*(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
@@ -419,8 +421,8 @@ static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode,
return pcan_usb_pro_send_cmd(dev, &um);
}
-static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
- u32 *device_id)
+static int pcan_usb_pro_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
{
struct pcan_usb_pro_devid *pdn;
struct pcan_usb_pro_msg um;
@@ -439,11 +441,23 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
return err;
pdn = (struct pcan_usb_pro_devid *)pc;
- *device_id = le32_to_cpu(pdn->dev_num);
+ *can_ch_id = le32_to_cpu(pdn->dev_num);
return err;
}
+static int pcan_usb_pro_set_can_channel_id(struct peak_usb_device *dev,
+ u32 can_ch_id)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETDEVID, dev->ctrl_idx,
+ can_ch_id);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
struct can_bittiming *bt)
{
@@ -1023,6 +1037,9 @@ static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
.set_phys_id = pcan_usb_pro_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1076,7 +1093,8 @@ const struct peak_usb_adapter pcan_usb_pro = {
.dev_free = pcan_usb_pro_free,
.dev_set_bus = pcan_usb_pro_set_bus,
.dev_set_bittiming = pcan_usb_pro_set_bittiming,
- .dev_get_device_id = pcan_usb_pro_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_pro_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_pro_set_can_channel_id,
.dev_decode_buf = pcan_usb_pro_decode_buf,
.dev_encode_msg = pcan_usb_pro_encode_msg,
.dev_start = pcan_usb_pro_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index a34e0fc021c9..28e740af905d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -62,6 +62,7 @@ struct __packed pcan_usb_pro_fwinfo {
#define PCAN_USBPRO_SETBTR 0x02
#define PCAN_USBPRO_SETBUSACT 0x04
#define PCAN_USBPRO_SETSILENT 0x05
+#define PCAN_USBPRO_SETDEVID 0x06
#define PCAN_USBPRO_SETFILTR 0x0a
#define PCAN_USBPRO_SETTS 0x10
#define PCAN_USBPRO_GETDEVID 0x12
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 2e270b479143..cbe831875347 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -15,6 +15,9 @@
#include "lan9303.h"
+/* For the LAN9303 and LAN9354, only port 0 is an XMII port. */
+#define IS_PORT_XMII(port) ((port) == 0)
+
#define LAN9303_NUM_PORTS 3
/* 13.2 System Control and Status Registers
@@ -50,6 +53,9 @@
#define LAN9303_MANUAL_FC_1 0x68
#define LAN9303_MANUAL_FC_2 0x69
#define LAN9303_MANUAL_FC_0 0x6a
+# define LAN9303_BP_EN BIT(6)
+# define LAN9303_RX_FC_EN BIT(2)
+# define LAN9303_TX_FC_EN BIT(1)
#define LAN9303_SWITCH_CSR_DATA 0x6b
#define LAN9303_SWITCH_CSR_CMD 0x6c
#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31)
@@ -225,6 +231,13 @@ const struct regmap_access_table lan9303_register_set = {
};
EXPORT_SYMBOL(lan9303_register_set);
+/* Flow Control registers indexed by port number */
+static unsigned int flow_ctl_reg[] = {
+ LAN9303_MANUAL_FC_0,
+ LAN9303_MANUAL_FC_1,
+ LAN9303_MANUAL_FC_2
+};
+
static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
{
int ret, i;
@@ -902,6 +915,7 @@ static int lan9303_setup(struct dsa_switch *ds)
{
struct lan9303 *chip = ds->priv;
int ret;
+ u32 reg;
/* Make sure that port 0 is the cpu port */
if (!dsa_is_cpu_port(ds, 0)) {
@@ -909,6 +923,17 @@ static int lan9303_setup(struct dsa_switch *ds)
return -EINVAL;
}
+ /* Virtual Phy: Remove Turbo 200Mbit mode */
+ ret = lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &reg);
+ if (ret)
+ return ret;
+
+ /* Clear the TURBO Mode bit if it was set. */
+ if (reg & LAN9303_VIRT_SPECIAL_TURBO) {
+ reg &= ~LAN9303_VIRT_SPECIAL_TURBO;
+ regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, reg);
+ }
+
ret = lan9303_setup_tagging(chip);
if (ret)
dev_err(chip->dev, "failed to setup port tagging %d\n", ret);
@@ -1049,42 +1074,6 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
return chip->ops->phy_write(chip, phy, regnum, val);
}
-static void lan9303_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct lan9303 *chip = ds->priv;
- int ctl;
-
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
-
- ctl = lan9303_phy_read(ds, port, MII_BMCR);
-
- ctl &= ~BMCR_ANENABLE;
-
- if (phydev->speed == SPEED_100)
- ctl |= BMCR_SPEED100;
- else if (phydev->speed == SPEED_10)
- ctl &= ~BMCR_SPEED100;
- else
- dev_err(ds->dev, "unsupported speed: %d\n", phydev->speed);
-
- if (phydev->duplex == DUPLEX_FULL)
- ctl |= BMCR_FULLDPLX;
- else
- ctl &= ~BMCR_FULLDPLX;
-
- lan9303_phy_write(ds, port, MII_BMCR, ctl);
-
- if (port == chip->phy_addr_base) {
- /* Virtual Phy: Remove Turbo 200Mbit mode */
- lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
-
- ctl &= ~LAN9303_VIRT_SPECIAL_TURBO;
- regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ctl);
- }
-}
-
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
@@ -1281,26 +1270,96 @@ static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
return 0;
}
+static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d) entered.\n", __func__, port);
+
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+}
+
+static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct lan9303 *chip = ds->priv;
+ u32 ctl;
+ u32 reg;
+
+ /* On this device, we are only interested in doing something here if
+ * this is the xMII port. All other ports are 10/100 PHYs using MDIO
+ * to control their link settings.
+ */
+ if (!IS_PORT_XMII(port))
+ return;
+
+ /* Disable auto-negotiation and force the speed/duplex settings. */
+ ctl = lan9303_phy_read(ds, port, MII_BMCR);
+ ctl &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ lan9303_phy_write(ds, port, MII_BMCR, ctl);
+
+ /* Force the flow control settings. */
+ lan9303_read(chip->regmap, flow_ctl_reg[port], &reg);
+ reg &= ~(LAN9303_BP_EN | LAN9303_RX_FC_EN | LAN9303_TX_FC_EN);
+ if (rx_pause)
+ reg |= (LAN9303_RX_FC_EN | LAN9303_BP_EN);
+ if (tx_pause)
+ reg |= LAN9303_TX_FC_EN;
+ regmap_write(chip->regmap, flow_ctl_reg[port], reg);
+}
+
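For reference, the flow-control bits that result from the four pause combinations phylink can resolve, read off the code above:

	/* rx_pause tx_pause -> LAN9303_MANUAL_FC_x bits
	 *    0        0     -> none
	 *    1        0     -> LAN9303_RX_FC_EN | LAN9303_BP_EN
	 *    0        1     -> LAN9303_TX_FC_EN
	 *    1        1     -> LAN9303_RX_FC_EN | LAN9303_BP_EN | LAN9303_TX_FC_EN
	 */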
static const struct dsa_switch_ops lan9303_switch_ops = {
- .get_tag_protocol = lan9303_get_tag_protocol,
- .setup = lan9303_setup,
- .get_strings = lan9303_get_strings,
- .phy_read = lan9303_phy_read,
- .phy_write = lan9303_phy_write,
- .adjust_link = lan9303_adjust_link,
- .get_ethtool_stats = lan9303_get_ethtool_stats,
- .get_sset_count = lan9303_get_sset_count,
- .port_enable = lan9303_port_enable,
- .port_disable = lan9303_port_disable,
- .port_bridge_join = lan9303_port_bridge_join,
- .port_bridge_leave = lan9303_port_bridge_leave,
- .port_stp_state_set = lan9303_port_stp_state_set,
- .port_fast_age = lan9303_port_fast_age,
- .port_fdb_add = lan9303_port_fdb_add,
- .port_fdb_del = lan9303_port_fdb_del,
- .port_fdb_dump = lan9303_port_fdb_dump,
- .port_mdb_add = lan9303_port_mdb_add,
- .port_mdb_del = lan9303_port_mdb_del,
+ .get_tag_protocol = lan9303_get_tag_protocol,
+ .setup = lan9303_setup,
+ .get_strings = lan9303_get_strings,
+ .phy_read = lan9303_phy_read,
+ .phy_write = lan9303_phy_write,
+ .phylink_get_caps = lan9303_phylink_get_caps,
+ .phylink_mac_link_up = lan9303_phylink_mac_link_up,
+ .get_ethtool_stats = lan9303_get_ethtool_stats,
+ .get_sset_count = lan9303_get_sset_count,
+ .port_enable = lan9303_port_enable,
+ .port_disable = lan9303_port_disable,
+ .port_bridge_join = lan9303_port_bridge_join,
+ .port_bridge_leave = lan9303_port_bridge_leave,
+ .port_stp_state_set = lan9303_port_stp_state_set,
+ .port_fast_age = lan9303_port_fast_age,
+ .port_fdb_add = lan9303_port_fdb_add,
+ .port_fdb_del = lan9303_port_fdb_del,
+ .port_fdb_dump = lan9303_port_fdb_dump,
+ .port_mdb_add = lan9303_port_mdb_add,
+ .port_mdb_del = lan9303_port_mdb_del,
};
static int lan9303_register_switch(struct lan9303 *chip)
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 913f83ef013c..394ca8678d2b 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -22,6 +22,16 @@ config NET_DSA_MICROCHIP_KSZ_SPI
help
Select to enable support for registering switches configured through SPI.
+config NET_DSA_MICROCHIP_KSZ_PTP
+ bool "Support for the PTP clock on the KSZ9563/LAN937x Ethernet Switch"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && PTP_1588_CLOCK
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON=m || PTP_1588_CLOCK=y
+ help
+ Select to enable support for timestamping & PTP clock manipulation in
+ the KSZ8563/KSZ9563/LAN937x series of switches. KSZ9563/KSZ8563 support
+ only one-step timestamping. LAN937x switches support both one-step and
+ two-step timestamping.
+
config NET_DSA_MICROCHIP_KSZ8863_SMI
tristate "KSZ series SMI connected switch driver"
depends on NET_DSA_MICROCHIP_KSZ_COMMON
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 28873559efc2..48360cc9fc68 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -4,6 +4,11 @@ ksz_switch-objs := ksz_common.o
ksz_switch-objs += ksz9477.o
ksz_switch-objs += ksz8795.o
ksz_switch-objs += lan937x_main.o
+
+ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP
+ksz_switch-objs += ksz_ptp.o
+endif
+
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 6178a96e389f..bf13d47c26cf 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -980,6 +980,22 @@ int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}
+void ksz9477_port_queue_split(struct ksz_device *dev, int port)
+{
+ u8 data;
+
+ if (dev->info->num_tx_queues == 8)
+ data = PORT_EIGHT_QUEUE;
+ else if (dev->info->num_tx_queues == 4)
+ data = PORT_FOUR_QUEUE;
+ else if (dev->info->num_tx_queues == 2)
+ data = PORT_TWO_QUEUE;
+ else
+ data = PORT_SINGLE_QUEUE;
+
+ ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
+}
+
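The field values encode log2 of the queue count (0x0 for a single queue up to 0x3 for eight), so for the supported counts the ladder above is equivalent to this one-liner; a sketch, using ilog2() from <linux/log2.h>:

	data = ilog2(dev->info->num_tx_queues); /* 8->3, 4->2, 2->1, 1->0 */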
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
@@ -991,6 +1007,8 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
true);
+ ksz9477_port_queue_split(dev, port);
+
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
/* set back pressure */
@@ -1166,6 +1184,13 @@ u32 ksz9477_get_port_addr(int port, int offset)
return PORT_CTRL_ADDR(port, offset);
}
+int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
+{
+ val = val >> 8;
+
+ return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
+}
+
int ksz9477_switch_init(struct ksz_device *dev)
{
u8 data8;
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index 7c5bb3032772..b6f7e3c46e3f 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -51,10 +51,12 @@ int ksz9477_mdb_del(struct ksz_device *dev, int port,
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
void ksz9477_config_cpu_port(struct dsa_switch *ds);
+int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val);
int ksz9477_enable_stp_addr(struct ksz_device *dev);
int ksz9477_reset_switch(struct ksz_device *dev);
int ksz9477_dsa_init(struct ksz_device *dev);
int ksz9477_switch_init(struct ksz_device *dev);
void ksz9477_switch_exit(struct ksz_device *dev);
+void ksz9477_port_queue_split(struct ksz_device *dev, int port);
#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index cc457fa64939..cba3dba58bc3 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -850,7 +850,11 @@
#define PORT_FORCE_TX_FLOW_CTRL BIT(4)
#define PORT_FORCE_RX_FLOW_CTRL BIT(3)
#define PORT_TAIL_TAG_ENABLE BIT(2)
-#define PORT_QUEUE_SPLIT_ENABLE 0x3
+#define PORT_QUEUE_SPLIT_MASK GENMASK(1, 0)
+#define PORT_EIGHT_QUEUE 0x3
+#define PORT_FOUR_QUEUE 0x2
+#define PORT_TWO_QUEUE 0x1
+#define PORT_SINGLE_QUEUE 0x0
#define REG_PORT_CTRL_1 0x0021
@@ -1480,33 +1484,10 @@
/* 9 - Shaping */
-#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
+#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
-#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
+#define MTI_PVID_REPLACE BIT(0)
-#define MTI_PVID_REPLACE BIT(0)
-
-#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
-
-#define MTI_SCHEDULE_MODE_M 0x3
-#define MTI_SCHEDULE_MODE_S 6
-#define MTI_SCHEDULE_STRICT_PRIO 0
-#define MTI_SCHEDULE_WRR 2
-#define MTI_SHAPING_M 0x3
-#define MTI_SHAPING_S 4
-#define MTI_SHAPING_OFF 0
-#define MTI_SHAPING_SRP 1
-#define MTI_SHAPING_TIME_AWARE 2
-
-#define REG_PORT_MTI_QUEUE_CTRL_1 0x0915
-
-#define MTI_TX_RATIO_M (BIT(7) - 1)
-
-#define REG_PORT_MTI_QUEUE_CTRL_2__2 0x0916
-#define REG_PORT_MTI_HI_WATER_MARK 0x0916
-#define REG_PORT_MTI_QUEUE_CTRL_3__2 0x0918
-#define REG_PORT_MTI_LO_WATER_MARK 0x0918
-#define REG_PORT_MTI_QUEUE_CTRL_4__2 0x091A
#define REG_PORT_MTI_CREDIT_INCREMENT 0x091A
/* A - QM */
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 9b20c2ee6d62..729b36eeb2c4 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -6,6 +6,7 @@
*/
#include <linux/delay.h>
+#include <linux/dsa/ksz_common.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
@@ -22,13 +23,19 @@
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
#include <net/dsa.h>
+#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz_ptp.h"
#include "ksz8.h"
#include "ksz9477.h"
#include "lan937x.h"
+#define KSZ_CBS_ENABLE ((MTI_SCHEDULE_STRICT_PRIO << MTI_SCHEDULE_MODE_S) | \
+ (MTI_SHAPING_SRP << MTI_SHAPING_S))
+#define KSZ_CBS_DISABLE ((MTI_SCHEDULE_WRR << MTI_SCHEDULE_MODE_S) |\
+ (MTI_SHAPING_OFF << MTI_SHAPING_S))
#define MIB_COUNTER_NUM 0x20
struct ksz_stats_raw {
@@ -248,6 +255,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.change_mtu = ksz9477_change_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = ksz9477_config_cpu_port,
+ .tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = ksz9477_reset_switch,
.init = ksz9477_switch_init,
@@ -284,6 +292,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.change_mtu = lan937x_change_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
+ .tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = lan937x_reset_switch,
.init = lan937x_switch_init,
@@ -1078,6 +1087,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1104,6 +1115,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1142,6 +1154,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1166,6 +1179,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1190,6 +1204,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x4, /* can be configured as cpu port */
.port_cnt = 3,
+ .num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
@@ -1211,6 +1226,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 4,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -1243,6 +1260,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x3F, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
.port_nirqs = 2,
+ .num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -1275,6 +1293,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 2,
+ .num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -1305,6 +1324,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 2,
+ .num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1330,6 +1350,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1355,6 +1377,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -1385,6 +1409,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1409,6 +1435,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1433,6 +1461,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1461,6 +1491,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x38, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1489,6 +1521,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1775,9 +1809,6 @@ static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
u16 val;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
if (ret < 0)
return ret;
@@ -1790,9 +1821,6 @@ static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
{
struct ksz_device *dev = bus->priv;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
return dev->dev_ops->w_phy(dev, addr, regnum, val);
}
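
For context: the MII_ADDR_C45 escape is dropped because the MDIO core now dispatches Clause 45 through dedicated callbacks. A bus that only speaks Clause 22, like this one, simply leaves the C45 hooks unset and the core fails such accesses on its behalf. A minimal sketch of the new contract, with hypothetical driver names:

static int my_mdio_read_c22(struct mii_bus *bus, int addr, int regnum);
static int my_mdio_read_c45(struct mii_bus *bus, int addr, int devad,
                            int regnum);

bus->read = my_mdio_read_c22;      /* Clause 22 path, plain regnum */
bus->read_c45 = my_mdio_read_c45;  /* optional; when NULL the core
                                    * returns -EOPNOTSUPP for C45 */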
@@ -2069,6 +2097,8 @@ static int ksz_setup(struct dsa_switch *ds)
dev->dev_ops->enable_stp_addr(dev);
+ ds->num_tx_queues = dev->info->num_tx_queues;
+
regmap_update_bits(dev->regmap[0], regs[S_MULTICAST_CTRL],
MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
@@ -2099,13 +2129,23 @@ static int ksz_setup(struct dsa_switch *ds)
ret = ksz_pirq_setup(dev, dp->index);
if (ret)
goto out_girq;
+
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto out_pirq;
}
}
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
+ goto out_ptpirq;
+ }
+
ret = ksz_mdio_register(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to register the mdio");
- goto out_pirq;
+ goto out_ptp_clock_unregister;
}
/* start switch */
@@ -2114,6 +2154,12 @@ static int ksz_setup(struct dsa_switch *ds)
return 0;
+out_ptp_clock_unregister:
+ ksz_ptp_clock_unregister(ds);
+out_ptpirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_ptp_irq_free(ds, dp->index);
out_pirq:
if (dev->irq > 0)
dsa_switch_for_each_user_port(dp, dev->ds)
@@ -2130,9 +2176,14 @@ static void ksz_teardown(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
+ ksz_ptp_clock_unregister(ds);
+
if (dev->irq > 0) {
- dsa_switch_for_each_user_port(dp, dev->ds)
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ksz_ptp_irq_free(ds, dp->index);
+
ksz_irq_free(&dev->ports[dp->index].pirq);
+ }
ksz_irq_free(&dev->girq);
}
@@ -2517,6 +2568,17 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
return proto;
}
+static int ksz_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct ksz_tagger_data *tagger_data;
+
+ tagger_data = ksz_tagger_data(ds);
+ tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
+
+ return 0;
+}
+
static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
bool flag, struct netlink_ext_ack *extack)
{
@@ -2611,6 +2673,70 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
return -EOPNOTSUPP;
}
+static int ksz_validate_eee(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->info->internal_phy[port])
+ return -EOPNOTSUPP;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ int ret;
+
+ ret = ksz_validate_eee(ds, port);
+ if (ret)
+ return ret;
+
+ /* There is no documented control of Tx LPI configuration. */
+ e->tx_lpi_enabled = true;
+
+ /* There is no documented control of Tx LPI timer. According to tests
+ * Tx LPI timer seems to be set by default to minimal value.
+ */
+ e->tx_lpi_timer = 0;
+
+ return 0;
+}
+
+static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ ret = ksz_validate_eee(ds, port);
+ if (ret)
+ return ret;
+
+ if (!e->tx_lpi_enabled) {
+ dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
+ return -EINVAL;
+ }
+
+ if (e->tx_lpi_timer) {
+ dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void ksz_set_xmii(struct ksz_device *dev, int port,
phy_interface_t interface)
{
@@ -2930,8 +3056,104 @@ static int ksz_switch_detect(struct ksz_device *dev)
return 0;
}
+/* Bandwidth is calculated as idle slope / transmit rate. The resulting
+ * fraction is then converted to a 24-bit hexadecimal value by successive
+ * multiplication: on every step the integer part becomes the next hex
+ * digit and the fractional remainder is carried forward.
+ */
+static int cinc_cal(s32 idle_slope, s32 send_slope, u32 *bw)
+{
+ u32 cinc = 0;
+ u32 txrate;
+ u32 rate;
+ u8 temp;
+ u8 i;
+
+ txrate = idle_slope - send_slope;
+
+ if (!txrate)
+ return -EINVAL;
+
+ rate = idle_slope;
+
+ /* 24 bit register */
+ for (i = 0; i < 6; i++) {
+ rate = rate * 16;
+
+ temp = rate / txrate;
+
+ rate %= txrate;
+
+ cinc = ((cinc << 4) | temp);
+ }
+
+ *bw = cinc;
+
+ return 0;
+}
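
The conversion can be sanity-checked in isolation; a minimal standalone sketch with made-up slope values, mirroring the loop above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t idle_slope = 20000, send_slope = -80000; /* kbit/s, made up */
	uint32_t txrate = idle_slope - send_slope;       /* 100000: port rate */
	uint32_t rate = idle_slope, cinc = 0;
	int i;

	/* fraction 20000/100000 = 0.2, converted one hex digit per step */
	for (i = 0; i < 6; i++) {
		rate *= 16;
		cinc = (cinc << 4) | (rate / txrate);
		rate %= txrate;
	}
	printf("cinc = 0x%06x\n", cinc); /* 0x333333, i.e. ~0.2 in Q0.24 */
	return 0;
}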
+
+static int ksz_setup_tc_cbs(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+ u32 bw;
+
+ if (!dev->info->tc_cbs_supported)
+ return -EOPNOTSUPP;
+
+ if (qopt->queue >= dev->info->num_tx_queues)
+ return -EINVAL;
+
+ /* Queue Selection */
+ ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue);
+ if (ret)
+ return ret;
+
+ if (!qopt->enable)
+ return ksz_pwrite8(dev, port, REG_PORT_MTI_QUEUE_CTRL_0,
+ KSZ_CBS_DISABLE);
+
+ /* High Credit */
+ ret = ksz_pwrite16(dev, port, REG_PORT_MTI_HI_WATER_MARK,
+ qopt->hicredit);
+ if (ret)
+ return ret;
+
+ /* Low Credit */
+ ret = ksz_pwrite16(dev, port, REG_PORT_MTI_LO_WATER_MARK,
+ qopt->locredit);
+ if (ret)
+ return ret;
+
+ /* Credit Increment Register */
+ ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw);
+ if (ret)
+ return ret;
+
+ if (dev->dev_ops->tc_cbs_set_cinc) {
+ ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw);
+ if (ret)
+ return ret;
+ }
+
+ return ksz_pwrite8(dev, port, REG_PORT_MTI_QUEUE_CTRL_0,
+ KSZ_CBS_ENABLE);
+}
+
+static int ksz_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return ksz_setup_tc_cbs(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
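
For reference, a 20 Mbit/s reservation on a 100 Mbit/s link, configured with something like "tc qdisc replace dev lan1 parent root handle 100 cbs idleslope 20000 sendslope -80000 hicredit 153 locredit -1389 offload 1", would reach ksz_setup_tc_cbs() roughly as below (a sketch; the numbers are illustrative and the credits are supplied by the user per 802.1Q):

#include <net/pkt_sched.h>

struct tc_cbs_qopt_offload qopt = {
	.enable    = 1,
	.queue     = 3,       /* 0-based egress queue index */
	.idleslope = 20000,   /* kbit/s */
	.sendslope = -80000,  /* idleslope minus port transmit rate */
	.hicredit  = 153,     /* bytes */
	.locredit  = -1389,   /* bytes */
};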
+
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
+ .connect_tag_protocol = ksz_connect_tag_protocol,
.get_phy_flags = ksz_get_phy_flags,
.setup = ksz_setup,
.teardown = ksz_teardown,
@@ -2966,6 +3188,14 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.get_pause_stats = ksz_get_pause_stats,
.port_change_mtu = ksz_change_mtu,
.port_max_mtu = ksz_max_mtu,
+ .get_ts_info = ksz_get_ts_info,
+ .port_hwtstamp_get = ksz_hwtstamp_get,
+ .port_hwtstamp_set = ksz_hwtstamp_set,
+ .port_txtstamp = ksz_port_txtstamp,
+ .port_rxtstamp = ksz_port_rxtstamp,
+ .port_setup_tc = ksz_setup_tc,
+ .get_mac_eee = ksz_get_mac_eee,
+ .set_mac_eee = ksz_set_mac_eee,
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 055d61ff3fb8..d2d5761d58e9 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -15,9 +15,12 @@
#include <net/dsa.h>
#include <linux/irq.h>
+#include "ksz_ptp.h"
+
#define KSZ_MAX_NUM_PORTS 8
struct ksz_device;
+struct ksz_port;
struct vlan_table {
u32 table[3];
@@ -46,6 +49,8 @@ struct ksz_chip_data {
int cpu_ports;
int port_cnt;
u8 port_nirqs;
+ u8 num_tx_queues;
+ bool tc_cbs_supported;
const struct ksz_dev_ops *ops;
bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
@@ -81,6 +86,14 @@ struct ksz_irq {
struct ksz_device *dev;
};
+struct ksz_ptp_irq {
+ struct ksz_port *port;
+ u16 ts_reg;
+ bool ts_en;
+ char name[16];
+ int num;
+};
+
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
bool learning;
@@ -100,6 +113,15 @@ struct ksz_port {
struct ksz_device *ksz_dev;
struct ksz_irq pirq;
u8 num;
+#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
+ struct hwtstamp_config tstamp_config;
+ bool hwts_tx_en;
+ bool hwts_rx_en;
+ struct ksz_irq ptpirq;
+ struct ksz_ptp_irq ptpmsg_irq[3];
+ ktime_t tstamp_msg;
+ struct completion tstamp_msg_comp;
+#endif
};
struct ksz_device {
@@ -140,6 +162,7 @@ struct ksz_device {
u16 port_mask;
struct mutex lock_irq; /* IRQ Access */
struct ksz_irq girq;
+ struct ksz_ptp_data ptp_data;
};
/* List of supported models */
@@ -332,6 +355,7 @@ struct ksz_dev_ops {
struct phy_device *phydev, int speed,
int duplex, bool tx_pause, bool rx_pause);
void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
+ int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
void (*config_cpu_port)(struct dsa_switch *ds);
int (*enable_stp_addr)(struct ksz_device *dev);
int (*reset)(struct ksz_device *dev);
@@ -443,6 +467,32 @@ static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
return ret;
}
+static inline int ksz_rmw16(struct ksz_device *dev, u32 reg, u16 mask,
+ u16 value)
+{
+ int ret;
+
+ ret = regmap_update_bits(dev->regmap[1], reg, mask, value);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 16bit reg 0x%x: %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static inline int ksz_rmw32(struct ksz_device *dev, u32 reg, u32 mask,
+ u32 value)
+{
+ int ret;
+
+ ret = regmap_update_bits(dev->regmap[2], reg, mask, value);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 32bit reg 0x%x: %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
{
u32 val[2];
@@ -591,6 +641,7 @@ static inline int is_lan937x(struct ksz_device *dev)
#define REG_PORT_INT_MASK 0x001F
#define PORT_SRC_PHY_INT 1
+#define PORT_SRC_PTP_INT 2
#define KSZ8795_HUGE_PACKET_SIZE 2000
#define KSZ8863_HUGE_PACKET_SIZE 1916
@@ -598,6 +649,24 @@ static inline int is_lan937x(struct ksz_device *dev)
#define KSZ8_LEGAL_PACKET_SIZE 1518
#define KSZ9477_MAX_FRAME_SIZE 9000
+/* CBS related registers */
+#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
+
+#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
+
+#define MTI_SCHEDULE_MODE_M 0x3
+#define MTI_SCHEDULE_MODE_S 6
+#define MTI_SCHEDULE_STRICT_PRIO 0
+#define MTI_SCHEDULE_WRR 2
+#define MTI_SHAPING_M 0x3
+#define MTI_SHAPING_S 4
+#define MTI_SHAPING_OFF 0
+#define MTI_SHAPING_SRP 1
+#define MTI_SHAPING_TIME_AWARE 2
+
+#define REG_PORT_MTI_HI_WATER_MARK 0x0916
+#define REG_PORT_MTI_LO_WATER_MARK 0x0918
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
new file mode 100644
index 000000000000..4e22a695a64c
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -0,0 +1,1201 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip KSZ PTP Implementation
+ *
+ * Copyright (C) 2020 ARRI Lighting
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#include <linux/dsa/ksz_common.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "ksz_common.h"
+#include "ksz_ptp.h"
+#include "ksz_ptp_reg.h"
+
+#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
+#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
+#define work_to_xmit_work(w) \
+ container_of((w), struct ksz_deferred_xmit_work, work)
+
+/* The sub-nanosecond rate register holds at most (2^30 - 1) units of
+ * 2^-32 ns, applied on every 40 ns tick:
+ * max drift = (2^30 - 1) / 2^32 / 40 ns ~= 6249999 ppb
+ */
+#define KSZ_MAX_DRIFT_CORR 6249999
+#define KSZ_MAX_PULSE_WIDTH 125000000LL
+
+#define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
+#define KSZ_PTP_SUBNS_BITS 32
+
+#define KSZ_PTP_INT_START 13
+
+static int ksz_ptp_tou_gpio(struct ksz_device *dev)
+{
+ int ret;
+
+ if (!is_lan937x(dev))
+ return 0;
+
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
+ GPIO_OUT);
+ if (ret)
+ return ret;
+
+ ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
+ LED_OVR_1 | LED_OVR_2);
+ if (ret)
+ return ret;
+
+ return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
+ LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
+ LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
+}
+
+static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
+{
+ u32 data;
+ int ret;
+
+ /* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
+ ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(TRIG_INT_M, BIT(unit));
+ ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
+ if (ret)
+ return ret;
+
+ /* Clear reset and set GPIO direction */
+ return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
+ 0);
+}
+
+static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
+{
+ u32 data;
+
+ if (pulse_ns & 0x3)
+ return -EINVAL;
+
+ data = (pulse_ns / 8);
+ if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
+ return -ERANGE;
+
+ return 0;
+}
+
+static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
+ struct timespec64 const *ts)
+{
+ int ret;
+
+ /* Hardware holds only 32 bits of seconds */
+ if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
+ return -EINVAL;
+
+ ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
+ if (ret)
+ return ret;
+
+ ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
+{
+ u32 data;
+ int ret;
+
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
+ if (ret)
+ return ret;
+
+ /* Check the error flag. Note: the ACTIVE flag not being cleared
+ * is not an error.
+ */
+ ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
+ dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
+ unit);
+ ret = -EIO;
+ /* Unit will be reset on next access */
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ksz_ptp_configure_perout(struct ksz_device *dev,
+ u32 cycle_width_ns, u32 pulse_width_ns,
+ struct timespec64 const *target_time,
+ u8 index)
+{
+ u32 data;
+ int ret;
+
+ data = FIELD_PREP(TRIG_NOTIFY, 1) |
+ FIELD_PREP(TRIG_GPO_M, index) |
+ FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
+ ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
+ if (ret)
+ return ret;
+
+ ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
+ if (ret)
+ return ret;
+
+ /* Set cycle count to 0 (infinite) */
+ ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
+ if (ret)
+ return ret;
+
+ data = (pulse_width_ns / 8);
+ ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_target_time_set(dev, target_time);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ksz_ptp_enable_perout(struct ksz_device *dev,
+ struct ptp_perout_request const *request,
+ int on)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ u64 req_pulse_width_ns;
+ u64 cycle_width_ns;
+ u64 pulse_width_ns;
+ int pin = 0;
+ u32 data32;
+ int ret;
+
+ if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
+ ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
+ return -EBUSY;
+
+ pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
+ if (pin < 0)
+ return -EINVAL;
+
+ data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
+ FIELD_PREP(PTP_TOU_INDEX, request->index);
+ ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
+ PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_reset(dev, request->index);
+ if (ret)
+ return ret;
+
+ if (!on) {
+ ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
+ return 0;
+ }
+
+ ptp_data->perout_target_time_first.tv_sec = request->start.sec;
+ ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;
+
+ ptp_data->perout_period.tv_sec = request->period.sec;
+ ptp_data->perout_period.tv_nsec = request->period.nsec;
+
+ cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
+ if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
+ return -EINVAL;
+
+ if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ pulse_width_ns = request->on.sec * NSEC_PER_SEC +
+ request->on.nsec;
+ } else {
+ /* Use a duty cycle of 50%. Maximum pulse width supported by the
+ * hardware is a little bit more than 125 ms.
+ */
+ req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
+ request->period.nsec) / 2;
+ pulse_width_ns = min_t(u64, req_pulse_width_ns,
+ KSZ_MAX_PULSE_WIDTH);
+ }
+
+ ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
+ &ptp_data->perout_target_time_first,
+ pin);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_gpio(dev);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_start(dev, request->index);
+ if (ret)
+ return ret;
+
+ ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;
+
+ return 0;
+}
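
From userspace this path is reached through the PTP character device. A minimal sketch (device path, start time, and pulse width are placeholders; the pin must first be assigned PTP_PF_PEROUT via PTP_PIN_SETFUNC):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

static int start_perout(void)
{
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.index = 0;                    /* trigger output unit 0 */
	req.start.sec = 1000;             /* absolute start, placeholder */
	req.period.sec = 1;               /* 1 Hz */
	req.flags = PTP_PEROUT_DUTY_CYCLE;
	req.on.nsec = 100000000;          /* 100 ms pulse, within the
	                                   * 125 ms driver limit */

	return ioctl(fd, PTP_PEROUT_REQUEST2, &req);
}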
+
+static int ksz_ptp_enable_mode(struct ksz_device *dev)
+{
+ struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ struct ksz_port *prt;
+ struct dsa_port *dp;
+ bool tag_en = false;
+ int ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ prt = &dev->ports[dp->index];
+ if (prt->hwts_tx_en || prt->hwts_rx_en) {
+ tag_en = true;
+ break;
+ }
+ }
+
+ if (tag_en) {
+ ret = ptp_schedule_worker(ptp_data->clock, 0);
+ if (ret)
+ return ret;
+ } else {
+ ptp_cancel_worker_sync(ptp_data->clock);
+ }
+
+ tagger_data->hwtstamp_set_state(dev->ds, tag_en);
+
+ return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
+ tag_en ? PTP_ENABLE : 0);
+}
+
+/* Returns the timestamping capabilities reported when queried through the
+ * ethtool -T <interface> utility.
+ */
+int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+
+ ptp_data = &dev->ptp_data;
+
+ if (!ptp_data->clock)
+ return -ENODEV;
+
+ ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);
+
+ if (is_lan937x(dev))
+ ts->tx_types |= BIT(HWTSTAMP_TX_ON);
+
+ ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ ts->phc_index = ptp_clock_index(ptp_data->clock);
+
+ return 0;
+}
+
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct ksz_device *dev = ds->priv;
+ struct hwtstamp_config *config;
+ struct ksz_port *prt;
+
+ prt = &dev->ports[port];
+ config = &prt->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+static int ksz_set_hwtstamp_config(struct ksz_device *dev,
+ struct ksz_port *prt,
+ struct hwtstamp_config *config)
+{
+ int ret;
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
+ prt->hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
+ prt->hwts_tx_en = true;
+
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
+ if (ret)
+ return ret;
+
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!is_lan937x(dev))
+ return -ERANGE;
+
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
+ prt->hwts_tx_en = true;
+
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ prt->hwts_rx_en = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ return ksz_ptp_enable_mode(dev);
+}
+
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct ksz_device *dev = ds->priv;
+ struct hwtstamp_config config;
+ struct ksz_port *prt;
+ int ret;
+
+ prt = &dev->ports[port];
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ret = ksz_set_hwtstamp_config(dev, prt, &config);
+ if (ret)
+ return ret;
+
+ memcpy(&prt->tstamp_config, &config, sizeof(config));
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
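
The matching userspace side is the standard SIOCSHWTSTAMP ioctl; a sketch assuming a user port named "lan1" and an already-open datagram socket fd:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hwtstamp(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ONESTEP_P2P,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lan1", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}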
+
+static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
+{
+ struct timespec64 ptp_clock_time;
+ struct ksz_ptp_data *ptp_data;
+ struct timespec64 diff;
+ struct timespec64 ts;
+
+ ptp_data = &dev->ptp_data;
+ ts = ktime_to_timespec64(tstamp);
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_clock_time = ptp_data->clock_time;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+ /* calculate full time from partial time stamp */
+ ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;
+
+ /* find nearest possible point in time */
+ diff = timespec64_sub(ts, ptp_clock_time);
+ if (diff.tv_sec > 2)
+ ts.tv_sec -= 4;
+ else if (diff.tv_sec < -2)
+ ts.tv_sec += 4;
+
+ return timespec64_to_ktime(ts);
+}
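
A worked example of the reconstruction, assuming the tail tag delivers 2 bits of seconds plus 30 bits of nanoseconds (the format decoded by ksz_decode_tstamp()):

/* Cached clock: 1003.9 s.  Partial stamp: second bits 0, 0.1 s in.
 * ts.tv_sec = (1003 & ~3) | 0 = 1000  ->  candidate 1000.1 s
 * diff = 1000.1 - 1003.9 = -3.8 s, below -2, so apply the 4 s wrap:
 * ts.tv_sec += 4  ->  1004.1 s, the nearest second whose low bits are 0.
 */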
+
+bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct ksz_device *dev = ds->priv;
+ struct ptp_header *ptp_hdr;
+ struct ksz_port *prt;
+ u8 ptp_msg_type;
+ ktime_t tstamp;
+ s64 correction;
+
+ prt = &dev->ports[port];
+
+ tstamp = KSZ_SKB_CB(skb)->tstamp;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);
+
+ if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
+ goto out;
+
+ ptp_hdr = ptp_parse_header(skb, type);
+ if (!ptp_hdr)
+ goto out;
+
+ ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
+ if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
+ goto out;
+
+ /* Subtract only the partial time stamp from the correction field. When
+ * the hardware adds the egress time stamp to the correction field of
+ * the PDelay_Resp message on tx, it likewise adds only the partial
+ * time stamp.
+ */
+ correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
+ correction -= ktime_to_ns(tstamp) << 16;
+
+ ptp_header_update_correction(skb, type, ptp_hdr, correction);
+
+out:
+ return false;
+}
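
The shift by 16 is the IEEE 1588 scaling: correctionField counts units of 2^-16 ns. As a standalone illustration:

/* A 100 ns partial ingress stamp removes 100 << 16 = 6553600 units from
 * correctionField; the hardware later adds the egress stamp in the same
 * 2^-16 ns units, so the field ends up carrying only the turnaround time.
 */
static inline long long ns_to_correction(long long ns)
{
	return ns << 16;
}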
+
+void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ptp_header *hdr;
+ struct sk_buff *clone;
+ struct ksz_port *prt;
+ unsigned int type;
+ u8 ptp_msg_type;
+
+ prt = &dev->ports[port];
+
+ if (!prt->hwts_tx_en)
+ return;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return;
+
+ ptp_msg_type = ptp_get_msgtype(hdr, type);
+
+ switch (ptp_msg_type) {
+ case PTP_MSGTYPE_SYNC:
+ if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
+ return;
+ break;
+ case PTP_MSGTYPE_PDELAY_REQ:
+ break;
+ case PTP_MSGTYPE_PDELAY_RESP:
+ if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
+ KSZ_SKB_CB(skb)->ptp_type = type;
+ KSZ_SKB_CB(skb)->update_correction = true;
+ return;
+ }
+ break;
+
+ default:
+ return;
+ }
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ /* Cache the clone; it is consumed in tag_ksz.c */
+ KSZ_SKB_CB(skb)->clone = clone;
+}
+
+static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
+ struct ksz_port *prt, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps hwtstamps = {};
+ int ret;
+
+ /* The timeout must cover the time for the DSA master to transmit the
+ * frame, the time stamp latency, the IRQ latency, and the time needed
+ * to read the time stamp.
+ */
+ ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
+ msecs_to_jiffies(100));
+ if (!ret)
+ return;
+
+ hwtstamps.hwtstamp = prt->tstamp_msg;
+ skb_complete_tx_timestamp(skb, &hwtstamps);
+}
+
+void ksz_port_deferred_xmit(struct kthread_work *work)
+{
+ struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *prt;
+
+ prt = &dev->ports[xmit_work->dp->index];
+
+ clone = KSZ_SKB_CB(skb)->clone;
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ reinit_completion(&prt->tstamp_msg_comp);
+
+ dsa_enqueue_skb(skb, skb->dev);
+
+ ksz_ptp_txtstamp_skb(dev, prt, clone);
+
+ kfree(xmit_work);
+}
+
+static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
+{
+ u32 nanoseconds;
+ u32 seconds;
+ u8 phase;
+ int ret;
+
+ /* Copy current PTP clock into shadow registers and read */
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
+ if (ret)
+ return ret;
+
+ ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
+ if (ret)
+ return ret;
+
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nanoseconds + phase * 8;
+
+ return 0;
+}
+
+static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+ ret = _ksz_ptp_gettime(dev, ts);
+ mutex_unlock(&ptp_data->lock);
+
+ return ret;
+}
+
+static int ksz_ptp_restart_perout(struct ksz_device *dev)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ s64 now_ns, first_ns, period_ns, next_ns;
+ struct ptp_perout_request request;
+ struct timespec64 next;
+ struct timespec64 now;
+ unsigned int count;
+ int ret;
+
+ dev_info(dev->dev, "Restarting periodic output signal\n");
+
+ ret = _ksz_ptp_gettime(dev, &now);
+ if (ret)
+ return ret;
+
+ now_ns = timespec64_to_ns(&now);
+ first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);
+
+ /* Calculate next perout event based on start time and period */
+ period_ns = timespec64_to_ns(&ptp_data->perout_period);
+
+ if (first_ns < now_ns) {
+ count = div_u64(now_ns - first_ns, period_ns);
+ next_ns = first_ns + count * period_ns;
+ } else {
+ next_ns = first_ns;
+ }
+
+ /* Ensure a 100 ms guard time prior to the next event */
+ while (next_ns < now_ns + 100000000)
+ next_ns += period_ns;
+
+ /* Restart periodic output signal */
+ next = ns_to_timespec64(next_ns);
+ request.start.sec = next.tv_sec;
+ request.start.nsec = next.tv_nsec;
+ request.period.sec = ptp_data->perout_period.tv_sec;
+ request.period.nsec = ptp_data->perout_period.tv_nsec;
+ request.index = 0;
+ request.flags = 0;
+
+ return ksz_ptp_enable_perout(dev, &request, 1);
+}
+
+static int ksz_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ /* Write to the shadow registers and load the PTP clock */
+ ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
+ if (ret)
+ goto unlock;
+
+ switch (ptp_data->tou_mode) {
+ case KSZ_PTP_TOU_IDLE:
+ break;
+
+ case KSZ_PTP_TOU_PEROUT:
+ ret = ksz_ptp_restart_perout(dev);
+ if (ret)
+ goto unlock;
+
+ break;
+ }
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = *ts;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+
+ return ret;
+}
+
+static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ u64 base, adj;
+ bool negative;
+ u32 data32;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ if (scaled_ppm) {
+ base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
+ negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);
+
+ data32 = (u32)adj;
+ data32 &= PTP_SUBNANOSEC_M;
+ if (!negative)
+ data32 |= PTP_RATE_DIR;
+
+ ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
+ PTP_CLK_ADJ_ENABLE);
+ if (ret)
+ goto unlock;
+ } else {
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
+ if (ret)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+ return ret;
+}
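
To make the scaling concrete: the hardware adds the programmed value, in units of 2^-32 ns, on every 40 ns tick, so base = 40 << 32 and diff_by_scaled_ppm() yields |scaled_ppm| / 65536 ppm of it. Illustrative numbers:

/* scaled_ppm = 65536 (exactly 1 ppm):
 * adj = 40 * 2^32 / 10^6 ~= 171799 units of 2^-32 ns per tick
 *     ~= 4.0e-5 ns extra per 40 ns tick, i.e. 1 ppm of the clock rate.
 * The 30-bit register field caps the correction at KSZ_MAX_DRIFT_CORR.
 */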
+
+static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ struct timespec64 delta64 = ns_to_timespec64(delta);
+ s32 sec, nsec;
+ u16 data16;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ /* Do not use the ns_to_timespec64() result here: the hardware
+ * applies sec and nsec in the same direction, so both parts must
+ * keep the sign of delta.
+ */
+ sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);
+
+ ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
+ if (ret)
+ goto unlock;
+
+ ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
+ if (ret)
+ goto unlock;
+
+ data16 |= PTP_STEP_ADJ;
+
+ /* PTP_STEP_DIR -- 0: subtract, 1: add */
+ if (delta < 0)
+ data16 &= ~PTP_STEP_DIR;
+ else
+ data16 |= PTP_STEP_DIR;
+
+ ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
+ if (ret)
+ goto unlock;
+
+ switch (ptp_data->tou_mode) {
+ case KSZ_PTP_TOU_IDLE:
+ break;
+
+ case KSZ_PTP_TOU_PEROUT:
+ ret = ksz_ptp_restart_perout(dev);
+ if (ret)
+ goto unlock;
+
+ break;
+ }
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+ return ret;
+}
+
+static int ksz_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ switch (req->type) {
+ case PTP_CLK_REQ_PEROUT:
+ mutex_lock(&ptp_data->lock);
+ ret = ksz_ptp_enable_perout(dev, &req->perout, on);
+ mutex_unlock(&ptp_data->lock);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ int ret = 0;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Hooked up as the do_aux_work callback of the ptp_clock capabilities */
+static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ struct timespec64 ts;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+ ret = _ksz_ptp_gettime(dev, &ts);
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = ts;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return HZ; /* reschedule in 1 second */
+}
+
+static int ksz_ptp_start_clock(struct ksz_device *dev)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ int ret;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
+ if (ret)
+ return ret;
+
+ ptp_data->clock_time.tv_sec = 0;
+ ptp_data->clock_time.tv_nsec = 0;
+
+ return 0;
+}
+
+int ksz_ptp_clock_register(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+ int ret;
+ u8 i;
+
+ ptp_data = &dev->ptp_data;
+ mutex_init(&ptp_data->lock);
+ spin_lock_init(&ptp_data->clock_lock);
+
+ ptp_data->caps.owner = THIS_MODULE;
+ snprintf(ptp_data->caps.name, 16, "Microchip Clock");
+ ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
+ ptp_data->caps.gettime64 = ksz_ptp_gettime;
+ ptp_data->caps.settime64 = ksz_ptp_settime;
+ ptp_data->caps.adjfine = ksz_ptp_adjfine;
+ ptp_data->caps.adjtime = ksz_ptp_adjtime;
+ ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
+ ptp_data->caps.enable = ksz_ptp_enable;
+ ptp_data->caps.verify = ksz_ptp_verify_pin;
+ ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
+ ptp_data->caps.n_per_out = 3;
+
+ ret = ksz_ptp_start_clock(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
+ struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];
+
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
+
+ ptp_data->caps.pin_config = ptp_data->pin_config;
+
+ /* Currently only P2P mode is supported. When the 802_1AS bit is set,
+ * the switch forwards all PTP packets to the host port and none to
+ * the other ports.
+ */
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
+ PTP_TC_P2P | PTP_802_1AS);
+ if (ret)
+ return ret;
+
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
+
+ return 0;
+}
+
+void ksz_ptp_clock_unregister(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+
+ ptp_data = &dev->ptp_data;
+
+ if (ptp_data->clock)
+ ptp_clock_unregister(ptp_data->clock);
+}
+
+static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_ptp_irq *ptpmsg_irq = dev_id;
+ struct ksz_device *dev;
+ struct ksz_port *port;
+ u32 tstamp_raw;
+ ktime_t tstamp;
+ int ret;
+
+ port = ptpmsg_irq->port;
+ dev = port->ksz_dev;
+
+ if (ptpmsg_irq->ts_en) {
+ ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
+ if (ret)
+ return IRQ_NONE;
+
+ tstamp = ksz_decode_tstamp(tstamp_raw);
+
+ port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);
+
+ complete(&port->tstamp_msg_comp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *ptpirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u16 data;
+ int ret;
+ u8 n;
+
+ dev = ptpirq->dev;
+
+ ret = ksz_read16(dev, ptpirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ /* Clear the interrupt status (bits are write-1-to-clear) */
+ ret = ksz_write16(dev, ptpirq->reg_status, data);
+ if (ret)
+ return IRQ_NONE;
+
+ for (n = 0; n < ptpirq->nirqs; ++n) {
+ if (data & BIT(n + KSZ_PTP_INT_START)) {
+ sub_irq = irq_find_mapping(ptpirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void ksz_ptp_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
+}
+
+static void ksz_ptp_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
+}
+
+static void ksz_ptp_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_ptp_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_ptp_irq_mask,
+ .irq_unmask = ksz_ptp_irq_unmask,
+ .irq_bus_lock = ksz_ptp_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_ptp_irq_bus_sync_unlock,
+};
+
+static int ksz_ptp_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
+ .map = ksz_ptp_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
+{
+ struct ksz_ptp_irq *ptpmsg_irq;
+
+ ptpmsg_irq = &port->ptpmsg_irq[n];
+
+ free_irq(ptpmsg_irq->num, ptpmsg_irq);
+ irq_dispose_mapping(ptpmsg_irq->num);
+}
+
+static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
+{
+ u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
+ REG_PTP_PORT_SYNC_TS};
+ static const char * const name[] = {"pdresp-msg", "xdreq-msg",
+ "sync-msg"};
+ const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
+ struct ksz_ptp_irq *ptpmsg_irq;
+
+ ptpmsg_irq = &port->ptpmsg_irq[n];
+
+ ptpmsg_irq->port = port;
+ ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
+
+ snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]);
+
+ ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
+ if (ptpmsg_irq->num < 0)
+ return ptpmsg_irq->num;
+
+ return request_threaded_irq(ptpmsg_irq->num, NULL,
+ ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
+ ptpmsg_irq->name, ptpmsg_irq);
+}
+
+int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
+{
+ struct ksz_device *dev = ds->priv;
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ struct ksz_port *port = &dev->ports[p];
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ int irq;
+ int ret;
+
+ ptpirq->dev = dev;
+ ptpirq->masked = 0;
+ ptpirq->nirqs = 3;
+ ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
+ ptpirq->reg_status = ops->get_port_addr(p,
+ REG_PTP_PORT_TX_INT_STATUS__2);
+ snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);
+
+ init_completion(&port->tstamp_msg_comp);
+
+ ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
+ &ksz_ptp_irq_domain_ops, ptpirq);
+ if (!ptpirq->domain)
+ return -ENOMEM;
+
+ for (irq = 0; irq < ptpirq->nirqs; irq++)
+ irq_create_mapping(ptpirq->domain, irq);
+
+ ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
+ if (ptpirq->irq_num < 0) {
+ ret = ptpirq->irq_num;
+ goto out;
+ }
+
+ ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
+ IRQF_ONESHOT, ptpirq->name, ptpirq);
+ if (ret)
+ goto out;
+
+ for (irq = 0; irq < ptpirq->nirqs; irq++) {
+ ret = ksz_ptp_msg_irq_setup(port, irq);
+ if (ret)
+ goto out_ptp_msg;
+ }
+
+ return 0;
+
+out_ptp_msg:
+ free_irq(ptpirq->irq_num, ptpirq);
+ while (irq--)
+ free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
+out:
+ for (irq = 0; irq < ptpirq->nirqs; irq++)
+ irq_dispose_mapping(port->ptpmsg_irq[irq].num);
+
+ irq_domain_remove(ptpirq->domain);
+
+ return ret;
+}
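
Summarizing the demux chain the code above builds, per port:

/* chip-level IRQ (girq)
 *   -> per-port IRQ (pirq), source bit PORT_SRC_PTP_INT
 *        -> ptpirq: bits 13..15 of REG_PTP_PORT_TX_INT_STATUS__2
 *             -> ptpmsg_irq[0..2]: pdresp-msg / xdreq-msg / sync-msg,
 *                each reading its own egress time stamp register
 */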
+
+void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *port = &dev->ports[p];
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ u8 n;
+
+ for (n = 0; n < ptpirq->nirqs; n++)
+ ksz_ptp_msg_irq_free(port, n);
+
+ free_irq(ptpirq->irq_num, ptpirq);
+ irq_dispose_mapping(ptpirq->irq_num);
+
+ irq_domain_remove(ptpirq->domain);
+}
+
+MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("PTP support for KSZ switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
new file mode 100644
index 000000000000..0ca8ca4f804e
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip KSZ PTP Implementation
+ *
+ * Copyright (C) 2020 ARRI Lighting
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_DRIVERS_KSZ_PTP_H
+#define _NET_DSA_DRIVERS_KSZ_PTP_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
+
+#include <linux/ptp_clock_kernel.h>
+
+#define KSZ_PTP_N_GPIO 2
+
+enum ksz_ptp_tou_mode {
+ KSZ_PTP_TOU_IDLE,
+ KSZ_PTP_TOU_PEROUT,
+};
+
+struct ksz_ptp_data {
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct ptp_pin_desc pin_config[KSZ_PTP_N_GPIO];
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+ /* lock for accessing the clock_time */
+ spinlock_t clock_lock;
+ struct timespec64 clock_time;
+ enum ksz_ptp_tou_mode tou_mode;
+ struct timespec64 perout_target_time_first; /* start of first pulse */
+ struct timespec64 perout_period;
+};
+
+int ksz_ptp_clock_register(struct dsa_switch *ds);
+
+void ksz_ptp_clock_unregister(struct dsa_switch *ds);
+
+int ksz_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void ksz_port_deferred_xmit(struct kthread_work *work);
+bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
+ unsigned int type);
+int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p);
+void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p);
+
+#else
+
+struct ksz_ptp_data {
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+};
+
+static inline int ksz_ptp_clock_register(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static inline void ksz_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
+{
+ return 0;
+}
+
+static inline void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p) {}
+
+#define ksz_get_ts_info NULL
+
+#define ksz_hwtstamp_get NULL
+
+#define ksz_hwtstamp_set NULL
+
+#define ksz_port_rxtstamp NULL
+
+#define ksz_port_txtstamp NULL
+
+#define ksz_port_deferred_xmit NULL
+
+#endif /* End of CONFIG_NET_DSA_MICROCHIP_KSZ_PTP */
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_ptp_reg.h b/drivers/net/dsa/microchip/ksz_ptp_reg.h
new file mode 100644
index 000000000000..d71e85510cda
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp_reg.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip KSZ PTP register definitions
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ_PTP_REGS_H
+#define __KSZ_PTP_REGS_H
+
+#define REG_SW_GLOBAL_LED_OVR__4 0x0120
+#define LED_OVR_2 BIT(1)
+#define LED_OVR_1 BIT(0)
+
+#define REG_SW_GLOBAL_LED_SRC__4 0x0128
+#define LED_SRC_PTP_GPIO_1 BIT(3)
+#define LED_SRC_PTP_GPIO_2 BIT(2)
+
+/* 5 - PTP Clock */
+#define REG_PTP_CLK_CTRL 0x0500
+
+#define PTP_STEP_ADJ BIT(6)
+#define PTP_STEP_DIR BIT(5)
+#define PTP_READ_TIME BIT(4)
+#define PTP_LOAD_TIME BIT(3)
+#define PTP_CLK_ADJ_ENABLE BIT(2)
+#define PTP_CLK_ENABLE BIT(1)
+#define PTP_CLK_RESET BIT(0)
+
+#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502
+
+#define PTP_RTC_SUB_NANOSEC_M 0x0007
+#define PTP_RTC_0NS 0x00
+
+#define REG_PTP_RTC_NANOSEC 0x0504
+
+#define REG_PTP_RTC_SEC 0x0508
+
+#define REG_PTP_SUBNANOSEC_RATE 0x050C
+
+#define PTP_SUBNANOSEC_M 0x3FFFFFFF
+#define PTP_RATE_DIR BIT(31)
+#define PTP_TMP_RATE_ENABLE BIT(30)
+
+#define REG_PTP_SUBNANOSEC_RATE_L 0x050E
+
+#define REG_PTP_RATE_DURATION 0x0510
+#define REG_PTP_RATE_DURATION_H 0x0510
+#define REG_PTP_RATE_DURATION_L 0x0512
+
+#define REG_PTP_MSG_CONF1 0x0514
+
+#define PTP_802_1AS BIT(7)
+#define PTP_ENABLE BIT(6)
+#define PTP_ETH_ENABLE BIT(5)
+#define PTP_IPV4_UDP_ENABLE BIT(4)
+#define PTP_IPV6_UDP_ENABLE BIT(3)
+#define PTP_TC_P2P BIT(2)
+#define PTP_MASTER BIT(1)
+#define PTP_1STEP BIT(0)
+
+#define REG_PTP_UNIT_INDEX__4 0x0520
+
+#define PTP_GPIO_INDEX GENMASK(19, 16)
+#define PTP_TSI_INDEX BIT(8)
+#define PTP_TOU_INDEX GENMASK(1, 0)
+
+#define REG_PTP_TRIG_STATUS__4 0x0524
+
+#define TRIG_ERROR_M GENMASK(18, 16)
+#define TRIG_DONE_M GENMASK(2, 0)
+
+#define REG_PTP_INT_STATUS__4 0x0528
+
+#define TRIG_INT_M GENMASK(18, 16)
+#define TS_INT_M GENMASK(1, 0)
+
+#define REG_PTP_CTRL_STAT__4 0x052C
+
+#define GPIO_IN BIT(7)
+#define GPIO_OUT BIT(6)
+#define TS_INT_ENABLE BIT(5)
+#define TRIG_ACTIVE BIT(4)
+#define TRIG_ENABLE BIT(3)
+#define TRIG_RESET BIT(2)
+#define TS_ENABLE BIT(1)
+#define TS_RESET BIT(0)
+
+#define REG_TRIG_TARGET_NANOSEC 0x0530
+#define REG_TRIG_TARGET_SEC 0x0534
+
+#define REG_TRIG_CTRL__4 0x0538
+
+#define TRIG_CASCADE_ENABLE BIT(31)
+#define TRIG_CASCADE_TAIL BIT(30)
+#define TRIG_CASCADE_UPS_M GENMASK(29, 26)
+#define TRIG_NOW BIT(25)
+#define TRIG_NOTIFY BIT(24)
+#define TRIG_EDGE BIT(23)
+#define TRIG_PATTERN_M GENMASK(22, 20)
+#define TRIG_NEG_EDGE 0
+#define TRIG_POS_EDGE 1
+#define TRIG_NEG_PULSE 2
+#define TRIG_POS_PULSE 3
+#define TRIG_NEG_PERIOD 4
+#define TRIG_POS_PERIOD 5
+#define TRIG_REG_OUTPUT 6
+#define TRIG_GPO_M GENMASK(19, 16)
+#define TRIG_CASCADE_ITERATE_CNT_M GENMASK(15, 0)
+
+#define REG_TRIG_CYCLE_WIDTH 0x053C
+#define TRIG_CYCLE_WIDTH_M GENMASK(31, 0)
+
+#define REG_TRIG_CYCLE_CNT 0x0540
+
+#define TRIG_CYCLE_CNT_M GENMASK(31, 16)
+#define TRIG_BIT_PATTERN_M GENMASK(15, 0)
+
+#define REG_TRIG_ITERATE_TIME 0x0544
+
+#define REG_TRIG_PULSE_WIDTH__4 0x0548
+
+#define TRIG_PULSE_WIDTH_M GENMASK(23, 0)
+
+/* Port PTP Register */
+#define REG_PTP_PORT_RX_DELAY__2 0x0C00
+#define REG_PTP_PORT_TX_DELAY__2 0x0C02
+#define REG_PTP_PORT_ASYM_DELAY__2 0x0C04
+
+#define REG_PTP_PORT_XDELAY_TS 0x0C08
+#define REG_PTP_PORT_SYNC_TS 0x0C0C
+#define REG_PTP_PORT_PDRESP_TS 0x0C10
+
+#define REG_PTP_PORT_TX_INT_STATUS__2 0x0C14
+#define REG_PTP_PORT_TX_INT_ENABLE__2 0x0C16
+
+#define PTP_PORT_SYNC_INT BIT(15)
+#define PTP_PORT_XDELAY_REQ_INT BIT(14)
+#define PTP_PORT_PDELAY_RESP_INT BIT(13)
+#define KSZ_SYNC_MSG 2
+#define KSZ_XDREQ_MSG 1
+#define KSZ_PDRES_MSG 0
+
+#endif
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
index 8e9e66d6728d..3388d91dbc44 100644
--- a/drivers/net/dsa/microchip/lan937x.h
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -20,4 +20,5 @@ void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val);
#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index 06d3d0308cba..399a3905e6ca 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -15,6 +15,7 @@
#include "lan937x_reg.h"
#include "ksz_common.h"
+#include "ksz9477.h"
#include "lan937x.h"
static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
@@ -180,6 +181,9 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_TAIL_TAG_ENABLE, true);
+ /* Enable the Port Queue split */
+ ksz9477_port_queue_split(dev, port);
+
/* set back pressure for half duplex */
lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
true);
@@ -336,6 +340,11 @@ void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
}
}
+int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
+{
+ return ksz_pwrite32(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
+}
+
int lan937x_switch_init(struct ksz_device *dev)
{
dev->port_mask = (1 << dev->info->port_cnt) - 1;
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index 5bc16a4c4441..45b606b6429f 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -185,6 +185,9 @@
#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+/* 9 - Shaping */
+#define REG_PORT_MTI_CREDIT_INCREMENT 0x091C
+
/* The port number as per the datasheet */
#define RGMII_2_PORT_NUM 5
#define RGMII_1_PORT_NUM 6
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 338f238f2043..3a15015bc409 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -608,17 +608,29 @@ mt7530_mib_reset(struct dsa_switch *ds)
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}
-static int mt7530_phy_read(struct mt7530_priv *priv, int port, int regnum)
+static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
{
return mdiobus_read_nested(priv->bus, port, regnum);
}
-static int mt7530_phy_write(struct mt7530_priv *priv, int port, int regnum,
- u16 val)
+static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
+ u16 val)
{
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
+static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
+ int devad, int regnum)
+{
+ return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
+}
+
+static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u16 val)
+{
+ return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
+}
+
static int
mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
int regnum)
@@ -670,7 +682,7 @@ out:
static int
mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
- int regnum, u32 data)
+ int regnum, u16 data)
{
struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
@@ -793,55 +805,36 @@ out:
}
static int
-mt7531_ind_phy_read(struct mt7530_priv *priv, int port, int regnum)
+mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
{
- int devad;
- int ret;
-
- if (regnum & MII_ADDR_C45) {
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- ret = mt7531_ind_c45_phy_read(priv, port, devad,
- regnum & MII_REGADDR_C45_MASK);
- } else {
- ret = mt7531_ind_c22_phy_read(priv, port, regnum);
- }
+ struct mt7530_priv *priv = bus->priv;
- return ret;
+ return priv->info->phy_read_c22(priv, port, regnum);
}
static int
-mt7531_ind_phy_write(struct mt7530_priv *priv, int port, int regnum,
- u16 data)
+mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
{
- int devad;
- int ret;
-
- if (regnum & MII_ADDR_C45) {
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- ret = mt7531_ind_c45_phy_write(priv, port, devad,
- regnum & MII_REGADDR_C45_MASK,
- data);
- } else {
- ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
- }
+ struct mt7530_priv *priv = bus->priv;
- return ret;
+ return priv->info->phy_read_c45(priv, port, devad, regnum);
}
static int
-mt753x_phy_read(struct mii_bus *bus, int port, int regnum)
+mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
{
struct mt7530_priv *priv = bus->priv;
- return priv->info->phy_read(priv, port, regnum);
+ return priv->info->phy_write_c22(priv, port, regnum, val);
}
static int
-mt753x_phy_write(struct mii_bus *bus, int port, int regnum, u16 val)
+mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
+ u16 val)
{
struct mt7530_priv *priv = bus->priv;
- return priv->info->phy_write(priv, port, regnum, val);
+ return priv->info->phy_write_c45(priv, port, devad, regnum, val);
}
static void
@@ -2098,8 +2091,10 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
- bus->read = mt753x_phy_read;
- bus->write = mt753x_phy_write;
+ bus->read = mt753x_phy_read_c22;
+ bus->write = mt753x_phy_write_c22;
+ bus->read_c45 = mt753x_phy_read_c45;
+ bus->write_c45 = mt753x_phy_write_c45;
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
@@ -3194,8 +3189,10 @@ static const struct mt753x_info mt753x_table[] = {
.id = ID_MT7621,
.pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read_c22 = mt7530_phy_read_c22,
+ .phy_write_c22 = mt7530_phy_write_c22,
+ .phy_read_c45 = mt7530_phy_read_c45,
+ .phy_write_c45 = mt7530_phy_write_c45,
.pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
@@ -3204,8 +3201,10 @@ static const struct mt753x_info mt753x_table[] = {
.id = ID_MT7530,
.pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read_c22 = mt7530_phy_read_c22,
+ .phy_write_c22 = mt7530_phy_write_c22,
+ .phy_read_c45 = mt7530_phy_read_c45,
+ .phy_write_c45 = mt7530_phy_write_c45,
.pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
@@ -3214,8 +3213,10 @@ static const struct mt753x_info mt753x_table[] = {
.id = ID_MT7531,
.pcs_ops = &mt7531_pcs_ops,
.sw_setup = mt7531_setup,
- .phy_read = mt7531_ind_phy_read,
- .phy_write = mt7531_ind_phy_write,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
.pad_setup = mt7531_pad_setup,
.cpu_port_config = mt7531_cpu_port_config,
.mac_port_get_caps = mt7531_mac_port_get_caps,
@@ -3275,7 +3276,7 @@ mt7530_probe(struct mdio_device *mdiodev)
* properly.
*/
if (!priv->info->sw_setup || !priv->info->pad_setup ||
- !priv->info->phy_read || !priv->info->phy_write ||
+ !priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
!priv->info->mac_port_get_caps ||
!priv->info->mac_port_config)
return -EINVAL;
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index e8d966435350..6b2fc6290ea8 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -750,8 +750,10 @@ struct mt753x_pcs {
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
* @sw_setup: Holding the handler to a device initialization
- * @phy_read: Holding the way reading PHY port
- * @phy_write: Holding the way writing PHY port
+ * @phy_read_c22: Holding the way of reading the PHY port using Clause 22
+ * @phy_write_c22: Holding the way of writing the PHY port using Clause 22
+ * @phy_read_c45: Holding the way of reading the PHY port using Clause 45
+ * @phy_write_c45: Holding the way of writing the PHY port using Clause 45
* @pad_setup: Holding the way setting up the bus pad for a certain
* MAC port
* @phy_mode_supported: Check if the PHY type is being supported on a certain
@@ -767,8 +769,13 @@ struct mt753x_info {
const struct phylink_pcs_ops *pcs_ops;
int (*sw_setup)(struct dsa_switch *ds);
- int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
- int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
+ int (*phy_read_c22)(struct mt7530_priv *priv, int port, int regnum);
+ int (*phy_write_c22)(struct mt7530_priv *priv, int port, int regnum,
+ u16 val);
+ int (*phy_read_c45)(struct mt7530_priv *priv, int port, int devad,
+ int regnum);
+ int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u16 val);
int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
int (*cpu_port_config)(struct dsa_switch *ds, int port);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 49bf358b9c4f..1409e691ab77 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -15,6 +15,7 @@ mv88e6xxx-objs += port_hidden.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
mv88e6xxx-objs += smi.o
+mv88e6xxx-objs += switchdev.o
mv88e6xxx-objs += trace.o
# for tracing framework to find trace.h
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 242b8b325504..0a5d6c7bb128 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1728,11 +1728,11 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
return err;
}
-static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
- int (*cb)(struct mv88e6xxx_chip *chip,
- const struct mv88e6xxx_vtu_entry *entry,
- void *priv),
- void *priv)
+int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv)
{
struct mv88e6xxx_vtu_entry entry = {
.vid = mv88e6xxx_max_vid(chip),
@@ -3884,6 +3884,24 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
return err ? err : val;
}
+static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
+ int reg)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 val;
+ int err;
+
+ if (!chip->info->ops->phy_read_c45)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err ? err : val;
+}
+
static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
@@ -3900,6 +3918,23 @@ static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return err;
}
+static int mv88e6xxx_mdio_write_c45(struct mii_bus *bus, int phy, int devad,
+ int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ int err;
+
+ if (!chip->info->ops->phy_write_c45)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_write_c45(chip, bus, phy, devad, reg, val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
struct device_node *np,
bool external)
@@ -3938,6 +3973,8 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
bus->read = mv88e6xxx_mdio_read;
bus->write = mv88e6xxx_mdio_write;
+ bus->read_c45 = mv88e6xxx_mdio_read_c45;
+ bus->write_c45 = mv88e6xxx_mdio_write_c45;
bus->parent = chip->dev;
if (!external) {
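With dedicated read_c45/write_c45 callbacks on struct mii_bus, the MDIO core dispatches Clause 45 accesses directly instead of packing the device address into the register number. A minimal consumer sketch, assuming the mdiobus C45 helpers of this kernel generation:

#include <linux/mdio.h>

static int example_read_pcs_stat1(struct mii_bus *bus, int addr)
{
	/* dispatches to bus->read_c45, i.e. mv88e6xxx_mdio_read_c45()
	 * above, when the bus provides it
	 */
	return mdiobus_c45_read(bus, addr, MDIO_MMD_PCS, MDIO_STAT1);
}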
@@ -4149,8 +4186,10 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
@@ -4198,8 +4237,10 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
@@ -4279,8 +4320,10 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4343,8 +4386,10 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
@@ -4426,8 +4471,10 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4472,8 +4519,10 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4527,8 +4576,10 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4573,8 +4624,10 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4673,8 +4726,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4736,8 +4791,10 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4799,8 +4856,10 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4862,8 +4921,10 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4925,8 +4986,10 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4964,8 +5027,10 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -5017,7 +5082,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
+ .ptp_ops = &mv88e6390_ptp_ops,
.phylink_get_caps = mv88e6390_phylink_get_caps,
};
@@ -5029,8 +5094,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
@@ -5074,8 +5141,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
@@ -5117,8 +5186,10 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -5183,8 +5254,10 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -5227,8 +5300,10 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -5275,8 +5350,10 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -5340,8 +5417,10 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -5391,7 +5470,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
+ .ptp_ops = &mv88e6390_ptp_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
@@ -5407,8 +5486,10 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -5462,7 +5543,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
+ .ptp_ops = &mv88e6390_ptp_ops,
.phylink_get_caps = mv88e6390x_phylink_get_caps,
};
@@ -5473,8 +5554,10 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -6526,7 +6609,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
const struct mv88e6xxx_ops *ops;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD | BR_PORT_LOCKED))
+ BR_BCAST_FLOOD | BR_PORT_LOCKED | BR_PORT_MAB))
return -EINVAL;
ops = chip->info->ops;
@@ -6545,7 +6628,7 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err = -EOPNOTSUPP;
+ int err = 0;
mv88e6xxx_reg_lock(chip);
@@ -6584,6 +6667,12 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
goto out;
}
+ if (flags.mask & BR_PORT_MAB) {
+ bool mab = !!(flags.val & BR_PORT_MAB);
+
+ mv88e6xxx_port_set_mab(chip, port, mab);
+ }
+
if (flags.mask & BR_PORT_LOCKED) {
bool locked = !!(flags.val & BR_PORT_LOCKED);
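As a usage note: MAB is meaningful only on a port that is both locked and has learning disabled, and it is toggled through the bridge flags path patched here. An illustrative invocation, assuming an iproute2 release that understands the mab keyword:

/*
 * bridge link set dev swp1 learning off locked on mab on
 */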
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index e693154cf803..da6e1339f809 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -280,6 +280,9 @@ struct mv88e6xxx_port {
unsigned int serdes_irq;
char serdes_irq_name[64];
struct devlink_region *region;
+
+ /* MAC Authentication Bypass (MAB) control flag */
+ bool mab;
};
enum mv88e6xxx_region_id {
@@ -451,6 +454,13 @@ struct mv88e6xxx_ops {
struct mii_bus *bus,
int addr, int reg, u16 val);
+ int (*phy_read_c45)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 *val);
+ int (*phy_write_c45)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 val);
+
/* Priority Override Table operations */
int (*pot_clear)(struct mv88e6xxx_chip *chip);
@@ -705,6 +715,7 @@ struct mv88e6xxx_ptp_ops {
int (*port_disable)(struct mv88e6xxx_chip *chip, int port);
int (*global_enable)(struct mv88e6xxx_chip *chip);
int (*global_disable)(struct mv88e6xxx_chip *chip);
+ int (*set_ptp_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int n_ext_ts;
int arr0_sts_reg;
int arr1_sts_reg;
@@ -784,6 +795,12 @@ static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int po
return (chip->info->invalid_port_mask & BIT(port)) != 0;
}
+static inline void mv88e6xxx_port_set_mab(struct mv88e6xxx_chip *chip,
+ int port, bool mab)
+{
+ chip->ports[port].mab = mab;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
@@ -802,6 +819,12 @@ static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
mutex_unlock(&chip->reg_lock);
}
+int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv);
+
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 5848112036b0..2fa55a643591 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -403,6 +403,18 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
+int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST;
+
+ /* Use the default high priority for PTP frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
+ return mv88e6390_g1_monitor_write(chip, ptr, port);
+}
+
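A worked sketch of what this helper writes for an upstream port of, say, 9, using the #defines added to global1.h below; the UPDATE bit is an assumption about mv88e6390_g1_monitor_write(), which is outside this diff:

	u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST;		/* 0x3200 */
	u16 data = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI | 9;	/* 0x00e9 */

	/* assumed: monitor_write() composes UPDATE (bit 15) | ptr | data,
	 * i.e. 0x8000 | 0x3200 | 0x00e9 == 0xb2e9
	 */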
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
u16 ptr;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 65958b2a0d3a..c99ddd117fe6 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -214,6 +214,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST 0x3200
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
@@ -303,6 +304,7 @@ int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
int port);
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 61ae2d61e25c..ce3b3690c3c0 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -12,6 +12,7 @@
#include "chip.h"
#include "global1.h"
+#include "switchdev.h"
#include "trace.h"
/* Offset 0x01: ATU FID Register */
@@ -409,23 +410,25 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
err = mv88e6xxx_g1_read_atu_violation(chip);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_atu_data_read(chip, &entry);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
if (err)
- goto out;
+ goto out_unlock;
+
+ mv88e6xxx_reg_unlock(chip);
spid = entry.state;
@@ -441,6 +444,13 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
entry.portvec, entry.mac,
fid);
chip->ports[spid].atu_miss_violation++;
+
+ if (fid != MV88E6XXX_FID_STANDALONE && chip->ports[spid].mab) {
+ err = mv88e6xxx_handle_miss_violation(chip, spid,
+ &entry, fid);
+ if (err)
+ goto out;
+ }
}
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
@@ -449,13 +459,13 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
fid);
chip->ports[spid].atu_full_violation++;
}
- mv88e6xxx_reg_unlock(chip);
return IRQ_HANDLED;
-out:
+out_unlock:
mv88e6xxx_reg_unlock(chip);
+out:
dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
err);
return IRQ_HANDLED;
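The relabelling above is a lock-ordering change: the new MAB handler re-takes the register lock (through the VTU walk) and takes rtnl, so the IRQ thread has to release reg_lock before calling it. A call-graph sketch, as this series reads:

/*
 * mv88e6xxx_g1_atu_prob_irq_thread_fn()   reg_lock held, then dropped
 *   -> mv88e6xxx_handle_miss_violation()
 *        -> mv88e6xxx_find_vid()          re-takes/releases reg_lock
 *        -> rtnl_lock()
 *        -> call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, ...)
 */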
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index fa65ecd9cb85..ed3b2f88e783 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -739,20 +739,18 @@ static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
}
-static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
- bool external, int port, int reg,
- u16 *data)
+static int _mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int devad,
+ int reg, u16 *data)
{
- int dev = (reg >> 16) & 0x1f;
- int addr = reg & 0xffff;
int err;
- err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
- addr);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
+ reg);
if (err)
return err;
- return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, dev,
+ return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, devad,
data);
}
@@ -771,51 +769,65 @@ static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
}
-static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
- bool external, int port, int reg,
- u16 data)
+static int _mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int devad,
+ int reg, u16 data)
{
- int dev = (reg >> 16) & 0x1f;
- int addr = reg & 0xffff;
int err;
- err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
- addr);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
+ reg);
if (err)
return err;
- return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, dev,
+ return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, devad,
data);
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
- int addr, int reg, u16 *val)
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, reg,
- val);
-
return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg,
val);
}
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
- int addr, int reg, u16 val)
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int devad,
+ int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, reg,
- val);
+ return _mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, devad, reg,
+ val);
+}
+
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int reg,
+ u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg,
val);
}
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int devad,
+ int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ return _mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, devad, reg,
+ val);
+}
+
/* Offset 0x1B: Watchdog Control */
static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 7536b8b0ad01..e973114d6890 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -314,12 +314,18 @@ int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val);
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val);
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 val);
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 252b5b3a3efe..8bb88b3d900d 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -55,6 +55,38 @@ int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
return chip->info->ops->phy_write(chip, bus, addr, reg, val);
}
+int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 *val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_read_c45)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_read_c45(chip, bus, addr, devad, reg, val);
+}
+
+int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_write_c45)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_write_c45(chip, bus, addr, devad, reg, val);
+}
+
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
{
return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h
index 05ea0d546969..5f47722364cc 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.h
+++ b/drivers/net/dsa/mv88e6xxx/phy.h
@@ -28,6 +28,10 @@ int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val);
int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 val);
+int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 *val);
+int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 val);
int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
u8 page, int reg, u16 *val);
int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index d838c174dc0d..ea17231dc34e 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -11,6 +11,7 @@
*/
#include "chip.h"
+#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
#include "ptp.h"
@@ -419,6 +420,34 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
+const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .set_ptp_cpu_port = mv88e6390_g1_set_ptp_cpu_port,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+};
+
static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
@@ -491,6 +520,23 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+ if (ptp_ops->set_ptp_cpu_port) {
+ struct dsa_port *dp;
+ int upstream = 0;
+ int err;
+
+ dsa_switch_for_each_user_port(dp, chip->ds) {
+ upstream = dsa_upstream_port(chip->ds, dp->index);
+ break;
+ }
+
+ err = ptp_ops->set_ptp_cpu_port(chip, upstream);
+ if (err) {
+ dev_err(chip->dev, "Failed to set PTP CPU destination port!\n");
+ return err;
+ }
+ }
+
chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
if (IS_ERR(chip->ptp_clock))
return PTR_ERR(chip->ptp_clock);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 269d5d16a466..6c4d09adc93c 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -151,6 +151,7 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
@@ -171,6 +172,7 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {};
#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index d94150d8f3f4..72faec8f44dc 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -36,17 +36,13 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 *val)
{
- int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
-
- return mv88e6xxx_phy_read(chip, lane, reg_c45, val);
+ return mv88e6xxx_phy_read_c45(chip, lane, device, reg, val);
}
static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 val)
{
- int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
-
- return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
+ return mv88e6xxx_phy_write_c45(chip, lane, device, reg, val);
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.c b/drivers/net/dsa/mv88e6xxx/switchdev.c
new file mode 100644
index 000000000000..4c346a884fb2
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/switchdev.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * switchdev.c
+ *
+ * Authors:
+ * Hans J. Schultz <netdev@kapio-technology.com>
+ *
+ */
+
+#include <net/switchdev.h>
+#include "chip.h"
+#include "global1.h"
+#include "switchdev.h"
+
+struct mv88e6xxx_fid_search_ctx {
+ u16 fid_search;
+ u16 vid_found;
+};
+
+static int __mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv)
+{
+ struct mv88e6xxx_fid_search_ctx *ctx = priv;
+
+ if (ctx->fid_search == entry->fid) {
+ ctx->vid_found = entry->vid;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip, u16 fid, u16 *vid)
+{
+ struct mv88e6xxx_fid_search_ctx ctx;
+ int err;
+
+ ctx.fid_search = fid;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_vtu_walk(chip, __mv88e6xxx_find_vid, &ctx);
+ mv88e6xxx_reg_unlock(chip);
+ if (err < 0)
+ return err;
+ if (err == 1)
+ *vid = ctx.vid_found;
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
+ struct mv88e6xxx_atu_entry *entry, u16 fid)
+{
+ struct switchdev_notifier_fdb_info info = {
+ .addr = entry->mac,
+ .locked = true,
+ };
+ struct net_device *brport;
+ struct dsa_port *dp;
+ u16 vid;
+ int err;
+
+ err = mv88e6xxx_find_vid(chip, fid, &vid);
+ if (err)
+ return err;
+
+ info.vid = vid;
+ dp = dsa_to_port(chip->ds, port);
+
+ rtnl_lock();
+ brport = dsa_port_to_bridge_port(dp);
+ if (!brport) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ err = call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ brport, &info.info, NULL);
+ rtnl_unlock();
+
+ return err;
+}
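On the receiving side, the bridge (or any switchdev listener) sees the unknown source MAC as a locked FDB entry. A self-contained consumer sketch, assuming only the switchdev notifier API; the example_ names are illustrative, not part of this series:

#include <net/switchdev.h>

static int example_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct switchdev_notifier_fdb_info *fdb_info = ptr;

	if (event == SWITCHDEV_FDB_ADD_TO_BRIDGE && fdb_info->locked)
		pr_info("MAB candidate %pM vid %u\n",
			fdb_info->addr, fdb_info->vid);

	return NOTIFY_DONE;
}

static struct notifier_block example_switchdev_nb = {
	.notifier_call = example_switchdev_event,
};

/* registered elsewhere via register_switchdev_notifier(&example_switchdev_nb) */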
diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.h b/drivers/net/dsa/mv88e6xxx/switchdev.h
new file mode 100644
index 000000000000..62214f9d62b0
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/switchdev.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * switchdev.h
+ *
+ * Authors:
+ * Hans J. Schultz <netdev@kapio-technology.com>
+ *
+ */
+
+#ifndef _MV88E6XXX_SWITCHDEV_H_
+#define _MV88E6XXX_SWITCHDEV_H_
+
+#include "chip.h"
+
+int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
+ struct mv88e6xxx_atu_entry *entry,
+ u16 fid);
+
+#endif /* _MV88E6XXX_SWITCHDEV_H_ */
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 08db9cf76818..081e7a88ea02 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -1,4 +1,34 @@
# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX_DSA_LIB
+ tristate
+ help
+ This is an umbrella module for all network switches that are
+ register-compatible with Ocelot and that perform I/O to their host
+ CPU through an NPI (Node Processor Interface) Ethernet port.
+ Its name comes from the first hardware chip to make use of it
+ (VSC9959), code-named Felix.
+
+config NET_DSA_MSCC_OCELOT_EXT
+ tristate "Ocelot External Ethernet switch support"
+ depends on NET_DSA && SPI
+ depends on NET_VENDOR_MICROSEMI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select MDIO_MSCC_MIIM
+ select MFD_OCELOT
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
+ select NET_DSA_TAG_OCELOT
+ help
+ This driver supports the VSC7511, VSC7512, VSC7513 and VSC7514 chips
+ when controlled through SPI.
+
+ The Ocelot switch family is a set of multi-port networking chips. All
+ of these chips have the ability to be controlled externally through
+ SPI or PCIe interfaces.
+
+ Say "Y" here to enable external control to these chips.
+
config NET_DSA_MSCC_FELIX
tristate "Ocelot / Felix Ethernet switch support"
depends on NET_DSA && PCI
@@ -8,6 +38,7 @@ config NET_DSA_MSCC_FELIX
depends on PTP_1588_CLOCK_OPTIONAL
depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
@@ -24,6 +55,7 @@ config NET_DSA_MSCC_SEVILLE
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO_MSCC_MIIM
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select PCS_LYNX
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index f6dd131e7491..ead868a293e3 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX_DSA_LIB) += mscc_felix_dsa_lib.o
obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_OCELOT_EXT) += mscc_ocelot_ext.o
obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
-mscc_felix-objs := \
- felix.o \
- felix_vsc9959.o
-
-mscc_seville-objs := \
- felix.o \
- seville_vsc9953.o
+mscc_felix_dsa_lib-objs := felix.o
+mscc_felix-objs := felix_vsc9959.o
+mscc_ocelot_ext-objs := ocelot_ext.o
+mscc_seville-objs := seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3b738cb2ae6e..d4cc9e60f369 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1075,9 +1075,12 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1092,7 +1095,7 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
@@ -1270,10 +1273,15 @@ static int felix_parse_ports_node(struct felix *felix,
err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
- dev_err(dev, "Unsupported PHY mode %s on port %d\n",
- phy_modes(phy_mode), port);
+ dev_info(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
of_node_put(child);
- return err;
+
+ /* Leave port_phy_modes[port] = 0, which is also
+ * PHY_INTERFACE_MODE_NA. This makes a best-effort
+ * attempt to bring up as many ports as possible.
+ */
+ continue;
}
port_phy_modes[port] = phy_mode;
@@ -1312,6 +1320,13 @@ static struct regmap *felix_request_regmap_by_name(struct felix *felix,
struct resource res;
int i;
+ /* In an MFD configuration, regmaps are registered directly to the
+ * parent device before the child devices are probed, so there is no
+ * need to initialize a new one.
+ */
+ if (!felix->info->resources)
+ return dev_get_regmap(ocelot->dev->parent, resource_name);
+
for (i = 0; i < felix->info->num_resources; i++) {
if (strcmp(resource_name, felix->info->resources[i].name))
continue;
@@ -2024,6 +2039,31 @@ static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
}
+static int felix_get_mm(struct dsa_switch *ds, int port,
+ struct ethtool_mm_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_mm(ocelot, port, state);
+}
+
+static int felix_set_mm(struct dsa_switch *ds, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_mm(ocelot, port, cfg, extack);
+}
+
+static void felix_get_mm_stats(struct dsa_switch *ds, int port,
+ struct ethtool_mm_stats *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_mm_stats(ocelot, port, stats);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
@@ -2031,6 +2071,9 @@ const struct dsa_switch_ops felix_switch_ops = {
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
+ .get_mm = felix_get_mm,
+ .set_mm = felix_set_mm,
+ .get_mm_stats = felix_get_mm_stats,
.get_stats64 = felix_get_stats64,
.get_pause_stats = felix_get_pause_stats,
.get_rmon_stats = felix_get_rmon_stats,
@@ -2103,6 +2146,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_set_host_flood = felix_port_set_host_flood,
.port_change_master = felix_port_change_master,
};
+EXPORT_SYMBOL_GPL(felix_switch_ops);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
@@ -2114,6 +2158,7 @@ struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
return dsa_to_port(ds, port)->slave;
}
+EXPORT_SYMBOL_GPL(felix_port_to_netdev);
int felix_netdev_to_port(struct net_device *dev)
{
@@ -2125,3 +2170,7 @@ int felix_netdev_to_port(struct net_device *dev)
return dp->index;
}
+EXPORT_SYMBOL_GPL(felix_netdev_to_port);
+
+MODULE_DESCRIPTION("Felix DSA library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index be22d6ccd7c8..d5d0b30c0b75 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -7,6 +7,7 @@
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+#define OCELOT_PORT_MODE_NONE 0
#define OCELOT_PORT_MODE_INTERNAL BIT(0)
#define OCELOT_PORT_MODE_SGMII BIT(1)
#define OCELOT_PORT_MODE_QSGMII BIT(2)
@@ -36,6 +37,7 @@ struct felix_info {
u16 vcap_pol_base2;
u16 vcap_pol_max2;
const struct ptp_clock_info *ptp_caps;
+ unsigned long quirks;
/* Some Ocelot switches are integrated into the SoC without the
* extraction IRQ line connected to the ARM GIC. By enabling this
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 01ac70fd7ddf..354aa3dbfde7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -6,6 +6,7 @@
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
#include <net/tc_act/tc_gate.h>
@@ -318,6 +319,29 @@ static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
+ REG(SYS_COUNT_RX_ASSEMBLY_ERRS, 0x0000b0),
+ REG(SYS_COUNT_RX_SMD_ERRS, 0x0000b4),
+ REG(SYS_COUNT_RX_ASSEMBLY_OK, 0x0000b8),
+ REG(SYS_COUNT_RX_MERGE_FRAGMENTS, 0x0000bc),
+ REG(SYS_COUNT_RX_PMAC_OCTETS, 0x0000c0),
+ REG(SYS_COUNT_RX_PMAC_UNICAST, 0x0000c4),
+ REG(SYS_COUNT_RX_PMAC_MULTICAST, 0x0000c8),
+ REG(SYS_COUNT_RX_PMAC_BROADCAST, 0x0000cc),
+ REG(SYS_COUNT_RX_PMAC_SHORTS, 0x0000d0),
+ REG(SYS_COUNT_RX_PMAC_FRAGMENTS, 0x0000d4),
+ REG(SYS_COUNT_RX_PMAC_JABBERS, 0x0000d8),
+ REG(SYS_COUNT_RX_PMAC_CRC_ALIGN_ERRS, 0x0000dc),
+ REG(SYS_COUNT_RX_PMAC_SYM_ERRS, 0x0000e0),
+ REG(SYS_COUNT_RX_PMAC_64, 0x0000e4),
+ REG(SYS_COUNT_RX_PMAC_65_127, 0x0000e8),
+ REG(SYS_COUNT_RX_PMAC_128_255, 0x0000ec),
+ REG(SYS_COUNT_RX_PMAC_256_511, 0x0000f0),
+ REG(SYS_COUNT_RX_PMAC_512_1023, 0x0000f4),
+ REG(SYS_COUNT_RX_PMAC_1024_1526, 0x0000f8),
+ REG(SYS_COUNT_RX_PMAC_1527_MAX, 0x0000fc),
+ REG(SYS_COUNT_RX_PMAC_PAUSE, 0x000100),
+ REG(SYS_COUNT_RX_PMAC_CONTROL, 0x000104),
+ REG(SYS_COUNT_RX_PMAC_LONGS, 0x000108),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
REG(SYS_COUNT_TX_UNICAST, 0x000204),
REG(SYS_COUNT_TX_MULTICAST, 0x000208),
@@ -349,6 +373,20 @@ static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
REG(SYS_COUNT_TX_AGED, 0x000278),
+ REG(SYS_COUNT_TX_MM_HOLD, 0x00027c),
+ REG(SYS_COUNT_TX_MERGE_FRAGMENTS, 0x000280),
+ REG(SYS_COUNT_TX_PMAC_OCTETS, 0x000284),
+ REG(SYS_COUNT_TX_PMAC_UNICAST, 0x000288),
+ REG(SYS_COUNT_TX_PMAC_MULTICAST, 0x00028c),
+ REG(SYS_COUNT_TX_PMAC_BROADCAST, 0x000290),
+ REG(SYS_COUNT_TX_PMAC_PAUSE, 0x000294),
+ REG(SYS_COUNT_TX_PMAC_64, 0x000298),
+ REG(SYS_COUNT_TX_PMAC_65_127, 0x00029c),
+ REG(SYS_COUNT_TX_PMAC_128_255, 0x0002a0),
+ REG(SYS_COUNT_TX_PMAC_256_511, 0x0002a4),
+ REG(SYS_COUNT_TX_PMAC_512_1023, 0x0002a8),
+ REG(SYS_COUNT_TX_PMAC_1024_1526, 0x0002ac),
+ REG(SYS_COUNT_TX_PMAC_1527_MAX, 0x0002b0),
REG(SYS_COUNT_DROP_LOCAL, 0x000400),
REG(SYS_COUNT_DROP_TAIL, 0x000404),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
@@ -439,6 +477,9 @@ static const u32 vsc9959_dev_gmii_regmap[] = {
REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
REG(DEV_MAC_STICKY, 0x44),
+ REG(DEV_MM_ENABLE_CONFIG, 0x48),
+ REG(DEV_MM_VERIF_CONFIG, 0x4C),
+ REG(DEV_MM_STATUS, 0x50),
REG_RESERVED(PCS1G_CFG),
REG_RESERVED(PCS1G_MODE_CFG),
REG_RESERVED(PCS1G_SD_CFG),
@@ -954,8 +995,10 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return -ENOMEM;
bus->name = "VSC9959 internal MDIO bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
@@ -2550,6 +2593,7 @@ static const struct felix_info felix_info_vsc9959 = {
.num_mact_rows = 2048,
.num_ports = VSC9959_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
+ .quirks = FELIX_MAC_QUIRKS,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
@@ -2560,20 +2604,19 @@ static const struct felix_info felix_info_vsc9959 = {
.tas_guard_bands_update = vsc9959_tas_guard_bands_update,
};
+/* The INTB interrupt is shared between PTP TX timestamp availability
+ * notification and MAC Merge status change notification on each port.
+ */
static irqreturn_t felix_irq_handler(int irq, void *data)
{
struct ocelot *ocelot = (struct ocelot *)data;
-
- /* The INTB interrupt is used for both PTP TX timestamp interrupt
- * and preemption status change interrupt on each port.
- *
- * - Get txtstamp if have
- * - TODO: handle preemption. Without handling it, driver may get
- * interrupt storm.
- */
+ int port;
ocelot_get_txtstamp(ocelot);
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_port_mm_irq(ocelot, port);
+
return IRQ_HANDLED;
}
@@ -2621,6 +2664,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
}
ocelot->ptp = 1;
+ ocelot->mm_supported = true;
ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
if (!ds) {
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
new file mode 100644
index 000000000000..14efa6387bd7
--- /dev/null
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ */
+
+#include <linux/mfd/ocelot.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/mscc/ocelot.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "felix.h"
+
+#define VSC7514_NUM_PORTS 11
+
+#define OCELOT_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc7512_port_modes[VSC7514_NUM_PORTS] = {
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+};
+
+static const struct ocelot_ops ocelot_ext_ops = {
+ .reset = ocelot_reset,
+ .wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+};
+
+static const char * const vsc7512_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [QS] = "qs",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
+};
+
+static const struct felix_info vsc7512_info = {
+ .resource_names = vsc7512_resource_names,
+ .regfields = vsc7514_regfields,
+ .map = vsc7514_regmap,
+ .ops = &ocelot_ext_ops,
+ .vcap = vsc7514_vcap_props,
+ .num_mact_rows = 1024,
+ .num_ports = VSC7514_NUM_PORTS,
+ .num_tx_queues = OCELOT_NUM_TC,
+ .port_modes = vsc7512_port_modes,
+};
+
+static int ocelot_ext_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ felix = kzalloc(sizeof(*felix), GFP_KERNEL);
+ if (!felix)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = dev;
+
+ ocelot->num_flooding_pgids = 1;
+
+ felix->info = &vsc7512_info;
+
+ ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err_probe(dev, err, "Failed to allocate DSA switch\n");
+ goto err_free_felix;
+ }
+
+ ds->dev = dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->num_tx_queues = felix->info->num_tx_queues;
+
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+ felix->tag_proto = DSA_TAG_PROTO_OCELOT;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err_probe(dev, err, "Failed to register DSA switch\n");
+ goto err_free_ds;
+ }
+
+ return 0;
+
+err_free_ds:
+ kfree(ds);
+err_free_felix:
+ kfree(felix);
+ return err;
+}
+
+static int ocelot_ext_remove(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return 0;
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ return 0;
+}
+
+static void ocelot_ext_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id ocelot_ext_switch_of_match[] = {
+ { .compatible = "mscc,vsc7512-switch" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ocelot_ext_switch_of_match);
+
+static struct platform_driver ocelot_ext_switch_driver = {
+ .driver = {
+ .name = "ocelot-switch",
+ .of_match_table = of_match_ptr(ocelot_ext_switch_of_match),
+ },
+ .probe = ocelot_ext_probe,
+ .remove = ocelot_ext_remove,
+ .shutdown = ocelot_ext_shutdown,
+};
+module_platform_driver(ocelot_ext_switch_driver);
+
+MODULE_DESCRIPTION("External Ocelot Switch driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MFD_OCELOT);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 88ed3a2e487a..287b64b788db 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -971,6 +971,7 @@ static const struct felix_info seville_info_vsc9953 = {
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
.vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
.vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
+ .quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
.num_ports = VSC9953_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 2f224b166bbb..55df4479ea30 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -425,16 +425,12 @@ qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 wri
}
static int
-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+qca8k_read_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t *val)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
- if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
- return 0;
-
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
@@ -451,16 +447,12 @@ exit:
}
static int
-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+qca8k_write_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t val)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
- if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
- return 0;
-
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
@@ -477,17 +469,14 @@ exit:
}
static int
-qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+qca8k_regmap_update_bits_mii(struct qca8k_priv *priv, uint32_t reg,
+ uint32_t mask, uint32_t write_val)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
- if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
- return 0;
-
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
@@ -510,17 +499,84 @@ exit:
return ret;
}
+static int
+qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ int i, count = val_len / sizeof(u32), ret;
+ u32 reg = *(u32 *)reg_buf & U16_MAX;
+ struct qca8k_priv *priv = ctx;
+
+ if (priv->mgmt_master &&
+ !qca8k_read_eth(priv, reg, val_buf, val_len))
+ return 0;
+
+ /* loop count times, incrementing reg by 4 each iteration */
+ for (i = 0; i < count; i++, reg += sizeof(u32)) {
+ ret = qca8k_read_mii(priv, reg, val_buf + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
+ const void *val_buf, size_t val_len)
+{
+ int i, count = val_len / sizeof(u32), ret;
+ u32 reg = *(u32 *)reg_buf & U16_MAX;
+ struct qca8k_priv *priv = ctx;
+ u32 *val = (u32 *)val_buf;
+
+ if (priv->mgmt_master &&
+ !qca8k_write_eth(priv, reg, val, val_len))
+ return 0;
+
+ /* loop count times, incrementing reg by 4 and advancing the val
+ * pointer to the next value
+ */
+ for (i = 0; i < count; i++, reg += sizeof(u32), val++) {
+ ret = qca8k_write_mii(priv, reg, *val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_bulk_write(void *ctx, const void *data, size_t bytes)
+{
+ return qca8k_bulk_gather_write(ctx, data, sizeof(u16), data + sizeof(u16),
+ bytes - sizeof(u16));
+}
+
+static int
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+{
+ struct qca8k_priv *priv = ctx;
+
+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+ return 0;
+
+ return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
+}
+
static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x16ac, /* end MIB - Port6 range */
- .reg_read = qca8k_regmap_read,
- .reg_write = qca8k_regmap_write,
+ .read = qca8k_bulk_read,
+ .write = qca8k_bulk_write,
.reg_update_bits = qca8k_regmap_update_bits,
.rd_table = &qca8k_readable_table,
.disable_locking = true, /* Locking is handled by qca8k read/write */
.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
+ .max_raw_read = 32, /* mgmt eth can read/write up to 8 registers at a time */
+ .max_raw_write = 32,
};
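With .read/.write and max_raw_read/max_raw_write wired into the regmap, callers can use regmap's bulk API instead of the driver-private bulk helpers removed from qca8k-common.c below; 32 raw bytes covers 8 u32 registers per management-Ethernet transaction. A minimal sketch (note val_count is in registers, not bytes):

	u32 atu[QCA8K_ATU_TABLE_SIZE];	/* 3 words; see qca8k.h below */
	int ret;

	ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0,
			       atu, ARRAY_SIZE(atu));
	if (ret)
		return ret;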
static int
@@ -2089,8 +2145,6 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
static const struct qca8k_info_ops qca8xxx_ops = {
.autocast_mib = qca8k_get_ethtool_stats_eth,
- .read_eth = qca8k_read_eth,
- .write_eth = qca8k_write_eth,
};
static const struct qca8k_match_data qca8327 = {
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index fb45b598847b..96773e432558 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -101,45 +101,6 @@ const struct regmap_access_table qca8k_readable_table = {
.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};
-/* TODO: remove these extra ops when we can support regmap bulk read/write */
-static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
- int i, count = len / sizeof(u32), ret;
-
- if (priv->mgmt_master && priv->info->ops->read_eth &&
- !priv->info->ops->read_eth(priv, reg, val, len))
- return 0;
-
- for (i = 0; i < count; i++) {
- ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-/* TODO: remove these extra ops when we can support regmap bulk read/write */
-static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
- int i, count = len / sizeof(u32), ret;
- u32 tmp;
-
- if (priv->mgmt_master && priv->info->ops->write_eth &&
- !priv->info->ops->write_eth(priv, reg, val, len))
- return 0;
-
- for (i = 0; i < count; i++) {
- tmp = val[i];
-
- ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
u32 val;
@@ -150,11 +111,12 @@ static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
- u32 reg[3];
+ u32 reg[QCA8K_ATU_TABLE_SIZE];
int ret;
/* load the ARL table into an array */
- ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
+ QCA8K_ATU_TABLE_SIZE);
if (ret)
return ret;
@@ -178,7 +140,7 @@ static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
const u8 *mac, u8 aging)
{
- u32 reg[3] = { 0 };
+ u32 reg[QCA8K_ATU_TABLE_SIZE] = { 0 };
/* vid - 83:72 */
reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
@@ -195,7 +157,8 @@ static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
/* load the array into the ARL table */
- qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ regmap_bulk_write(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
+ QCA8K_ATU_TABLE_SIZE);
}
static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 03514f7a20be..7996975d29d3 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -148,6 +148,8 @@
#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
/* Lookup registers */
+#define QCA8K_ATU_TABLE_SIZE 3 /* 12-byte-wide table / sizeof(u32) */
+
#define QCA8K_REG_ATU_DATA0 0x600
#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
@@ -328,9 +330,6 @@ struct qca8k_priv;
struct qca8k_info_ops {
int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
- /* TODO: remove these extra ops when we can support regmap bulk read/write */
- int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
- int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
};
struct qca8k_match_data {
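
The qca8k hunks above replace the driver-private bulk helpers with regmap's native bulk interface: the regmap config now advertises .read/.write callbacks plus max_raw_read/max_raw_write, and callers such as the FDB code switch to regmap_bulk_read()/regmap_bulk_write(). A minimal sketch of the resulting call path, using only identifiers from the diff (illustrative, not part of the patch):

	/* Fetching one 12-byte ATU entry through regmap; regmap hands the
	 * whole span to qca8k_bulk_read(), which either issues a single
	 * mgmt-Ethernet transaction or falls back to three 4-byte
	 * qca8k_read_mii() accesses.
	 */
	static int qca8k_fdb_read_sketch(struct qca8k_priv *priv, u32 *entry)
	{
		return regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0,
					entry, QCA8K_ATU_TABLE_SIZE);
	}
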
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index ed413d555bec..919027cf2012 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -781,9 +781,6 @@ static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
u32 cmd, status;
int ret;
- if (phy_reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
cmd = A5PSW_MDIO_COMMAND_READ;
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
@@ -809,9 +806,6 @@ static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
struct a5psw *a5psw = bus->priv;
u32 cmd;
- if (phy_reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
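
The MII_ADDR_C45 guards dropped here (and in the owl-emac and xgbe hunks further down) are made redundant by the C22/C45 split in the MDIO core: Clause 45 now travels through the dedicated read_c45/write_c45 bus callbacks, so a bus that only fills in .read/.write can never see a C45 request. A hedged sketch of the core-side dispatch this relies on (simplified shape, not a verbatim copy of drivers/net/phy/mdio_bus.c):

	static int mdiobus_c45_read_sketch(struct mii_bus *bus, int addr,
					   int devad, int regnum)
	{
		if (!bus->read_c45)		/* C22-only bus such as a5psw */
			return -EOPNOTSUPP;	/* rejected in the core, not the driver */

		return bus->read_c45(bus, addr, devad, regnum);
	}
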
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 9ba2ec2b966d..fb1549a5fe32 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -149,8 +149,10 @@ struct sja1105_info {
bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
int (*clocking_setup)(struct sja1105_private *priv);
- int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
- int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
+ int (*pcs_mdio_read_c45)(struct mii_bus *bus, int phy, int mmd,
+ int reg);
+ int (*pcs_mdio_write_c45)(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val);
int (*disable_microcontroller)(struct sja1105_private *priv);
const char *name;
bool supports_mii[SJA1105_MAX_NUM_PORTS];
@@ -303,10 +305,12 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
/* From sja1105_mdio.c */
int sja1105_mdiobus_register(struct dsa_switch *ds);
void sja1105_mdiobus_unregister(struct dsa_switch *ds);
-int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
-int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
-int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
-int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg);
+int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val);
+int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg);
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val);
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 4059fcc8c832..01f1cb719042 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -7,20 +7,15 @@
#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
-int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
return 0xffff;
@@ -37,19 +32,15 @@ int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- u16 mmd;
-
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
tmp = val;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
@@ -58,7 +49,7 @@ int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
}
-int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -66,17 +57,12 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
int offset, bank;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
return NXP_SJA1110_XPCS_ID >> 16;
@@ -108,7 +94,8 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -116,17 +103,12 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
int offset, bank;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
bank = addr >> 8;
offset = addr & GENMASK(7, 0);
@@ -167,7 +149,7 @@ static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
}
-static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
+static int sja1105_base_t1_mdio_read_c22(struct mii_bus *bus, int phy, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -175,30 +157,31 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
u32 tmp;
int rc;
- if (reg & MII_ADDR_C45) {
- u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
-
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
- mmd);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
- tmp = reg & MII_REGADDR_C45_MASK;
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ return tmp & 0xffff;
+}
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
- mmd);
+static int sja1105_base_t1_mdio_read_c45(struct mii_bus *bus, int phy,
+ int mmd, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
- rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
- return tmp & 0xffff;
- }
+ tmp = reg;
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
- /* Clause 22 read */
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
if (rc < 0)
@@ -207,41 +190,37 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg,
- u16 val)
+static int sja1105_base_t1_mdio_write_c22(struct mii_bus *bus, int phy, int reg,
+ u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- int rc;
-
- if (reg & MII_ADDR_C45) {
- u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
-
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
- mmd);
- tmp = reg & MII_REGADDR_C45_MASK;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ tmp = val & 0xffff;
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
- mmd);
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
- tmp = val & 0xffff;
+static int sja1105_base_t1_mdio_write_c45(struct mii_bus *bus, int phy,
+ int mmd, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
- return 0;
- }
+ tmp = reg;
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
- /* Clause 22 write */
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
tmp = val & 0xffff;
@@ -256,9 +235,6 @@ static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
u32 tmp;
int rc;
- if (reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
&tmp, NULL);
if (rc < 0)
@@ -275,9 +251,6 @@ static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
const struct sja1105_regs *regs = priv->info->regs;
u32 tmp = val;
- if (reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
&tmp, NULL);
}
@@ -360,8 +333,10 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
bus->name = "SJA1110 100base-T1 MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
dev_name(priv->ds->dev));
- bus->read = sja1105_base_t1_mdio_read;
- bus->write = sja1105_base_t1_mdio_write;
+ bus->read = sja1105_base_t1_mdio_read_c22;
+ bus->write = sja1105_base_t1_mdio_write_c22;
+ bus->read_c45 = sja1105_base_t1_mdio_read_c45;
+ bus->write_c45 = sja1105_base_t1_mdio_write_c45;
bus->parent = priv->ds->dev;
mdio_priv = bus->priv;
mdio_priv->priv = priv;
@@ -398,7 +373,7 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
int rc = 0;
int port;
- if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write)
+ if (!priv->info->pcs_mdio_read_c45 || !priv->info->pcs_mdio_write_c45)
return 0;
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
@@ -408,8 +383,8 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
bus->name = "SJA1105 PCS MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
dev_name(ds->dev));
- bus->read = priv->info->pcs_mdio_read;
- bus->write = priv->info->pcs_mdio_write;
+ bus->read_c45 = priv->info->pcs_mdio_read_c45;
+ bus->write_c45 = priv->info->pcs_mdio_write_c45;
bus->parent = ds->dev;
/* There is no PHY on this MDIO bus => mask out all PHY addresses
* from auto probing.
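
With this conversion the MMD arrives as its own argument instead of being packed into reg together with MII_ADDR_C45, so the sja1105 address construction collapses to (mmd << 16) | reg. A before/after sketch of one PCS access (0x8030 is a made-up register number, for illustration only):

	/* old packed form, removed by this patch:
	 *	reg = MII_ADDR_C45 | (MDIO_MMD_VEND1 << MII_DEVADDR_C45_SHIFT)
	 *	      | 0x8030;
	 *	val = sja1105_pcs_mdio_read(bus, phy, reg);
	 *
	 * new explicit C45 form, with the device address as a parameter:
	 */
	val = sja1105_pcs_mdio_read_c45(bus, phy, MDIO_MMD_VEND1, 0x8030);
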
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index d3c9ad6d39d4..5ce29c8057a4 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -719,8 +719,8 @@ const struct sja1105_info sja1105r_info = {
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
- .pcs_mdio_read = sja1105_pcs_mdio_read,
- .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.regs = &sja1105pqrs_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
@@ -756,8 +756,8 @@ const struct sja1105_info sja1105s_info = {
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
- .pcs_mdio_read = sja1105_pcs_mdio_read,
- .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
@@ -794,8 +794,8 @@ const struct sja1105_info sja1110a_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -844,8 +844,8 @@ const struct sja1105_info sja1110b_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -894,8 +894,8 @@ const struct sja1105_info sja1110c_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -944,8 +944,8 @@ const struct sja1105_info sja1110d_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index cd4d71b83c33..c6f8f852bff1 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1275,9 +1275,6 @@ static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
u32 data, tmp;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
data = OWL_EMAC_BIT_MAC_CSR10_SB;
data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
@@ -1305,9 +1302,6 @@ owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
struct owl_emac_priv *priv = bus->priv;
u32 data, tmp;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
data = OWL_EMAC_BIT_MAC_CSR10_SB;
data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index c26b8597945b..3f316a0f4158 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -523,7 +523,6 @@ static int adin1110_register_mdiobus(struct adin1110_priv *priv,
mii_bus->priv = priv;
mii_bus->parent = dev;
mii_bus->phy_mask = ~((u32)GENMASK(2, 0));
- mii_bus->probe_capabilities = MDIOBUS_C22;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
ret = devm_mdiobus_register(dev, mii_bus);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e8ad5ea31aff..d3999db7c6a2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -597,7 +597,9 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
if (rc)
return rc;
}
+ xdp_features_set_redirect_target(netdev, false);
} else if (old_bpf_prog) {
+ xdp_features_clear_redirect_target(netdev);
rc = ena_destroy_and_free_all_xdp_queues(adapter);
if (rc)
return rc;
@@ -4103,6 +4105,8 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
/* Set offload features */
ena_set_dev_offloads(feat, netdev);
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
adapter->max_mtu = feat->dev_attr.max_mtu;
netdev->max_mtu = adapter->max_mtu;
netdev->min_mtu = ENA_MIN_MTU;
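
The ena hunks are one instance of the new XDP feature advertisement API from this merge: a driver publishes its static baseline in netdev->xdp_features and flips the dynamic redirect-target bit as a program attaches or detaches. The pattern in miniature (names taken from the hunks above; a sketch, not the full driver flow):

	/* Baseline capabilities, set once when the netdev is configured */
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	/* With a program installed, the device may also serve as an
	 * XDP_REDIRECT target; the bool says whether ndo_xdp_xmit can
	 * take multi-buffer (S/G) frames. Clear the bit again on detach.
	 */
	if (prog)
		xdp_features_set_redirect_target(netdev, false);
	else
		xdp_features_clear_redirect_target(netdev);
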
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 466273b22f0a..3b70f6737633 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1285,6 +1285,22 @@
#define MDIO_PMA_RX_CTRL1 0x8051
#endif
+#ifndef MDIO_PMA_RX_LSTS
+#define MDIO_PMA_RX_LSTS 0x018020
+#endif
+
+#ifndef MDIO_PMA_RX_EQ_CTRL4
+#define MDIO_PMA_RX_EQ_CTRL4 0x0001805C
+#endif
+
+#ifndef MDIO_PMA_MP_MISC_STS
+#define MDIO_PMA_MP_MISC_STS 0x0078
+#endif
+
+#ifndef MDIO_PMA_PHY_RX_EQ_CEU
+#define MDIO_PMA_PHY_RX_EQ_CEU 0x1800E
+#endif
+
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
@@ -1395,6 +1411,28 @@
#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+#define XGBE_PMA_RX_SIG_DET_0_MASK BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_ENABLE BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_VALID_0_MASK BIT(12)
+#define XGBE_PMA_RX_VALID_0_ENABLE BIT(12)
+#define XGBE_PMA_RX_VALID_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_AD_REQ_MASK BIT(12)
+#define XGBE_PMA_RX_AD_REQ_ENABLE BIT(12)
+#define XGBE_PMA_RX_AD_REQ_DISABLE 0x0000
+
+#define XGBE_PMA_RX_ADPT_ACK_MASK BIT(12)
+#define XGBE_PMA_RX_ADPT_ACK BIT(12)
+
+#define XGBE_PMA_CFF_UPDTM1_VLD BIT(8)
+#define XGBE_PMA_CFF_UPDT0_VLD BIT(9)
+#define XGBE_PMA_CFF_UPDT1_VLD BIT(10)
+#define XGBE_PMA_CFF_UPDT_MASK (XGBE_PMA_CFF_UPDTM1_VLD |\
+ XGBE_PMA_CFF_UPDT0_VLD | \
+ XGBE_PMA_CFF_UPDT1_VLD)
+
#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
@@ -1699,20 +1737,21 @@ do { \
} while (0)
/* Macros for building, reading or writing register values or bits
- * using MDIO. Different from above because of the use of standardized
- * Linux include values. No shifting is performed with the bit
- * operations, everything works on mask values.
+ * using MDIO.
*/
+
+#define XGBE_ADDR_C45 BIT(30)
+
#define XMDIO_READ(_pdata, _mmd, _reg) \
((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
- MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+ XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
(XMDIO_READ((_pdata), _mmd, _reg) & _mask)
#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
- MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+ XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
do { \
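
Because MII_ADDR_C45 is being retired from the common MDIO headers, xgbe keeps its internal "this address is Clause 45" marker alive as the private XGBE_ADDR_C45 bit; only the flag's definition changes, the packed (mmd << 16) | reg encoding stays the same. Expanding one macro by hand shows the layout (a worked example of the macro above, not new code):

	/* XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1) expands to roughly: */
	pdata->hw_if.read_mmd_regs(pdata, 0,
			XGBE_ADDR_C45		  /* bit 30: C45 flag, masked off later */
			| (MDIO_MMD_PCS << 16)	  /* device address in bits 16+ */
			| (MDIO_STAT1 & 0xffff)); /* register in the low 16 bits */
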
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4030d619e84f..f393228d41c7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -814,6 +814,9 @@ static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
unsigned int ss;
switch (speed) {
+ case SPEED_10:
+ ss = 0x07;
+ break;
case SPEED_1000:
ss = 0x03;
break;
@@ -1154,8 +1157,8 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address, index, offset;
int mmd_data;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1186,8 +1189,8 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1217,8 +1220,8 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1245,8 +1248,8 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1291,11 +1294,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
}
}
-static unsigned int xgbe_create_mdio_sca(int port, int reg)
+static unsigned int xgbe_create_mdio_sca_c22(int port, int reg)
{
- unsigned int mdio_sca, da;
+ unsigned int mdio_sca;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
- da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+ return mdio_sca;
+}
+
+static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg)
+{
+ unsigned int mdio_sca;
mdio_sca = 0;
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
@@ -1305,14 +1317,13 @@ static unsigned int xgbe_create_mdio_sca(int port, int reg)
return mdio_sca;
}
-static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
- int reg, u16 val)
+static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata,
+ unsigned int mdio_sca, u16 val)
{
- unsigned int mdio_sca, mdio_sccd;
+ unsigned int mdio_sccd;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1329,14 +1340,33 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
return 0;
}
-static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
- int reg)
+static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
{
- unsigned int mdio_sca, mdio_sccd;
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+ return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg, u16 val)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+ return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata,
+ unsigned int mdio_sca)
+{
+ unsigned int mdio_sccd;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1352,6 +1382,26 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}
+static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+ return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
+static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+ return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
enum xgbe_mdio_mode mode)
{
@@ -3565,8 +3615,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->set_speed = xgbe_set_speed;
hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
- hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
- hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
+ hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22;
+ hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22;
+ hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45;
+ hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45;
hw_if->set_gpio = xgbe_set_gpio;
hw_if->clr_gpio = xgbe_clr_gpio;
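
After this refactor the hw_if carries four external-MII accessors instead of two, but the transfer path stays shared: the thin C22/C45 wrappers differ only in how the MDIOSCAR word (mdio_sca) is assembled before the common helper runs. Roughly (a summary of the hunks above, not new behaviour):

	/* C22: just the port and the 5-bit register */
	mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);

	/* C45: additionally encodes the MMD in the DA field */
	mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);

	/* either word then feeds the same transfer routine */
	ret = xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
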
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 43fdd111235a..33a9574e9e04 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -274,6 +274,15 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000);
}
+static void xgbe_sgmii_10_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 10M speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_10);
+}
+
static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
{
/* Set MAC to 1G speed */
@@ -306,6 +315,9 @@ static void xgbe_change_mode(struct xgbe_prv_data *pdata,
case XGBE_MODE_KR:
xgbe_kr_mode(pdata);
break;
+ case XGBE_MODE_SGMII_10:
+ xgbe_sgmii_10_mode(pdata);
+ break;
case XGBE_MODE_SGMII_100:
xgbe_sgmii_100_mode(pdata);
break;
@@ -1077,6 +1089,8 @@ static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
static const char *xgbe_phy_speed_string(int speed)
{
switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
case SPEED_100:
return "100Mbps";
case SPEED_1000:
@@ -1164,6 +1178,7 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
case XGBE_MODE_KX_1000:
case XGBE_MODE_KX_2500:
case XGBE_MODE_KR:
+ case XGBE_MODE_SGMII_10:
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
case XGBE_MODE_X:
@@ -1225,6 +1240,8 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
} else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_10);
} else {
enable_irq(pdata->an_irq);
ret = -EINVAL;
@@ -1325,6 +1342,9 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
mode = xgbe_phy_status_aneg(pdata);
switch (mode) {
+ case XGBE_MODE_SGMII_10:
+ pdata->phy.speed = SPEED_10;
+ break;
case XGBE_MODE_SGMII_100:
pdata->phy.speed = SPEED_100;
break;
@@ -1467,6 +1487,8 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
xgbe_sgmii_1000_mode(pdata);
} else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
xgbe_sgmii_100_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) {
+ xgbe_sgmii_10_mode(pdata);
} else {
ret = -EINVAL;
goto err_irq;
@@ -1564,6 +1586,8 @@ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
return SPEED_1000;
else if (XGBE_ADV(lks, 100baseT_Full))
return SPEED_100;
+ else if (XGBE_ADV(lks, 10baseT_Full))
+ return SPEED_10;
return SPEED_UNKNOWN;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index c731a04731f8..16e7fb2c0dae 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -124,6 +124,7 @@
#include "xgbe.h"
#include "xgbe-common.h"
+#define XGBE_PHY_PORT_SPEED_10 BIT(0)
#define XGBE_PHY_PORT_SPEED_100 BIT(1)
#define XGBE_PHY_PORT_SPEED_1000 BIT(2)
#define XGBE_PHY_PORT_SPEED_2500 BIT(3)
@@ -387,6 +388,10 @@ struct xgbe_phy_data {
static DEFINE_MUTEX(xgbe_phy_comm_lock);
static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ enum xgbe_mb_cmd cmd,
+ enum xgbe_mb_subcmd sub_cmd);
static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
struct xgbe_i2c_op *i2c_op)
@@ -599,20 +604,27 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
return -ETIMEDOUT;
}
-static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr,
- int reg, u16 val)
+static int xgbe_phy_mdio_mii_write_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- if (reg & MII_ADDR_C45) {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
- return -ENOTSUPP;
- } else {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
- return -ENOTSUPP;
- }
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.write_ext_mii_regs_c22(pdata, addr, reg, val);
+}
- return pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val);
+static int xgbe_phy_mdio_mii_write_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg, u16 val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.write_ext_mii_regs_c45(pdata, addr, devad,
+ reg, val);
}
static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
@@ -637,7 +649,8 @@ static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
return ret;
}
-static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
+static int xgbe_phy_mii_write_c22(struct mii_bus *mii, int addr, int reg,
+ u16 val)
{
struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -650,29 +663,58 @@ static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
ret = xgbe_phy_i2c_mii_write(pdata, reg, val);
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
- ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val);
+ ret = xgbe_phy_mdio_mii_write_c22(pdata, addr, reg, val);
else
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
xgbe_phy_put_comm_ownership(pdata);
return ret;
}
-static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr,
- int reg)
+static int xgbe_phy_mii_write_c45(struct mii_bus *mii, int addr, int devad,
+ int reg, u16 val)
{
+ struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
- if (reg & MII_ADDR_C45) {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
- return -ENOTSUPP;
- } else {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
- return -ENOTSUPP;
- }
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
- return pdata->hw_if.read_ext_mii_regs(pdata, addr, reg);
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = -EOPNOTSUPP;
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_write_c45(pdata, addr, devad, reg, val);
+ else
+ ret = -EOPNOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mdio_mii_read_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.read_ext_mii_regs_c22(pdata, addr, reg);
+}
+
+static int xgbe_phy_mdio_mii_read_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.read_ext_mii_regs_c45(pdata, addr, devad, reg);
}
static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
@@ -697,7 +739,7 @@ static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
return ret;
}
-static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
+static int xgbe_phy_mii_read_c22(struct mii_bus *mii, int addr, int reg)
{
struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -710,7 +752,30 @@ static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
ret = xgbe_phy_i2c_mii_read(pdata, reg);
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
- ret = xgbe_phy_mdio_mii_read(pdata, addr, reg);
+ ret = xgbe_phy_mdio_mii_read_c22(pdata, addr, reg);
+ else
+ ret = -EOPNOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad,
+ int reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = -EOPNOTSUPP;
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg);
else
ret = -ENOTSUPP;
@@ -759,6 +824,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) {
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)
+ XGBE_SET_SUP(lks, 10baseT_Full);
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
XGBE_SET_SUP(lks, 100baseT_Full);
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
@@ -1542,6 +1609,16 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
xgbe_phy_phydev_flowctrl(pdata);
switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
+ case XGBE_SGMII_AN_LINK_SPEED_10:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ XGBE_SET_LP_ADV(lks, 10baseT_Full);
+ mode = XGBE_MODE_SGMII_10;
+ } else {
+ /* Half-duplex not supported */
+ XGBE_SET_LP_ADV(lks, 10baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
case XGBE_SGMII_AN_LINK_SPEED_100:
if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
XGBE_SET_LP_ADV(lks, 100baseT_Full);
@@ -1658,7 +1735,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
if (phy_data->phydev &&
- (phy_data->phydev->speed == SPEED_100))
+ (phy_data->phydev->speed == SPEED_10))
+ mode = XGBE_MODE_SGMII_10;
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
mode = XGBE_MODE_SGMII_100;
else
mode = XGBE_MODE_SGMII_1000;
@@ -1673,7 +1753,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
break;
default:
if (phy_data->phydev &&
- (phy_data->phydev->speed == SPEED_100))
+ (phy_data->phydev->speed == SPEED_10))
+ mode = XGBE_MODE_SGMII_10;
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
mode = XGBE_MODE_SGMII_100;
else
mode = XGBE_MODE_SGMII_1000;
@@ -1803,6 +1886,9 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
if (phy_data->phydev &&
(phy_data->phydev->speed == SPEED_10000))
XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_2500))
+ XGBE_SET_ADV(dlks, 2500baseX_Full);
else
XGBE_SET_ADV(dlks, 1000baseKX_Full);
break;
@@ -1910,8 +1996,8 @@ static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata,
redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
redrv_val = (u16)mode;
- return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
- redrv_reg, redrv_val);
+ return pdata->hw_if.write_ext_mii_regs_c22(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
}
static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata,
@@ -1956,6 +2042,93 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
+#define MAX_RX_ADAPT_RETRIES 1
+#define XGBE_PMA_RX_VAL_SIG_MASK (XGBE_PMA_RX_SIG_DET_0_MASK | \
+ XGBE_PMA_RX_VALID_0_MASK)
+
+static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+
+ xgbe_phy_perform_ratechange(pdata,
+ mode == XGBE_MODE_KR ?
+ XGBE_MB_CMD_SET_10G_KR :
+ XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
+}
+
+static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Step 2: Force PCS to send RX_ADAPT Req to PHY */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);
+
+ /* Step 3: Wait for RX_ADAPT ACK from the PHY */
+ msleep(200);
+
+ /* Software polls for coefficient update command (given by local PHY) */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_PHY_RX_EQ_CEU);
+
+ /* Clear the RX_AD_REQ bit */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);
+
+ /* Check if coefficient update command is set */
+ if ((reg & XGBE_PMA_CFF_UPDT_MASK) != XGBE_PMA_CFF_UPDT_MASK)
+ goto set_mode;
+
+ /* Step 4: Check for Block lock */
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS) {
+ /* If the block lock is found, update the helpers
+ * and declare the link up
+ */
+ netif_dbg(pdata, link, pdata->netdev, "Block_lock done");
+ pdata->rx_adapt_done = true;
+ pdata->mode_set = false;
+ return;
+ }
+
+set_mode:
+ xgbe_set_rx_adap_mode(pdata, phy_data->cur_mode);
+}
+
+static void xgbe_phy_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+rx_adapt_reinit:
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_LSTS,
+ XGBE_PMA_RX_VAL_SIG_MASK);
+
+ /* Step 1: Check for RX_VALID && LF_SIGDET */
+ if ((reg & XGBE_PMA_RX_VAL_SIG_MASK) != XGBE_PMA_RX_VAL_SIG_MASK) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "RX_VALID or LF_SIGDET is unset, issue rrc");
+ xgbe_phy_rrc(pdata);
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+ goto rx_adapt_reinit;
+ }
+
+ /* perform rx adaptation */
+ xgbe_rx_adaptation(pdata);
+}
+
static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
{
int reg;
@@ -2021,7 +2194,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- goto reenable_pll;
+ goto do_rx_adaptation;
usleep_range(1000, 2000);
}
@@ -2031,6 +2204,20 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
/* Reset on error */
xgbe_phy_rx_reset(pdata);
+ goto reenable_pll;
+
+do_rx_adaptation:
+ if (pdata->en_rx_adap && sub_cmd == XGBE_MB_SUBCMD_RX_ADAP &&
+ (cmd == XGBE_MB_CMD_SET_10G_KR || cmd == XGBE_MB_CMD_SET_10G_SFI)) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "Enabling RX adaptation\n");
+ pdata->mode_set = true;
+ xgbe_phy_rx_adaptation(pdata);
+ /* return from here to avoid enabling PLL ctrl
+ * during adaptation phase
+ */
+ return;
+ }
reenable_pll:
/* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */
@@ -2059,6 +2246,31 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
}
+static bool enable_rx_adap(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* Rx-Adaptation is not supported on older platforms (ver < 0x30) */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if (ver < 0x30)
+ return false;
+
+ /* Re-driver models 4223 and 4227 do not support Rx-Adaptation */
+ if (phy_data->redrv &&
+ (phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4223 ||
+ phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4227))
+ return false;
+
+ /* 10G KR mode with AN does not support Rx-Adaptation */
+ if (mode == XGBE_MODE_KR &&
+ phy_data->port_mode != XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG)
+ return false;
+
+ pdata->en_rx_adap = 1;
+ return true;
+}
+
static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2067,7 +2279,12 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
/* 10G/SFI */
if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
+ pdata->en_rx_adap = 0;
xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
+ } else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+ (enable_rx_adap(pdata, XGBE_MODE_SFI))) {
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
} else {
if (phy_data->sfp_cable_len <= 1)
xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
@@ -2127,6 +2344,20 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "100MbE SGMII mode set\n");
}
+static void xgbe_phy_sgmii_10_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 10M/SGMII */
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_10MBITS);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_10;
+
+ netif_dbg(pdata, link, pdata->netdev, "10MbE SGMII mode set\n");
+}
+
static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2134,7 +2365,12 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 10G/KR */
- xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);
+ if (enable_rx_adap(pdata, XGBE_MODE_KR))
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_RX_ADAP);
+ else
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KR;
@@ -2185,12 +2421,15 @@ static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
return xgbe_phy_cur_mode(pdata);
switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_SGMII_10:
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
return XGBE_MODE_KR;
+ case XGBE_MODE_KX_2500:
+ return XGBE_MODE_SGMII_1000;
case XGBE_MODE_KR:
default:
- return XGBE_MODE_SGMII_1000;
+ return XGBE_MODE_KX_2500;
}
}
@@ -2252,6 +2491,8 @@ static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
int speed)
{
switch (speed) {
+ case SPEED_10:
+ return XGBE_MODE_SGMII_10;
case SPEED_100:
return XGBE_MODE_SGMII_100;
case SPEED_1000:
@@ -2269,6 +2510,8 @@ static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data,
int speed)
{
switch (speed) {
+ case SPEED_10:
+ return XGBE_MODE_SGMII_10;
case SPEED_100:
return XGBE_MODE_SGMII_100;
case SPEED_1000:
@@ -2343,6 +2586,9 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
case XGBE_MODE_KR:
xgbe_phy_kr_mode(pdata);
break;
+ case XGBE_MODE_SGMII_10:
+ xgbe_phy_sgmii_10_mode(pdata);
+ break;
case XGBE_MODE_SGMII_100:
xgbe_phy_sgmii_100_mode(pdata);
break;
@@ -2399,6 +2645,9 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
switch (mode) {
+ case XGBE_MODE_SGMII_10:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10baseT_Full));
case XGBE_MODE_SGMII_100:
return xgbe_phy_check_mode(pdata, mode,
XGBE_ADV(lks, 100baseT_Full));
@@ -2428,6 +2677,11 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
return false;
return xgbe_phy_check_mode(pdata, mode,
XGBE_ADV(lks, 1000baseX_Full));
+ case XGBE_MODE_SGMII_10:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10baseT_Full));
case XGBE_MODE_SGMII_100:
if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
return false;
@@ -2520,15 +2774,23 @@ static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data,
}
}
-static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
+static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_prv_data *pdata,
int speed)
{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
switch (speed) {
+ case SPEED_10:
+ /* Supported in ver >= 0x30 */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ return (ver >= 0x30) ? true : false;
case SPEED_100:
case SPEED_1000:
return true;
case SPEED_2500:
- return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
+ return ((phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T) ||
+ (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T));
case SPEED_10000:
return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
default:
@@ -2536,10 +2798,17 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
}
}
-static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data,
+static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_prv_data *pdata,
int speed)
{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
switch (speed) {
+ case SPEED_10:
+ /* Supported in ver >= 0x30 */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ return (ver >= 0x30) && (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
case SPEED_100:
return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
case SPEED_1000:
@@ -2586,12 +2855,12 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
case XGBE_PORT_MODE_1000BASE_T:
case XGBE_PORT_MODE_NBASE_T:
case XGBE_PORT_MODE_10GBASE_T:
- return xgbe_phy_valid_speed_baset_mode(phy_data, speed);
+ return xgbe_phy_valid_speed_baset_mode(pdata, speed);
case XGBE_PORT_MODE_1000BASE_X:
case XGBE_PORT_MODE_10GBASE_R:
return xgbe_phy_valid_speed_basex_mode(phy_data, speed);
case XGBE_PORT_MODE_SFP:
- return xgbe_phy_valid_speed_sfp_mode(phy_data, speed);
+ return xgbe_phy_valid_speed_sfp_mode(pdata, speed);
default:
return false;
}
@@ -2614,8 +2883,11 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) {
+ if (pdata->en_rx_adap)
+ pdata->rx_adapt_done = false;
return 0;
+ }
}
if (phy_data->phydev) {
@@ -2637,7 +2909,29 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
*/
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if (reg & MDIO_STAT1_LSTATUS)
+
+ if (pdata->en_rx_adap) {
+ /* if the link is available and adaptation is done,
+ * declare link up
+ */
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ /* If the link is not available or adaptation is not done,
+ * retrigger the adaptation logic (if the mode is not yet set,
+ * issue the mailbox command first).
+ */
+ if (pdata->mode_set) {
+ xgbe_phy_rx_adaptation(pdata);
+ } else {
+ pdata->rx_adapt_done = false;
+ xgbe_phy_set_mode(pdata, phy_data->cur_mode);
+ }
+
+ /* check again for the link and adaptation status */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ } else if (reg & MDIO_STAT1_LSTATUS)
return 1;
if (pdata->phy.autoneg == AUTONEG_ENABLE &&
@@ -2862,6 +3156,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* 10 Mbps speed is not supported in ver < 0x30 */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if (ver < 0x30 && (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10))
+ return true;
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
@@ -2875,7 +3175,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_1000BASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000))
return false;
break;
@@ -2884,14 +3185,17 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_NBASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500))
return false;
break;
case XGBE_PORT_MODE_10GBASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
break;
@@ -2900,7 +3204,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_SFP:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
@@ -3269,6 +3574,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3299,6 +3608,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3321,6 +3634,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3329,6 +3646,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, 1000baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_1000;
}
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+ XGBE_SET_SUP(lks, 2500baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
XGBE_SET_SUP(lks, 10000baseT_Full);
phy_data->start_mode = XGBE_MODE_KR;
@@ -3361,6 +3682,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
XGBE_SET_SUP(lks, FIBRE);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
phy_data->start_mode = XGBE_MODE_SGMII_100;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
@@ -3415,8 +3738,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
mii->priv = pdata;
mii->name = "amd-xgbe-mii";
- mii->read = xgbe_phy_mii_read;
- mii->write = xgbe_phy_mii_write;
+ mii->read = xgbe_phy_mii_read_c22;
+ mii->write = xgbe_phy_mii_write_c22;
+ mii->read_c45 = xgbe_phy_mii_read_c45;
+ mii->write_c45 = xgbe_phy_mii_write_c45;
mii->parent = pdata->dev;
mii->phy_mask = ~0;
snprintf(mii->id, sizeof(mii->id), "%s", dev_name(pdata->dev));
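
The RX-adaptation additions in this file follow the numbered steps in their comments; condensed into one place, the control flow is (a paraphrase for orientation, not new code):

	/*
	 * 1. Wait for RX_VALID and LF_SIGDET in MDIO_PMA_RX_LSTS, issuing a
	 *    receiver reset cycle (rrc) and retrying otherwise, bounded by
	 *    MAX_RX_ADAPT_RETRIES.
	 * 2. Set the RX_AD_REQ bit so the PCS asks the PHY to adapt.
	 * 3. Sleep ~200 ms, then poll the coefficient-update status.
	 * 4. On block lock (latched MDIO_STAT1_LSTATUS) set rx_adapt_done so
	 *    xgbe_phy_link_status() can declare the link up; otherwise
	 *    re-issue the 10G SFI/KR rate-change mailbox command with
	 *    XGBE_MB_SUBCMD_RX_ADAP and try again.
	 */
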
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 7a41367c437d..ad136ed493ed 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -294,6 +294,7 @@
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
@@ -595,6 +596,7 @@ enum xgbe_mode {
XGBE_MODE_KX_2500,
XGBE_MODE_KR,
XGBE_MODE_X,
+ XGBE_MODE_SGMII_10,
XGBE_MODE_SGMII_100,
XGBE_MODE_SGMII_1000,
XGBE_MODE_SFI,
@@ -623,6 +625,7 @@ enum xgbe_mb_cmd {
enum xgbe_mb_subcmd {
XGBE_MB_SUBCMD_NONE = 0,
+ XGBE_MB_SUBCMD_RX_ADAP,
/* 10GbE SFP subcommands */
XGBE_MB_SUBCMD_ACTIVE = 0,
@@ -774,8 +777,11 @@ struct xgbe_hw_if {
int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
enum xgbe_mdio_mode);
- int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
- int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, u16);
+ int (*read_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int);
+ int (*write_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int, u16);
+ int (*read_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int);
+ int (*write_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int,
+ u16);
int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);
@@ -1311,6 +1317,10 @@ struct xgbe_prv_data {
bool debugfs_an_cdr_workaround;
bool debugfs_an_cdr_track_early;
+ bool en_rx_adap;
+ int rx_adapt_retries;
+ bool rx_adapt_done;
+ bool mode_set;
};
/* Function prototypes*/
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 77609dc0a08d..0b2a52199914 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -21,6 +21,7 @@
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 06508eebb585..d6d6d5d37ff3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -384,6 +384,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
+ self->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
}
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
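Drivers now advertise their XDP capabilities up front by filling netdev->xdp_features with ORed NETDEV_XDP_ACT_* bits before register_netdev(), as aq_nic_ndev_init() does above. A minimal sketch for a driver supporting the basic actions plus redirect (the capability check is a hypothetical stand-in):

	/* in probe, before register_netdev() */
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |	/* PASS/DROP/ABORTED/TX */
			       NETDEV_XDP_ACT_REDIRECT;	/* can be a redirect source */
	if (hw_supports_xdp_xmit)			/* hypothetical capability check */
		netdev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;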
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index d30d11872719..306393f8eeca 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1905,7 +1905,6 @@ static void alx_remove(struct pci_dev *pdev)
free_netdev(alx->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
struct alx_priv *alx = dev_get_drvdata(dev);
@@ -1951,12 +1950,7 @@ unlock:
return err;
}
-static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
-#define ALX_PM_OPS (&alx_pm_ops)
-#else
-#define ALX_PM_OPS NULL
-#endif
-
+static DEFINE_SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -2055,7 +2049,7 @@ static struct pci_driver alx_driver = {
.probe = alx_probe,
.remove = alx_remove,
.err_handler = &alx_err_handlers,
- .driver.pm = ALX_PM_OPS,
+ .driver.pm = pm_sleep_ptr(&alx_pm_ops),
};
module_pci_driver(alx_driver);
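The alx change follows the current PM idiom: define the dev_pm_ops unconditionally and guard only the pointer. DEFINE_SIMPLE_DEV_PM_OPS() keeps the suspend/resume callbacks referenced so there are no unused-function warnings, while pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP is off, letting the compiler and linker discard the dead code. The same pattern for a hypothetical driver:

static int foo_suspend(struct device *dev)
{
	/* ... quiesce hardware ... */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* ... bring hardware back up ... */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	/* ... */
	.driver.pm = pm_sleep_ptr(&foo_pm_ops),	/* NULL when !CONFIG_PM_SLEEP */
};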
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f4ca0c6c0f51..948586bf1b5b 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -213,6 +213,7 @@ config BNXT
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
+ select AUXILIARY_BUS
help
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index b751dc8486dc..392ec09a1d8a 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -196,28 +196,6 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
return 0;
}
-static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
-{
- u32 val;
-
- bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
- (index << CAM_CTRL_INDEX_SHIFT)));
-
- b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
-
- val = br32(bp, B44_CAM_DATA_LO);
-
- data[2] = (val >> 24) & 0xFF;
- data[3] = (val >> 16) & 0xFF;
- data[4] = (val >> 8) & 0xFF;
- data[5] = (val >> 0) & 0xFF;
-
- val = br32(bp, B44_CAM_DATA_HI);
-
- data[0] = (val >> 8) & 0xFF;
- data[1] = (val >> 0) & 0xFF;
-}
-
static inline void __b44_cam_write(struct b44 *bp,
const unsigned char *data, int index)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6c32f5c427b5..5d4b1f2ebeac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2414,7 +2414,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
bnxt_queue_sp_work(bp);
async_event_process_exit:
- bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -5538,7 +5537,7 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ if (!vnic_id && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
@@ -13185,6 +13184,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
+ bnxt_rdma_aux_device_uninit(bp);
+
bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
@@ -13690,6 +13691,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG;
+
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
@@ -13780,11 +13784,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_dl_fw_reporters_create(bp);
+ bnxt_rdma_aux_device_init(bp);
+
bnxt_print_device_info(bp);
pci_save_state(pdev);
- return 0;
+ return 0;
init_err_cleanup:
bnxt_dl_unregister(bp);
init_err_dl:
@@ -13828,7 +13834,6 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
dev_close(dev);
- bnxt_ulp_shutdown(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5163ef4a49ea..dcb09fbe4007 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <linux/crash_dump.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/xdp.h>
@@ -1631,6 +1632,12 @@ struct bnxt_fw_health {
#define BNXT_FW_IF_RETRY 10
#define BNXT_FW_SLOT_RESET_RETRY 4
+struct bnxt_aux_priv {
+ struct auxiliary_device aux_dev;
+ struct bnxt_en_dev *edev;
+ int id;
+};
+
enum board_idx {
BCM57301,
BCM57302,
@@ -1852,6 +1859,7 @@ struct bnxt {
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+ struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
struct bnxt_napi **bnapi;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 26913dc816d3..8b3e7697390f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -1303,7 +1303,6 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
- devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a4cba7cb2783..3ed3a2b3b3a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -749,7 +749,6 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
*num_vfs = rc;
}
- bnxt_ulp_sriov_cfg(bp, *num_vfs);
return 0;
}
@@ -823,10 +822,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out2;
rc = pci_enable_sriov(bp->pdev, *num_vfs);
- if (rc) {
- bnxt_ulp_sriov_cfg(bp, 0);
+ if (rc)
goto err_out2;
- }
return 0;
@@ -872,8 +869,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
rtnl_unlock();
-
- bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 2e54bf4fc7a7..d4cc9c371e7b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -19,89 +19,26 @@
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
+#include <linux/auxiliary_bus.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
-static int bnxt_register_dev(struct bnxt_en_dev *edev, unsigned int ulp_id,
- struct bnxt_ulp_ops *ulp_ops, void *handle)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
- if (rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
- return -EBUSY;
- }
- if (ulp_id == BNXT_ROCE_ULP) {
- unsigned int max_stat_ctxs;
-
- max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
- if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
- }
-
- atomic_set(&ulp->ref_count, 0);
- ulp->handle = handle;
- rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
-
- if (ulp_id == BNXT_ROCE_ULP) {
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
- }
-
- return 0;
-}
-
-static int bnxt_unregister_dev(struct bnxt_en_dev *edev, unsigned int ulp_id)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
- int i = 0;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
- if (!rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
- return -EINVAL;
- }
- if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
- edev->en_ops->bnxt_free_msix(edev, ulp_id);
-
- if (ulp->max_async_event_id)
- bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
-
- RCU_INIT_POINTER(ulp->ulp_ops, NULL);
- synchronize_rcu();
- ulp->max_async_event_id = 0;
- ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
- return 0;
-}
+static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, idx, i;
- num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ if (!edev->ulp_tbl->msix_requested) {
+ netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+ return;
+ }
+ num_msix = edev->ulp_tbl->msix_requested;
+ idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
@@ -115,125 +52,95 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
}
}
-static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id,
- struct bnxt_msix_entry *ent, int num_msix)
+int bnxt_register_dev(struct bnxt_en_dev *edev,
+ struct bnxt_ulp_ops *ulp_ops,
+ void *handle)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
- struct bnxt_hw_resc *hw_resc;
- int max_idx, max_cp_rings;
- int avail_msix, idx;
- int total_vecs;
- int rc = 0;
-
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- return -ENODEV;
+ unsigned int max_stat_ctxs;
+ struct bnxt_ulp *ulp;
- if (edev->ulp_tbl[ulp_id].msix_requested)
- return -EAGAIN;
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+ bp->cp_nr_rings == max_stat_ctxs)
+ return -ENOMEM;
- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- avail_msix = bnxt_get_avail_msix(bp, num_msix);
- if (!avail_msix)
+ ulp = edev->ulp_tbl;
+ if (!ulp)
return -ENOMEM;
- if (avail_msix > num_msix)
- avail_msix = num_msix;
-
- if (BNXT_NEW_RM(bp)) {
- idx = bp->cp_nr_rings;
- } else {
- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
- idx = max_idx - avail_msix;
- }
- edev->ulp_tbl[ulp_id].msix_base = idx;
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- hw_resc = &bp->hw_resc;
- total_vecs = idx + avail_msix;
- if (bp->total_irqs < total_vecs ||
- (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
- if (netif_running(dev)) {
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- } else {
- rc = bnxt_reserve_rings(bp, true);
- }
- }
- if (rc) {
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- return -EAGAIN;
- }
- if (BNXT_NEW_RM(bp)) {
- int resv_msix;
+ ulp->handle = handle;
+ rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
+
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_hwrm_vnic_cfg(bp, 0);
- resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
- avail_msix = min_t(int, resv_msix, avail_msix);
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- }
- bnxt_fill_msix_vecs(bp, ent);
+ bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return avail_msix;
+ return 0;
}
+EXPORT_SYMBOL(bnxt_register_dev);
-static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id)
+void bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+ int i = 0;
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
+ ulp = edev->ulp_tbl;
+ if (ulp->msix_requested)
+ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
- return 0;
+ if (ulp->max_async_event_id)
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
- bnxt_close_nic(bp, true, false);
- bnxt_open_nic(bp, true, false);
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ synchronize_rcu();
+ ulp->max_async_event_id = 0;
+ ulp->async_events_bmap = NULL;
+ while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+ msleep(100);
+ i++;
}
- return 0;
+ return;
}
+EXPORT_SYMBOL(bnxt_unregister_dev);
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_en_dev *edev = bp->edev;
+ u32 roce_msix = BNXT_VF(bp) ?
+ BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- }
- return 0;
+ return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
+ min_t(u32, roce_msix, num_online_cpus()) : 0);
}
int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ if (edev->ulp_tbl->msix_requested)
+ return edev->ulp_tbl->msix_base;
}
return 0;
}
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ if (edev->ulp_tbl->msix_requested)
return BNXT_MIN_ROCE_STAT_CTXS;
}
return 0;
}
-static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
+int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
@@ -243,7 +150,7 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
u32 resp_len;
int rc;
- if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
+ if (bp->fw_reset_state)
return -EBUSY;
rc = hwrm_req_init(bp, req, 0 /* don't care */);
@@ -267,42 +174,36 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
hwrm_req_drop(bp, req);
return rc;
}
-
-static void bnxt_ulp_get(struct bnxt_ulp *ulp)
-{
- atomic_inc(&ulp->ref_count);
-}
-
-static void bnxt_ulp_put(struct bnxt_ulp *ulp)
-{
- atomic_dec(&ulp->ref_count);
-}
+EXPORT_SYMBOL(bnxt_send_msg);
void bnxt_ulp_stop(struct bnxt *bp)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_stop)
- continue;
- ops->ulp_stop(ulp->handle);
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ struct auxiliary_driver *adrv;
+ pm_message_t pm = {};
+
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->suspend(adev, pm);
+ }
}
}
void bnxt_ulp_start(struct bnxt *bp, int err)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
@@ -312,58 +213,19 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_start)
- continue;
- ops->ulp_start(ulp->handle);
- }
-}
-
-void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ struct auxiliary_driver *adrv;
- rcu_read_lock();
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_sriov_config) {
- rcu_read_unlock();
- continue;
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->resume(adev);
}
- bnxt_ulp_get(ulp);
- rcu_read_unlock();
- ops->ulp_sriov_config(ulp->handle, num_vfs);
- bnxt_ulp_put(ulp);
}
-}
-void bnxt_ulp_shutdown(struct bnxt *bp)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_shutdown)
- continue;
- ops->ulp_shutdown(ulp->handle);
- }
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
@@ -374,8 +236,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
if (!ulp->msix_requested)
return;
@@ -395,8 +257,8 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
struct bnxt_msix_entry *ent = NULL;
if (!ulp->msix_requested)
@@ -418,46 +280,15 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
-{
- u16 event_id = le16_to_cpu(cmpl->event_id);
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- rcu_read_lock();
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_async_notifier)
- continue;
- if (!ulp->async_events_bmap ||
- event_id > ulp->max_async_event_id)
- continue;
-
- /* Read max_async_event_id first before testing the bitmap. */
- smp_rmb();
- if (test_bit(event_id, ulp->async_events_bmap))
- ops->ulp_async_notifier(ulp->handle, cmpl);
- }
- rcu_read_unlock();
-}
-
-static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp_id,
- unsigned long *events_bmap, u16 max_id)
+int bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap,
+ u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
+ ulp = edev->ulp_tbl;
ulp->async_events_bmap = events_bmap;
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
@@ -465,38 +296,121 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
return 0;
}
+EXPORT_SYMBOL(bnxt_register_async_events);
-static const struct bnxt_en_ops bnxt_en_ops_tbl = {
- .bnxt_register_device = bnxt_register_dev,
- .bnxt_unregister_device = bnxt_unregister_dev,
- .bnxt_request_msix = bnxt_req_msix_vecs,
- .bnxt_free_msix = bnxt_free_msix_vecs,
- .bnxt_send_fw_msg = bnxt_send_msg,
- .bnxt_register_fw_async_events = bnxt_register_async_events,
-};
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
+{
+ struct bnxt_aux_priv *aux_priv;
+ struct auxiliary_device *adev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ aux_priv = bp->aux_priv;
+ adev = &aux_priv->aux_dev;
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
+static void bnxt_aux_dev_release(struct device *dev)
{
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_en_dev *edev;
+ struct bnxt_aux_priv *aux_priv =
+ container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
+
+ ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv->edev->ulp_tbl);
+ kfree(aux_priv->edev);
+ kfree(aux_priv);
+}
+
+static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
+{
+ edev->net = bp->dev;
+ edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
- edev = bp->edev;
- if (!edev) {
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return ERR_PTR(-ENOMEM);
- edev->en_ops = &bnxt_en_ops_tbl;
- edev->net = dev;
- edev->pdev = bp->pdev;
- edev->l2_db_size = bp->db_size;
- edev->l2_db_size_nc = bp->db_size;
- bp->edev = edev;
- }
- edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
- return bp->edev;
+ if (bp->flags & BNXT_FLAG_VF)
+ edev->flags |= BNXT_EN_FLAG_VF;
+
+ edev->chip_num = bp->chip_num;
+ edev->hw_ring_stats_size = bp->hw_ring_stats_size;
+ edev->pf_port_id = bp->pf.port_id;
+ edev->en_state = bp->state;
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_init(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ struct bnxt_aux_priv *aux_priv;
+ struct bnxt_en_dev *edev;
+ struct bnxt_ulp *ulp;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ bp->aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
+ if (!bp->aux_priv)
+ goto exit;
+
+ bp->aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
+ if (bp->aux_priv->id < 0) {
+ netdev_warn(bp->dev,
+ "ida alloc failed for ROCE auxiliary device\n");
+ kfree(bp->aux_priv);
+ goto exit;
+ }
+
+ aux_priv = bp->aux_priv;
+ aux_dev = &aux_priv->aux_dev;
+ aux_dev->id = aux_priv->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bp->pdev->dev;
+ aux_dev->dev.release = bnxt_aux_dev_release;
+
+ rc = auxiliary_device_init(aux_dev);
+ if (rc) {
+ ida_free(&bnxt_aux_dev_ids, bp->aux_priv->id);
+ kfree(bp->aux_priv);
+ goto exit;
+ }
+
+ /* From this point on, all cleanup happens via the .release callback,
+ * and any error unwinding must also call auxiliary_device_uninit().
+ */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ goto aux_dev_uninit;
+
+ ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ if (!ulp)
+ goto aux_dev_uninit;
+
+ edev->ulp_tbl = ulp;
+ aux_priv->edev = edev;
+ bp->edev = edev;
+ bnxt_set_edev_info(edev, bp);
+
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev,
+ "Failed to add auxiliary device for ROCE\n");
+ goto aux_dev_uninit;
+ }
+
+ return;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+exit:
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
-EXPORT_SYMBOL(bnxt_ulp_probe);
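With this conversion the RoCE driver no longer attaches through bnxt_ulp_probe() and an en_ops vtable; it binds to the "rdma" auxiliary device that bnxt_en creates per PCI function and calls the exported bnxt_register_dev()/bnxt_send_msg() helpers directly. A sketch of the consumer side, assuming the auxiliary id matches "bnxt_en.rdma" (module name, dot, device name) and using illustrative my_* names for everything the consumer would define itself:

static const struct auxiliary_device_id my_id_table[] = {
	{ .name = "bnxt_en.rdma" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, my_id_table);

static int my_probe(struct auxiliary_device *adev,
		    const struct auxiliary_device_id *id)
{
	struct bnxt_aux_priv *aux_priv =
		container_of(adev, struct bnxt_aux_priv, aux_dev);

	/* hand our ops and handle to the L2 driver */
	return bnxt_register_dev(aux_priv->edev, &my_ulp_ops, my_handle);
}

static struct auxiliary_driver my_aux_drv = {
	.name = "rdma",
	.id_table = my_id_table,
	.probe = my_probe,
	.remove = my_remove,		/* would call bnxt_unregister_dev() */
	.suspend = my_suspend,		/* invoked from bnxt_ulp_stop() */
	.resume = my_resume,		/* invoked from bnxt_ulp_start() */
};
module_auxiliary_driver(my_aux_drv);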
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 42b50abc3e91..80cbc4b6130a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,6 +15,8 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+#define BNXT_MAX_ROCE_MSIX 9
+#define BNXT_MAX_VF_ROCE_MSIX 2
struct hwrm_async_event_cmpl;
struct bnxt;
@@ -26,12 +28,6 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- /* async_notifier() cannot sleep (in BH context) */
- void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
- void (*ulp_stop)(void *);
- void (*ulp_start)(void *);
- void (*ulp_sriov_config)(void *, int);
- void (*ulp_shutdown)(void *);
void (*ulp_irq_stop)(void *);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -57,6 +53,7 @@ struct bnxt_ulp {
struct bnxt_en_dev {
struct net_device *net;
struct pci_dev *pdev;
+ struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
@@ -64,8 +61,10 @@ struct bnxt_en_dev {
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
- const struct bnxt_en_ops *en_ops;
- struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ #define BNXT_EN_FLAG_VF 0x10
+#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+
+ struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
* bytes mapped by L2
* driver.
@@ -74,24 +73,19 @@ struct bnxt_en_dev {
* bytes mapped as non-
* cacheable.
*/
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state; /* Consumed by the RoCE
+ * driver only while it is
+ * suspended; updated again
+ * on resume.
+ */
};
-struct bnxt_en_ops {
- int (*bnxt_register_device)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_ulp_ops *, void *);
- int (*bnxt_unregister_device)(struct bnxt_en_dev *, unsigned int);
- int (*bnxt_request_msix)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_msix_entry *, int);
- int (*bnxt_free_msix)(struct bnxt_en_dev *, unsigned int);
- int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_fw_msg *);
- int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, unsigned int,
- unsigned long *, u16);
-};
-
-static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+ if (edev && edev->ulp_tbl)
return true;
return false;
}
@@ -102,10 +96,15 @@ int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
-void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
-
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_init(struct bnxt *bp);
+int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
+ void *handle);
+void bnxt_unregister_dev(struct bnxt_en_dev *edev);
+int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
+int bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 36d5202c0aee..5843c93b1711 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -422,9 +422,11 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
+ xdp_features_set_redirect_target(dev, true);
} else {
int rx, tx;
+ xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
bnxt_get_max_rings(bp, &rx, &tx, true);
if (rx > 1) {
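Because an attached program changes what the rings can do, bnxt flips the redirect-target feature dynamically rather than at probe: NDO_XMIT is only advertised while an XDP program is installed. The helper pair used above takes a bool selecting whether scatter-gather (NDO_XMIT_SG) is advertised as well:

	/* in the XDP_SETUP_PROG path (sketch) */
	if (prog)
		xdp_features_set_redirect_target(dev, true);	/* NDO_XMIT + NDO_XMIT_SG */
	else
		xdp_features_clear_redirect_target(dev);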
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 21973046b12b..d937daa8ee88 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2316,6 +2316,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
__func__, p_index, ring->c_index,
ring->read_ptr, dma_length_status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, dev, "oversized packet\n");
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index f55d9d9c01a8..3a4b6cb7b7b9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -77,14 +77,18 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts) {
device_set_wakeup_enable(kdev, 1);
/* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
+ if (priv->wol_irq_disabled) {
enable_irq_wake(priv->wol_irq);
+ enable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(kdev, 0);
/* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
+ if (!priv->wol_irq_disabled) {
disable_irq_wake(priv->wol_irq);
+ disable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = true;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b615176338b2..be042905ada2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -176,15 +176,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- u32 reg;
-
- if (!GENET_IS_V5(priv)) {
- /* Speed settings are set in bcmgenet_mii_setup() */
- reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
- reg |= LED_ACT_SOURCE_MAC;
- bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
- }
-
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
@@ -217,6 +208,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
if (!phy_name) {
phy_name = "MoCA";
+ if (!GENET_IS_V5(priv))
+ port_ctrl |= LED_ACT_SOURCE_MAC;
bcmgenet_moca_phy_setup(priv);
}
break;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 9c410f93a103..14dfec4db8f9 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -768,8 +768,6 @@
#define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4)
#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
-#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
-
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
* and bitfields that are common across both devices, use macb_{read,write}l
@@ -819,11 +817,6 @@ struct macb_dma_desc_ptp {
u32 ts_1;
u32 ts_2;
};
-
-struct gem_tx_ts {
- struct sk_buff *skb;
- struct macb_dma_desc_ptp desc_ptp;
-};
#endif
/* DMA descriptor bitfields */
@@ -1224,12 +1217,6 @@ struct macb_queue {
void *rx_buffers;
struct napi_struct napi_rx;
struct queue_stats stats;
-
-#ifdef CONFIG_MACB_USE_HWSTAMP
- struct work_struct tx_ts_task;
- unsigned int tx_ts_head, tx_ts_tail;
- struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE];
-#endif
};
struct ethtool_rx_fs_item {
@@ -1340,14 +1327,14 @@ enum macb_bd_control {
void gem_ptp_init(struct net_device *ndev);
void gem_ptp_remove(struct net_device *ndev);
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
{
- if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
- return -ENOTSUPP;
+ if (bp->tstamp_config.tx_type == TSTAMP_DISABLED)
+ return;
- return gem_ptp_txstamp(queue, skb, desc);
+ gem_ptp_txstamp(bp, skb, desc);
}
static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
@@ -1363,11 +1350,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
static inline void gem_ptp_init(struct net_device *ndev) { }
static inline void gem_ptp_remove(struct net_device *ndev) { }
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
-{
- return -1;
-}
-
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
#endif
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6cda31520c42..6e141a8bbf43 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -334,7 +334,7 @@ static int macb_mdio_wait_for_idle(struct macb *bp)
1, MACB_MDIO_TIMEOUT);
}
-static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
struct macb *bp = bus->priv;
int status;
@@ -347,35 +347,62 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (status < 0)
goto mdio_read_exit;
- if (regnum & MII_ADDR_C45) {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_ADDR)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(DATA, regnum & 0xFFFF)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
-
- status = macb_mdio_wait_for_idle(bp);
- if (status < 0)
- goto mdio_read_exit;
-
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
- } else {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
- | MACB_BF(RW, MACB_MAN_C22_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+mdio_read_exit:
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
+}
+
+static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
+ int regnum)
+{
+ struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_get_sync(&bp->pdev->dev);
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
+ goto mdio_pm_exit;
}
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
goto mdio_read_exit;
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
mdio_read_exit:
@@ -385,8 +412,8 @@ mdio_pm_exit:
return status;
}
-static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
struct macb *bp = bus->priv;
int status;
@@ -399,37 +426,63 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (status < 0)
goto mdio_write_exit;
- if (regnum & MII_ADDR_C45) {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_ADDR)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(DATA, regnum & 0xFFFF)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
-
- status = macb_mdio_wait_for_idle(bp);
- if (status < 0)
- goto mdio_write_exit;
-
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)
- | MACB_BF(DATA, value)));
- } else {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
- | MACB_BF(RW, MACB_MAN_C22_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_C22_CODE)
- | MACB_BF(DATA, value)));
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)
+ | MACB_BF(DATA, value)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+mdio_write_exit:
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
+}
+
+static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum,
+ u16 value)
+{
+ struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_get_sync(&bp->pdev->dev);
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
+ goto mdio_pm_exit;
}
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
goto mdio_write_exit;
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)
+ | MACB_BF(DATA, value)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
mdio_write_exit:
pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
@@ -902,8 +955,10 @@ static int macb_mii_init(struct macb *bp)
}
bp->mii_bus->name = "MACB_mii_bus";
- bp->mii_bus->read = &macb_mdio_read;
- bp->mii_bus->write = &macb_mdio_write;
+ bp->mii_bus->read = &macb_mdio_read_c22;
+ bp->mii_bus->write = &macb_mdio_write_c22;
+ bp->mii_bus->read_c45 = &macb_mdio_read_c45;
+ bp->mii_bus->write_c45 = &macb_mdio_write_c45;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
@@ -1191,13 +1246,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
/* First, update TX stats if needed */
if (skb) {
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- !ptp_one_step_sync(skb) &&
- gem_ptp_do_txstamp(queue, skb, desc) == 0) {
- /* skb now belongs to timestamp buffer
- * and will be removed later
- */
- tx_skb->skb = NULL;
- }
+ !ptp_one_step_sync(skb))
+ gem_ptp_do_txstamp(bp, skb, desc);
+
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
@@ -2253,6 +2304,12 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (bp->hw_dma_cap & HW_DMA_CAP_PTP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif
+
is_lso = (skb_shinfo(skb)->gso_size != 0);
if (is_lso) {
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index e6cb20aaa76a..f962a95068a0 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -292,79 +292,39 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
}
}
-static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
- struct macb_dma_desc_ptp *desc_ptp)
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb,
+ struct macb_dma_desc *desc)
{
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec64 ts;
-
- gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
-}
-
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
- struct macb_dma_desc *desc)
-{
- unsigned long tail = READ_ONCE(queue->tx_ts_tail);
- unsigned long head = queue->tx_ts_head;
struct macb_dma_desc_ptp *desc_ptp;
- struct gem_tx_ts *tx_timestamp;
-
- if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
- return -EINVAL;
+ struct timespec64 ts;
- if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
- return -ENOMEM;
+ if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl)) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not set in TX BD as expected\n");
+ return;
+ }
- desc_ptp = macb_ptp_desc(queue->bp, desc);
+ desc_ptp = macb_ptp_desc(bp, desc);
/* Unlikely but check */
- if (!desc_ptp)
- return -EINVAL;
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_timestamp = &queue->tx_timestamps[head];
- tx_timestamp->skb = skb;
+ if (!desc_ptp) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not supported in BD\n");
+ return;
+ }
+
/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
dma_rmb();
- tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
- tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
- /* move head */
- smp_store_release(&queue->tx_ts_head,
- (head + 1) & (PTP_TS_BUFFER_SIZE - 1));
-
- schedule_work(&queue->tx_ts_task);
- return 0;
-}
+ gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
-static void gem_tx_timestamp_flush(struct work_struct *work)
-{
- struct macb_queue *queue =
- container_of(work, struct macb_queue, tx_ts_task);
- unsigned long head, tail;
- struct gem_tx_ts *tx_ts;
-
- /* take current head */
- head = smp_load_acquire(&queue->tx_ts_head);
- tail = queue->tx_ts_tail;
-
- while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
- tx_ts = &queue->tx_timestamps[tail];
- gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
- /* cleanup */
- dev_kfree_skb_any(tx_ts->skb);
- /* remove old tail */
- smp_store_release(&queue->tx_ts_tail,
- (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
- tail = queue->tx_ts_tail;
- }
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
}
void gem_ptp_init(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct macb_queue *queue;
- unsigned int q;
bp->ptp_clock_info = gem_ptp_caps_template;
@@ -384,11 +344,6 @@ void gem_ptp_init(struct net_device *dev)
}
spin_lock_init(&bp->tsu_clk_lock);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue->tx_ts_head = 0;
- queue->tx_ts_tail = 0;
- INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
- }
gem_ptp_init_tsu(bp);
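The macb PTP rework above drops the per-queue deferred machinery: instead of copying descriptor timestamps into a PTP_TS_BUFFER_SIZE circular buffer and flushing it from a workqueue, gem_ptp_txstamp() now converts ts_1/ts_2 and delivers the timestamp inline from the TX completion path, so the skb no longer has to be parked. The delivery step reduces to the standard pattern:

	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;

	/* SKBTX_IN_PROGRESS was already set when the frame was queued */
	gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	skb_tstamp_tx(skb, &shhwtstamps);	/* clone lands on the socket error queue */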
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f2f95493ec89..8b25313c7f6b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2218,6 +2218,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+
/* MTU range: 64 - 9200 */
netdev->min_mtu = NIC_HW_MIN_FRS;
netdev->max_mtu = NIC_HW_MAX_FRS;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 9cbce1faab26..7db2403c4c9c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6490,21 +6490,21 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+static int cxgb4_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct adapter *adap = netdev2adap(x->xso.dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
+ NL_SET_ERR_MSG_MOD(extack, "crypto uld critical resource is under use");
return -EBUSY;
}
ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);
out_unlock:
mutex_unlock(&uld_mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index be96f1dc0372..d4a862a9fd7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -4,7 +4,7 @@
#ifndef __CXGB4_TC_MQPRIO_H__
#define __CXGB4_TC_MQPRIO_H__
-#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index ca21794281d6..3731c93f8f95 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -80,7 +80,8 @@ static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
@@ -226,65 +227,66 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
int res = 0;
if (x->props.aalgo != SADB_AALG_NONE) {
- pr_debug("Cannot offload authenticated xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
- pr_debug("Cannot offload compressed xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
- pr_debug("Only IPv4/6 xfrm state offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm state offloaded");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
- pr_debug("Only transport and tunnel xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm offload");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
- pr_debug("Only ESP xfrm state offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
return -EINVAL;
}
if (x->encap) {
- pr_debug("Encapsulated xfrm state not offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state not offloaded");
return -EINVAL;
}
if (!x->aead) {
- pr_debug("Cannot offload xfrm states without aead\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128 &&
x->aead->alg_icv_len != 96) {
- pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 96b & 128b");
+ return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
- pr_debug("cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ NL_SET_ERR_MSG_MOD(extack, "cannot offload xfrm states with AEAD key length other than 128/256 bit");
return -EINVAL;
}
if (x->tfcpad) {
- pr_debug("Cannot offload xfrm states with tfc padding\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
return -EINVAL;
}
if (!x->geniv) {
- pr_debug("Cannot offload xfrm states without geniv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
- pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
return -EINVAL;
}
if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- pr_debug("Unsupported xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload");
return -EINVAL;
}
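.xdo_dev_state_add() now receives a netlink_ext_ack, so validation failures reach the requesting userspace process (e.g. ip xfrm state add) instead of only landing in the kernel log at debug level. The conversion is mechanical, as a hypothetical foo_* handler shows:

static int foo_xfrm_add_state(struct xfrm_state *x,
			      struct netlink_ext_ack *extack)
{
	if (x->id.proto != IPPROTO_ESP) {
		/* text is returned in the netlink ACK, prefixed with KBUILD_MODNAME */
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
		return -EINVAL;
	}
	/* ... */
	return 0;
}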
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
index b6e3b16623de..b98135f65eb7 100644
--- a/drivers/net/ethernet/engleder/Makefile
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_TSNEP) += tsnep.o
tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
- tsnep_rxnfc.o $(tsnep-y)
+ tsnep_rxnfc.o tsnep_xdp.o
tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index f93ba48bac3f..058c2bcf31a7 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -65,7 +65,11 @@ struct tsnep_tx_entry {
u32 properties;
- struct sk_buff *skb;
+ u32 type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
size_t len;
DEFINE_DMA_UNMAP_ADDR(dma);
};
@@ -78,8 +82,6 @@ struct tsnep_tx {
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
- /* TX ring lock */
- spinlock_t lock;
struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
int write;
int read;
@@ -107,6 +109,7 @@ struct tsnep_rx {
struct tsnep_adapter *adapter;
void __iomem *addr;
int queue_index;
+ int tx_queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -123,6 +126,8 @@ struct tsnep_rx {
u32 dropped;
u32 multicast;
u32 alloc_failed;
+
+ struct xdp_rxq_info xdp_rxq;
};
struct tsnep_queue {
@@ -172,6 +177,8 @@ struct tsnep_adapter {
int rxnfc_count;
int rxnfc_max;
+ struct bpf_prog *xdp_prog;
+
int num_tx_queues;
struct tsnep_tx tx[TSNEP_MAX_QUEUES];
int num_rx_queues;
@@ -204,6 +211,9 @@ int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
struct ethtool_rxnfc *cmd);
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
+
#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
int tsnep_ethtool_get_test_count(void);
void tsnep_ethtool_get_test_strings(u8 *data);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 00e2108f2ca4..6982aaa928b5 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -26,9 +26,11 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
-#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
+#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -43,6 +45,14 @@
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
+#define TSNEP_TX_TYPE_SKB BIT(0)
+#define TSNEP_TX_TYPE_SKB_FRAG BIT(1)
+#define TSNEP_TX_TYPE_XDP_TX BIT(2)
+#define TSNEP_TX_TYPE_XDP_NDO BIT(3)
+
+#define TSNEP_XDP_TX BIT(0)
+#define TSNEP_XDP_REDIRECT BIT(1)
+
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
@@ -120,9 +130,6 @@ static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
u32 md;
int retval;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
md = ECM_MD_READ;
if (!adapter->suppress_preamble)
md |= ECM_MD_PREAMBLE;
@@ -144,9 +151,6 @@ static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
u32 md;
int retval;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
md = ECM_MD_WRITE;
if (!adapter->suppress_preamble)
md |= ECM_MD_PREAMBLE;
@@ -306,10 +310,12 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
struct tsnep_tx_entry *entry = &tx->entry[index];
entry->properties = 0;
+ /* xdpf is in a union with skb */
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
- if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
@@ -378,15 +384,19 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
for (i = 0; i < count; i++) {
entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
- if (i == 0) {
+ if (!i) {
len = skb_headlen(skb);
dma = dma_map_single(dmadev, skb->data, len,
DMA_TO_DEVICE);
+
+ entry->type = TSNEP_TX_TYPE_SKB;
} else {
len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
dma = skb_frag_dma_map(dmadev,
&skb_shinfo(skb)->frags[i - 1],
0, len, DMA_TO_DEVICE);
+
+ entry->type = TSNEP_TX_TYPE_SKB_FRAG;
}
if (dma_mapping_error(dmadev, dma))
return -ENOMEM;
@@ -413,12 +423,13 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
if (entry->len) {
- if (i == 0)
+ if (entry->type & TSNEP_TX_TYPE_SKB)
dma_unmap_single(dmadev,
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
- else
+ else if (entry->type &
+ (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
dma_unmap_page(dmadev,
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
@@ -434,7 +445,6 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- unsigned long flags;
int count = 1;
struct tsnep_tx_entry *entry;
int length;
@@ -444,16 +454,12 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
- spin_lock_irqsave(&tx->lock, flags);
-
if (tsnep_tx_desc_available(tx) < count) {
/* ring full, shall not happen because queue is stopped if full
* below
*/
netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
- spin_unlock_irqrestore(&tx->lock, flags);
-
return NETDEV_TX_BUSY;
}
@@ -468,10 +474,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
tx->dropped++;
- spin_unlock_irqrestore(&tx->lock, flags);
-
- netdev_err(tx->adapter->netdev, "TX DMA map failed\n");
-
return NETDEV_TX_OK;
}
length = retval;
@@ -481,7 +483,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
- i == (count - 1));
+ i == count - 1);
tx->write = (tx->write + count) % TSNEP_RING_SIZE;
skb_tx_timestamp(skb);
@@ -496,23 +498,146 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
}
- spin_unlock_irqrestore(&tx->lock, flags);
-
return NETDEV_TX_OK;
}
+static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
+ struct skb_shared_info *shinfo, int count, u32 type)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct page *page;
+ skb_frag_t *frag;
+ unsigned int len;
+ int map_len = 0;
+ dma_addr_t dma;
+ void *data;
+ int i;
+
+ frag = NULL;
+ len = xdpf->len;
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
+ if (type & TSNEP_TX_TYPE_XDP_NDO) {
+ data = unlikely(frag) ? skb_frag_address(frag) :
+ xdpf->data;
+ dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->type = TSNEP_TX_TYPE_XDP_NDO;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag) :
+ virt_to_page(xdpf->data);
+ dma = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma += skb_frag_off(frag);
+ else
+ dma += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dmadev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ entry->type = TSNEP_TX_TYPE_XDP_TX;
+ }
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
+
+ if (i + 1 < count) {
+ frag = &shinfo->frags[i];
+ len = skb_frag_size(frag);
+ }
+ }
+
+ return map_len;
+}
+
+/* This function requires __netif_tx_lock to be held by the caller. */
+static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
+ struct tsnep_tx *tx, u32 type)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct tsnep_tx_entry *entry;
+ int count, length, retval, i;
+
+ count = 1;
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ count += shinfo->nr_frags;
+
+ /* ensure that the TX ring is not filled up by XDP; MAX_SKB_FRAGS + 1
+ * descriptors always remain available for the normal TX path, where
+ * the queue is stopped if necessary
+ */
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
+ return false;
+
+ entry = &tx->entry[tx->write];
+ entry->xdpf = xdpf;
+
+ retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
+ entry->xdpf = NULL;
+
+ tx->dropped++;
+
+ return false;
+ }
+ length = retval;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
+ i == count - 1);
+ tx->write = (tx->write + count) % TSNEP_RING_SIZE;
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ return true;
+}
+
+static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
+{
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+}
+
+static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
+ struct xdp_buff *xdp,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ bool xmit;
+
+ if (unlikely(!xdpf))
+ return false;
+
+ __netif_tx_lock(tx_nq, smp_processor_id());
+
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ if (xmit)
+ txq_trans_cond_update(tx_nq);
+
+ __netif_tx_unlock(tx_nq);
+
+ return xmit;
+}
+
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
struct tsnep_tx_entry *entry;
struct netdev_queue *nq;
- unsigned long flags;
int budget = 128;
int length;
int count;
nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
-
- spin_lock_irqsave(&tx->lock, flags);
+ __netif_tx_lock(nq, smp_processor_id());
do {
if (tx->read == tx->write)
@@ -530,12 +655,17 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
dma_rmb();
count = 1;
- if (skb_shinfo(entry->skb)->nr_frags > 0)
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
+ else if (!(entry->type & TSNEP_TX_TYPE_SKB) &&
+ xdp_frame_has_frags(entry->xdpf))
+ count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
length = tsnep_tx_unmap(tx, tx->read, count);
- if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
@@ -555,7 +685,11 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
skb_tstamp_tx(entry->skb, &hwtstamps);
}
- napi_consume_skb(entry->skb, budget);
+ if (entry->type & TSNEP_TX_TYPE_SKB)
+ napi_consume_skb(entry->skb, napi_budget);
+ else
+ xdp_return_frame_rx_napi(entry->xdpf);
+ /* xdpf is a union with skb */
entry->skb = NULL;
tx->read = (tx->read + count) % TSNEP_RING_SIZE;
@@ -571,18 +705,19 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
netif_tx_wake_queue(nq);
}
- spin_unlock_irqrestore(&tx->lock, flags);
+ __netif_tx_unlock(nq);
- return (budget != 0);
+ return budget != 0;
}
static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
- unsigned long flags;
struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
bool pending = false;
- spin_lock_irqsave(&tx->lock, flags);
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ __netif_tx_lock(nq, smp_processor_id());
if (tx->read != tx->write) {
entry = &tx->entry[tx->read];
@@ -592,7 +727,7 @@ static bool tsnep_tx_pending(struct tsnep_tx *tx)
pending = true;
}
- spin_unlock_irqrestore(&tx->lock, flags);
+ __netif_tx_unlock(nq);
return pending;
}
@@ -618,8 +753,6 @@ static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
tx->owner_counter = 1;
tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
- spin_lock_init(&tx->lock);
-
return 0;
}
@@ -695,9 +828,9 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
pp_params.pool_size = TSNEP_RING_SIZE;
pp_params.nid = dev_to_node(dmadev);
pp_params.dev = dmadev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
- pp_params.offset = TSNEP_SKB_PAD;
+ pp_params.offset = TSNEP_RX_OFFSET;
rx->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx->page_pool)) {
retval = PTR_ERR(rx->page_pool);
@@ -732,7 +865,7 @@ static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
entry->page = page;
entry->len = TSNEP_MAX_RX_BUF_SIZE;
entry->dma = page_pool_get_dma_addr(entry->page);
- entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
}
static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
@@ -826,6 +959,62 @@ static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
return i;
}
+static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, int *status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ unsigned int length;
+ unsigned int sync;
+ u32 act;
+
+ length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* Due to xdp_adjust_tail, the DMA sync for_device must cover the max length touched by the CPU */
+ sync = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
+ sync = max(sync, length);
+
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ *status |= TSNEP_XDP_REDIRECT;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
+ sync, true);
+ return true;
+ }
+}
+
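For illustration, the verdicts handled above come from whatever BPF program the user attaches; a minimal sketch of such a program (not part of this patch, illustrative only), built separately with clang -O2 -g -target bpf:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_reflect(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* frames too short for an Ethernet header go up the stack */
	if (data + 14 > data_end)
		return XDP_PASS;

	/* bounce everything else back out the same port; this exercises
	 * the XDP_TX branch and tsnep_xdp_xmit_back() above
	 */
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";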
+static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ if (status & TSNEP_XDP_TX) {
+ __netif_tx_lock(tx_nq, smp_processor_id());
+ tsnep_xdp_xmit_flush(tx);
+ __netif_tx_unlock(tx_nq);
+ }
+
+ if (status & TSNEP_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
int length)
{
@@ -836,14 +1025,14 @@ static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
- __skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);
+ skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - ETH_FCS_LEN);
if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
struct tsnep_rx_inline *rx_inline =
(struct tsnep_rx_inline *)(page_address(page) +
- TSNEP_SKB_PAD);
+ TSNEP_RX_OFFSET);
skb_shinfo(skb)->tx_flags |=
SKBTX_HW_TSTAMP_NETDEV;
@@ -861,15 +1050,28 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct device *dmadev = rx->adapter->dmadev;
- int desc_available;
- int done = 0;
enum dma_data_direction dma_dir;
struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
struct sk_buff *skb;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ int done = 0;
int length;
desc_available = tsnep_rx_desc_available(rx);
dma_dir = page_pool_get_dma_dir(rx->page_pool);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
+ }
while (likely(done < budget) && (rx->read != rx->write)) {
entry = &rx->entry[rx->read];
@@ -903,21 +1105,47 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
*/
dma_rmb();
- prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
+ prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
- dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
- length, dma_dir);
+ dma_sync_single_range_for_cpu(dmadev, entry->dma,
+ TSNEP_RX_OFFSET, length, dma_dir);
+
+ /* RX metadata with timestamps is placed in front of the actual data;
+ * subtract the metadata size to get the length of the actual data and
+ * treat the metadata size as the offset of the actual data during RX
+ * processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
desc_available++;
+ if (prog) {
+ bool consume;
+
+ xdp_prepare_buff(&xdp, page_address(entry->page),
+ XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
+ length, false);
+
+ consume = tsnep_xdp_run_prog(rx, prog, &xdp,
+ &xdp_status, tx_nq, tx);
+ if (consume) {
+ rx->packets++;
+ rx->bytes += length;
+
+ entry->page = NULL;
+
+ continue;
+ }
+ }
+
skb = tsnep_build_skb(rx, entry->page, length);
if (skb) {
page_pool_release_page(rx->page_pool, entry->page);
rx->packets++;
- rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+ rx->bytes += length;
if (skb->pkt_type == PACKET_MULTICAST)
rx->multicast++;
@@ -930,6 +1158,9 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
entry->page = NULL;
}
+ if (xdp_status)
+ tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
+
if (desc_available)
tsnep_rx_refill(rx, desc_available, false);
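To visualize the buffer handling above, a rough layout sketch (not part of the patch; widths not to scale, exact headroom composition is an assumption):

/*   page start
 *   | headroom (TSNEP_RX_OFFSET) | inline metadata | frame data ... |
 *                                ^ DMA target / prefetch address
 *
 * The length reported by hardware covers the inline metadata as well,
 * hence TSNEP_RX_INLINE_METADATA_SIZE is subtracted before the buffer
 * is handed to XDP or to tsnep_build_skb().
 */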
@@ -1086,17 +1317,73 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
memset(queue->name, 0, sizeof(queue->name));
}
+static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+
+ tsnep_free_irq(queue, first);
+
+ if (rx && xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+
+ netif_napi_del(&queue->napi);
+}
+
+static int tsnep_queue_open(struct tsnep_adapter *adapter,
+ struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+ struct tsnep_tx *tx = queue->tx;
+ int retval;
+
+ queue->adapter = adapter;
+
+ netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);
+
+ if (rx) {
+ /* choose TX queue for XDP_TX */
+ if (tx)
+ rx->tx_queue_index = tx->queue_index;
+ else if (rx->queue_index < adapter->num_tx_queues)
+ rx->tx_queue_index = rx->queue_index;
+ else
+ rx->tx_queue_index = 0;
+
+ retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
+ rx->queue_index, queue->napi.napi_id);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ if (retval)
+ goto failed;
+ }
+
+ retval = tsnep_request_irq(queue, first);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n", queue->irq);
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tsnep_queue_close(queue, first);
+
+ return retval;
+}
+
static int tsnep_netdev_open(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
- int i;
- void __iomem *addr;
int tx_queue_index = 0;
int rx_queue_index = 0;
- int retval;
+ void __iomem *addr;
+ int i, retval;
for (i = 0; i < adapter->num_queues; i++) {
- adapter->queue[i].adapter = adapter;
if (adapter->queue[i].tx) {
addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
retval = tsnep_tx_open(adapter, addr, tx_queue_index,
@@ -1107,21 +1394,16 @@ static int tsnep_netdev_open(struct net_device *netdev)
}
if (adapter->queue[i].rx) {
addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
- retval = tsnep_rx_open(adapter, addr,
- rx_queue_index,
+ retval = tsnep_rx_open(adapter, addr, rx_queue_index,
adapter->queue[i].rx);
if (retval)
goto failed;
rx_queue_index++;
}
- retval = tsnep_request_irq(&adapter->queue[i], i == 0);
- if (retval) {
- netif_err(adapter, drv, adapter->netdev,
- "can't get assigned irq %d.\n",
- adapter->queue[i].irq);
+ retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
+ if (retval)
goto failed;
- }
}
retval = netif_set_real_num_tx_queues(adapter->netdev,
@@ -1139,8 +1421,6 @@ static int tsnep_netdev_open(struct net_device *netdev)
goto phy_failed;
for (i = 0; i < adapter->num_queues; i++) {
- netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
- tsnep_poll);
napi_enable(&adapter->queue[i].napi);
tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -1150,10 +1430,9 @@ static int tsnep_netdev_open(struct net_device *netdev)
phy_failed:
tsnep_disable_irq(adapter, ECM_INT_LINK);
- tsnep_phy_close(adapter);
failed:
for (i = 0; i < adapter->num_queues; i++) {
- tsnep_free_irq(&adapter->queue[i], i == 0);
+ tsnep_queue_close(&adapter->queue[i], i == 0);
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
@@ -1175,9 +1454,8 @@ static int tsnep_netdev_close(struct net_device *netdev)
tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
napi_disable(&adapter->queue[i].napi);
- netif_napi_del(&adapter->queue[i].napi);
- tsnep_free_irq(&adapter->queue[i], i == 0);
+ tsnep_queue_close(&adapter->queue[i], i == 0);
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
@@ -1330,6 +1608,67 @@ static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
return ns_to_ktime(timestamp);
}
+static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
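The new ndo_bpf hook is reached through the generic XDP attach path; a minimal userspace sketch using libbpf (illustrative only; the object file and program names are assumptions carried over from the earlier example):

#include <bpf/libbpf.h>
#include <net/if.h>

int attach_xdp(const char *ifname)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	obj = bpf_object__open_file("xdp_reflect.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return -1;

	prog = bpf_object__find_program_by_name(obj, "xdp_reflect");
	if (!prog)
		return -1;

	/* ends up in tsnep_netdev_bpf() with XDP_SETUP_PROG */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
}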
+static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
+{
+ if (cpu >= TSNEP_MAX_QUEUES)
+ cpu &= TSNEP_MAX_QUEUES - 1;
+
+ while (cpu >= adapter->num_tx_queues)
+ cpu -= adapter->num_tx_queues;
+
+ return &adapter->tx[cpu];
+}
+
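Worked example for the CPU-to-queue folding above (TSNEP_MAX_QUEUES == 4 and two TX queues are assumed for illustration):

/* cpu 0 -> tx[0]              cpu 1 -> tx[1]
 * cpu 2 -> 2 - 2 = 0 -> tx[0]
 * cpu 5 -> 5 & 3 = 1 -> tx[1]
 * concurrent callers on different CPUs thus spread across the
 * available queues, each serialized by its own __netif_tx_lock()
 */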
+static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **xdp, u32 flags)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+ u32 cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct tsnep_tx *tx;
+ int nxmit;
+ bool xmit;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ tx = tsnep_xdp_get_tx(adapter, cpu);
+ nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock(nq, cpu);
+
+ for (nxmit = 0; nxmit < n; nxmit++) {
+ xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
+ TSNEP_TX_TYPE_XDP_NDO);
+ if (!xmit)
+ break;
+
+ /* avoid transmit queue timeout since we share it with the slow
+ * path
+ */
+ txq_trans_cond_update(nq);
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ tsnep_xdp_xmit_flush(tx);
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
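Frames reach this handler when an XDP program on another device redirects to the tsnep interface; a minimal sketch of such a program (the ifindex value is hypothetical):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define TSNEP_IFINDEX 5	/* hypothetical ifindex of the tsnep netdev */

SEC("xdp")
int xdp_fwd_to_tsnep(struct xdp_md *ctx)
{
	/* xdp_do_redirect() on the receiving driver queues the frame,
	 * and xdp_do_flush() delivers it via tsnep_netdev_xdp_xmit()
	 */
	return bpf_redirect(TSNEP_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";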
static const struct net_device_ops tsnep_netdev_ops = {
.ndo_open = tsnep_netdev_open,
.ndo_stop = tsnep_netdev_close,
@@ -1341,6 +1680,8 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_set_features = tsnep_netdev_set_features,
.ndo_get_tstamp = tsnep_netdev_get_tstamp,
.ndo_setup_tc = tsnep_tc_setup,
+ .ndo_bpf = tsnep_netdev_bpf,
+ .ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
};
static int tsnep_mac_init(struct tsnep_adapter *adapter)
@@ -1585,6 +1926,10 @@ static int tsnep_probe(struct platform_device *pdev)
netdev->features = NETIF_F_SG;
netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
index c4c6e1357317..d083e6684f12 100644
--- a/drivers/net/ethernet/engleder/tsnep_tc.c
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -403,12 +403,33 @@ static int tsnep_taprio(struct tsnep_adapter *adapter,
return 0;
}
+static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return tsnep_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_TAPRIO:
return tsnep_taprio(adapter, type_data);
default:
diff --git a/drivers/net/ethernet/engleder/tsnep_xdp.c b/drivers/net/ethernet/engleder/tsnep_xdp.c
new file mode 100644
index 000000000000..4d14cb1fd772
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_xdp.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include <linux/if_vlan.h>
+#include <net/xdp_sock_drv.h>
+
+#include "tsnep.h"
+
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *old_prog;
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
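The xchg() above pairs with the READ_ONCE(rx->adapter->xdp_prog) reader in tsnep_rx_poll(); the pattern in isolation (a sketch, not additional patch code):

/* writer (ndo_bpf, process context) */
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
	bpf_prog_put(old_prog);	/* progs are refcounted; last put frees */

/* reader (NAPI poll, softirq context) */
prog = READ_ONCE(adapter->xdp_prog);
if (prog)
	act = bpf_prog_run_xdp(prog, &xdp);

No ring reconfiguration is needed here because tsnep reserves XDP headroom in its RX buffers unconditionally, so the program can be swapped while the ring is live.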
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 6c8c78018ce6..139fe66f8bcd 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -182,6 +182,12 @@ static int ftmac100_start_hw(struct ftmac100 *priv)
if (netdev->mtu > ETH_DATA_LEN)
maccr |= FTMAC100_MACCR_RX_FTL;
+ /* Add other bits as needed */
+ if (netdev->flags & IFF_PROMISC)
+ maccr |= FTMAC100_MACCR_RCV_ALL;
+ if (netdev->flags & IFF_ALLMULTI)
+ maccr |= FTMAC100_MACCR_RX_MULTIPKT;
+
iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 027fff9f7db0..9318a2554056 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -244,6 +244,10 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
eth_hw_addr_set(net_dev, mac_addr);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2e79d18fc3c7..a62cffaf6ff1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4596,6 +4596,12 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ if (priv->dpni_attrs.wriop_version >= DPAA2_WRIOP_VERSION(3, 0, 0) &&
+ priv->dpni_attrs.num_queues <= 8)
+ net_dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
if (priv->dpni_attrs.vlan_filter_entries)
net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index cdc0ff89388a..9bc099cf3cb1 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,7 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC_CORE
+ tristate
+ help
+ This module provides the functionality shared by the PF and VF
+ drivers for the NXP ENETC controller.
+
+ If compiled as module (M), the module name is fsl-enetc-core.
+
config FSL_ENETC
tristate "ENETC PF driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
select PHYLINK
@@ -16,7 +25,8 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select PHYLINK
select DIMLIB
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index e0e8dfd13793..b13cbbabb2ea 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -1,14 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o
+fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-y := enetc_pf.o $(common-objs)
+fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
-fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+fsl-enetc-vf-y := enetc_vf.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index e96449eedfb5..2fc712b24d12 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -11,14 +11,26 @@
#include <net/pkt_sched.h>
#include <net/tso.h>
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
+{
+ return enetc_port_rd(&si->hw, reg);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
+
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
+{
+ enetc_port_wr(&si->hw, reg, val);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
- int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- if (priv->rx_ring[i]->xdp.prog)
- return num_tx_rings - num_possible_cpus();
+ if (priv->xdp_prog)
+ return num_tx_rings - num_possible_cpus();
return num_tx_rings;
}
@@ -243,8 +255,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
if (udp)
val |= ENETC_PM0_SINGLE_STEP_CH;
- enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
- enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
+ enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
+ val);
} else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
@@ -651,6 +663,7 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
return enetc_start_xmit(skb, ndev);
}
+EXPORT_SYMBOL_GPL(enetc_xmit);
static irqreturn_t enetc_msix(int irq, void *data)
{
@@ -1305,6 +1318,10 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
xdp_tx_swbd->xdp_frame = NULL;
n++;
+
+ if (!xdp_frame_has_frags(xdp_frame))
+ goto out;
+
xdp_tx_swbd = &xdp_tx_arr[n];
shinfo = xdp_get_shared_info_from_frame(xdp_frame);
@@ -1334,7 +1351,7 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
n++;
xdp_tx_swbd = &xdp_tx_arr[n];
}
-
+out:
xdp_tx_arr[n - 1].is_eof = true;
xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
@@ -1384,22 +1401,19 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
return xdp_tx_frm_cnt;
}
+EXPORT_SYMBOL_GPL(enetc_xdp_xmit);
static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
struct xdp_buff *xdp_buff, u16 size)
{
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
- struct skb_shared_info *shinfo;
/* To be used for XDP_TX */
rx_swbd->len = size;
xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
rx_ring->buffer_offset, size, false);
-
- shinfo = xdp_get_shared_info_from_buff(xdp_buff);
- shinfo->nr_frags = 0;
}
static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
@@ -1407,11 +1421,23 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
- skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
+ skb_frag_t *frag;
/* To be used for XDP_TX */
rx_swbd->len = size;
+ if (!xdp_buff_has_frags(xdp_buff)) {
+ xdp_buff_set_frags_flag(xdp_buff);
+ shinfo->xdp_frags_size = size;
+ shinfo->nr_frags = 0;
+ } else {
+ shinfo->xdp_frags_size += size;
+ }
+
+ if (page_is_pfmemalloc(rx_swbd->page))
+ xdp_buff_set_frag_pfmemalloc(xdp_buff);
+
+ frag = &shinfo->frags[shinfo->nr_frags];
skb_frag_off_set(frag, rx_swbd->page_offset);
skb_frag_size_set(frag, size);
__skb_frag_set_page(frag, rx_swbd->page);
@@ -1584,20 +1610,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
- /* xdp_return_frame does not support S/G in the sense
- * that it leaks the fragments (__xdp_return should not
- * call page_frag_free only for the initial buffer).
- * Until XDP_REDIRECT gains support for S/G let's keep
- * the code structure in place, but dead. We drop the
- * S/G frames ourselves to avoid memory leaks which
- * would otherwise leave the kernel OOM.
- */
- if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
- enetc_xdp_drop(rx_ring, orig_i, i);
- rx_ring->stats.xdp_redirect_sg++;
- break;
- }
-
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
if (unlikely(err)) {
enetc_xdp_drop(rx_ring, orig_i, i);
@@ -1713,204 +1725,263 @@ void enetc_get_si_caps(struct enetc_si *si)
if (val & ENETC_SIPCAPR0_QBV)
si->hw_features |= ENETC_SI_F_QBV;
+ if (val & ENETC_SIPCAPR0_QBU)
+ si->hw_features |= ENETC_SI_F_QBU;
+
if (val & ENETC_SIPCAPR0_PSFP)
si->hw_features |= ENETC_SI_F_PSFP;
}
+EXPORT_SYMBOL_GPL(enetc_get_si_caps);
-static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
{
- r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
- &r->bd_dma_base, GFP_KERNEL);
- if (!r->bd_base)
+ size_t bd_base_size = res->bd_count * res->bd_size;
+
+ res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
+ &res->bd_dma_base, GFP_KERNEL);
+ if (!res->bd_base)
return -ENOMEM;
/* h/w requires 128B alignment */
- if (!IS_ALIGNED(r->bd_dma_base, 128)) {
- dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
- r->bd_dma_base);
+ if (!IS_ALIGNED(res->bd_dma_base, 128)) {
+ dma_free_coherent(res->dev, bd_base_size, res->bd_base,
+ res->bd_dma_base);
return -EINVAL;
}
return 0;
}
-static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res)
+{
+ size_t bd_base_size = res->bd_count * res->bd_size;
+
+ dma_free_coherent(res->dev, bd_base_size, res->bd_base,
+ res->bd_dma_base);
+}
+
+static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res,
+ struct device *dev, size_t bd_count)
{
int err;
- txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
- if (!txr->tx_swbd)
+ res->dev = dev;
+ res->bd_count = bd_count;
+ res->bd_size = sizeof(union enetc_tx_bd);
+
+ res->tx_swbd = vzalloc(bd_count * sizeof(*res->tx_swbd));
+ if (!res->tx_swbd)
return -ENOMEM;
- err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+ err = enetc_dma_alloc_bdr(res);
if (err)
goto err_alloc_bdr;
- txr->tso_headers = dma_alloc_coherent(txr->dev,
- txr->bd_count * TSO_HEADER_SIZE,
- &txr->tso_headers_dma,
+ res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE,
+ &res->tso_headers_dma,
GFP_KERNEL);
- if (!txr->tso_headers) {
+ if (!res->tso_headers) {
err = -ENOMEM;
goto err_alloc_tso;
}
- txr->next_to_clean = 0;
- txr->next_to_use = 0;
-
return 0;
err_alloc_tso:
- dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
- txr->bd_base, txr->bd_dma_base);
- txr->bd_base = NULL;
+ enetc_dma_free_bdr(res);
err_alloc_bdr:
- vfree(txr->tx_swbd);
- txr->tx_swbd = NULL;
+ vfree(res->tx_swbd);
+ res->tx_swbd = NULL;
return err;
}
-static void enetc_free_txbdr(struct enetc_bdr *txr)
+static void enetc_free_tx_resource(const struct enetc_bdr_resource *res)
{
- int size, i;
-
- for (i = 0; i < txr->bd_count; i++)
- enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
-
- size = txr->bd_count * sizeof(union enetc_tx_bd);
-
- dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
- txr->tso_headers, txr->tso_headers_dma);
- txr->tso_headers = NULL;
-
- dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
- txr->bd_base = NULL;
-
- vfree(txr->tx_swbd);
- txr->tx_swbd = NULL;
+ dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE,
+ res->tso_headers, res->tso_headers_dma);
+ enetc_dma_free_bdr(res);
+ vfree(res->tx_swbd);
}
-static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+static struct enetc_bdr_resource *
+enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
+ struct enetc_bdr_resource *tx_res;
int i, err;
+ tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL);
+ if (!tx_res)
+ return ERR_PTR(-ENOMEM);
+
for (i = 0; i < priv->num_tx_rings; i++) {
- err = enetc_alloc_txbdr(priv->tx_ring[i]);
+ struct enetc_bdr *tx_ring = priv->tx_ring[i];
+ err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev,
+ tx_ring->bd_count);
if (err)
goto fail;
}
- return 0;
+ return tx_res;
fail:
while (i-- > 0)
- enetc_free_txbdr(priv->tx_ring[i]);
+ enetc_free_tx_resource(&tx_res[i]);
- return err;
+ kfree(tx_res);
+
+ return ERR_PTR(err);
}
-static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res,
+ size_t num_resources)
{
- int i;
+ size_t i;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_free_txbdr(priv->tx_ring[i]);
+ for (i = 0; i < num_resources; i++)
+ enetc_free_tx_resource(&tx_res[i]);
+
+ kfree(tx_res);
}
-static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
+static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
+ struct device *dev, size_t bd_count,
+ bool extended)
{
- size_t size = sizeof(union enetc_rx_bd);
int err;
- rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
- if (!rxr->rx_swbd)
- return -ENOMEM;
-
+ res->dev = dev;
+ res->bd_count = bd_count;
+ res->bd_size = sizeof(union enetc_rx_bd);
if (extended)
- size *= 2;
+ res->bd_size *= 2;
+
+ res->rx_swbd = vzalloc(bd_count * sizeof(struct enetc_rx_swbd));
+ if (!res->rx_swbd)
+ return -ENOMEM;
- err = enetc_dma_alloc_bdr(rxr, size);
+ err = enetc_dma_alloc_bdr(res);
if (err) {
- vfree(rxr->rx_swbd);
+ vfree(res->rx_swbd);
return err;
}
- rxr->next_to_clean = 0;
- rxr->next_to_use = 0;
- rxr->next_to_alloc = 0;
- rxr->ext_en = extended;
-
return 0;
}
-static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+static void enetc_free_rx_resource(const struct enetc_bdr_resource *res)
{
- int size;
-
- size = rxr->bd_count * sizeof(union enetc_rx_bd);
-
- dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
- rxr->bd_base = NULL;
-
- vfree(rxr->rx_swbd);
- rxr->rx_swbd = NULL;
+ enetc_dma_free_bdr(res);
+ vfree(res->rx_swbd);
}
-static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+static struct enetc_bdr_resource *
+enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended)
{
- bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+ struct enetc_bdr_resource *rx_res;
int i, err;
+ rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL);
+ if (!rx_res)
+ return ERR_PTR(-ENOMEM);
+
for (i = 0; i < priv->num_rx_rings; i++) {
- err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
+ struct enetc_bdr *rx_ring = priv->rx_ring[i];
+ err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev,
+ rx_ring->bd_count, extended);
if (err)
goto fail;
}
- return 0;
+ return rx_res;
fail:
while (i-- > 0)
- enetc_free_rxbdr(priv->rx_ring[i]);
+ enetc_free_rx_resource(&rx_res[i]);
- return err;
+ kfree(rx_res);
+
+ return ERR_PTR(err);
}
-static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res,
+ size_t num_resources)
+{
+ size_t i;
+
+ for (i = 0; i < num_resources; i++)
+ enetc_free_rx_resource(&rx_res[i]);
+
+ kfree(rx_res);
+}
+
+static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring,
+ const struct enetc_bdr_resource *res)
+{
+ tx_ring->bd_base = res ? res->bd_base : NULL;
+ tx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
+ tx_ring->tx_swbd = res ? res->tx_swbd : NULL;
+ tx_ring->tso_headers = res ? res->tso_headers : NULL;
+ tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0;
+}
+
+static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring,
+ const struct enetc_bdr_resource *res)
+{
+ rx_ring->bd_base = res ? res->bd_base : NULL;
+ rx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
+ rx_ring->rx_swbd = res ? res->rx_swbd : NULL;
+}
+
+static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv,
+ const struct enetc_bdr_resource *res)
{
int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- enetc_free_rxbdr(priv->rx_ring[i]);
+ if (priv->tx_res)
+ enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings);
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ enetc_assign_tx_resource(priv->tx_ring[i],
+ res ? &res[i] : NULL);
+ }
+
+ priv->tx_res = res;
}
-static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv,
+ const struct enetc_bdr_resource *res)
{
int i;
- if (!tx_ring->tx_swbd)
- return;
+ if (priv->rx_res)
+ enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings);
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ enetc_assign_rx_resource(priv->rx_ring[i],
+ res ? &res[i] : NULL);
+ }
+
+ priv->rx_res = res;
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+ int i;
for (i = 0; i < tx_ring->bd_count; i++) {
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
enetc_free_tx_frame(tx_ring, tx_swbd);
}
-
- tx_ring->next_to_clean = 0;
- tx_ring->next_to_use = 0;
}
static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
int i;
- if (!rx_ring->rx_swbd)
- return;
-
for (i = 0; i < rx_ring->bd_count; i++) {
struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
@@ -1922,10 +1993,6 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
__free_page(rx_swbd->page);
rx_swbd->page = NULL;
}
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
- rx_ring->next_to_alloc = 0;
}
static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
@@ -1980,6 +2047,7 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_configure_si);
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
@@ -1999,6 +2067,7 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
priv->tx_ictt = ENETC_TXIC_TIMETHR;
}
+EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
@@ -2011,11 +2080,13 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_alloc_si_resources);
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
kfree(priv->cls_rules);
}
+EXPORT_SYMBOL_GPL(enetc_free_si_resources);
static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
@@ -2039,7 +2110,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
/* enable Tx ints by setting pkt thr to 1 */
enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
- tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);
+ tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio);
if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tbmr |= ENETC_TBMR_VIH;
@@ -2051,10 +2122,11 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
tx_ring->idr = hw->reg + ENETC_SITXIDR;
}
-static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
+ bool extended)
{
int idx = rx_ring->index;
- u32 rbmr;
+ u32 rbmr = 0;
enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
lower_32_bits(rx_ring->bd_dma_base));
@@ -2081,8 +2153,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
/* enable Rx ints by setting pkt thr to 1 */
enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
- rbmr = ENETC_RBMR_EN;
-
+ rx_ring->ext_en = extended;
if (rx_ring->ext_en)
rbmr |= ENETC_RBMR_BDS;
@@ -2092,15 +2163,18 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_alloc = 0;
+
enetc_lock_mdio();
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
enetc_unlock_mdio();
- /* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
-static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
@@ -2109,10 +2183,42 @@ static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
enetc_setup_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(hw, priv->rx_ring[i]);
+ enetc_setup_rxbdr(hw, priv->rx_ring[i], extended);
+}
+
+static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int idx = tx_ring->index;
+ u32 tbmr;
+
+ tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
+ tbmr |= ENETC_TBMR_EN;
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+}
+
+static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+ u32 rbmr;
+
+ rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+ rbmr |= ENETC_RBMR_EN;
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_enable_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
-static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
@@ -2120,13 +2226,30 @@ static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}
-static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
- int delay = 8, timeout = 100;
- int idx = tx_ring->index;
+ int idx = tx_ring->index;
/* disable EN bit on ring */
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+}
+
+static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_disable_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_disable_rxbdr(hw, priv->rx_ring[i]);
+}
+
+static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int delay = 8, timeout = 100;
+ int idx = tx_ring->index;
/* wait for busy to clear */
while (delay < timeout &&
@@ -2140,18 +2263,13 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
idx);
}
-static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_wait_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(hw, priv->tx_ring[i]);
-
- for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(hw, priv->rx_ring[i]);
-
- udelay(1);
+ enetc_wait_txbdr(hw, priv->tx_ring[i]);
}
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
@@ -2267,8 +2385,11 @@ static int enetc_phylink_connect(struct net_device *ndev)
struct ethtool_eee edata;
int err;
- if (!priv->phylink)
- return 0; /* phy-less mode */
+ if (!priv->phylink) {
+ /* phy-less mode */
+ netif_carrier_on(ndev);
+ return 0;
+ }
err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
if (err) {
@@ -2280,6 +2401,8 @@ static int enetc_phylink_connect(struct net_device *ndev)
memset(&edata, 0, sizeof(struct ethtool_eee));
phylink_ethtool_set_eee(priv->phylink, &edata);
+ phylink_start(priv->phylink);
+
return 0;
}
@@ -2321,20 +2444,21 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
- if (priv->phylink)
- phylink_start(priv->phylink);
- else
- netif_carrier_on(ndev);
+ enetc_enable_bdrs(priv);
netif_tx_start_all_queues(ndev);
}
+EXPORT_SYMBOL_GPL(enetc_start);
int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int num_stack_tx_queues;
+ struct enetc_bdr_resource *tx_res, *rx_res;
+ bool extended;
int err;
+ extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+
err = enetc_setup_irqs(priv);
if (err)
return err;
@@ -2343,34 +2467,28 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_phy_connect;
- err = enetc_alloc_tx_resources(priv);
- if (err)
+ tx_res = enetc_alloc_tx_resources(priv);
+ if (IS_ERR(tx_res)) {
+ err = PTR_ERR(tx_res);
goto err_alloc_tx;
+ }
- err = enetc_alloc_rx_resources(priv);
- if (err)
+ rx_res = enetc_alloc_rx_resources(priv, extended);
+ if (IS_ERR(rx_res)) {
+ err = PTR_ERR(rx_res);
goto err_alloc_rx;
-
- num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
-
- err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
- if (err)
- goto err_set_queues;
+ }
enetc_tx_onestep_tstamp_init(priv);
- enetc_setup_bdrs(priv);
+ enetc_assign_tx_resources(priv, tx_res);
+ enetc_assign_rx_resources(priv, rx_res);
+ enetc_setup_bdrs(priv, extended);
enetc_start(ndev);
return 0;
-err_set_queues:
- enetc_free_rx_resources(priv);
err_alloc_rx:
- enetc_free_tx_resources(priv);
+ enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx:
if (priv->phylink)
phylink_disconnect_phy(priv->phylink);
@@ -2379,6 +2497,7 @@ err_phy_connect:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_open);
void enetc_stop(struct net_device *ndev)
{
@@ -2387,6 +2506,8 @@ void enetc_stop(struct net_device *ndev)
netif_tx_stop_all_queues(ndev);
+ enetc_disable_bdrs(priv);
+
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
ENETC_BDR_INT_BASE_IDX + i);
@@ -2396,104 +2517,205 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
- if (priv->phylink)
- phylink_stop(priv->phylink);
- else
- netif_carrier_off(ndev);
+ enetc_wait_bdrs(priv);
enetc_clear_interrupts(priv);
}
+EXPORT_SYMBOL_GPL(enetc_stop);
int enetc_close(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
enetc_stop(ndev);
- enetc_clear_bdrs(priv);
- if (priv->phylink)
+ if (priv->phylink) {
+ phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
+ } else {
+ netif_carrier_off(ndev);
+ }
+
enetc_free_rxtx_rings(priv);
- enetc_free_rx_resources(priv);
- enetc_free_tx_resources(priv);
+
+ /* Avoids dangling pointers and also frees old resources */
+ enetc_assign_rx_resources(priv, NULL);
+ enetc_assign_tx_resources(priv, NULL);
+
enetc_free_irqs(priv);
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_close);
-int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
+ int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
+ void *ctx)
+{
+ struct enetc_bdr_resource *tx_res, *rx_res;
+ int err;
+
+ ASSERT_RTNL();
+
+ /* If the interface is down, run the callback right away,
+ * without reconfiguration.
+ */
+ if (!netif_running(priv->ndev)) {
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+ }
+
+ tx_res = enetc_alloc_tx_resources(priv);
+ if (IS_ERR(tx_res)) {
+ err = PTR_ERR(tx_res);
+ goto out;
+ }
+
+ rx_res = enetc_alloc_rx_resources(priv, extended);
+ if (IS_ERR(rx_res)) {
+ err = PTR_ERR(rx_res);
+ goto out_free_tx_res;
+ }
+
+ enetc_stop(priv->ndev);
+ enetc_free_rxtx_rings(priv);
+
+ /* Interface is down, run optional callback now */
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ goto out_restart;
+ }
+
+ enetc_assign_tx_resources(priv, tx_res);
+ enetc_assign_rx_resources(priv, rx_res);
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
+
+ return 0;
+
+out_restart:
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
+ enetc_free_rx_resources(rx_res, priv->num_rx_rings);
+out_free_tx_res:
+ enetc_free_tx_resources(tx_res, priv->num_tx_rings);
+out:
+ return err;
+}
+
+static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
+ priv->tx_ring[i]->prio);
+}
+
+static void enetc_reset_tc_mqprio(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct tc_mqprio_qopt *mqprio = type_data;
struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
- u8 num_tc;
int i;
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = mqprio->num_tc;
- if (!num_tc) {
- netdev_reset_tc(ndev);
- netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ priv->min_num_stack_tx_queues = num_possible_cpus();
- /* Reset all ring priorities to 0 */
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = 0;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ /* Reset all ring priorities to 0 */
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ enetc_debug_tx_ring_prios(priv);
+}
+
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
+ int num_stack_tx_queues = 0;
+ u8 num_tc = mqprio->num_tc;
+ struct enetc_bdr *tx_ring;
+ int offset, count;
+ int err, tc, q;
+ if (!num_tc) {
+ enetc_reset_tc_mqprio(ndev);
return 0;
}
- /* Check if we have enough BD rings available to accommodate all TCs */
- if (num_tc > num_stack_tx_queues) {
- netdev_err(ndev, "Max %d traffic classes supported\n",
- priv->num_tx_rings);
- return -EINVAL;
- }
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
- /* For the moment, we use only one BD ring per TC.
- *
- * Configure num_tc BD rings with increasing priorities.
- */
- for (i = 0; i < num_tc; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = i;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ for (tc = 0; tc < num_tc; tc++) {
+ offset = mqprio->offset[tc];
+ count = mqprio->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+
+ for (q = offset; q < offset + count; q++) {
+ tx_ring = priv->tx_ring[q];
+ /* The prio_tc_map is skb_tx_hash()'s way of selecting
+ * between TX queues based on skb->priority. As such,
+ * there's nothing to offload based on it.
+ * Make the mqprio "traffic class" be the priority of
+ * this ring group, and leave the Tx IPV to traffic
+ * class mapping at its default 1:1 value.
+ */
+ tx_ring->prio = tc;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
}
- /* Reset the number of netdev queues based on the TC count */
- netif_set_real_num_tx_queues(ndev, num_tc);
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
- netdev_set_num_tc(ndev, num_tc);
+ priv->min_num_stack_tx_queues = num_stack_tx_queues;
- /* Each TC is associated with one netdev queue */
- for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(ndev, i, 1, i);
+ enetc_debug_tx_ring_prios(priv);
return 0;
+
+err_reset_tc:
+ enetc_reset_tc_mqprio(ndev);
+ return err;
}
+EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
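A worked example of the queue-group handling above (values are hypothetical; struct tc_mqprio_qopt comes from the pkt_sched uAPI header):

/* num_tc = 2, count = {2, 2}, offset = {0, 2}:
 * rings 0-1 get hardware priority 0, rings 2-3 get priority 1,
 * and real_num_tx_queues becomes 4
 */
struct tc_mqprio_qopt example = {
	.num_tc = 2,
	.count  = { 2, 2 },
	.offset = { 0, 2 },
};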
-static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
- struct netlink_ext_ack *extack)
+static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
- struct enetc_ndev_priv *priv = netdev_priv(dev);
- struct bpf_prog *old_prog;
- bool is_up;
- int i;
-
- /* The buffer layout is changing, so we need to drain the old
- * RX buffers and seed new ones.
- */
- is_up = netif_running(dev);
- if (is_up)
- dev_close(dev);
+ struct bpf_prog *old_prog, *prog = ctx;
+ int num_stack_tx_queues;
+ int err, i;
old_prog = xchg(&priv->xdp_prog, prog);
+
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err) {
+ xchg(&priv->xdp_prog, old_prog);
+ return err;
+ }
+
if (old_prog)
bpf_prog_put(old_prog);
@@ -2508,23 +2730,46 @@ static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
rx_ring->buffer_offset = ENETC_RXB_PAD;
}
- if (is_up)
- return dev_open(dev, extack);
-
return 0;
}
-int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
+static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
- switch (xdp->command) {
+ int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ bool extended;
+
+ if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ priv->num_tx_rings) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
+ num_xdp_tx_queues,
+ priv->min_num_stack_tx_queues,
+ priv->num_tx_rings);
+ return -EBUSY;
+ }
+
+ extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+
+ /* The buffer layout is changing, so we need to drain the old
+ * RX buffers and seed new ones.
+ */
+ return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog);
+}
+
+int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
case XDP_SETUP_PROG:
- return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
+ return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack);
default:
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_setup_bpf);
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
@@ -2556,6 +2801,7 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
return stats;
}
+EXPORT_SYMBOL_GPL(enetc_get_stats);
static int enetc_set_rss(struct net_device *ndev, int en)
{
@@ -2608,48 +2854,53 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
}
+EXPORT_SYMBOL_GPL(enetc_set_features);
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err, new_offloads = priv->active_offloads;
struct hwtstamp_config config;
- int ao;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
case HWTSTAMP_TX_ON:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
- priv->active_offloads |= ENETC_F_TX_TSTAMP;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
- priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
default:
return -ERANGE;
}
- ao = priv->active_offloads;
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
+ new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
- priv->active_offloads |= ENETC_F_RX_TSTAMP;
+ new_offloads |= ENETC_F_RX_TSTAMP;
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
- if (netif_running(ndev) && ao != priv->active_offloads) {
- enetc_close(ndev);
- enetc_open(ndev);
+ if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
+ bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP);
+
+ err = enetc_reconfigure(priv, extended, NULL, NULL);
+ if (err)
+ return err;
}
+ priv->active_offloads = new_offloads;
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -2691,10 +2942,12 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
+EXPORT_SYMBOL_GPL(enetc_ioctl);
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
int v_tx_rings;
@@ -2771,6 +3024,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
}
}
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err)
+ goto fail;
+
+ err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
+ if (err)
+ goto fail;
+
+ priv->min_num_stack_tx_queues = num_possible_cpus();
first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
@@ -2792,6 +3056,7 @@ fail:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_alloc_msix);
void enetc_free_msix(struct enetc_ndev_priv *priv)
{
@@ -2821,6 +3086,7 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
}
+EXPORT_SYMBOL_GPL(enetc_free_msix);
static void enetc_kfree_si(struct enetc_si *si)
{
@@ -2910,6 +3176,7 @@ err_dma:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_pci_probe);
void enetc_pci_remove(struct pci_dev *pdev)
{
@@ -2921,3 +3188,6 @@ void enetc_pci_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
+EXPORT_SYMBOL_GPL(enetc_pci_remove);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index c6d8cc15c270..8010f31cd10d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -70,7 +70,6 @@ struct enetc_ring_stats {
unsigned int xdp_tx_drops;
unsigned int xdp_redirect;
unsigned int xdp_redirect_failures;
- unsigned int xdp_redirect_sg;
unsigned int recycles;
unsigned int recycle_failures;
unsigned int win_drop;
@@ -86,6 +85,23 @@ struct enetc_xdp_data {
#define ENETC_TX_RING_DEFAULT_SIZE 2048
#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2)
+struct enetc_bdr_resource {
+ /* Input arguments saved for teardown */
+ struct device *dev; /* for DMA mapping */
+ size_t bd_count;
+ size_t bd_size;
+
+ /* Resource proper */
+ void *bd_base; /* points to Rx or Tx BD ring */
+ dma_addr_t bd_dma_base;
+ union {
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_rx_swbd *rx_swbd;
+ };
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
+};
+
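A sketch of the swap sequence this structure enables, condensed from enetc_reconfigure() earlier in this patch:

tx_res = enetc_alloc_tx_resources(priv);	/* allocate first; on failure
						 * the live rings are untouched */
if (IS_ERR(tx_res))
	return PTR_ERR(tx_res);

enetc_stop(priv->ndev);				/* quiesce the old rings */
enetc_assign_tx_resources(priv, tx_res);	/* frees old, installs new */
enetc_setup_bdrs(priv, extended);
enetc_start(priv->ndev);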
struct enetc_bdr {
struct device *dev; /* for DMA mapping */
struct net_device *ndev;
@@ -213,8 +229,9 @@ enum enetc_errata {
ENETC_ERR_UCMCSWP = BIT(1),
};
-#define ENETC_SI_F_QBV BIT(0)
-#define ENETC_SI_F_PSFP BIT(1)
+#define ENETC_SI_F_PSFP BIT(0)
+#define ENETC_SI_F_QBV BIT(1)
+#define ENETC_SI_F_QBU BIT(2)
/* PCI IEP device data */
struct enetc_si {
@@ -297,7 +314,6 @@ struct psfp_cap {
};
#define ENETC_F_TX_TSTAMP_MASK 0xff
-/* TODO: more hardware offloads */
enum enetc_active_offloads {
/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
ENETC_F_TX_TSTAMP = BIT(0),
@@ -306,6 +322,7 @@ enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(8),
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
+ ENETC_F_QBU = BIT(11),
};
enum enetc_flags_bit {
@@ -345,11 +362,16 @@ struct enetc_ndev_priv {
struct enetc_bdr **xdp_tx_ring;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
+ const struct enetc_bdr_resource *tx_res;
+ const struct enetc_bdr_resource *rx_res;
struct enetc_cls_rule *cls_rules;
struct psfp_cap psfp_cap;
+ /* Minimum number of TX queues required by the network stack */
+ unsigned int min_num_stack_tx_queues;
+
struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
@@ -360,6 +382,11 @@ struct enetc_ndev_priv {
struct work_struct tx_onestep_tstamp;
struct sk_buff_head tx_skbs;
+
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates
+ */
+ struct mutex mm_lock;
};
/* Messaging */
@@ -378,6 +405,8 @@ struct enetc_msg_cmd_set_primary_mac {
extern int enetc_phc_index;
/* SI common */
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
int enetc_alloc_msix(struct enetc_ndev_priv *priv);
@@ -397,12 +426,13 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev);
void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
-int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
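[Editor's note] The new struct enetc_bdr_resource separates what a ring owns (descriptor memory, software BD array, TSO headers) from the live struct enetc_bdr the datapath runs on. That split allows a complete replacement set of resources to be allocated up front and committed only on success, so a failed reconfiguration leaves the interface running on its old buffers. A sketch of the allocate-then-swap pattern this enables, with hypothetical sketch_* helpers:

static int sketch_reconfig_tx(struct enetc_ndev_priv *priv)
{
        const struct enetc_bdr_resource *new_res, *old_res;

        new_res = sketch_alloc_tx_resources(priv);      /* hypothetical */
        if (IS_ERR(new_res))
                return PTR_ERR(new_res);        /* old rings untouched */

        old_res = priv->tx_res;
        priv->tx_res = new_res;                 /* commit */
        sketch_free_tx_resources(old_res);      /* hypothetical */

        return 0;
}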
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index af68dc46a795..20bfdf7fb4b4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -44,6 +44,7 @@ int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_setup_cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
@@ -57,6 +58,7 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
cbdr->bd_base = NULL;
cbdr->dma_dev = NULL;
}
+EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
@@ -127,6 +129,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_send_cmd);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
@@ -140,6 +143,7 @@ int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
return enetc_send_cmd(si, &cbd);
}
+EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map)
@@ -165,6 +169,7 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
return enetc_send_cmd(si, &cbd);
}
+EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry);
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
@@ -197,6 +202,7 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
return err;
}
+EXPORT_SYMBOL_GPL(enetc_set_fs_entry);
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
@@ -242,9 +248,11 @@ int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
return enetc_cmd_rss_table(si, table, count, true);
}
+EXPORT_SYMBOL_GPL(enetc_get_rss_table);
/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
+EXPORT_SYMBOL_GPL(enetc_set_rss_table);
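[Editor's note] With the command BD ring helpers exported, sibling modules can drive the RSS indirection table directly. A minimal sketch that spreads flows round-robin across the RX rings, using the enetc_set_rss_table() signature above; the table size of 128 is assumed for illustration, the device reports the real entry count:

static int sketch_default_rss(struct enetc_si *si, int num_rx_rings)
{
        u32 table[128]; /* illustrative size */
        int i;

        for (i = 0; i < ARRAY_SIZE(table); i++)
                table[i] = i % num_rx_rings;

        return enetc_set_rss_table(si, table, ARRAY_SIZE(table));
}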
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index c8369e3752b0..bca68edfbe9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
+#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
#include "enetc.h"
@@ -197,7 +198,6 @@ static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
"Rx ring %2d recycle failures",
"Rx ring %2d redirects",
"Rx ring %2d redirect failures",
- "Rx ring %2d redirect S/G",
};
static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
@@ -291,7 +291,6 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = priv->rx_ring[i]->stats.recycle_failures;
data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
- data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg;
}
if (!enetc_si_is_pf(priv->si))
@@ -301,14 +300,32 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
+static void enetc_pause_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_pause_stats *pause_stats)
+{
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
+}
+
static void enetc_get_pause_stats(struct net_device *ndev,
struct ethtool_pause_stats *pause_stats)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(0));
- pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(0));
+ switch (pause_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_pause_stats(hw, 0, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_pause_stats(hw, 1, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_pause_stats(ndev, pause_stats);
+ break;
+ }
}
static void enetc_mac_stats(struct enetc_hw *hw, int mac,
@@ -385,8 +402,20 @@ static void enetc_get_eth_mac_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_mac_stats(hw, 0, mac_stats);
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_mac_stats(hw, 0, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mac_stats(hw, 1, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_mac_stats(ndev, mac_stats);
+ break;
+ }
}
static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
@@ -394,8 +423,20 @@ static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_ctrl_stats(hw, 0, ctrl_stats);
+ switch (ctrl_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_ctrl_stats(hw, 1, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_ctrl_stats(ndev, ctrl_stats);
+ break;
+ }
}
static void enetc_get_rmon_stats(struct net_device *ndev,
@@ -404,8 +445,20 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ switch (rmon_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_rmon_stats(hw, 1, rmon_stats, ranges);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_rmon_stats(ndev, rmon_stats);
+ break;
+ }
}
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
@@ -651,6 +704,7 @@ void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
}
+EXPORT_SYMBOL_GPL(enetc_set_rss_key);
static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
const u8 *key, const u8 hfunc)
@@ -864,6 +918,166 @@ static int enetc_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
+static void enetc_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return;
+
+ s->MACMergeFrameAssErrorCount = enetc_port_rd(hw, ENETC_MMFAECR);
+ s->MACMergeFrameSmdErrorCount = enetc_port_rd(hw, ENETC_MMFSECR);
+ s->MACMergeFrameAssOkCount = enetc_port_rd(hw, ENETC_MMFAOCR);
+ s->MACMergeFragCountRx = enetc_port_rd(hw, ENETC_MMFCRXR);
+ s->MACMergeFragCountTx = enetc_port_rd(hw, ENETC_MMFCTXR);
+ s->MACMergeHoldCount = enetc_port_rd(hw, ENETC_MMHCR);
+}
+
+static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ u32 lafs, rafs, val;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ state->pmac_enabled = !!(val & ENETC_PFPMR_PMACE);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ switch (ENETC_MMCSR_GET_VSTS(val)) {
+ case 0:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ break;
+ case 2:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ break;
+ case 3:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ break;
+ case 4:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ break;
+ case 5:
+ default:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+ break;
+ }
+
+ rafs = ENETC_MMCSR_GET_RAFS(val);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(rafs);
+ lafs = ENETC_MMCSR_GET_LAFS(val);
+ state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs);
+ state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */
+ state->tx_active = !!(val & ENETC_MMCSR_LPA);
+ state->verify_enabled = !(val & ENETC_MMCSR_VDIS);
+ state->verify_time = ENETC_MMCSR_GET_VT(val);
+ /* A verifyTime of 128 ms would exceed the 7-bit width
+ * of the ENETC_MMCSR_VT field
+ */
+ state->max_verify_time = 127;
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+ u32 val, add_frag_size;
+ int err;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &add_frag_size, extack);
+ if (err)
+ return err;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ if (cfg->pmac_enabled)
+ val |= ENETC_PFPMR_PMACE;
+ else
+ val &= ~ENETC_PFPMR_PMACE;
+ enetc_port_wr(hw, ENETC_PFPMR, val);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (cfg->verify_enabled)
+ val &= ~ENETC_MMCSR_VDIS;
+ else
+ val |= ENETC_MMCSR_VDIS;
+
+ if (cfg->tx_enabled)
+ priv->active_offloads |= ENETC_F_QBU;
+ else
+ priv->active_offloads &= ~ENETC_F_QBU;
+
+ /* If link is up, enable MAC Merge right away */
+ if (!!(priv->active_offloads & ENETC_F_QBU) &&
+ !(val & ENETC_MMCSR_LINK_FAIL))
+ val |= ENETC_MMCSR_ME;
+
+ val &= ~ENETC_MMCSR_VT_MASK;
+ val |= ENETC_MMCSR_VT(cfg->verify_time);
+
+ val &= ~ENETC_MMCSR_RAFS_MASK;
+ val |= ENETC_MMCSR_RAFS(add_frag_size);
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+/* When the link is lost, the verification state machine goes to the FAILED
+ * state and doesn't restart on its own after a new link up event.
+ * According to 802.3 Figure 99-8 - Verify state diagram, the LINK_FAIL bit
+ * should have been sufficient to re-trigger verification, but for ENETC it
+ * doesn't. As a workaround, we need to toggle the Merge Enable bit to
+ * re-trigger verification when link comes up.
+ */
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (link) {
+ val &= ~ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val |= ENETC_MMCSR_ME;
+ } else {
+ val |= ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val &= ~ENETC_MMCSR_ME;
+ }
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ mutex_unlock(&priv->mm_lock);
+}
+EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -894,6 +1108,9 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.set_wol = enetc_set_wol,
.get_pauseparam = enetc_get_pauseparam,
.set_pauseparam = enetc_set_pauseparam,
+ .get_mm = enetc_get_mm,
+ .set_mm = enetc_set_mm,
+ .get_mm_stats = enetc_get_mm_stats,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
@@ -926,3 +1143,4 @@ void enetc_set_ethtool_ops(struct net_device *ndev)
else
ndev->ethtool_ops = &enetc_vf_ethtool_ops;
}
+EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops);
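[Editor's note] Each counter callback above now honours the requested source: the eMAC registers for SRC_EMAC, the preemptible MAC (only when ENETC_SI_F_QBU is present) for SRC_PMAC, and a core-composed sum for SRC_AGGREGATE. Conceptually, the ethtool_aggregate_*() helpers re-invoke the driver for both MACs and add the counters; a simplified approximation of that behaviour, not the helpers' exact implementation:

static void sketch_aggregate_pause(struct net_device *dev,
                                   struct ethtool_pause_stats *stats)
{
        struct ethtool_pause_stats emac = {}, pmac = {};

        emac.src = ETHTOOL_MAC_STATS_SRC_EMAC;
        pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC;

        dev->ethtool_ops->get_pause_stats(dev, &emac);
        dev->ethtool_ops->get_pause_stats(dev, &pmac);

        stats->tx_pause_frames = emac.tx_pause_frames + pmac.tx_pause_frames;
        stats->rx_pause_frames = emac.rx_pause_frames + pmac.rx_pause_frames;
}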
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 18ca1f42b1f7..de2e0ee8cdcb 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -18,9 +18,10 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
-#define ENETC_SIPCAPR0_QBV BIT(4)
#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
+#define ENETC_SIPCAPR0_QBV BIT(4)
+#define ENETC_SIPCAPR0_QBU BIT(3)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -213,7 +214,6 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
#define ENETC_PFPMR 0x1900
#define ENETC_PFPMR_PMACE BIT(1)
-#define ENETC_PFPMR_MWLM BIT(0)
#define ENETC_EMDIO_BASE 0x1c00
#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
@@ -222,11 +222,35 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
#define ENETC_MMCSR 0x1f00
-#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_MMCSR_LINK_FAIL BIT(31)
+#define ENETC_MMCSR_VT_MASK GENMASK(29, 23) /* Verify Time */
+#define ENETC_MMCSR_VT(x) (((x) << 23) & ENETC_MMCSR_VT_MASK)
+#define ENETC_MMCSR_GET_VT(x) (((x) & ENETC_MMCSR_VT_MASK) >> 23)
+#define ENETC_MMCSR_TXSTS_MASK GENMASK(22, 21) /* Merge Status */
+#define ENETC_MMCSR_GET_TXSTS(x) (((x) & ENETC_MMCSR_TXSTS_MASK) >> 21)
+#define ENETC_MMCSR_VSTS_MASK GENMASK(20, 18) /* Verify Status */
+#define ENETC_MMCSR_GET_VSTS(x) (((x) & ENETC_MMCSR_VSTS_MASK) >> 18)
+#define ENETC_MMCSR_VDIS BIT(17) /* Verify Disabled */
+#define ENETC_MMCSR_ME BIT(16) /* Merge Enabled */
+#define ENETC_MMCSR_RAFS_MASK GENMASK(9, 8) /* Remote Additional Fragment Size */
+#define ENETC_MMCSR_RAFS(x) (((x) << 8) & ENETC_MMCSR_RAFS_MASK)
+#define ENETC_MMCSR_GET_RAFS(x) (((x) & ENETC_MMCSR_RAFS_MASK) >> 8)
+#define ENETC_MMCSR_LAFS_MASK GENMASK(4, 3) /* Local Additional Fragment Size */
+#define ENETC_MMCSR_GET_LAFS(x) (((x) & ENETC_MMCSR_LAFS_MASK) >> 3)
+#define ENETC_MMCSR_LPA BIT(2) /* Local Preemption Active */
+#define ENETC_MMCSR_LPE BIT(1) /* Local Preemption Enabled */
+#define ENETC_MMCSR_LPS BIT(0) /* Local Preemption Supported */
+#define ENETC_MMFAECR 0x1f08
+#define ENETC_MMFSECR 0x1f0c
+#define ENETC_MMFAOCR 0x1f10
+#define ENETC_MMFCRXR 0x1f14
+#define ENETC_MMFCTXR 0x1f18
+#define ENETC_MMHCR 0x1f1c
#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
+#define ENETC_PMAC_OFFSET 0x1000
+
#define ENETC_PM0_CMD_CFG 0x8008
-#define ENETC_PM1_CMD_CFG 0x9008
#define ENETC_PM0_TX_EN BIT(0)
#define ENETC_PM0_RX_EN BIT(1)
#define ENETC_PM0_PROMISC BIT(4)
@@ -245,11 +269,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_PAUSE_QUANTA 0x8054
#define ENETC_PM0_PAUSE_THRESH 0x8064
-#define ENETC_PM1_PAUSE_QUANTA 0x9054
-#define ENETC_PM1_PAUSE_THRESH 0x9064
#define ENETC_PM0_SINGLE_STEP 0x80c0
-#define ENETC_PM1_SINGLE_STEP 0x90c0
#define ENETC_PM0_SINGLE_STEP_CH BIT(7)
#define ENETC_PM0_SINGLE_STEP_EN BIT(31)
#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8)
@@ -279,57 +300,57 @@ enum enetc_bdr_type {TX, RX};
/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
* Port MAC 1 to the pMAC.
*/
-#define ENETC_PM_REOCT(mac) (0x8100 + 0x1000 * (mac))
-#define ENETC_PM_RALN(mac) (0x8110 + 0x1000 * (mac))
-#define ENETC_PM_RXPF(mac) (0x8118 + 0x1000 * (mac))
-#define ENETC_PM_RFRM(mac) (0x8120 + 0x1000 * (mac))
-#define ENETC_PM_RFCS(mac) (0x8128 + 0x1000 * (mac))
-#define ENETC_PM_RVLAN(mac) (0x8130 + 0x1000 * (mac))
-#define ENETC_PM_RERR(mac) (0x8138 + 0x1000 * (mac))
-#define ENETC_PM_RUCA(mac) (0x8140 + 0x1000 * (mac))
-#define ENETC_PM_RMCA(mac) (0x8148 + 0x1000 * (mac))
-#define ENETC_PM_RBCA(mac) (0x8150 + 0x1000 * (mac))
-#define ENETC_PM_RDRP(mac) (0x8158 + 0x1000 * (mac))
-#define ENETC_PM_RPKT(mac) (0x8160 + 0x1000 * (mac))
-#define ENETC_PM_RUND(mac) (0x8168 + 0x1000 * (mac))
-#define ENETC_PM_R64(mac) (0x8170 + 0x1000 * (mac))
-#define ENETC_PM_R127(mac) (0x8178 + 0x1000 * (mac))
-#define ENETC_PM_R255(mac) (0x8180 + 0x1000 * (mac))
-#define ENETC_PM_R511(mac) (0x8188 + 0x1000 * (mac))
-#define ENETC_PM_R1023(mac) (0x8190 + 0x1000 * (mac))
-#define ENETC_PM_R1522(mac) (0x8198 + 0x1000 * (mac))
-#define ENETC_PM_R1523X(mac) (0x81A0 + 0x1000 * (mac))
-#define ENETC_PM_ROVR(mac) (0x81A8 + 0x1000 * (mac))
-#define ENETC_PM_RJBR(mac) (0x81B0 + 0x1000 * (mac))
-#define ENETC_PM_RFRG(mac) (0x81B8 + 0x1000 * (mac))
-#define ENETC_PM_RCNP(mac) (0x81C0 + 0x1000 * (mac))
-#define ENETC_PM_RDRNTP(mac) (0x81C8 + 0x1000 * (mac))
-#define ENETC_PM_TEOCT(mac) (0x8200 + 0x1000 * (mac))
-#define ENETC_PM_TOCT(mac) (0x8208 + 0x1000 * (mac))
-#define ENETC_PM_TCRSE(mac) (0x8210 + 0x1000 * (mac))
-#define ENETC_PM_TXPF(mac) (0x8218 + 0x1000 * (mac))
-#define ENETC_PM_TFRM(mac) (0x8220 + 0x1000 * (mac))
-#define ENETC_PM_TFCS(mac) (0x8228 + 0x1000 * (mac))
-#define ENETC_PM_TVLAN(mac) (0x8230 + 0x1000 * (mac))
-#define ENETC_PM_TERR(mac) (0x8238 + 0x1000 * (mac))
-#define ENETC_PM_TUCA(mac) (0x8240 + 0x1000 * (mac))
-#define ENETC_PM_TMCA(mac) (0x8248 + 0x1000 * (mac))
-#define ENETC_PM_TBCA(mac) (0x8250 + 0x1000 * (mac))
-#define ENETC_PM_TPKT(mac) (0x8260 + 0x1000 * (mac))
-#define ENETC_PM_TUND(mac) (0x8268 + 0x1000 * (mac))
-#define ENETC_PM_T64(mac) (0x8270 + 0x1000 * (mac))
-#define ENETC_PM_T127(mac) (0x8278 + 0x1000 * (mac))
-#define ENETC_PM_T255(mac) (0x8280 + 0x1000 * (mac))
-#define ENETC_PM_T511(mac) (0x8288 + 0x1000 * (mac))
-#define ENETC_PM_T1023(mac) (0x8290 + 0x1000 * (mac))
-#define ENETC_PM_T1522(mac) (0x8298 + 0x1000 * (mac))
-#define ENETC_PM_T1523X(mac) (0x82A0 + 0x1000 * (mac))
-#define ENETC_PM_TCNP(mac) (0x82C0 + 0x1000 * (mac))
-#define ENETC_PM_TDFR(mac) (0x82D0 + 0x1000 * (mac))
-#define ENETC_PM_TMCOL(mac) (0x82D8 + 0x1000 * (mac))
-#define ENETC_PM_TSCOL(mac) (0x82E0 + 0x1000 * (mac))
-#define ENETC_PM_TLCOL(mac) (0x82E8 + 0x1000 * (mac))
-#define ENETC_PM_TECOL(mac) (0x82F0 + 0x1000 * (mac))
+#define ENETC_PM_REOCT(mac) (0x8100 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + ENETC_PMAC_OFFSET * (mac))
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
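[Editor's note] Two details in the register block above are easy to miss: each MMCSR field now comes with paired encode/decode macros, and the eMAC/pMAC mirroring is expressed through ENETC_PMAC_OFFSET rather than hard-coded 0x9xxx addresses. A sanity sketch that follows directly from those definitions:

static void sketch_mmcsr_checks(void)
{
        u32 val = ENETC_MMCSR_VT(10);   /* 10 << 23, masked to bits 29:23 */

        WARN_ON(ENETC_MMCSR_GET_VT(val) != 10); /* in-range values round-trip */

        /* same counter per MAC: eMAC at 0x8218, pMAC at 0x9218 */
        BUILD_BUG_ON(ENETC_PM_TXPF(1) - ENETC_PM_TXPF(0) != ENETC_PMAC_OFFSET);
}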
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 1c8f5cc6dec4..998aaa394e9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -55,7 +55,8 @@ static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
is_busy, !is_busy, 10, 10 * 1000);
}
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
u32 mdio_ctl, mdio_cfg;
@@ -63,14 +64,39 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
int ret;
mdio_cfg = ENETC_EMDIO_CFG;
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_cfg |= MDIO_CFG_ENC45;
- } else {
- /* clause 22 (ie 1G) */
- dev_addr = regnum & 0x1f;
- mdio_cfg &= ~MDIO_CFG_ENC45;
- }
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* write the value */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_write_c22);
+
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ mdio_cfg |= MDIO_CFG_ENC45;
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
@@ -83,13 +109,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
- if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(mdio_priv);
- if (ret)
- return ret;
- }
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
/* write the value */
enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
@@ -100,9 +124,9 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
return 0;
}
-EXPORT_SYMBOL_GPL(enetc_mdio_write);
+EXPORT_SYMBOL_GPL(enetc_mdio_write_c45);
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
u32 mdio_ctl, mdio_cfg;
@@ -110,14 +134,51 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
int ret;
mdio_cfg = ENETC_EMDIO_CFG;
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_cfg |= MDIO_CFG_ENC45;
- } else {
- dev_addr = regnum & 0x1f;
- mdio_cfg &= ~MDIO_CFG_ENC45;
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and device addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* initiate the read */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* return all Fs if nothing was there */
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ return 0xffff;
}
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c22);
+
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ u16 value;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ mdio_cfg |= MDIO_CFG_ENC45;
+
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
@@ -129,13 +190,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
- if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(mdio_priv);
- if (ret)
- return ret;
- }
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
/* initiate the read */
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
@@ -156,7 +215,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return value;
}
-EXPORT_SYMBOL_GPL(enetc_mdio_read);
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c45);
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{
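[Editor's note] The caller-visible effect of the C22/C45 split is that a Clause 45 access no longer smuggles the MMD through regnum via MII_ADDR_C45; the device address travels as its own argument end to end. A sketch of a consumer reading a PCS register through the phylib accessor, assuming mdiobus_c45_read() with this argument order as in this kernel's mdio core:

#include <linux/mdio.h>
#include <linux/phy.h>

static int sketch_read_pcs_status(struct mii_bus *bus, int phy_addr)
{
        /* MMD 3 (PCS), register 1 (status): devad is now explicit */
        return mdiobus_c45_read(bus, phy_addr, MDIO_MMD_PCS, MDIO_STAT1);
}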
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index dafb26f81f95..a1b595bd7993 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -39,8 +39,10 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
}
bus->name = ENETC_MDIO_BUS_NAME;
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 9f6c4f5c0a6c..7cd22d370caa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -319,24 +319,23 @@ static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
u32 reg;
- reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ reg = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
if (reg & ENETC_PM0_IFM_RG) {
/* RGMII mode */
reg = (reg & ~ENETC_PM0_IFM_RLP) |
(en ? ENETC_PM0_IFM_RLP : 0);
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, reg);
} else {
/* assume SGMII mode */
- reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ reg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
reg = (reg & ~ENETC_PM0_CMD_XGLP) |
(en ? ENETC_PM0_CMD_XGLP : 0);
reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
(en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, reg);
}
}
@@ -538,65 +537,50 @@ void enetc_reset_ptcmsdur(struct enetc_hw *hw)
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+static void enetc_configure_port_mac(struct enetc_si *si)
{
- enetc_port_wr(hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+ struct enetc_hw *hw = &si->hw;
- enetc_reset_ptcmsdur(hw);
+ enetc_port_mac_wr(si, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+ enetc_reset_ptcmsdur(hw);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
/* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
* and may lead to RX lock-up under traffic. Set it to 1 instead,
* as recommended by the hardware team.
*/
- enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
+ enetc_port_mac_wr(si, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
}
-static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+static void enetc_mac_config(struct enetc_si *si, phy_interface_t phy_mode)
{
u32 val;
if (phy_interface_mode_is_rgmii(phy_mode)) {
- val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
if (phy_mode == PHY_INTERFACE_MODE_USXGMII) {
val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
}
-static void enetc_mac_enable(struct enetc_hw *hw, bool en)
+static void enetc_mac_enable(struct enetc_si *si, bool en)
{
- u32 val = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ u32 val = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
val |= en ? (ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, val);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, val);
-}
-
-static void enetc_configure_port_pmac(struct enetc_hw *hw)
-{
- u32 temp;
-
- /* Set pMAC step lock */
- temp = enetc_port_rd(hw, ENETC_PFPMR);
- enetc_port_wr(hw, ENETC_PFPMR,
- temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
-
- temp = enetc_port_rd(hw, ENETC_MMCSR);
- enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, val);
}
static void enetc_configure_port(struct enetc_pf *pf)
@@ -604,9 +588,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
- enetc_configure_port_pmac(hw);
-
- enetc_configure_port_mac(hw);
+ enetc_configure_port_mac(pf->si);
enetc_port_si_configure(pf->si);
@@ -825,6 +807,9 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features |= NETIF_F_RXHASH;
ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
priv->active_offloads |= ENETC_F_QCI;
@@ -848,8 +833,10 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
return -ENOMEM;
bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = &pf->si->hw;
@@ -885,8 +872,10 @@ static int enetc_imdio_create(struct enetc_pf *pf)
return -ENOMEM;
bus->name = "Freescale ENETC internal MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
bus->phy_mask = ~0;
mdio_priv = bus->priv;
@@ -994,14 +983,14 @@ static void enetc_pl_mac_config(struct phylink_config *config,
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
- enetc_mac_config(&pf->si->hw, state->interface);
+ enetc_mac_config(pf->si, state->interface);
}
-static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
+static void enetc_force_rgmii_mac(struct enetc_si *si, int speed, int duplex)
{
u32 old_val, val;
- old_val = val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ old_val = val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
if (speed == SPEED_1000) {
val &= ~ENETC_PM0_IFM_SSP_MASK;
@@ -1022,7 +1011,7 @@ static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
if (val == old_val)
return;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
static void enetc_pl_mac_link_up(struct phylink_config *config,
@@ -1034,6 +1023,7 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
u32 pause_off_thresh = 0, pause_on_thresh = 0;
u32 init_quanta = 0, refresh_quanta = 0;
struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_si *si = pf->si;
struct enetc_ndev_priv *priv;
u32 rbmr, cmd_cfg;
int idx;
@@ -1045,7 +1035,7 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
if (!phylink_autoneg_inband(mode) &&
phy_interface_mode_is_rgmii(interface))
- enetc_force_rgmii_mac(hw, speed, duplex);
+ enetc_force_rgmii_mac(si, speed, duplex);
/* Flow control */
for (idx = 0; idx < priv->num_rx_rings; idx++) {
@@ -1081,24 +1071,24 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
}
- enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta);
- enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta);
- enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
- enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta);
+ enetc_port_mac_wr(si, ENETC_PM0_PAUSE_QUANTA, init_quanta);
+ enetc_port_mac_wr(si, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh);
enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh);
- cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ cmd_cfg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
if (rx_pause)
cmd_cfg &= ~ENETC_PM0_PAUSE_IGN;
else
cmd_cfg |= ENETC_PM0_PAUSE_IGN;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, cmd_cfg);
+
+ enetc_mac_enable(si, true);
- enetc_mac_enable(hw, true);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, true);
}
static void enetc_pl_mac_link_down(struct phylink_config *config,
@@ -1106,8 +1096,15 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
- enetc_mac_enable(&pf->si->hw, false);
+ priv = netdev_priv(si->ndev);
+
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, false);
+
+ enetc_mac_enable(si, false);
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
@@ -1300,6 +1297,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
priv = netdev_priv(ndev);
+ mutex_init(&priv->mm_lock);
+
enetc_init_si_rings_params(priv);
err = enetc_alloc_si_resources(priv);
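[Editor's note] The enetc_port_mac_rd()/enetc_port_mac_wr() accessors used throughout this file let MAC configuration be written once and mirrored to the pMAC only on hardware that has one, replacing the unconditional ENETC_PM0_/ENETC_PM1_ register pairs. A plausible shape for the write helper, reconstructed from its call sites and the ENETC_PMAC_OFFSET definition rather than quoted from the patch:

void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
{
        enetc_port_wr(&si->hw, reg, val);
        /* mirror to the preemptible MAC only if the port has one */
        if (si->hw_features & ENETC_SI_F_QBU)
                enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
}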
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index fcebb54224c0..130ebf6853e6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -136,29 +136,21 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- struct enetc_bdr *tx_ring;
- int err;
- int i;
+ int err, i;
/* TSD and Qbv are mutually exclusive in hardware */
for (i = 0; i < priv->num_tx_rings; i++)
if (priv->tx_ring[i]->tsd_enable)
return -EBUSY;
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = taprio->enable ? i : 0;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ err = enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
+ if (err)
+ return err;
err = enetc_setup_taprio(ndev, taprio);
if (err) {
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = taprio->enable ? 0 : i;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ taprio->mqprio.qopt.num_tc = 0;
+ enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
}
return err;
@@ -1611,6 +1603,13 @@ int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
struct enetc_si *si = priv->si;
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
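[Editor's note] taprio offload now funnels its queue layout through enetc_setup_tc_mqprio(), including the error path, which rolls back by re-running it with num_tc forced to 0; TC_QUERY_CAPS additionally lets the core validate queue counts before the driver sees the request. For orientation, a sketch of how these entry points hang off ndo_setup_tc; the dispatch shape is inferred from the exported helpers, and the real driver handles more qdisc types:

static int sketch_setup_tc(struct net_device *ndev, enum tc_setup_type type,
                           void *type_data)
{
        switch (type) {
        case TC_QUERY_CAPS:
                return enetc_qos_query_caps(ndev, type_data);
        case TC_SETUP_QDISC_MQPRIO:
                return enetc_setup_tc_mqprio(ndev, type_data);
        case TC_SETUP_QDISC_TAPRIO:
                return enetc_setup_tc_taprio(ndev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}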
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2341597408d1..c73e25f8995e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -56,12 +56,12 @@
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -1987,47 +1987,74 @@ static int fec_enet_mdio_wait(struct fec_enet_private *fep)
return ret;
}
-static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
int ret = 0, frame_start, frame_addr, frame_op;
- bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- if (is_c45) {
- frame_start = FEC_MMFR_ST_C45;
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
- /* write address */
- frame_addr = (regnum >> 16);
- writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | (regnum & 0xFFFF),
- fep->hwp + FEC_MII_DATA);
+ /* start a read op */
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
- /* wait for end of transfer */
- ret = fec_enet_mdio_wait(fep);
- if (ret) {
- netdev_err(fep->netdev, "MDIO address write timeout\n");
- goto out;
- }
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO read timeout\n");
+ goto out;
+ }
- frame_op = FEC_MMFR_OP_READ_C45;
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
- } else {
- /* C22 read */
- frame_op = FEC_MMFR_OP_READ;
- frame_start = FEC_MMFR_ST;
- frame_addr = regnum;
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret = 0, frame_start, frame_op;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
}
+ frame_op = FEC_MMFR_OP_READ_C45;
+
/* start a read op */
writel(frame_start | frame_op |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
ret = fec_enet_mdio_wait(fep);
@@ -2045,45 +2072,69 @@ out:
return ret;
}
-static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
int ret, frame_start, frame_addr;
- bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- if (is_c45) {
- frame_start = FEC_MMFR_ST_C45;
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+
+ /* start a write op */
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret)
+ netdev_err(fep->netdev, "MDIO write timeout\n");
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum, u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret, frame_start;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ frame_start = FEC_MMFR_ST_C45;
- /* write address */
- frame_addr = (regnum >> 16);
- writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | (regnum & 0xFFFF),
- fep->hwp + FEC_MII_DATA);
+ /* write address */
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
- /* wait for end of transfer */
- ret = fec_enet_mdio_wait(fep);
- if (ret) {
- netdev_err(fep->netdev, "MDIO address write timeout\n");
- goto out;
- }
- } else {
- /* C22 write */
- frame_start = FEC_MMFR_ST;
- frame_addr = regnum;
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
}
/* start a write op */
writel(frame_start | FEC_MMFR_OP_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | FEC_MMFR_DATA(value),
- fep->hwp + FEC_MII_DATA);
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
ret = fec_enet_mdio_wait(fep);
@@ -2381,8 +2432,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
}
fep->mii_bus->name = "fec_enet_mii_bus";
- fep->mii_bus->read = fec_enet_mdio_read;
- fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->read = fec_enet_mdio_read_c22;
+ fep->mii_bus->write = fec_enet_mdio_write_c22;
+ fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
+ fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
@@ -3982,10 +4035,10 @@ free_queue_mem:
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
- int err, phy_reset;
- bool active_high = false;
+ struct gpio_desc *phy_reset;
int msec = 1, phy_post_delay = 0;
struct device_node *np = pdev->dev.of_node;
+ int err;
if (!np)
return 0;
@@ -3995,33 +4048,26 @@ static int fec_reset_phy(struct platform_device *pdev)
if (!err && msec > 1000)
msec = 1;
- phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- if (phy_reset == -EPROBE_DEFER)
- return phy_reset;
- else if (!gpio_is_valid(phy_reset))
- return 0;
-
err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
/* valid reset duration should be less than 1s */
if (!err && phy_post_delay > 1000)
return -EINVAL;
- active_high = of_property_read_bool(np, "phy-reset-active-high");
+ phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(phy_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
+ "failed to get phy-reset-gpios\n");
- err = devm_gpio_request_one(&pdev->dev, phy_reset,
- active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "phy-reset");
- if (err) {
- dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return err;
- }
+ if (!phy_reset)
+ return 0;
if (msec > 20)
msleep(msec);
else
usleep_range(msec * 1000, msec * 1000 + 1000);
- gpio_set_value_cansleep(phy_reset, !active_high);
+ gpiod_set_value_cansleep(phy_reset, 0);
if (!phy_post_delay)
return 0;
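[Editor's note] The fec_reset_phy() rework swaps the integer GPIO calls for descriptor-based ones, which also retires the hand-rolled "phy-reset-active-high" handling: line polarity lives in the GPIO descriptor, so GPIOD_OUT_HIGH asserts reset whatever the DT flags say, and writing logical 0 deasserts it. The same pattern for a generic reset line, as a sketch with an illustrative property name and delay:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

static int sketch_hw_reset(struct device *dev, unsigned int assert_ms)
{
        struct gpio_desc *reset;

        /* asserts reset immediately, with polarity taken from DT */
        reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(reset))
                return dev_err_probe(dev, PTR_ERR(reset),
                                     "failed to get reset line\n");
        if (!reset)
                return 0;       /* no reset line described */

        msleep(assert_ms);
        gpiod_set_value_cansleep(reset, 0);     /* deassert */

        return 0;
}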
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index d7d39a58cd80..a13b4ba4d6e1 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -128,30 +128,49 @@ static int xgmac_wait_until_done(struct device *dev,
return 0;
}
-/*
- * Write value to the PHY for this device to the register at regnum,waiting
- * until the write is done before it returns. All PHY configuration has to be
- * done through the TSEC1 MIIM regs.
- */
-static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+static int xgmac_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
- uint16_t dev_addr;
+ bool endian = priv->is_little_endian;
+ u16 dev_addr = regnum & 0x1f;
u32 mdio_ctl, mdio_stat;
int ret;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ mdio_stat &= ~MDIO_STAT_ENC;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ /* Write the value to the register */
+ xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xgmac_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
bool endian = priv->is_little_endian;
+ u32 mdio_ctl, mdio_stat;
+ int ret;
mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
- if (regnum & MII_ADDR_C45) {
- /* Clause 45 (ie 10G) */
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_stat |= MDIO_STAT_ENC;
- } else {
- /* Clause 22 (ie 1G) */
- dev_addr = regnum & 0x1f;
- mdio_stat &= ~MDIO_STAT_ENC;
- }
+ mdio_stat |= MDIO_STAT_ENC;
xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
@@ -164,13 +183,11 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
/* Set the register address */
- if (regnum & MII_ADDR_C45) {
- xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs, endian);
- if (ret)
- return ret;
- }
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
/* Write the value to the register */
xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
@@ -182,31 +199,82 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
return 0;
}
-/*
- * Reads from register regnum in the PHY for device dev, returning the value.
+/* Reads from register regnum in the PHY for device dev, returning the value.
* Clears miimcom first. All PHY configuration has to be done through the
* TSEC1 MIIM regs.
*/
-static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static int xgmac_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ bool endian = priv->is_little_endian;
+ u16 dev_addr = regnum & 0x1f;
unsigned long flags;
- uint16_t dev_addr;
uint32_t mdio_stat;
uint32_t mdio_ctl;
int ret;
- bool endian = priv->is_little_endian;
mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_stat |= MDIO_STAT_ENC;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
+
+ /* Initiate the read */
+ xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ goto irq_restore;
+
+ /* Return all Fs if nothing was there */
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ ret = 0xffff;
} else {
- dev_addr = regnum & 0x1f;
- mdio_stat &= ~MDIO_STAT_ENC;
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
}
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+/* Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first. All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ bool endian = priv->is_little_endian;
+ u32 mdio_stat, mdio_ctl;
+ unsigned long flags;
+ int ret;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ mdio_stat |= MDIO_STAT_ENC;
+
xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
ret = xgmac_wait_until_free(&bus->dev, regs, endian);
@@ -218,13 +286,11 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
/* Set the register address */
- if (regnum & MII_ADDR_C45) {
- xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs, endian);
- if (ret)
- return ret;
- }
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
if (priv->has_a009885)
/* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
@@ -326,10 +392,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
bus->name = "Freescale XGMAC MDIO Bus";
- bus->read = xgmac_mdio_read;
- bus->write = xgmac_mdio_write;
+ bus->read = xgmac_mdio_read_c22;
+ bus->write = xgmac_mdio_write_c22;
+ bus->read_c45 = xgmac_mdio_read_c45;
+ bus->write_c45 = xgmac_mdio_write_c45;
bus->parent = &pdev->dev;
- bus->probe_capabilities = MDIOBUS_C22_C45;
snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start);
priv = bus->priv;
diff --git a/drivers/net/ethernet/fungible/funeth/Kconfig b/drivers/net/ethernet/fungible/funeth/Kconfig
index c72ad9386400..e742e7663449 100644
--- a/drivers/net/ethernet/fungible/funeth/Kconfig
+++ b/drivers/net/ethernet/fungible/funeth/Kconfig
@@ -5,7 +5,7 @@
config FUN_ETH
tristate "Fungible Ethernet device driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
select FUN_CORE
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index b4cce30e526a..df86770731ad 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -1160,6 +1160,11 @@ static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
WRITE_ONCE(rxqs[i]->xdp_prog, prog);
}
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+
dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
old_prog = xchg(&fp->xdp_prog, prog);
if (old_prog)
@@ -1765,6 +1770,7 @@ static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->mpls_features = netdev->vlan_features;
netdev->hw_enc_features = netdev->hw_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = FUN_MAX_MTU;
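[Editor's note] funeth shows both halves of the new XDP feature advertisement: capabilities that never change (BASIC, REDIRECT) are declared once on the netdev, while redirect-target support is toggled as a program comes and goes. Condensed into one sketch; the bool argument is assumed to additionally advertise NETDEV_XDP_ACT_NDO_XMIT_SG, matching funeth's use above:

static void sketch_xdp_set_prog(struct net_device *dev, struct bpf_prog *prog)
{
        /* expose ndo_xdp_xmit as a redirect target only while a
         * program is loaded; true (assumed) also sets NDO_XMIT_SG
         */
        if (prog)
                xdp_features_set_redirect_target(dev, true);
        else
                xdp_features_clear_redirect_target(dev);
}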
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 5b40f9c53196..07111c241e0e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -327,7 +327,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
- char *name = priv->dev->name;
unsigned int active_cpus;
int vecs_enabled;
int i, j;
@@ -371,8 +370,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
/* Setup Management Vector - the last vector */
- snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
- name);
+ snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
+ pci_name(priv->pdev));
err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
if (err) {
@@ -401,8 +400,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
struct gve_notify_block *block = &priv->ntfy_blocks[i];
int msix_idx = i;
- snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
- name, i);
+ snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
+ i, pci_name(priv->pdev));
block->priv = priv;
err = request_irq(priv->msix_vectors[msix_idx].vector,
gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 740850b64aff..5df19c604d09 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -554,11 +554,11 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
argv4.package.count = 1;
argv4.package.elements = &obj_args;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
- &hns_dsaf_acpi_dsm_guid, 0,
- HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
-
- if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_PORT_TYPE_FUNC, &argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
return phy_if;
phy_if = obj->integer.value ?
@@ -601,11 +601,11 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
argv4.package.count = 1;
argv4.package.elements = &obj_args;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
- &hns_dsaf_acpi_dsm_guid, 0,
- HNS_OP_GET_SFP_STAT_FUNC, &argv4);
-
- if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_SFP_STAT_FUNC, &argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
return -ENODEV;
*sfp_prsnt = obj->integer.value;
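
acpi_evaluate_dsm_typed() folds the type check into the evaluation: it returns NULL both when the _DSM call fails and when the result is not of the requested type, which is what lets the two hunks above drop their obj->type tests. A sketch of the calling convention; the foo_ wrapper and its parameters are invented:

#include <linux/acpi.h>

static int foo_dsm_read_u64(struct device *dev, const guid_t *guid,
			    u64 func, u64 *val)
{
	union acpi_object *obj;

	/* rev 0, no argv4; NULL covers both failure and wrong type */
	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), guid, 0, func,
				      NULL, ACPI_TYPE_INTEGER);
	if (!obj)
		return -ENODEV;

	*val = obj->integer.value;
	ACPI_FREE(obj);
	return 0;
}
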
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 17137de9338c..40f4306449eb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -32,6 +32,7 @@
#include <linux/pkt_sched.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define HNAE3_MOD_VERSION "1.0"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b4c4fb873568..25be7f8ac7cd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -20,6 +20,7 @@
#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 142415c84c6b..a0b46e7d863e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018-2019 Hisilicon Limited. */
#include <linux/device.h>
+#include <linux/sched/clock.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 3d3b69605423..9a939c0b217f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -114,7 +114,6 @@ int hclge_devlink_init(struct hclge_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 6efd768cc07c..3f35227ef1fa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2016-2017 Hisilicon Limited. */
+#include <linux/sched/clock.h>
+
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index a6c3c5e8f0ab..1b535142c65a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -116,7 +116,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index c2ae1b4f9a5f..9232caaf0bdc 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -206,7 +206,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
}
/**
- * hns_mdio_write - access phy register
+ * hns_mdio_write_c22 - access phy register
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
@@ -214,21 +214,19 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
*
* Return 0 on success, negative on failure
*/
-static int hns_mdio_write(struct mii_bus *bus,
- int phy_id, int regnum, u16 data)
+static int hns_mdio_write_c22(struct mii_bus *bus,
+ int phy_id, int regnum, u16 data)
{
- int ret;
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
- u8 devad = ((regnum >> 16) & 0x1f);
- u8 is_c45 = !!(regnum & MII_ADDR_C45);
u16 reg = (u16)(regnum & 0xffff);
- u8 op;
u16 cmd_reg_cfg;
+ int ret;
+ u8 op;
dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
bus->id, mdio_dev->vbase);
- dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n",
- phy_id, is_c45, devad, reg, data);
+ dev_dbg(&bus->dev, "phy id=%d, reg=%#x, write data=%d\n",
+ phy_id, reg, data);
/* wait for ready */
ret = hns_mdio_wait_ready(bus);
@@ -237,58 +235,91 @@ static int hns_mdio_write(struct mii_bus *bus,
return ret;
}
- if (!is_c45) {
- cmd_reg_cfg = reg;
- op = MDIO_C22_WRITE;
- } else {
- /* config the cmd-reg to write addr*/
- MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
- MDIO_ADDR_DATA_S, reg);
+ cmd_reg_cfg = reg;
+ op = MDIO_C22_WRITE;
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+ MDIO_WDATA_DATA_S, data);
- /* check for read or write opt is finished */
- ret = hns_mdio_wait_ready(bus);
- if (ret) {
- dev_err(&bus->dev, "MDIO bus is busy\n");
- return ret;
- }
+ hns_mdio_cmd_write(mdio_dev, false, op, phy_id, cmd_reg_cfg);
+
+ return 0;
+}
+
+/**
+ * hns_mdio_write_c45 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @devad: device address to write
+ * @regnum: register num
+ * @data: register value
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum, u16 data)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 cmd_reg_cfg;
+ int ret;
+ u8 op;
+
+ dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x, write data=%d\n",
+ phy_id, devad, reg, data);
+
+ /* wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* config the cmd-reg to write the address */
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
- /* config the data needed writing */
- cmd_reg_cfg = devad;
- op = MDIO_C45_WRITE_DATA;
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* check whether the read or write op has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
}
+ /* config the data needed writing */
+ cmd_reg_cfg = devad;
+ op = MDIO_C45_WRITE_DATA;
+
MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
MDIO_WDATA_DATA_S, data);
- hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg);
+ hns_mdio_cmd_write(mdio_dev, true, op, phy_id, cmd_reg_cfg);
return 0;
}
/**
- * hns_mdio_read - access phy register
+ * hns_mdio_read_c22 - access phy register
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
*
* Return phy register value
*/
-static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
- int ret;
- u16 reg_val;
- u8 devad = ((regnum >> 16) & 0x1f);
- u8 is_c45 = !!(regnum & MII_ADDR_C45);
- u16 reg = (u16)(regnum & 0xffff);
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 reg_val;
+ int ret;
dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
bus->id, mdio_dev->vbase);
- dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n",
- phy_id, is_c45, devad, reg);
+ dev_dbg(&bus->dev, "phy id=%d, reg=%#x!\n", phy_id, reg);
/* Step 1: wait for ready */
ret = hns_mdio_wait_ready(bus);
@@ -297,29 +328,74 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
}
- if (!is_c45) {
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C22_READ, phy_id, reg);
- } else {
- MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
- MDIO_ADDR_DATA_S, reg);
+ hns_mdio_cmd_write(mdio_dev, false, MDIO_C22_READ, phy_id, reg);
- /* Step 2; config the cmd-reg to write addr*/
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ /* Step 2: wait for MDIO_COMMAND_REG's mdio_start==0, */
+ /* i.e. check whether the read or write op has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
- /* Step 3: check for read or write opt is finished */
- ret = hns_mdio_wait_ready(bus);
- if (ret) {
- dev_err(&bus->dev, "MDIO bus is busy\n");
- return ret;
- }
+ reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+ if (reg_val) {
+ dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+ return -EBUSY;
+ }
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_READ, phy_id, devad);
+ /* Step 3: read out the data */
+ reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+ MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+ return reg_val;
+}
+
+/**
+ * hns_mdio_read_c45 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @devad: device address to read
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 reg_val;
+ int ret;
+
+ dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x!\n",
+ phy_id, devad, reg);
+
+ /* Step 1: wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ /* Step 2: config the cmd-reg to write the address */
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* Step 3: check whether the read or write op has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
}
- /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_READ, phy_id, devad);
+
+ /* Step 5: wait for MDIO_COMMAND_REG's mdio_start==0, */
/* check for read or write opt is finished */
ret = hns_mdio_wait_ready(bus);
if (ret) {
@@ -438,8 +514,10 @@ static int hns_mdio_probe(struct platform_device *pdev)
}
new_bus->name = MDIO_BUS_NAME;
- new_bus->read = hns_mdio_read;
- new_bus->write = hns_mdio_write;
+ new_bus->read = hns_mdio_read_c22;
+ new_bus->write = hns_mdio_write_c22;
+ new_bus->read_c45 = hns_mdio_read_c45;
+ new_bus->write_c45 = hns_mdio_write_c45;
new_bus->reset = hns_mdio_reset;
new_bus->priv = mdio_dev;
new_bus->parent = &pdev->dev;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e19a6bb3f444..146ca1d8031b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -250,10 +250,11 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
struct ibmvnic_sub_crq_queue *queue;
- int num_rxqs = adapter->num_active_rx_scrqs;
- int num_txqs = adapter->num_active_tx_scrqs;
+ int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
+ int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
unsigned int num_cpu, cpu;
+ bool is_rx_queue;
int rc = 0;
netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
@@ -273,14 +274,24 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
/* next available cpu to assign irq to */
cpu = cpumask_next(-1, cpu_online_mask);
- for (i = 0; i < num_txqs; i++) {
- queue = txqs[i];
+ for (i = 0; i < total_queues; i++) {
+ is_rx_queue = false;
+ /* balance core load by alternating rx and tx assignments
+ * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
+ */
+ if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
+ queue = rxqs[i_rxqs++];
+ is_rx_queue = true;
+ } else {
+ queue = txqs[i_txqs++];
+ }
+
rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
stride);
if (rc)
goto out;
- if (!queue)
+ if (!queue || is_rx_queue)
continue;
rc = __netif_set_xps_queue(adapter->netdev,
@@ -291,14 +302,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
__func__, i, rc);
}
- for (i = 0; i < num_rxqs; i++) {
- queue = rxqs[i];
- rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
- stride);
- if (rc)
- goto out;
- }
-
out:
if (rc) {
netdev_warn(adapter->netdev,
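
The rework collapses the two per-type loops into one pass that interleaves TX and RX assignments (TX0, RX0, TX1, RX1, ...), draining whichever pool remains once the other runs out. A small standalone sketch of the same selection predicate, just to make the resulting order concrete:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int num_txqs = 4, num_rxqs = 2;		/* example counts */
	int i_txqs = 0, i_rxqs = 0;
	int total = num_txqs + num_rxqs;

	for (int i = 0; i < total; i++) {
		/* odd slots take RX while any remain; also fall through
		 * to RX once all TX queues are assigned
		 */
		bool rx = (i % 2 == 1 && i_rxqs < num_rxqs) ||
			  i_txqs == num_txqs;
		if (rx)
			printf("slot %d -> RX%d\n", i, i_rxqs++);
		else
			printf("slot %d -> TX%d\n", i, i_txqs++);
	}
	return 0;
}

With 4 TX and 2 RX queues this prints TX0, RX0, TX1, RX1, TX2, TX3 — the same order the driver walks when handing out CPU affinities.
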
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3facb55b7161..a3c84bf05e44 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -337,6 +337,9 @@ config ICE_HWTS
the PTP clock driver precise cross-timestamp ioctl
(PTP_SYS_OFFSET_PRECISE).
+config ICE_GNSS
+ def_bool GNSS = y || GNSS = ICE
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 59e82d131d88..721f86fd5802 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -110,9 +110,9 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
static int e1000_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
+ u32 speed, supported, advertising, lp_advertising, lpa_t;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 speed, supported, advertising;
if (hw->phy.media_type == e1000_media_type_copper) {
supported = (SUPPORTED_10baseT_Half |
@@ -120,7 +120,9 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
+ SUPPORTED_Asym_Pause |
SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
SUPPORTED_TP);
if (hw->phy.type == e1000_phy_ife)
supported &= ~SUPPORTED_1000baseT_Full;
@@ -192,10 +194,16 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
if (hw->phy.media_type != e1000_media_type_copper)
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ lpa_t = mii_stat1000_to_ethtool_lpa_t(adapter->phy_regs.stat1000);
+ lp_advertising = lpa_t |
+ mii_lpa_to_ethtool_lpa_t(adapter->phy_regs.lpa);
+
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
return 0;
}
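
The new lp_advertising reporting is a pure register translation: the cached MII_LPA and MII_STAT1000 values are mapped to legacy ADVERTISED_* bits with the stock mii.h helpers, then widened to a link-mode bitmap. The conversion step on its own; foo_lp_advertising is a made-up wrapper:

#include <linux/ethtool.h>
#include <linux/mii.h>

/* lpa/stat1000 are the raw MII_LPA and MII_STAT1000 register values */
static u32 foo_lp_advertising(u16 lpa, u16 stat1000)
{
	/* 10/100 abilities from LPA, 1000BASE-T ones from STAT1000 */
	return mii_lpa_to_ethtool_lpa_t(lpa) |
	       mii_stat1000_to_ethtool_lpa_t(stat1000);
}

The u32 result is then handed to ethtool_convert_legacy_u32_to_link_mode() to fill cmd->link_modes.lp_advertising, exactly as the hunk above does.
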
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 04acd1a992fa..e1eb1de88bf9 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7418,9 +7418,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_pci_reg;
- /* AER (Advanced Error Reporting) hooks */
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
/* PCI config space info */
err = pci_save_state(pdev);
@@ -7708,7 +7705,6 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -7775,9 +7771,6 @@ static void e1000_remove(struct pci_dev *pdev)
free_netdev(netdev);
- /* AER disable */
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 060b263348ce..08c3d477dd6f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
+#include <linux/ethtool.h>
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
@@ -1011,6 +1012,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg &=
~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised &=
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
case e1000_fc_rx_pause:
/* Rx Flow control is enabled, and Tx Flow control is
@@ -1024,6 +1027,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |=
(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised |=
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
case e1000_fc_tx_pause:
/* Tx Flow control is enabled, and Rx Flow control is
@@ -1031,6 +1036,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM;
mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP;
+ phy->autoneg_advertised |= ADVERTISED_Asym_Pause;
+ phy->autoneg_advertised &= ~ADVERTISED_Pause;
break;
case e1000_fc_full:
/* Flow control (both Rx and Tx) is enabled by a software
@@ -1038,6 +1045,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |=
(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised |=
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
default:
e_dbg("Flow control param set incorrectly\n");
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index b473cb7d7c57..027d721feb18 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2127,8 +2127,6 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -2227,7 +2225,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -2281,8 +2278,6 @@ static void fm10k_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 3a1c28ca5bb4..60ce4d15d82a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -33,6 +33,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
@@ -1287,9 +1288,9 @@ void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_ptp_alloc_pins(struct i40e_pf *pf);
int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+int i40e_get_partition_bw_setting(struct i40e_pf *pf);
+int i40e_set_partition_bw_setting(struct i40e_pf *pf);
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 42439f725aa4..86fac8f959bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -47,9 +47,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
@@ -74,9 +74,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
@@ -115,11 +115,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ int ret_code;
int i;
/* We'll be allocating the buffer info memory first, then we can
@@ -182,10 +182,10 @@ unwind_alloc_arq_bufs:
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_dma_mem *bi;
+ int ret_code;
int i;
/* No mapped memory needed yet, just the buffer info structures */
@@ -266,9 +266,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+static int i40e_config_asq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -295,9 +295,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+static int i40e_config_arq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -334,9 +334,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_asq(struct i40e_hw *hw)
+static int i40e_init_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -393,9 +393,9 @@ init_adminq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_arq(struct i40e_hw *hw)
+static int i40e_init_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -445,9 +445,9 @@ init_adminq_exit:
*
* The main shutdown routine for the Admin Send Queue
**/
-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+static int i40e_shutdown_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&hw->aq.asq_mutex);
@@ -479,9 +479,9 @@ shutdown_asq_out:
*
* The main shutdown routine for the Admin Receive Queue
**/
-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+static int i40e_shutdown_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&hw->aq.arq_mutex);
@@ -582,12 +582,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
-i40e_status i40e_init_adminq(struct i40e_hw *hw)
+int i40e_init_adminq(struct i40e_hw *hw)
{
u16 cfg_ptr, oem_hi, oem_lo;
u16 eetrack_lo, eetrack_hi;
- i40e_status ret_code;
int retry = 0;
+ int ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -780,7 +780,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-static i40e_status
+static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -788,12 +788,12 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context)
{
- i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
+ int status = 0;
u32 val = 0;
if (hw->aq.asq.count == 0) {
@@ -984,7 +984,7 @@ asq_send_command_error:
* Acquires the lock and calls the main send command execution
* routine.
**/
-i40e_status
+int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -992,7 +992,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context)
{
- i40e_status status;
+ int status;
mutex_lock(&hw->aq.asq_mutex);
status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
@@ -1003,7 +1003,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
return status;
}
-i40e_status
+int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
@@ -1026,7 +1026,7 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
* routine. Returns the last Admin Queue status in aq_status
* to avoid race conditions in access to hw->aq.asq_last_status.
**/
-i40e_status
+int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -1035,7 +1035,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
bool is_atomic_context,
enum i40e_admin_queue_err *aq_status)
{
- i40e_status status;
+ int status;
mutex_lock(&hw->aq.asq_mutex);
status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
@@ -1048,7 +1048,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}
-i40e_status
+int
i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -1084,14 +1084,14 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *pending)
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
{
- i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index cb8689222c8b..a6c9a9e343d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -20,16 +20,16 @@ enum i40e_memory_type {
};
/* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
- u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem,
- u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem);
+int i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+int i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+int i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+int i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 10d7a982a5b9..639c5a1ca853 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -541,9 +541,9 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
{
struct i40e_pf *pf = ldev->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status err;
+ int err;
- err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
+ err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_RDMA,
0, msg, len, NULL);
if (err)
dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
@@ -674,7 +674,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
- i40e_status err;
+ int err;
/* TODO: for now do not allow setting VF's VSI setting */
if (is_vf)
@@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (err) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (err) {
dev_info(&pf->pdev->dev,
- "update VSI ctxt for PE failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "update VSI ctxt for PE failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
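
With the driver-private status strings gone, errnos are printed through the %pe extension, which renders an error pointer as its symbolic name (e.g. -ENOMEM) instead of a bare number. The idiom, detached from i40e:

#include <linux/device.h>
#include <linux/err.h>

static void foo_log_failure(struct device *dev, int err)
{
	/* ERR_PTR() boxes the negative errno for the %pe format */
	dev_info(dev, "command failed, err %pe\n", ERR_PTR(err));
}
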
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 8f764ff5c990..ed88e38d488b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -14,9 +14,9 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+int i40e_set_mac_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
+ int status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
@@ -125,154 +125,6 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
}
/**
- * i40e_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
-{
- switch (stat_err) {
- case 0:
- return "OK";
- case I40E_ERR_NVM:
- return "I40E_ERR_NVM";
- case I40E_ERR_NVM_CHECKSUM:
- return "I40E_ERR_NVM_CHECKSUM";
- case I40E_ERR_PHY:
- return "I40E_ERR_PHY";
- case I40E_ERR_CONFIG:
- return "I40E_ERR_CONFIG";
- case I40E_ERR_PARAM:
- return "I40E_ERR_PARAM";
- case I40E_ERR_MAC_TYPE:
- return "I40E_ERR_MAC_TYPE";
- case I40E_ERR_UNKNOWN_PHY:
- return "I40E_ERR_UNKNOWN_PHY";
- case I40E_ERR_LINK_SETUP:
- return "I40E_ERR_LINK_SETUP";
- case I40E_ERR_ADAPTER_STOPPED:
- return "I40E_ERR_ADAPTER_STOPPED";
- case I40E_ERR_INVALID_MAC_ADDR:
- return "I40E_ERR_INVALID_MAC_ADDR";
- case I40E_ERR_DEVICE_NOT_SUPPORTED:
- return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_PRIMARY_REQUESTS_PENDING:
- return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
- case I40E_ERR_INVALID_LINK_SETTINGS:
- return "I40E_ERR_INVALID_LINK_SETTINGS";
- case I40E_ERR_AUTONEG_NOT_COMPLETE:
- return "I40E_ERR_AUTONEG_NOT_COMPLETE";
- case I40E_ERR_RESET_FAILED:
- return "I40E_ERR_RESET_FAILED";
- case I40E_ERR_SWFW_SYNC:
- return "I40E_ERR_SWFW_SYNC";
- case I40E_ERR_NO_AVAILABLE_VSI:
- return "I40E_ERR_NO_AVAILABLE_VSI";
- case I40E_ERR_NO_MEMORY:
- return "I40E_ERR_NO_MEMORY";
- case I40E_ERR_BAD_PTR:
- return "I40E_ERR_BAD_PTR";
- case I40E_ERR_RING_FULL:
- return "I40E_ERR_RING_FULL";
- case I40E_ERR_INVALID_PD_ID:
- return "I40E_ERR_INVALID_PD_ID";
- case I40E_ERR_INVALID_QP_ID:
- return "I40E_ERR_INVALID_QP_ID";
- case I40E_ERR_INVALID_CQ_ID:
- return "I40E_ERR_INVALID_CQ_ID";
- case I40E_ERR_INVALID_CEQ_ID:
- return "I40E_ERR_INVALID_CEQ_ID";
- case I40E_ERR_INVALID_AEQ_ID:
- return "I40E_ERR_INVALID_AEQ_ID";
- case I40E_ERR_INVALID_SIZE:
- return "I40E_ERR_INVALID_SIZE";
- case I40E_ERR_INVALID_ARP_INDEX:
- return "I40E_ERR_INVALID_ARP_INDEX";
- case I40E_ERR_INVALID_FPM_FUNC_ID:
- return "I40E_ERR_INVALID_FPM_FUNC_ID";
- case I40E_ERR_QP_INVALID_MSG_SIZE:
- return "I40E_ERR_QP_INVALID_MSG_SIZE";
- case I40E_ERR_QP_TOOMANY_WRS_POSTED:
- return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
- case I40E_ERR_INVALID_FRAG_COUNT:
- return "I40E_ERR_INVALID_FRAG_COUNT";
- case I40E_ERR_QUEUE_EMPTY:
- return "I40E_ERR_QUEUE_EMPTY";
- case I40E_ERR_INVALID_ALIGNMENT:
- return "I40E_ERR_INVALID_ALIGNMENT";
- case I40E_ERR_FLUSHED_QUEUE:
- return "I40E_ERR_FLUSHED_QUEUE";
- case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
- return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
- case I40E_ERR_INVALID_IMM_DATA_SIZE:
- return "I40E_ERR_INVALID_IMM_DATA_SIZE";
- case I40E_ERR_TIMEOUT:
- return "I40E_ERR_TIMEOUT";
- case I40E_ERR_OPCODE_MISMATCH:
- return "I40E_ERR_OPCODE_MISMATCH";
- case I40E_ERR_CQP_COMPL_ERROR:
- return "I40E_ERR_CQP_COMPL_ERROR";
- case I40E_ERR_INVALID_VF_ID:
- return "I40E_ERR_INVALID_VF_ID";
- case I40E_ERR_INVALID_HMCFN_ID:
- return "I40E_ERR_INVALID_HMCFN_ID";
- case I40E_ERR_BACKING_PAGE_ERROR:
- return "I40E_ERR_BACKING_PAGE_ERROR";
- case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
- return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
- case I40E_ERR_INVALID_PBLE_INDEX:
- return "I40E_ERR_INVALID_PBLE_INDEX";
- case I40E_ERR_INVALID_SD_INDEX:
- return "I40E_ERR_INVALID_SD_INDEX";
- case I40E_ERR_INVALID_PAGE_DESC_INDEX:
- return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
- case I40E_ERR_INVALID_SD_TYPE:
- return "I40E_ERR_INVALID_SD_TYPE";
- case I40E_ERR_MEMCPY_FAILED:
- return "I40E_ERR_MEMCPY_FAILED";
- case I40E_ERR_INVALID_HMC_OBJ_INDEX:
- return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
- case I40E_ERR_INVALID_HMC_OBJ_COUNT:
- return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
- case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
- return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
- case I40E_ERR_SRQ_ENABLED:
- return "I40E_ERR_SRQ_ENABLED";
- case I40E_ERR_ADMIN_QUEUE_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_ERROR";
- case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
- return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
- case I40E_ERR_BUF_TOO_SHORT:
- return "I40E_ERR_BUF_TOO_SHORT";
- case I40E_ERR_ADMIN_QUEUE_FULL:
- return "I40E_ERR_ADMIN_QUEUE_FULL";
- case I40E_ERR_ADMIN_QUEUE_NO_WORK:
- return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
- case I40E_ERR_BAD_IWARP_CQE:
- return "I40E_ERR_BAD_IWARP_CQE";
- case I40E_ERR_NVM_BLANK_MODE:
- return "I40E_ERR_NVM_BLANK_MODE";
- case I40E_ERR_NOT_IMPLEMENTED:
- return "I40E_ERR_NOT_IMPLEMENTED";
- case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
- return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
- case I40E_ERR_DIAG_TEST_FAILED:
- return "I40E_ERR_DIAG_TEST_FAILED";
- case I40E_ERR_NOT_READY:
- return "I40E_ERR_NOT_READY";
- case I40E_NOT_SUPPORTED:
- return "I40E_NOT_SUPPORTED";
- case I40E_ERR_FIRMWARE_API_VERSION:
- return "I40E_ERR_FIRMWARE_API_VERSION";
- case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
- return hw->err_str;
-}
-
-/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -355,13 +207,13 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
+int i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
@@ -384,15 +236,15 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
*
* Internal function to get or set RSS look up table
**/
-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
- u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size,
- bool set)
+static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_lut *cmd_resp =
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ int status;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -437,8 +289,8 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
*
* get the RSS lookup table, PF or VSI type
**/
-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
false);
@@ -454,8 +306,8 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* set the RSS lookup table, PF or VSI type
**/
-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
@@ -469,16 +321,16 @@ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
**/
-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key,
- bool set)
+static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_key *cmd_resp =
(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ int status;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -509,9 +361,9 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
* @key: pointer to key info struct
*
**/
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}
@@ -524,9 +376,9 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
*
* set the RSS key per VSI
**/
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
@@ -796,10 +648,10 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
* hw_addr, back, device_id, vendor_id, subsystem_device_id,
* subsystem_vendor_id, and revision_id
**/
-i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+int i40e_init_shared_code(struct i40e_hw *hw)
{
- i40e_status status = 0;
u32 port, ari, func_rid;
+ int status = 0;
i40e_set_mac_type(hw);
@@ -836,15 +688,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
* @addrs: the requestor's mac addr store
* @cmd_details: pointer to command details structure or NULL
**/
-static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
- u16 *flags,
- struct i40e_aqc_mac_address_read_data *addrs,
- struct i40e_asq_cmd_details *cmd_details)
+static int
+i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_mac_address_read *cmd_data =
(struct i40e_aqc_mac_address_read *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
@@ -863,14 +716,14 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
* @mac_addr: address to write
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
- u16 flags, u8 *mac_addr,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_mac_address_write *cmd_data =
(struct i40e_aqc_mac_address_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
@@ -893,11 +746,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
*
* Reads the adapter's MAC address from register
**/
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
u16 flags = 0;
+ int status;
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
@@ -914,11 +767,11 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
*
* Reads the adapter's Port MAC address
**/
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
u16 flags = 0;
+ int status;
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (status)
@@ -972,13 +825,13 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
*
* Reads the part number string from the EEPROM.
**/
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
{
- i40e_status status = 0;
u16 pba_word = 0;
u16 pba_size = 0;
u16 pba_ptr = 0;
+ int status = 0;
u16 i = 0;
status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
@@ -1087,8 +940,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
* @hw: pointer to the hardware structure
* @retry_limit: how many times to retry before failure
**/
-static i40e_status i40e_poll_globr(struct i40e_hw *hw,
- u32 retry_limit)
+static int i40e_poll_globr(struct i40e_hw *hw,
+ u32 retry_limit)
{
u32 cnt, reg = 0;
@@ -1114,7 +967,7 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw,
* Assuming someone else has triggered a global reset,
* assure the global reset is complete and then reset the PF
**/
-i40e_status i40e_pf_reset(struct i40e_hw *hw)
+int i40e_pf_reset(struct i40e_hw *hw)
{
u32 cnt = 0;
u32 cnt1 = 0;
@@ -1453,15 +1306,16 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
*
* Returns the various PHY abilities supported on the Port.
**/
-i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
- bool qualified_modules, bool report_init,
- struct i40e_aq_get_phy_abilities_resp *abilities,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- i40e_status status;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
+ struct i40e_aq_desc desc;
+ int status;
if (!abilities)
return I40E_ERR_PARAM;
@@ -1532,14 +1386,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
* of the PHY Config parameters. This status will be indicated by the
* command response.
**/
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
- struct i40e_aq_set_phy_config *config,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aq_set_phy_config *cmd =
(struct i40e_aq_set_phy_config *)&desc.params.raw;
- enum i40e_status_code status;
+ int status;
if (!config)
return I40E_ERR_PARAM;
@@ -1554,7 +1408,7 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
return status;
}
-static noinline_for_stack enum i40e_status_code
+static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp *abilities,
bool atomic_restart)
@@ -1612,11 +1466,11 @@ i40e_set_fc_status(struct i40e_hw *hw,
*
* Set the requested flow control mode using set_phy_config.
**/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_restart)
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status;
+ int status;
*aq_failures = 0x0;
@@ -1655,13 +1509,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*
* Tell the firmware that the driver is taking over from PXE
**/
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_clear_pxe *cmd =
(struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_pxe_mode);
@@ -1683,14 +1537,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
*
* Sets up the link and restarts the Auto-Negotiation over the link.
**/
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
- bool enable_link,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_link_restart_an *cmd =
(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_link_restart_an);
@@ -1715,17 +1569,17 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
*
* Returns the link status of the adapter.
**/
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
- bool enable_lse, struct i40e_link_status *link,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_link_status *resp =
(struct i40e_aqc_get_link_status *)&desc.params.raw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
- i40e_status status;
bool tx_pause, rx_pause;
u16 command_flags;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
@@ -1811,14 +1665,14 @@ aq_get_link_info_exit:
*
* Set link interrupt mask.
**/
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
- u16 mask,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_int_mask *cmd =
(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_int_mask);
@@ -1838,8 +1692,8 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
*
* Enable/disable loopback on a given port
*/
-i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_lb_mode *cmd =
@@ -1864,13 +1718,13 @@ i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
*
* Reset the external PHY.
**/
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
@@ -1905,9 +1759,9 @@ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
*
* Add a VSI context to the hardware.
**/
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -1915,7 +1769,7 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_vsi);
@@ -1949,15 +1803,15 @@ aq_add_vsi_exit:
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
- u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -1977,15 +1831,15 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
- u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2007,16 +1861,16 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
* @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details,
- bool rx_only_promisc)
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2047,14 +1901,15 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
* @set: set multicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2080,16 +1935,16 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- enum i40e_status_code status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2116,16 +1971,16 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- enum i40e_status_code status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2158,15 +2013,15 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable, u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2193,14 +2048,14 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
*
* Set or clear the broadcast promiscuous flag (filter) for a given VSI.
**/
-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
- u16 seid, bool set_filter,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2226,15 +2081,15 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
* @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2256,9 +2111,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
* @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -2266,7 +2121,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
@@ -2298,9 +2153,9 @@ aq_get_vsi_params_exit:
*
* Update a VSI context.
**/
-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -2308,7 +2163,7 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
@@ -2336,15 +2191,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
*
* Fill the buf with switch configuration returned from AdminQ command
**/
-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
- struct i40e_aqc_get_switch_config_resp *buf,
- u16 buf_size, u16 *start_seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *scfg =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_config);
@@ -2370,15 +2225,15 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
*
* Set switch configuration bits
**/
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags,
- u16 valid_flags, u8 mode,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_switch_config *scfg =
(struct i40e_aqc_set_switch_config *)&desc.params.raw;
- enum i40e_status_code status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_switch_config);
@@ -2407,16 +2262,16 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
*
* Get the firmware version from the admin queue commands
**/
-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
- u16 *fw_major_version, u16 *fw_minor_version,
- u32 *fw_build,
- u16 *api_major_version, u16 *api_minor_version,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_version *resp =
(struct i40e_aqc_get_version *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
@@ -2446,14 +2301,14 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
*
* Send the driver version to the firmware
**/
-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
- i40e_status status;
+ int status;
u16 len;
if (dv == NULL)
@@ -2488,9 +2343,9 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
- i40e_status status = 0;
+ int status = 0;
if (hw->phy.get_link_info) {
status = i40e_update_link_info(hw);
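With the return type now a plain int, a hedged caller sketch (pf is assumed driver context, not part of this hunk) reads like any other kernel call:

	bool link_up = false;
	int status = i40e_get_link_status(&pf->hw, &link_up);

	if (status)
		dev_warn(&pf->pdev->dev,
			 "link status query failed: %d\n", status);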
@@ -2509,10 +2364,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
* i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
-noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
+noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- i40e_status status = 0;
+ int status = 0;
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
@@ -2559,19 +2414,19 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
* This asks the FW to add a VEB between the uplink and downlink
* elements. If the uplink SEID is 0, this will be a floating VEB.
**/
-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
- u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
- bool enable_stats,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_veb *cmd =
(struct i40e_aqc_add_veb *)&desc.params.raw;
struct i40e_aqc_add_veb_completion *resp =
(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
- i40e_status status;
u16 veb_flags = 0;
+ int status;
/* SEIDs need to either both be set or both be 0 for floating VEB */
if (!!uplink_seid != !!downlink_seid)
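The floating-VEB rule in the guard above is easy to illustrate; a minimal sketch, assuming hw comes from the surrounding driver context:

	/* Sketch: both SEIDs zero requests a floating VEB. */
	u16 veb_seid = 0;
	u8 enabled_tc = 0x1;	/* TC0 only, placeholder value */
	int status = i40e_aq_add_veb(hw, 0 /* uplink */, 0 /* downlink */,
				     enabled_tc, false /* default_port */,
				     &veb_seid, true /* enable_stats */, NULL);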
@@ -2617,17 +2472,17 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
* This retrieves the parameters for a particular VEB, specified by
* uplink_seid, and returns them to the caller.
**/
-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
- u16 veb_seid, u16 *switch_id,
- bool *floating, u16 *statistic_index,
- u16 *vebs_used, u16 *vebs_free,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
(struct i40e_aqc_get_veb_parameters_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
if (veb_seid == 0)
return I40E_ERR_PARAM;
@@ -2711,7 +2566,7 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
*
* Add MAC/VLAN addresses to the HW filtering
**/
-i40e_status
+int
i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
@@ -2743,7 +2598,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 * It also calls the _v2 versions of the asq_send_command functions so
 * that the aq_status is returned on the stack.

**/
-i40e_status
+int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
@@ -2771,15 +2626,16 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
*
* Remove MAC/VLAN addresses from the HW filtering
**/
-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_macvlan *cmd =
(struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
u16 buf_size;
+ int status;
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
@@ -2818,7 +2674,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
 * It also calls the _v2 versions of the asq_send_command functions so
 * that the aq_status is returned on the stack.
**/
-i40e_status
+int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
@@ -2866,19 +2722,19 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
**/
-static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
+static int i40e_mirrorrule_op(struct i40e_hw *hw,
+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+ u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_delete_mirror_rule *cmd =
(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
struct i40e_aqc_add_delete_mirror_rule_completion *resp =
(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- i40e_status status;
u16 buf_size;
+ int status;
buf_size = count * sizeof(*mr_list);
@@ -2926,10 +2782,11 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
{
if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
@@ -2957,10 +2814,11 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free)
{
 /* Rule ID has to be valid, except for rule_type INGRESS VLAN mirroring */
if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
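As the guard above notes, only INGRESS VLAN mirroring may omit a pre-existing rule ID; a hedged sketch of adding an all-ingress rule, where hw, sw_seid, src_seid and dest_vsi are placeholders from driver context:

	__le16 mr_list[1] = { cpu_to_le16(src_seid) };
	u16 rule_id, rules_used, rules_free;
	/* Sketch: mirror all ingress traffic of src_seid to dest_vsi. */
	int status = i40e_aq_add_mirrorrule(hw, sw_seid,
					    I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
					    dest_vsi, 1, mr_list, NULL,
					    &rule_id, &rules_used, &rules_free);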
@@ -2989,14 +2847,14 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
*
* send msg to vf
**/
-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_pf_vf_message *cmd =
(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
cmd->id = cpu_to_le32(vfid);
@@ -3024,14 +2882,14 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
*
* Read the register using the admin queue commands
**/
-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd_resp =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (reg_val == NULL)
return I40E_ERR_PARAM;
@@ -3059,14 +2917,14 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
*
* Write to a register using the admin queue commands
**/
-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
- u32 reg_addr, u64 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
@@ -3090,16 +2948,16 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
*
* requests common resource using the admin queue commands
**/
-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- enum i40e_aq_resource_access_type access,
- u8 sdp_number, u64 *timeout,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_request_resource *cmd_resp =
(struct i40e_aqc_request_resource *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
@@ -3129,15 +2987,15 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
*
* release common resource using the admin queue commands
**/
-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- u8 sdp_number,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_request_resource *cmd =
(struct i40e_aqc_request_resource *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
@@ -3161,15 +3019,15 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
*
* Read the NVM using the admin queue commands
**/
-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
 /* The highest byte of the offset must be zeroed. */
if (offset & 0xFF000000) {
@@ -3207,14 +3065,14 @@ i40e_aq_read_nvm_exit:
*
* Erase the NVM sector using the admin queue commands
**/
-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, bool last_command,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
 /* The highest byte of the offset must be zeroed. */
if (offset & 0xFF000000) {
@@ -3255,8 +3113,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
u16 id, ocp_cfg_word0;
- i40e_status status;
u8 major_rev;
+ int status;
u32 i = 0;
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
@@ -3497,14 +3355,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
*
* Get the device capabilities descriptions from the firmware
**/
-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
- void *buff, u16 buff_size, u16 *data_size,
- enum i40e_admin_queue_opc list_type_opc,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_list_capabilites *cmd;
struct i40e_aq_desc desc;
- i40e_status status = 0;
+ int status = 0;
cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
@@ -3546,15 +3404,15 @@ exit:
*
* Update the NVM using the admin queue commands
**/
-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command, u8 preservation_flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
 /* The highest byte of the offset must be zeroed. */
if (offset & 0xFF000000) {
@@ -3599,13 +3457,13 @@ i40e_aq_update_nvm_exit:
*
* Rearrange NVM structure, available only for transition FW
**/
-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_nvm_update *cmd;
- i40e_status status;
struct i40e_aq_desc desc;
+ int status;
cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
@@ -3639,17 +3497,17 @@ i40e_aq_rearrange_nvm_exit:
*
* Requests the complete LLDP MIB (entire packet).
**/
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
- u8 mib_type, void *buff, u16 buff_size,
- u16 *local_len, u16 *remote_len,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_get_mib *cmd =
(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
struct i40e_aqc_lldp_get_mib *resp =
(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -3689,14 +3547,14 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
*
* Set the LLDP MIB.
**/
-enum i40e_status_code
+int
i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_lldp_set_local_mib *cmd;
- enum i40e_status_code status;
struct i40e_aq_desc desc;
+ int status;
cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
if (buff_size == 0 || !buff)
@@ -3728,14 +3586,14 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes
**/
-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
- bool enable_update,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_update_mib *cmd =
(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
@@ -3757,14 +3615,14 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
 * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
 * only the factory settings are returned in the AQ response.
**/
-enum i40e_status_code
+int
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_restore *cmd =
(struct i40e_aqc_lldp_restore *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3794,14 +3652,14 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
*
* Stop or Shutdown the embedded LLDP Agent
**/
-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
- bool persist,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_stop *cmd =
(struct i40e_aqc_lldp_stop *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
@@ -3829,13 +3687,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
*
* Start the embedded LLDP Agent on all ports.
**/
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_start *cmd =
(struct i40e_aqc_lldp_start *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
@@ -3861,14 +3719,14 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
* @dcb_enable: True if DCB configuration needs to be applied
*
**/
-enum i40e_status_code
+int
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_dcb_parameters *cmd =
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -3894,12 +3752,12 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
*
* Get CEE DCBX mode operational configuration from firmware
**/
-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
- void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -3925,17 +3783,17 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
 * and this function will call cpu_to_le16 to convert from host byte order to
 * little-endian order.
**/
-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 protocol_index,
- u8 *filter_index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_udp_tunnel *cmd =
(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
struct i40e_aqc_del_udp_tunnel_completion *resp =
(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
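Per the byte-order note above, callers pass udp_port in host order; a sketch of the conversion performed inside the function (field name assumed from the cmd struct):

	/* The AQ buffer is little-endian, so the single host-order
	 * parameter is converted exactly once, here.
	 */
	cmd->udp_port = cpu_to_le16(udp_port);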
@@ -3956,13 +3814,13 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
* @index: filter index
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_remove_udp_tunnel *cmd =
(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
@@ -3981,13 +3839,13 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
*
* This deletes a switch element from the switch.
**/
-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *cmd =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
- i40e_status status;
+ int status;
if (seid == 0)
return I40E_ERR_PARAM;
@@ -4011,11 +3869,11 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
* recomputed and modified. The retval field in the descriptor
* will be set to 0 when RPB is modified.
**/
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
@@ -4035,15 +3893,15 @@ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
*
* Generic command handler for Tx scheduler AQ commands
**/
-static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
void *buff, u16 buff_size,
- enum i40e_admin_queue_opc opcode,
+ enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_tx_sched_ind *cmd =
(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
- i40e_status status;
+ int status;
bool cmd_param_flag = false;
switch (opcode) {
@@ -4093,14 +3951,14 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
* @max_credit: Max BW limit credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_configure_vsi_bw_limit *cmd =
(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_vsi_bw_limit);
@@ -4121,10 +3979,10 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
* @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_configure_vsi_tc_bw,
@@ -4139,11 +3997,12 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
* @opcode: Tx scheduler AQ command opcode
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
sizeof(*ets_data), opcode, cmd_details);
@@ -4156,7 +4015,8 @@ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
* @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+int
+i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details)
@@ -4173,10 +4033,11 @@ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold VSI BW configuration
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_vsi_bw_config,
@@ -4190,10 +4051,11 @@ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold VSI BW configuration per TC
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_vsi_ets_sla_config,
@@ -4207,10 +4069,11 @@ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold switching component's per TC BW config
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_switching_comp_ets_config,
@@ -4224,10 +4087,11 @@ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold current ETS configuration for the Physical Port
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_port_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_port_ets_config,
@@ -4241,10 +4105,11 @@ i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold switching component's BW configuration
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_switching_comp_bw_config,
@@ -4263,8 +4128,9 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
* Returns 0 if the values passed are valid and within
* range else returns an error.
**/
-static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings)
+static int
+i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
{
u32 fcoe_cntx_size, fcoe_filt_size;
u32 fcoe_fmax;
@@ -4350,11 +4216,11 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
* for a single PF. It is expected that these settings are programmed
* at the driver initialization time.
**/
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings)
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
{
- i40e_status ret = 0;
u32 hash_lut_size = 0;
+ int ret = 0;
u32 val;
if (!settings)
@@ -4424,11 +4290,11 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
* In return it will update the total number of perfect filter count in
* the stats member.
**/
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_control_packet_filter *cmd =
@@ -4437,7 +4303,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
(struct i40e_aqc_add_remove_control_packet_filter_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
if (vsi_seid == 0)
return I40E_ERR_PARAM;
@@ -4483,7 +4349,7 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
- i40e_status status;
+ int status;
status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
seid, 0, true, NULL,
@@ -4505,14 +4371,14 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
* is not passed then only register at 'reg_addr0' is read.
*
**/
-static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
- u32 reg_addr0, u32 *reg_val0,
- u32 reg_addr1, u32 *reg_val1)
+static int i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
{
struct i40e_aq_desc desc;
struct i40e_aqc_alternate_write *cmd_resp =
(struct i40e_aqc_alternate_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!reg_val0)
return I40E_ERR_PARAM;
@@ -4541,12 +4407,12 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
*
* Suspend port's Tx traffic
**/
-i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_tx_sched_ind *cmd;
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
@@ -4563,11 +4429,11 @@ i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
*
* Resume port's Tx traffic
**/
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
@@ -4637,18 +4503,18 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* Dump internal FW/HW data for debug purposes.
*
**/
-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
- u8 table_id, u32 start_index, u16 buff_size,
- void *buff, u16 *ret_buff_size,
- u8 *ret_next_table, u32 *ret_next_index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_dump_internals *cmd =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
struct i40e_aqc_debug_dump_internals *resp =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -4689,12 +4555,12 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
*
* Read bw from the alternate ram for the given pf
**/
-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
- u32 *max_bw, u32 *min_bw,
- bool *min_valid, bool *max_valid)
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
{
- i40e_status status;
u32 max_bw_addr, min_bw_addr;
+ int status;
/* Calculate the address of the min/max bw registers */
max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
@@ -4729,13 +4595,14 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
*
* Configure partitions guaranteed/max bw
**/
-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
- struct i40e_aqc_configure_partition_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status;
- struct i40e_aq_desc desc;
u16 bwd_size = sizeof(*bw_data);
+ struct i40e_aq_desc desc;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
@@ -4764,11 +4631,11 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
@@ -4809,11 +4676,11 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
*
* Writes specified PHY register value
**/
-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
@@ -4850,13 +4717,13 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
+ u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
- u8 port_num = hw->func_caps.mdio_port_num;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4924,13 +4791,13 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
- u32 command = 0;
- u16 retry = 1000;
u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
+ u16 retry = 1000;
+ u32 command = 0;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4991,10 +4858,10 @@ phy_write_end:
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status;
+ int status;
switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
@@ -5030,10 +4897,10 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status;
+ int status;
switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
@@ -5082,17 +4949,17 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
*
* Blinks PHY link LED
**/
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
{
- i40e_status status = 0;
- u32 i;
- u16 led_ctl;
- u16 gpio_led_port;
- u16 led_reg;
u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u16 gpio_led_port;
u8 phy_addr = 0;
+ int status = 0;
+ u16 led_ctl;
u8 port_num;
+ u16 led_reg;
+ u32 i;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
@@ -5154,12 +5021,12 @@ phy_blinking_end:
* @led_addr: LED register address
* @reg_val: read register value
**/
-static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
- u32 *reg_val)
+static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
{
- enum i40e_status_code status;
u8 phy_addr = 0;
u8 port_num;
+ int status;
u32 i;
*reg_val = 0;
@@ -5188,12 +5055,12 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
* @led_addr: LED register address
* @reg_val: register value to write
**/
-static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
- u32 reg_val)
+static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
{
- enum i40e_status_code status;
u8 phy_addr = 0;
u8 port_num;
+ int status;
u32 i;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
@@ -5223,17 +5090,17 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
* @val: original value of register to use
*
**/
-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
- u16 *val)
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
{
- i40e_status status = 0;
u16 gpio_led_port;
u8 phy_addr = 0;
- u16 reg_val;
+ u32 reg_val_aq;
+ int status = 0;
u16 temp_addr;
+ u16 reg_val;
u8 port_num;
u32 i;
- u32 reg_val_aq;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status =
@@ -5278,12 +5145,12 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* Set led's on or off when controlled by the PHY
*
**/
-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
- u16 led_addr, u32 mode)
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
{
- i40e_status status = 0;
u32 led_ctl = 0;
u32 led_reg = 0;
+ int status = 0;
status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
@@ -5327,14 +5194,14 @@ restore_config:
* Use the firmware to read the Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!reg_val)
return I40E_ERR_PARAM;
@@ -5358,8 +5225,8 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
**/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
- i40e_status status = 0;
bool use_register;
+ int status = 0;
int retry = 5;
u32 val = 0;
@@ -5393,14 +5260,14 @@ do_retry:
* Use the firmware to write to an Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
@@ -5420,8 +5287,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
**/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
- i40e_status status = 0;
bool use_register;
+ int status = 0;
int retry = 5;
use_register = (((hw->aq.api_maj_ver == 1) &&
@@ -5483,16 +5350,16 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
 * NOTE: In common cases MDIO I/F number should not be changed, that's why you
* may use simple wrapper i40e_aq_set_phy_register.
**/
-enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr, bool page_change,
- bool set_mdio, u8 mdio_num,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
@@ -5528,16 +5395,16 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
 * NOTE: In common cases MDIO I/F number should not be changed, that's why you
* may use simple wrapper i40e_aq_get_phy_register.
**/
-enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr, bool page_change,
- bool set_mdio, u8 mdio_num,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
@@ -5568,18 +5435,17 @@ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
* @error_info: returns error information
* @cmd_details: pointer to command details structure or NULL
**/
-enum
-i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
struct i40e_aqc_write_ddp_resp *resp;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
@@ -5612,15 +5478,14 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
-enum
-i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
@@ -5719,14 +5584,13 @@ i40e_find_section_in_profile(u32 section_type,
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
**/
-static enum
-i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
- struct i40e_profile_aq_section *aq)
+static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
{
- i40e_status status;
struct i40e_aq_desc desc;
u8 *msg = NULL;
u16 msglen;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
desc.flags |= cpu_to_le16(aq->flags);
@@ -5766,14 +5630,14 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
*
* Validates supported devices and profile's sections.
*/
-static enum i40e_status_code
+static int
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id, bool rollback)
{
struct i40e_profile_section_header *sec = NULL;
- i40e_status status = 0;
struct i40e_section_table *sec_tbl;
u32 vendor_dev_id;
+ int status = 0;
u32 dev_cnt;
u32 sec_off;
u32 i;
@@ -5831,16 +5695,16 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Handles the download of a complete package.
*/
-enum i40e_status_code
+int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
- i40e_status status = 0;
- struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_aq_section *ddp_aq;
- u32 section_size = 0;
+ struct i40e_section_table *sec_tbl;
u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ int status = 0;
u32 sec_off;
u32 i;
@@ -5894,15 +5758,15 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Rolls back previously loaded package.
*/
-enum i40e_status_code
+int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
struct i40e_profile_section_header *sec = NULL;
- i40e_status status = 0;
struct i40e_section_table *sec_tbl;
u32 offset = 0, info = 0;
u32 section_size = 0;
+ int status = 0;
u32 sec_off;
int i;
@@ -5946,15 +5810,15 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Register a profile to the list of loaded profiles.
*/
-enum i40e_status_code
+int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
- i40e_status status = 0;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_info *pinfo;
u32 offset = 0, info = 0;
+ int status = 0;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -5988,7 +5852,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
* of the function.
*
**/
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
@@ -5996,8 +5860,8 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- enum i40e_status_code status;
u16 buff_len;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
@@ -6025,7 +5889,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
* function.
*
**/
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
@@ -6033,8 +5897,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
u16 buff_len;
+ int status;
int i;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -6082,7 +5946,7 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
* of the function.
*
**/
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
@@ -6090,8 +5954,8 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- enum i40e_status_code status;
u16 buff_len;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
@@ -6119,7 +5983,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
* function.
*
**/
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
@@ -6127,8 +5991,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
u16 buff_len;
+ int status;
int i;
i40e_fill_default_direct_cmd_desc(&desc,
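The i40e_common.c hunks above all apply one mechanical pattern; a hedged before/after sketch, with i40e_aq_foo as a placeholder name:

	/* Before: driver-private status type. */
	i40e_status i40e_aq_foo(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details);

	/* After: plain int, so I40E_ERR_* codes and standard errnos share
	 * one calling convention.
	 */
	int i40e_aq_foo(struct i40e_hw *hw,
			struct i40e_asq_cmd_details *cmd_details);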
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 673f341f4c0c..90638b67f8dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -12,7 +12,7 @@
*
* Get the DCBX status from the Firmware
**/
-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
{
u32 reg;
@@ -497,15 +497,15 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
*
* Parse DCB configuration from the LLDPDU
**/
-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
{
- i40e_status ret = 0;
struct i40e_lldp_org_tlv *tlv;
- u16 type;
- u16 length;
u16 typelength;
u16 offset = 0;
+ int ret = 0;
+ u16 length;
+ u16 type;
if (!lldpmib || !dcbcfg)
return I40E_ERR_PARAM;
@@ -551,12 +551,12 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
*
* Query DCB configuration from the Firmware
**/
-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
{
- i40e_status ret = 0;
struct i40e_virt_mem mem;
+ int ret = 0;
u8 *lldpmib;
/* Allocate the LLDPDU */
@@ -767,9 +767,9 @@ static void i40e_cee_to_dcb_config(
*
* Get IEEE mode DCB configuration from the Firmware
**/
-static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
{
- i40e_status ret = 0;
+ int ret = 0;
/* IEEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
@@ -797,11 +797,11 @@ out:
*
* Get DCB configuration from the Firmware
**/
-i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+int i40e_get_dcb_config(struct i40e_hw *hw)
{
- i40e_status ret = 0;
- struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ int ret = 0;
/* If Firmware version < v4.33 on X710/XL710, IEEE only */
if ((hw->mac.type == I40E_MAC_XL710) &&
@@ -867,11 +867,11 @@ out:
*
* Update DCB configuration from the Firmware
**/
-i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
- i40e_status ret = 0;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
+ int ret = 0;
if (!hw->func_caps.dcb)
return I40E_NOT_SUPPORTED;
@@ -940,13 +940,13 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
* Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
* Status of agent is reported via @lldp_status parameter.
**/
-enum i40e_status_code
+int
i40e_get_fw_lldp_status(struct i40e_hw *hw,
enum i40e_get_fw_lldp_status_resp *lldp_status)
{
struct i40e_virt_mem mem;
- i40e_status ret;
u8 *lldpmib;
+ int ret;
if (!lldp_status)
return I40E_ERR_PARAM;
@@ -1238,13 +1238,13 @@ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
*
* Set DCB configuration to the Firmware
**/
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+int i40e_set_dcb_config(struct i40e_hw *hw)
{
struct i40e_dcbx_config *dcbcfg;
struct i40e_virt_mem mem;
u8 mib_type, *lldpmib;
- i40e_status ret;
u16 miblen;
+ int ret;
/* update the hw local config */
dcbcfg = &hw->local_dcbx_config;
@@ -1274,8 +1274,8 @@ i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
*
* send DCB configuration to FW
**/
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
{
u16 length, offset = 0, tlvid, typelength;
struct i40e_lldp_org_tlv *tlv;
@@ -1888,13 +1888,13 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
*
* Reads the LLDP configuration data from NVM using passed addresses
**/
-static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg,
- u8 module, u32 word_offset)
+static int _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
{
u32 address, offset = (2 * word_offset);
- i40e_status ret;
__le16 raw_mem;
+ int ret;
u16 mem;
ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -1950,10 +1950,10 @@ err_lldp_cfg:
*
* Reads the LLDP configuration data from NVM
**/
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg)
+int i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
{
- i40e_status ret = 0;
+ int ret = 0;
u32 mem;
if (!lldp_cfg)
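The i40e_dcb.c functions converted above all walk LLDP TLVs, whose 16-bit header packs a 7-bit type and a 9-bit length (IEEE 802.1AB). A standalone C sketch of that parse, with illustrative shift/mask names rather than the driver's I40E_LLDP_* macros; on the wire the word is big-endian, and the driver byte-swaps it before splitting:

#include <stdint.h>
#include <stdio.h>

#define TLV_TYPE_SHIFT 9        /* high 7 bits: TLV type */
#define TLV_LEN_MASK   0x01FF   /* low 9 bits: TLV value length */

int main(void)
{
	/* already byte-swapped to host order here */
	uint16_t typelength = (8u << TLV_TYPE_SHIFT) | 24u; /* type 8, len 24 */
	uint16_t type = typelength >> TLV_TYPE_SHIFT;
	uint16_t length = typelength & TLV_LEN_MASK;

	printf("TLV type %u, length %u\n", type, length);
	return 0;
}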
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2370ceecb061..6b60dc9b7736 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -264,20 +264,20 @@ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
struct i40e_rx_pb_config *old_pb_cfg,
struct i40e_rx_pb_config *new_pb_cfg);
-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
- u16 *status);
-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg);
-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg);
-i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw,
- bool enable_mib_change);
-enum i40e_status_code
+int i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_get_dcb_config(struct i40e_hw *hw);
+int i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+int
i40e_get_fw_lldp_status(struct i40e_hw *hw,
enum i40e_get_fw_lldp_status_resp *lldp_status);
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg);
+int i40e_set_dcb_config(struct i40e_hw *hw);
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
#endif /* _I40E_DCB_H_ */
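With the prototypes above switched to int, the DCB helpers can return either a legacy driver-private status code or a standard negative errno through one type. A minimal userspace sketch of that calling convention, using stand-in names (HW_ERR_PARAM below is illustrative, not the driver's I40E_ERR_PARAM definition):

#include <errno.h>
#include <stdio.h>

#define HW_ERR_PARAM (-5)   /* illustrative driver-private status code */

static int get_dcbx_status(const unsigned int *reg, unsigned short *status)
{
	if (!status)
		return HW_ERR_PARAM;   /* legacy device status still fits in int */
	if (!reg)
		return -EINVAL;        /* and so does a standard negative errno */
	*status = (unsigned short)(*reg & 0x7);
	return 0;
}

int main(void)
{
	unsigned int reg = 0x3;
	unsigned short status;

	printf("ret=%d status=%u\n", get_dcbx_status(&reg, &status), status);
	return 0;
}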
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index e32c61909b31..195421d863ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB ETS configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB ETS configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB PFC configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB PFC configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
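The logging change running through i40e_dcb_nl.c replaces i40e_stat_str() with the kernel's %pe printk specifier, which decodes a pointer built by ERR_PTR() into a symbolic errno name (e.g. -EINVAL). A rough userspace approximation of the idea; err_ptr()/err_name() here are hypothetical stand-ins for the real helpers in <linux/err.h> and for %pe's decoding:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void *err_ptr(intptr_t err) { return (void *)err; }
static intptr_t ptr_err(const void *ptr) { return (intptr_t)ptr; }

/* stand-in for printk's %pe: turn the boxed errno back into readable text */
static const char *err_name(const void *ptr)
{
	return strerror((int)-ptr_err(ptr));
}

int main(void)
{
	int ret = -EINVAL; /* plain int error, as in the converted driver code */

	if (ret)
		printf("Set phy config failed, err %s\n", err_name(err_ptr(ret)));
	return 0;
}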
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index e1069ae658ad..7e8183762fd9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -36,7 +36,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
{
struct i40e_ddp_profile_list *profile_list;
u8 buff[I40E_PROFILE_LIST_SIZE];
- i40e_status status;
+ int status;
int i;
status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
@@ -91,7 +91,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
{
struct i40e_ddp_profile_list *profile_list;
u8 buff[I40E_PROFILE_LIST_SIZE];
- i40e_status status;
+ int status;
int i;
status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
@@ -117,14 +117,14 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
*
* Register a profile to the list of loaded profiles.
*/
-static enum i40e_status_code
+static int
i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
struct i40e_profile_section_header *sec;
struct i40e_profile_info *pinfo;
- i40e_status status;
u32 offset = 0, info = 0;
+ int status;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -157,14 +157,14 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Removes DDP profile from the NIC.
**/
-static enum i40e_status_code
+static int
i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
struct i40e_profile_section_header *sec;
struct i40e_profile_info *pinfo;
- i40e_status status;
u32 offset = 0, info = 0;
+ int status;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -270,12 +270,12 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
struct i40e_profile_segment *profile_hdr;
struct i40e_profile_info pinfo;
struct i40e_package_header *pkg_hdr;
- i40e_status status;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 track_id;
int istatus;
+ int status;
pkg_hdr = (struct i40e_package_header *)data;
if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
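The DDP hunks above convert helpers that scan the list of profiles already loaded on the NIC. A self-contained sketch of that existence check, with stand-in types in place of the driver's profile-list structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct profile_info { uint32_t track_id; };
struct profile_list { uint32_t count; struct profile_info info[8]; };

static bool profile_exists(const struct profile_list *list, uint32_t track_id)
{
	for (uint32_t i = 0; i < list->count; i++)
		if (list->info[i].track_id == track_id)
			return true;
	return false;
}

int main(void)
{
	struct profile_list list = { .count = 2, .info = { { 1 }, { 7 } } };

	printf("track 7 loaded: %s\n", profile_exists(&list, 7) ? "yes" : "no");
	return 0;
}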
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index c9dcd6d92c83..9954493cd448 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -918,9 +918,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
- i40e_status ret;
- u16 vid;
unsigned int v;
+ int ret;
+ u16 vid;
cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) {
@@ -1284,7 +1284,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
struct i40e_aq_desc *desc;
- i40e_status ret;
+ int ret;
desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
if (!desc)
@@ -1330,9 +1330,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
struct i40e_aq_desc *desc;
- i40e_status ret;
u16 buffer_len;
u8 *buff;
+ int ret;
desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
if (!desc)
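The debugfs command handler above matches a command prefix with strncmp() and then pulls arguments out of the remainder with sscanf(). A standalone version of the "add pvid" parse, mirroring its cnt != 2 check:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cmd_buf = "add pvid 5 100";
	int vsi_seid;
	unsigned int v;

	if (strncmp(cmd_buf, "add pvid", 8) == 0) {
		if (sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v) != 2) {
			fprintf(stderr, "usage: add pvid <seid> <vid>\n");
			return 1;
		}
		printf("vsi_seid=%d vid=%u\n", vsi_seid, v);
	}
	return 0;
}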
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index ef4d3762bf37..5b3519c6e362 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -10,8 +10,8 @@
* @reg: reg to be tested
* @mask: bits to be touched
**/
-static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
- u32 reg, u32 mask)
+static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
{
static const u32 patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
@@ -74,9 +74,9 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = {
*
* Perform registers diagnostic test
**/
-i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+int i40e_diag_reg_test(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg, mask;
u32 i, j;
@@ -114,9 +114,9 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
*
* Perform EEPROM diagnostic test
**/
-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+int i40e_diag_eeprom_test(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
u16 reg_val;
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index c3340f320a18..e641035c7297 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -22,7 +22,7 @@ struct i40e_diag_reg_test_info {
extern struct i40e_diag_reg_test_info i40e_reg_list[];
-i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+int i40e_diag_reg_test(struct i40e_hw *hw);
+int i40e_diag_eeprom_test(struct i40e_hw *hw);
#endif /* _I40E_DIAG_H_ */
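i40e_diag_reg_pattern_test(), whose signature changes above, writes a fixed set of bit patterns through a mask and verifies they read back intact. A userspace sketch of the same loop against a mock register variable (real MMIO access and the driver's error codes differ):

#include <stdint.h>
#include <stdio.h>

static uint32_t mock_reg;   /* stands in for an MMIO register */

static int reg_pattern_test(uint32_t *reg, uint32_t mask)
{
	static const uint32_t patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	uint32_t orig = *reg;

	for (size_t i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		uint32_t val = patterns[i] & mask;

		*reg = val;
		if ((*reg & mask) != val)
			return -1;   /* a masked bit failed to latch */
	}
	*reg = orig;   /* restore the original register contents */
	return 0;
}

int main(void)
{
	printf("pattern test %s\n",
	       reg_pattern_test(&mock_reg, 0x00FF00FF) ? "failed" : "passed");
	return 0;
}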
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 887a735fe2a7..4934ff58332c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1226,8 +1226,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
bool autoneg_changed = false;
- i40e_status status = 0;
int timeout = 50;
+ int status = 0;
int err = 0;
__u32 speed;
u8 autoneg;
@@ -1455,8 +1455,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
- "Set phy config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Set phy config failed, err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@@ -1465,8 +1465,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_update_link_info(hw);
if (status)
netdev_dbg(netdev,
- "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Updating link info failed with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
@@ -1485,7 +1485,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status status = 0;
+ int status = 0;
u32 flags = 0;
int err = 0;
@@ -1517,8 +1517,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
- "Set phy config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Set phy config failed, err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@@ -1531,8 +1531,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
* (e.g. no physical connection etc.)
*/
netdev_dbg(netdev,
- "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Updating link info failed with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
@@ -1547,7 +1547,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status status = 0;
+ int status = 0;
int err = 0;
u8 fec_cfg;
@@ -1634,12 +1634,12 @@ static int i40e_nway_reset(struct net_device *netdev)
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
- i40e_status ret = 0;
+ int ret = 0;
ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
if (ret) {
- netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -1699,9 +1699,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
- i40e_status status;
u8 aq_failures;
int err = 0;
+ int status;
u32 is_an;
/* Changing the port's flow control is not supported if this isn't the
@@ -1755,20 +1755,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
status = i40e_set_fc(hw, &aq_failures, link_up);
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
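i40e_set_fc() above reports which of its several firmware steps failed through the aq_failures bitmask, so the caller can log each step separately instead of getting one opaque error. A minimal sketch of that pattern with illustrative flag names:

#include <stdio.h>

#define FC_FAIL_GET    0x1   /* get_phy_capabilities step */
#define FC_FAIL_SET    0x2   /* set_phy_config step */
#define FC_FAIL_UPDATE 0x4   /* get_link_info step */

static void report_fc_failures(unsigned int aq_failures)
{
	if (aq_failures & FC_FAIL_GET)
		printf("get_phy_capabilities step failed\n");
	if (aq_failures & FC_FAIL_SET)
		printf("set_phy_config step failed\n");
	if (aq_failures & FC_FAIL_UPDATE)
		printf("get_link_info step failed\n");
}

int main(void)
{
	report_fc_failures(FC_FAIL_GET | FC_FAIL_UPDATE);
	return 0;
}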
@@ -2583,8 +2583,8 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- i40e_status status;
bool link_up = false;
+ int status;
netif_info(pf, hw, netdev, "link test\n");
status = i40e_get_link_status(&pf->hw, &link_up);
@@ -2807,11 +2807,11 @@ static int i40e_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- i40e_status ret = 0;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
int blink_freq = 2;
u16 temp_status;
+ int ret = 0;
switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -5247,7 +5247,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 reset_needed = 0;
- i40e_status status;
+ int status;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@@ -5362,8 +5362,8 @@ flags_complete:
0, NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@@ -5435,9 +5435,8 @@ flags_complete:
return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
- "Starting FW LLDP agent failed: error: %s, %s\n",
- i40e_stat_str(&pf->hw,
- status),
+ "Starting FW LLDP agent failed: error: %pe, %s\n",
+ ERR_PTR(status),
i40e_aq_str(&pf->hw,
adq_err));
return -EINVAL;
@@ -5477,8 +5476,8 @@ static int i40e_get_module_info(struct net_device *netdev,
u32 sff8472_comp = 0;
u32 sff8472_swap = 0;
u32 sff8636_rev = 0;
- i40e_status status;
u32 type = 0;
+ int status;
/* Check if firmware supports reading module EEPROM. */
if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
@@ -5582,8 +5581,8 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
bool is_sfp = false;
- i40e_status status;
u32 value = 0;
+ int status;
int i;
if (!ee || !ee->len || !data)
@@ -5624,10 +5623,10 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
- enum i40e_status_code status = 0;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ int status = 0;
/* Get initial PHY capabilities */
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
@@ -5689,11 +5688,11 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status = I40E_SUCCESS;
struct i40e_aq_set_phy_config config;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ int status = I40E_SUCCESS;
__le16 eee_capability;
/* Deny parameters we don't support */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 163ee8c6311c..46f7950a0049 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -17,17 +17,17 @@
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
**/
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz)
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
+ int ret_code = I40E_SUCCESS;
struct i40e_dma_mem mem;
- i40e_status ret_code = I40E_SUCCESS;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
@@ -106,19 +106,19 @@ exit:
* aligned on 4K boundary and zeroed memory.
* 2. It should be 4K in size.
**/
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg)
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
- u64 *pd_addr;
+ int ret_code = 0;
u64 page_desc;
+ u64 *pd_addr;
if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
@@ -185,15 +185,15 @@ exit:
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
**/
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_sd_entry *sd_entry;
u32 sd_idx, rel_pd_idx;
+ int ret_code = 0;
u64 *pd_addr;
/* calculate index */
@@ -241,11 +241,11 @@ exit:
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
**/
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -269,9 +269,9 @@ exit:
* @idx: the page index
* @is_pf: used to distinguish between VF and PF
**/
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
@@ -290,11 +290,11 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
**/
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -318,9 +318,9 @@ exit:
* @idx: segment descriptor index to find the relevant page descriptor
* @is_pf: used to distinguish between VF and PF
**/
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 3113792afaff..9960da07a573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -187,28 +187,28 @@ struct i40e_hmc_info {
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz);
-
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg);
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
#endif /* _I40E_HMC_H_ */
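The HMC helpers declared above all turn a flat page-descriptor index into a segment-descriptor index plus an offset within that segment. A sketch of the index math, assuming the driver's 512 PDs per SD (I40E_HMC_PD_CNT_IN_SD) as a local constant:

#include <stdint.h>
#include <stdio.h>

#define PD_CNT_IN_SD 512u   /* assumed value of I40E_HMC_PD_CNT_IN_SD */

int main(void)
{
	uint32_t pd_index = 1300;
	uint32_t sd_idx = pd_index / PD_CNT_IN_SD;     /* which segment */
	uint32_t rel_pd_idx = pd_index % PD_CNT_IN_SD; /* PD within it */

	printf("pd %u -> sd %u, rel pd %u\n", pd_index, sd_idx, rel_pd_idx);
	return 0;
}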
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d6e92ecddfbd..40c101f286d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -74,12 +74,12 @@ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
* Assumptions:
* - HMC Resource Profile has been selected before calling this function.
**/
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num)
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
{
struct i40e_hmc_obj_info *obj, *full_obj;
- i40e_status ret_code = 0;
+ int ret_code = 0;
u64 l2fpm_size;
u32 size_exp;
@@ -229,11 +229,11 @@ init_lan_hmc_out:
* 1. caller can deallocate the memory used by pd after this function
* returns.
**/
-static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+static int i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (!i40e_prep_remove_pd_page(hmc_info, idx))
ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
@@ -256,11 +256,11 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
* 1. caller can deallocate the memory used by backing storage after this
* function returns.
**/
-static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+static int i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (!i40e_prep_remove_sd_bp(hmc_info, idx))
ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
@@ -276,15 +276,15 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
**/
-static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_create_obj_info *info)
+static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
u32 pd_idx1 = 0, pd_lmt1 = 0;
u32 pd_idx = 0, pd_lmt = 0;
bool pd_error = false;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
u64 sd_size;
u32 i, j;
@@ -435,13 +435,13 @@ exit:
* - This function will be called after i40e_init_lan_hmc() and before
* any LAN/FCoE HMC objects can be created.
**/
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model)
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
{
struct i40e_hmc_lan_create_obj_info info;
- i40e_status ret_code = 0;
u8 hmc_fn_id = hw->hmc.hmc_fn_id;
struct i40e_hmc_obj_info *obj;
+ int ret_code = 0;
/* Initialize part of the create object info struct */
info.hmc_info = &hw->hmc;
@@ -520,13 +520,13 @@ configure_lan_hmc_out:
* caller should deallocate memory allocated previously for
* book-keeping information about PDs and backing storage.
**/
-static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_delete_obj_info *info)
+static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
u32 pd_idx, pd_lmt, rel_pd_idx;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
u32 i, j;
if (NULL == info) {
@@ -632,10 +632,10 @@ exit:
* This must be called by drivers as they are shutting down and being
* removed from the OS.
**/
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
struct i40e_hmc_lan_delete_obj_info info;
- i40e_status ret_code;
+ int ret_code;
info.hmc_info = &hw->hmc;
info.rsrc_type = I40E_HMC_LAN_FULL;
@@ -915,9 +915,9 @@ static void i40e_write_qword(u8 *hmc_bits,
* @context_bytes: pointer to the context bit array (DMA memory)
* @hmc_type: the type of HMC resource
**/
-static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
- u8 *context_bytes,
- enum i40e_hmc_lan_rsrc_type hmc_type)
+static int i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
{
/* clean the bit array */
memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
@@ -931,9 +931,9 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
* @ce_info: a description of the struct to be filled
* @dest: the struct to be filled
**/
-static i40e_status i40e_set_hmc_context(u8 *context_bytes,
- struct i40e_context_ele *ce_info,
- u8 *dest)
+static int i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
{
int f;
@@ -973,18 +973,18 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
* base pointer. This function is used for LAN Queue contexts.
**/
static
-i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
- enum i40e_hmc_lan_rsrc_type rsrc_type,
- u32 obj_idx)
+int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
{
struct i40e_hmc_info *hmc_info = &hw->hmc;
u32 obj_offset_in_sd, obj_offset_in_pd;
struct i40e_hmc_sd_entry *sd_entry;
struct i40e_hmc_pd_entry *pd_entry;
u32 pd_idx, pd_lmt, rel_pd_idx;
- i40e_status ret_code = 0;
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
if (NULL == hmc_info) {
ret_code = I40E_ERR_BAD_PTR;
@@ -1042,11 +1042,11 @@ exit:
* @hw: the hardware struct
* @queue: the queue we care about
**/
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue)
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_TX, queue);
@@ -1062,12 +1062,12 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
* @queue: the queue we care about
* @s: the struct to be filled
**/
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s)
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_TX, queue);
@@ -1083,11 +1083,11 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
* @hw: the hardware struct
* @queue: the queue we care about
**/
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue)
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_RX, queue);
@@ -1103,12 +1103,12 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
* @queue: the queue we care about
* @s: the struct to be filled
**/
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s)
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_RX, queue);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index c46a2c449e60..9f960404c2b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -137,22 +137,22 @@ struct i40e_hmc_lan_delete_obj_info {
u32 count;
};
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num);
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model);
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
-
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s);
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s);
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
#endif /* _I40E_LAN_HMC_H_ */
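Several call sites in the i40e_main.c hunks below convert a firmware admin-queue return code into a negative errno via i40e_aq_rc_to_posix(). A conceptual userspace sketch with an abbreviated, illustrative mapping (the enum values and table are stand-ins, not the driver's full mapping):

#include <errno.h>
#include <stdio.h>

enum aq_rc { AQ_RC_OK = 0, AQ_RC_EPERM = 1, AQ_RC_ENOENT = 2, AQ_RC_ENOMEM = 9 };

static int aq_rc_to_posix(int aq_rc)
{
	switch (aq_rc) {
	case AQ_RC_OK:     return 0;
	case AQ_RC_EPERM:  return -EPERM;
	case AQ_RC_ENOENT: return -ENOENT;
	case AQ_RC_ENOMEM: return -ENOMEM;
	default:           return -EIO;   /* anything unrecognized */
	}
}

int main(void)
{
	printf("aq rc 2 -> %d\n", aq_rc_to_posix(AQ_RC_ENOENT));
	return 0;
}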
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 52eec0a50492..467001db5070 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1817,13 +1817,13 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (vsi->type == I40E_VSI_MAIN) {
- i40e_status ret;
+ int ret;
ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
if (ret)
- netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
@@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS key, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS key, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -2349,7 +2349,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
{
struct i40e_hw *hw = &vsi->back->hw;
enum i40e_admin_queue_err aq_status;
- i40e_status aq_ret;
+ int aq_ret;
aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
&aq_status);
@@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
- "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
- vsi_name, i40e_stat_str(hw, aq_ret),
+ "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
+ vsi_name, ERR_PTR(aq_ret),
i40e_aq_str(hw, aq_status));
}
}
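The hunk above keeps macvlan deletion idempotent: an ENOENT-style "already gone" reply from firmware is logged but not treated as a failure. A small sketch of that convention, with illustrative helpers in place of the driver's admin-queue calls:

#include <errno.h>
#include <stdio.h>

static int remove_filter(int present)
{
	return present ? 0 : -ENOENT;   /* firmware says it wasn't there */
}

static int del_filter_idempotent(int present)
{
	int ret = remove_filter(present);

	if (ret && ret != -ENOENT)
		return ret;   /* a real failure */
	return 0;             /* success, or already absent: both fine */
}

int main(void)
{
	printf("delete missing filter -> %d\n", del_filter_idempotent(0));
	return 0;
}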
@@ -2423,13 +2423,13 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
*
* Returns status indicating success or failure;
**/
-static i40e_status
+static int
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_mac_filter *f)
{
bool enable = f->state == I40E_FILTER_NEW;
struct i40e_hw *hw = &vsi->back->hw;
- i40e_status aq_ret;
+ int aq_ret;
if (f->vlan == I40E_VLAN_ANY) {
aq_ret = i40e_aq_set_vsi_broadcast(hw,
@@ -2468,7 +2468,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
+ int aq_ret;
if (vsi->type == I40E_VSI_MAIN &&
pf->lan_veb != I40E_NO_VEB &&
@@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "Set default VSI failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "Set default VSI failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
} else {
@@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
true);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "set unicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "set unicast promisc failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
@@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
promisc, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "set multicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "set multicast promisc failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@@ -2541,12 +2541,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
unsigned int vlan_filters = 0;
char vsi_name[16] = "PF";
int filter_list_len = 0;
- i40e_status aq_ret = 0;
u32 changed_flags = 0;
struct hlist_node *h;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
+ int aq_ret = 0;
int retval = 0;
u16 cmd_flags;
int list_size;
@@ -2814,9 +2814,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multi promisc failed on %s, err %s aq_err %s\n",
+ "set multi promisc failed on %s, err %pe aq_err %s\n",
vsi_name,
- i40e_stat_str(hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
@@ -2834,10 +2834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
cur_promisc ? "on" : "off",
vsi_name,
- i40e_stat_str(hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@@ -2965,7 +2965,7 @@ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
/* Don't modify stripping options if a port VLAN is active */
if (vsi->info.pvid)
@@ -2985,8 +2985,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vlan stripping failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "update vlan stripping failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@@ -2999,7 +2999,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
/* Don't modify stripping options if a port VLAN is active */
if (vsi->info.pvid)
@@ -3020,8 +3020,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vlan stripping failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "update vlan stripping failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@@ -3252,7 +3252,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
@@ -3265,8 +3265,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add pvid failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "add pvid failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
return -ENOENT;
@@ -3429,8 +3429,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u16 pf_q = vsi->base_queue + ring->queue_index;
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_txq tx_ctx;
- i40e_status err = 0;
u32 qtx_ctl = 0;
+ int err = 0;
if (ring_is_xdp(ring))
ring->xsk_pool = i40e_xsk_pool(ring);
@@ -3554,7 +3554,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
u16 pf_q = vsi->base_queue + ring->queue_index;
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_rxq rx_ctx;
- i40e_status err = 0;
+ int err = 0;
bool ok;
int ret;
@@ -5525,16 +5525,16 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
u32 tc_bw_max;
+ int ret;
int i;
/* Get the VSI level BW configuration */
ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5544,8 +5544,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5586,7 +5586,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
struct i40e_pf *pf = vsi->back;
- i40e_status ret;
+ int ret;
int i;
/* There is no need to reset BW when mqprio mode is on. */
@@ -5734,8 +5734,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -5790,8 +5790,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
&bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed querying vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Failed querying vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5857,8 +5857,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Update vsi tc config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5870,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed updating vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Failed updating vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5962,8 +5962,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
I40E_MAX_BW_INACTIVE_ACCUM, NULL);
if (ret)
dev_err(&pf->pdev->dev,
- "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
- max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
+ "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
+ max_tx_rate, seid, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@@ -6038,8 +6038,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret)
dev_info(&pf->pdev->dev,
- "Failed to delete cloud filter, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed to delete cloud filter, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
kfree(cfilter);
}
@@ -6173,8 +6173,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
kfree(lut);
return ret;
@@ -6272,8 +6272,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "add new vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "add new vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -6304,7 +6304,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
- i40e_status ret;
+ int ret;
int i;
memset(&bw_data, 0, sizeof(bw_data));
@@ -6340,9 +6340,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
struct i40e_vsi *vsi,
struct i40e_channel *ch)
{
- i40e_status ret;
- int i;
u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ int ret;
+ int i;
/* Enable ETS TCs with equal BW Share for now across all VSIs */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -6518,8 +6518,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
mode, NULL);
if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
@@ -6719,8 +6719,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "VEB bw config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "VEB bw config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6729,8 +6729,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed getting veb bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed getting veb bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
@@ -6813,8 +6813,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
ret = i40e_aq_resume_port_tx(hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Resume Port Tx failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Resume Port Tx failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@@ -6838,8 +6838,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Suspend Port Tx failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Suspend Port Tx failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@@ -6878,8 +6878,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
ret = i40e_set_dcb_config(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev,
- "Set DCB Config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Set DCB Config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6995,8 +6995,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
i40e_aqc_opc_modify_switching_comp_ets, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Modify Port ETS failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Modify Port ETS failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -7033,8 +7033,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
ret = i40e_aq_dcb_updated(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "DCB Updated failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "DCB Updated failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -7117,8 +7117,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
i40e_aqc_opc_enable_switching_comp_ets, NULL);
if (err) {
dev_info(&pf->pdev->dev,
- "Enable Port ETS failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "Enable Port ETS failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
@@ -7197,8 +7197,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
- "Query for DCB configuration failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "Query for DCB configuration failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
@@ -7416,15 +7416,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
* @pf: board private structure
* @is_up: whether the link state should be forced up or down
**/
-static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
{
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config = {0};
bool non_zero_phy_type = is_up;
struct i40e_hw *hw = &pf->hw;
- i40e_status err;
u64 mask;
u8 speed;
+ int err;
/* Card might've been put in an unstable state by other drivers
* and applications, which causes incorrect speed values being
@@ -7436,8 +7436,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
- "failed to get phy cap., ret = %s last_status = %s\n",
- i40e_stat_str(hw, err),
+ "failed to get phy cap., ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -7448,8 +7448,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
- "failed to get phy cap., ret = %s last_status = %s\n",
- i40e_stat_str(hw, err),
+ "failed to get phy cap., ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -7493,8 +7493,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
- "set phy config ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ "set phy config ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return err;
}
@@ -7657,11 +7657,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
* This function deletes a mac filter on the channel VSI which serves as the
* macvlan. Returns 0 on success.
**/
-static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
- const u8 *macaddr, int *aq_err)
+static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
{
struct i40e_aqc_remove_macvlan_element_data element;
- i40e_status status;
+ int status;
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
@@ -7683,12 +7683,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
* This function adds a mac filter on the channel VSI which serves as the
* macvlan. Returns 0 on success.
**/
-static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
- const u8 *macaddr, int *aq_err)
+static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
{
struct i40e_aqc_add_macvlan_element_data element;
- i40e_status status;
u16 cmd_flags = 0;
+ int status;
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
@@ -7834,8 +7834,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
rx_ring->netdev = NULL;
}
dev_info(&pf->pdev->dev,
- "Error adding mac filter on macvlan err %s, aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Error adding mac filter on macvlan err %pe, aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, aq_err));
netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
}
@@ -7907,8 +7907,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Update vsi tc config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -8123,8 +8123,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
ch->fwd = NULL;
} else {
dev_info(&pf->pdev->dev,
- "Error deleting mac filter on macvlan err %s, aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, aq_err));
}
break;
@@ -8875,8 +8875,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
kfree(filter);
if (err) {
dev_err(&pf->pdev->dev,
- "Failed to delete cloud filter, err %s\n",
- i40e_stat_str(&pf->hw, err));
+ "Failed to delete cloud filter, err %pe\n",
+ ERR_PTR(err));
return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
}
@@ -9438,8 +9438,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
} else {
dev_info(&pf->pdev->dev,
- "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -9887,8 +9887,8 @@ static void i40e_link_event(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
u8 new_link_speed, old_link_speed;
- i40e_status status;
bool new_link, old_link;
+ int status;
#ifdef CONFIG_I40E_DCB
int err;
#endif /* CONFIG_I40E_DCB */
@@ -10099,9 +10099,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
struct i40e_arq_event_info event;
struct i40e_hw *hw = &pf->hw;
u16 pending, i = 0;
- i40e_status ret;
u16 opcode;
u32 oldval;
+ int ret;
u32 val;
/* Do not run clean AQ when PF reset fails */
@@ -10265,8 +10265,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@@ -10277,8 +10277,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi switch failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi switch failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -10301,8 +10301,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@@ -10313,8 +10313,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi switch failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi switch failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -10458,8 +10458,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
buf_len = data_size;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
- "capability discovery failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "capability discovery failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENODEV;
@@ -10580,7 +10580,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
struct i40e_cloud_filter *cfilter;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
- i40e_status ret;
+ int ret;
/* Add cloud filters back if they exist */
hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
@@ -10596,8 +10596,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
if (ret) {
dev_dbg(&pf->pdev->dev,
- "Failed to rebuild cloud filter, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed to rebuild cloud filter, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -10615,7 +10615,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
struct i40e_channel *ch, *ch_tmp;
- i40e_status ret;
+ int ret;
if (list_empty(&vsi->ch_list))
return 0;
@@ -10691,7 +10691,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret = 0;
+ int ret = 0;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
@@ -10796,7 +10796,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw)
static int i40e_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
ret = i40e_pf_reset(hw);
if (ret) {
@@ -10821,7 +10821,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
u32 val;
int v;
@@ -10837,8 +10837,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
@@ -10949,8 +10949,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
@@ -11053,8 +11053,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -11082,9 +11082,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_set_promiscuous(pf, pf->cur_promisc);
if (ret)
dev_warn(&pf->pdev->dev,
- "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
pf->cur_promisc ? "on" : "off",
- i40e_stat_str(&pf->hw, ret),
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
@@ -12218,8 +12218,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
(struct i40e_aqc_get_set_rss_key_data *)seed);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot get RSS key, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot get RSS key, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -12232,8 +12232,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot get RSS lut, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot get RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -12508,11 +12508,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
* i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
+int i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
- i40e_status status;
bool min_valid, max_valid;
u32 max_bw, min_bw;
+ int status;
status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
&min_valid, &max_valid);
@@ -12531,10 +12531,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
* i40e_set_partition_bw_setting - Set BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+int i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
struct i40e_aqc_configure_partition_bw_data bw_data;
- i40e_status status;
+ int status;
memset(&bw_data, 0, sizeof(bw_data));
@@ -12553,12 +12553,12 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
* i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
/* Commit temporary BW setting to permanent NVM image */
enum i40e_admin_queue_err last_aq_status;
- i40e_status ret;
u16 nvm_word;
+ int ret;
if (pf->hw.partition_id != 1) {
dev_info(&pf->pdev->dev,
@@ -12573,8 +12573,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot acquire NVM for read access, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12590,8 +12590,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
i40e_release_nvm(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12604,8 +12604,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot acquire NVM for write access, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12624,8 +12624,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
i40e_release_nvm(&pf->hw);
if (ret)
dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "BW settings NOT SAVED, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:
@@ -12646,7 +12646,7 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
- i40e_status read_status = I40E_SUCCESS;
+ int read_status = I40E_SUCCESS;
u16 sr_emp_sr_settings_ptr = 0;
u16 features_enable = 0;
u16 link_behavior = 0;
@@ -12679,8 +12679,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
err_nvm:
dev_warn(&pf->pdev->dev,
- "total-port-shutdown feature is off due to read nvm error: %s\n",
- i40e_stat_str(&pf->hw, read_status));
+ "total-port-shutdown feature is off due to read nvm error: %pe\n",
+ ERR_PTR(read_status));
return ret;
}
@@ -13025,7 +13025,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
u8 type, filter_index;
- i40e_status ret;
+ int ret;
type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
I40E_AQC_TUNNEL_TYPE_NGE;
@@ -13033,8 +13033,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
NULL);
if (ret) {
- netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -13049,12 +13049,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- i40e_status ret;
+ int ret;
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
- netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -13341,9 +13341,11 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
- if (!prog)
+ if (!prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
/* Wait until ndo_xsk_wakeup completes. */
synchronize_rcu();
+ }
i40e_reset_and_rebuild(pf, true, true);
}
@@ -13364,11 +13366,13 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
/* Kick start the NAPI context if there is an AF_XDP socket open
* on that queue id. This so that receiving will start.
*/
- if (need_reset && prog)
+ if (need_reset && prog) {
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
+ xdp_features_set_redirect_target(vsi->netdev, true);
+ }
return 0;
}
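The two hunks above pair the XDP feature helpers added this cycle:
xdp_features_clear_redirect_target() drops NETDEV_XDP_ACT_NDO_XMIT while no
program is attached, and xdp_features_set_redirect_target() re-advertises it
(the bool argument additionally advertises multi-buffer ndo_xdp_xmit) once one
is. A sketch of the pattern for a hypothetical driver, not i40e code:

#include <linux/bpf.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

static void demo_xdp_attach(struct net_device *dev, struct bpf_prog *prog)
{
	if (prog)
		/* true also advertises SG (multi-buffer) ndo_xdp_xmit */
		xdp_features_set_redirect_target(dev, true);
	else
		xdp_features_clear_redirect_target(dev);
}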
@@ -13803,6 +13807,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
* are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
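The NETDEV_XDP_ACT_* values set in the hunk above are plain bits in
netdev->xdp_features (uapi <linux/netdev.h>), so the baseline set here - basic
XDP verdicts, redirect, and AF_XDP zero-copy - can be tested with simple mask
operations; the same mask is what the new netdev netlink family reports to
userspace. An illustrative check with a hypothetical helper:

static bool demo_supports_xsk_zc(const struct net_device *dev)
{
	/* feature words are bitmasks; capability tests are plain ANDs */
	return dev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY;
}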
@@ -13943,8 +13951,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -13973,8 +13981,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -13993,8 +14001,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -14016,9 +14024,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* message and continue
*/
dev_info(&pf->pdev->dev,
- "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
enabled_tc,
- i40e_stat_str(&pf->hw, ret),
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -14112,8 +14120,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "add vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -14144,8 +14152,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
@@ -14591,8 +14599,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "query veb bw config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -14601,8 +14609,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&ets_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw ets config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "query veb bw ets config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -14798,8 +14806,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/* get a VEB from the hardware */
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't add VEB, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't add VEB, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
@@ -14809,16 +14817,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
&veb->stats_idx, NULL, NULL, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get VEB statistics idx, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get VEB statistics idx, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get VEB bw info, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get VEB bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
@@ -15028,8 +15036,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
&next_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "get switch config failed err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "get switch config failed err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
kfree(aq_buf);
@@ -15074,8 +15082,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
ret = i40e_fetch_switch_configuration(pf, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't fetch switch config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't fetch switch config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@@ -15101,8 +15109,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@@ -15439,13 +15447,12 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
*
* Return 0 on success, negative on failure.
**/
-static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+static int i40e_pf_loop_reset(struct i40e_pf *pf)
{
/* wait max 10 seconds for PF reset to succeed */
const unsigned long time_end = jiffies + 10 * HZ;
-
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
ret = i40e_pf_reset(hw);
while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
@@ -15491,9 +15498,9 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf)
* Return 0 if NIC is healthy or negative value when there are issues
* with resets
**/
-static i40e_status i40e_handle_resets(struct i40e_pf *pf)
+static int i40e_handle_resets(struct i40e_pf *pf)
{
- const i40e_status pfr = i40e_pf_loop_reset(pf);
+ const int pfr = i40e_pf_loop_reset(pf);
const bool is_empr = i40e_check_fw_empr(pf);
if (is_empr || pfr != I40E_SUCCESS)
@@ -15591,7 +15598,6 @@ err_switch_setup:
timer_shutdown_sync(&pf->service_timer);
i40e_shutdown_adminq(hw);
iounmap(hw->hw_addr);
- pci_disable_pcie_error_reporting(pf->pdev);
pci_release_mem_regions(pf->pdev);
pci_disable_device(pf->pdev);
kfree(pf);
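The dropped pci_disable_pcie_error_reporting() call (and its enable
counterpart further down) follows the PCI core taking over native AER
enablement, so drivers no longer manage it per device and error paths shrink
accordingly. A probe sketch under that assumption, with a hypothetical
demo_probe() rather than i40e code:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = pci_request_mem_regions(pdev, "demo");
	if (err)
		goto err_disable;

	pci_set_master(pdev);
	/* device setup; no per-driver AER bookkeeping needed anymore */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}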
@@ -15631,13 +15637,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_aq_get_phy_abilities_resp abilities;
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
- i40e_status status;
#endif /* CONFIG_I40E_DCB */
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
u16 wol_nvm_bits;
u16 link_status;
+#ifdef CONFIG_I40E_DCB
+ int status;
+#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
u32 i;
@@ -15662,7 +15670,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
/* Now that we have a PCI connection, we need to do the
@@ -16006,8 +16013,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Reconfigure hardware for allowing smaller MSS in the case
@@ -16025,8 +16032,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -16158,8 +16165,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
- dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
@@ -16169,8 +16176,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
- dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* make sure the MFS hasn't been set lower than the default */
@@ -16220,7 +16227,6 @@ err_pf_reset:
err_ioremap:
kfree(pf);
err_pf_alloc:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -16241,7 +16247,7 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
- i40e_status ret_code;
+ int ret_code;
int i;
i40e_dbg_pf_exit(pf);
@@ -16368,7 +16374,6 @@ unmap:
kfree(pf);
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -16489,9 +16494,9 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
u8 mac_addr[6];
u16 flags = 0;
+ int ret;
/* Get current MAC address in case it's an LAA */
if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3a38bf8bcde7..9da0c87f0328 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -13,10 +13,10 @@
* in this file) as an equivalent of the FLASH part mapped into the SR.
* We always access the FLASH through the Shadow RAM.
**/
-i40e_status i40e_init_nvm(struct i40e_hw *hw)
+int i40e_init_nvm(struct i40e_hw *hw)
{
struct i40e_nvm_info *nvm = &hw->nvm;
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 fla, gens;
u8 sr_size;
@@ -52,12 +52,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
* This function will request NVM ownership for reading
* via the proper Admin Command.
**/
-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
- enum i40e_aq_resource_access_type access)
+int i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
{
- i40e_status ret_code = 0;
u64 gtime, timeout;
u64 time_left = 0;
+ int ret_code = 0;
if (hw->nvm.blank_nvm_mode)
goto i40e_i40e_acquire_nvm_exit;
@@ -111,7 +111,7 @@ i40e_i40e_acquire_nvm_exit:
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
- i40e_status ret_code = I40E_SUCCESS;
+ int ret_code = I40E_SUCCESS;
u32 total_delay = 0;
if (hw->nvm.blank_nvm_mode)
@@ -138,9 +138,9 @@ void i40e_release_nvm(struct i40e_hw *hw)
*
* Polls the SRCTL Shadow RAM register done bit.
**/
-static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
u32 srctl, wait_cnt;
/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
@@ -165,10 +165,10 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
if (offset >= hw->nvm.sr_size) {
@@ -216,13 +216,13 @@ read_nvm_exit:
*
* Reads a 16 bit words buffer from the Shadow RAM using the admin command.
**/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
- u8 module_pointer, u32 offset,
- u16 words, void *data,
- bool last_command)
+static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
{
- i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -264,10 +264,10 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM using the AdminQ
**/
-static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
*data = le16_to_cpu(*(__le16 *)data);
@@ -286,8 +286,8 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
* Do not use this function except in cases where the nvm lock is already
* taken via i40e_acquire_nvm().
**/
-static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
- u16 offset, u16 *data)
+static int __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset, u16 *data)
{
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_word_aq(hw, offset, data);
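__i40e_read_nvm_word() dispatches on I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE
between the AdminQ-based Shadow RAM read and the older GLNVM_SRCTL register
path; the srctl fallback sits just past the hunk boundary above. The full
shape of the dispatch, as a sketch:

static int __demo_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data)
{
	/* newer firmware wants Shadow RAM access to go via the AdminQ */
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_word_aq(hw, offset, data);

	/* legacy path: drive the GLNVM_SRCTL register directly */
	return i40e_read_nvm_word_srctl(hw, offset, data);
}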
@@ -303,10 +303,10 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM.
**/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -330,17 +330,17 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
* @words_data_size: Words to read from NVM
* @data_ptr: Pointer to memory location where resulting buffer will be stored
**/
-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr,
- u16 module_offset,
- u16 data_offset,
- u16 words_data_size,
- u16 *data_ptr)
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
{
- i40e_status status;
u16 specific_ptr = 0;
u16 ptr_value = 0;
u32 offset = 0;
+ int status;
if (module_ptr != 0) {
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -406,10 +406,10 @@ enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u16 index, word;
/* Loop thru the selected region */
@@ -437,13 +437,13 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code;
- u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
+ u16 read_size;
+ int ret_code;
u16 i = 0;
do {
@@ -493,9 +493,9 @@ read_nvm_buffer_aq_exit:
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method.
**/
-static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
- u16 offset, u16 *words,
- u16 *data)
+static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset, u16 *words,
+ u16 *data)
{
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
@@ -514,10 +514,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -544,12 +544,12 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
-static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command)
+static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
{
- i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -594,14 +594,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
* is customer specific and unknown. Therefore, this function skips the
* maximum possible size of the VPD area (1kB).
**/
-static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum)
+static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
- i40e_status ret_code;
struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
+ int ret_code;
u16 *data;
u16 i = 0;
@@ -675,11 +675,11 @@ i40e_calc_nvm_checksum_exit:
* on ARQ completion event reception by caller.
* This function will commit SR to NVM.
**/
-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+int i40e_update_nvm_checksum(struct i40e_hw *hw)
{
- i40e_status ret_code;
- u16 checksum;
__le16 le_sum;
+ int ret_code;
+ u16 checksum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
if (!ret_code) {
@@ -699,12 +699,12 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
* Performs checksum calculation and validates the NVM SW checksum. If the
* caller does not need the checksum value, the pointer can be NULL.
**/
-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum)
+int i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
- i40e_status ret_code = 0;
- u16 checksum_sr = 0;
u16 checksum_local = 0;
+ u16 checksum_sr = 0;
+ int ret_code = 0;
/* We must acquire the NVM lock in order to correctly synchronize the
* NVM accesses across multiple PFs. Without doing so it is possible
@@ -733,36 +733,36 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
return ret_code;
}
-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *perrno);
-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -807,12 +807,12 @@ static const char * const i40e_nvm_update_state_str[] = {
*
* Dispatches command depending on what update state is current
**/
-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ int status;
/* assume success */
*perrno = 0;
@@ -923,12 +923,12 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1062,12 +1062,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1104,13 +1104,13 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
bool retry_attempt = false;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1187,8 +1187,8 @@ retry:
*/
if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
!retry_attempt) {
- i40e_status old_status = status;
u32 old_asq_status = hw->aq.asq_last_status;
+ int old_status = status;
u32 gtime;
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
@@ -1370,17 +1370,17 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- i40e_status status;
struct i40e_aq_desc *aq_desc;
u32 buff_size = 0;
u8 *buff = NULL;
u32 aq_desc_len;
u32 aq_data_len;
+ int status;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
if (cmd->offset == 0xffff)
@@ -1429,8 +1429,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
buff_size, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_exec_aq err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "%s err %pe aq_err %s\n",
+ __func__, ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;
@@ -1454,9 +1454,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
@@ -1523,9 +1523,9 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
@@ -1557,13 +1557,13 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- i40e_status status;
u8 module, transaction;
+ int status;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
@@ -1596,13 +1596,13 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
*
* module, offset, data_size and data are in cmd structure
**/
-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
{
- i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ int status = 0;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
@@ -1636,14 +1636,14 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
*
* module, offset, data_size and data are in cmd structure
**/
-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
u8 preservation_flags;
+ int status = 0;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 2f6815b2f8df..2bd4de03dafa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -56,5 +56,4 @@ do { \
(h)->bus.func, ##__VA_ARGS__); \
} while (0)
-typedef enum i40e_status_code i40e_status;
#endif /* _I40E_OSDEP_H_ */
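With the typedef gone, nothing remains of the driver-private status type:
every prototype converted below returns a plain int carrying 0 on success or
a negative value (a standard errno or a legacy I40E_ERR_* code), which is
what makes the mechanical i40e_status -> int sweep above possible.
Before/after, using one of the converted prototypes:

/* before: driver-private alias for enum i40e_status_code */
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data);

/* after: ordinary kernel error-return convention */
int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data);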
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9a71121420c3..fe845987d99a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -16,29 +16,29 @@
*/
/* adminq functions */
-i40e_status i40e_init_adminq(struct i40e_hw *hw);
+int i40e_init_adminq(struct i40e_hw *hw);
void i40e_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *events_pending);
-i40e_status
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status
+int
i40e_asq_send_command_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-i40e_status
+int
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
-i40e_status
+int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -53,327 +53,332 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
- u16 led_addr, u32 mode);
-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
- u16 *val);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
/* admin send queue commands */
-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
- u16 *fw_major_version, u16 *fw_minor_version,
- u32 *fw_build,
- u16 *api_major_version, u16 *api_minor_version,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
- u32 reg_addr, u64 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
- bool qualified_modules, bool report_init,
- struct i40e_aq_get_phy_abilities_resp *abilities,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
- struct i40e_aq_set_phy_config *config,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_reset);
-i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw,
- bool ena_lpbk,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
- bool enable_link,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
- bool enable_lse, struct i40e_link_status *link,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
- u64 advt_reg,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_reset);
+int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
+ bool ena_lpbk,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
- u16 vsi_id, bool set_filter,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
- bool rx_only_promisc);
-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable, u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
- u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
- bool enable_stats,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
- u16 veb_seid, u16 *switch_id, bool *floating,
- u16 *statistic_index, u16 *vebs_used,
- u16 *vebs_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status
+int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status
+int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free);
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free);
-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
- struct i40e_aqc_get_switch_config_resp *buf,
- u16 buf_size, u16 *start_seid,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags,
- u16 valid_flags, u8 mode,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- enum i40e_aq_resource_access_type access,
- u8 sdp_number, u64 *timeout,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- u8 sdp_number,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, bool last_command,
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
- void *buff, u16 buff_size, u16 *data_size,
- enum i40e_admin_queue_opc list_type_opc,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command, u8 preservation_flags,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
- u8 mib_type, void *buff, u16 buff_size,
- u16 *local_len, u16 *remote_len,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
- bool enable_update,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
- bool persist,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
- bool dcb_enable,
- struct i40e_asq_cmd_details
- *cmd_details);
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
- void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 protocol_index,
- u8 *filter_index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
- u16 flags, u8 *mac_addr,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
- u16 seid, u16 credit, u8 max_bw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_port_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg);
-enum i40e_status_code
+int i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+int
i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
/* i40e_common */
-i40e_status i40e_init_shared_code(struct i40e_hw *hw);
-i40e_status i40e_pf_reset(struct i40e_hw *hw);
+int i40e_init_shared_code(struct i40e_hw *hw);
+int i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
-i40e_status i40e_update_link_info(struct i40e_hw *hw);
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
- u32 *max_bw, u32 *min_bw, bool *min_valid,
- bool *max_valid);
-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
- struct i40e_aqc_configure_partition_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+int i40e_update_link_info(struct i40e_hw *hw);
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid,
+ bool *max_valid);
+int
+i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+int i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
-i40e_status i40e_init_nvm(struct i40e_hw *hw);
-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
- enum i40e_aq_resource_access_type access);
+int i40e_init_nvm(struct i40e_hw *hw);
+int i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr,
- u16 module_offset,
- u16 data_offset,
- u16 words_data_size,
- u16 *data_ptr);
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum);
-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *);
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+int i40e_update_nvm_checksum(struct i40e_hw *hw);
+int i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *errno);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
struct i40e_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+int i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -422,41 +427,41 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg);
-i40e_status i40e_vf_reset(struct i40e_hw *hw);
-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings);
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
- u8 table_id, u32 start_index, u16 buff_size,
- void *buff, u16 *ret_buff_size,
- u8 *ret_next_table, u32 *ret_next_index,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_vf_reset(struct i40e_hw *hw);
+int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum virtchnl_ops v_opcode,
+ int v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-enum i40e_status_code
+int
i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr, bool page_change,
bool set_mdio, u8 mdio_num,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int
i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr, bool page_change,
bool set_mdio, u8 mdio_num,
@@ -469,43 +474,43 @@ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
-i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *
- cmd_details);
-i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *
- cmd_details);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
struct i40e_profile_segment *profile);
-enum i40e_status_code
+int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-enum i40e_status_code
+int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-enum i40e_status_code
+int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
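
The prototype churn above is purely mechanical: every wrapper that used to
return i40e_status (or enum i40e_status_code) now returns a plain int. A
minimal caller sketch of the new convention, using i40e_aq_start_lldp() as
declared above (the surrounding function and message text are illustrative,
not part of the patch):

	static void example_start_lldp(struct i40e_pf *pf)
	{
		struct i40e_hw *hw = &pf->hw;
		int ret;	/* was: i40e_status ret; */

		ret = i40e_aq_start_lldp(hw, false, NULL);
		if (ret)	/* I40E_SUCCESS is 0, all errors are negative */
			dev_warn(&pf->pdev->dev,
				 "start_lldp failed: %d\n", ret);
	}
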
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index db3714a65dc7..4d2782e76038 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -9,65 +9,30 @@ enum i40e_status_code {
I40E_SUCCESS = 0,
I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2,
- I40E_ERR_PHY = -3,
I40E_ERR_CONFIG = -4,
I40E_ERR_PARAM = -5,
- I40E_ERR_MAC_TYPE = -6,
I40E_ERR_UNKNOWN_PHY = -7,
- I40E_ERR_LINK_SETUP = -8,
- I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_PRIMARY_REQUESTS_PENDING = -12,
- I40E_ERR_INVALID_LINK_SETTINGS = -13,
- I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
- I40E_ERR_SWFW_SYNC = -16,
I40E_ERR_NO_AVAILABLE_VSI = -17,
I40E_ERR_NO_MEMORY = -18,
I40E_ERR_BAD_PTR = -19,
- I40E_ERR_RING_FULL = -20,
- I40E_ERR_INVALID_PD_ID = -21,
- I40E_ERR_INVALID_QP_ID = -22,
- I40E_ERR_INVALID_CQ_ID = -23,
- I40E_ERR_INVALID_CEQ_ID = -24,
- I40E_ERR_INVALID_AEQ_ID = -25,
I40E_ERR_INVALID_SIZE = -26,
- I40E_ERR_INVALID_ARP_INDEX = -27,
- I40E_ERR_INVALID_FPM_FUNC_ID = -28,
- I40E_ERR_QP_INVALID_MSG_SIZE = -29,
- I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
- I40E_ERR_INVALID_FRAG_COUNT = -31,
I40E_ERR_QUEUE_EMPTY = -32,
- I40E_ERR_INVALID_ALIGNMENT = -33,
- I40E_ERR_FLUSHED_QUEUE = -34,
- I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
- I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
I40E_ERR_TIMEOUT = -37,
- I40E_ERR_OPCODE_MISMATCH = -38,
- I40E_ERR_CQP_COMPL_ERROR = -39,
- I40E_ERR_INVALID_VF_ID = -40,
- I40E_ERR_INVALID_HMCFN_ID = -41,
- I40E_ERR_BACKING_PAGE_ERROR = -42,
- I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
- I40E_ERR_INVALID_PBLE_INDEX = -44,
I40E_ERR_INVALID_SD_INDEX = -45,
I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
I40E_ERR_INVALID_SD_TYPE = -47,
- I40E_ERR_MEMCPY_FAILED = -48,
I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
- I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
- I40E_ERR_SRQ_ENABLED = -52,
I40E_ERR_ADMIN_QUEUE_ERROR = -53,
I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
I40E_ERR_BUF_TOO_SHORT = -55,
I40E_ERR_ADMIN_QUEUE_FULL = -56,
I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
- I40E_ERR_BAD_IWARP_CQE = -58,
I40E_ERR_NVM_BLANK_MODE = -59,
I40E_ERR_NOT_IMPLEMENTED = -60,
- I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
I40E_ERR_DIAG_TEST_FAILED = -62,
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
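
The members deleted above are unused in the driver; what survives are codes
the common code still produces. Because these values do not line up with
kernel errnos, call sites that must hand a standard errno up the stack
typically go through a translation helper, as iavf does with
iavf_status_to_errno() (visible later in this diff). A hedged sketch of that
pattern for i40e -- the helper name and the exact mapping below are
illustrative, not from the patch:

	static int example_status_to_errno(enum i40e_status_code status)
	{
		switch (status) {
		case I40E_SUCCESS:
			return 0;
		case I40E_ERR_PARAM:
		case I40E_ERR_INVALID_SIZE:
			return -EINVAL;
		case I40E_ERR_NO_MEMORY:
			return -ENOMEM;
		case I40E_ERR_TIMEOUT:
		case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
			return -ETIMEDOUT;
		default:
			return -EIO;
		}
	}
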
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 635f93d60318..8a4587585acd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -17,7 +17,7 @@
**/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
enum virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg,
+ int v_retval, u8 *msg,
u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
@@ -441,14 +441,14 @@ irq_list_done:
}
/**
- * i40e_release_iwarp_qvlist
+ * i40e_release_rdma_qvlist
* @vf: pointer to the VF.
*
**/
-static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
+static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
- struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+ struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
u32 msix_vf;
u32 i;
@@ -457,7 +457,7 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
- struct virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_rdma_qv_info *qv_info;
u32 next_q_index, next_q_type;
struct i40e_hw *hw = &pf->hw;
u32 v_idx, reg_idx, reg;
@@ -491,18 +491,19 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
}
/**
- * i40e_config_iwarp_qvlist
+ * i40e_config_rdma_qvlist
* @vf: pointer to the VF info
* @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
-static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
- struct virtchnl_iwarp_qvlist_info *qvlist_info)
+static int
+i40e_config_rdma_qvlist(struct i40e_vf *vf,
+ struct virtchnl_rdma_qvlist_info *qvlist_info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_rdma_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf;
@@ -1246,13 +1247,13 @@ err:
* @vl: List of VLANs - apply filter for given VLANs
* @num_vlans: Number of elements in @vl
**/
-static i40e_status
+static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
bool unicast_enable, s16 *vl, u16 num_vlans)
{
- i40e_status aq_ret, aq_tmp = 0;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int aq_ret, aq_tmp = 0;
int i;
/* No VLAN to set promisc on, set on VSI */
@@ -1264,9 +1265,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
return aq_ret;
@@ -1280,9 +1281,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
}
@@ -1297,9 +1298,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if (!aq_tmp)
@@ -1313,9 +1314,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if (!aq_tmp)
@@ -1339,13 +1340,13 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
* Called from the VF to configure the promiscuous mode of
* VF vsis and from the VF reset path to reset promiscuous mode.
**/
-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
- u16 vsi_id,
- bool allmulti,
- bool alluni)
+static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
{
- i40e_status aq_ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
+ int aq_ret = I40E_SUCCESS;
struct i40e_vsi *vsi;
u16 num_vlans;
s16 *vl;
@@ -1955,7 +1956,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
struct i40e_pf *pf;
struct i40e_hw *hw;
int abs_vf_id;
- i40e_status aq_ret;
+ int aq_ret;
/* validate the request */
if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
@@ -1987,7 +1988,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
**/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
enum virtchnl_ops opcode,
- i40e_status retval)
+ int retval)
{
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
@@ -2091,9 +2092,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
int num_vsis = 1;
+ int aq_ret = 0;
size_t len = 0;
int ret;
@@ -2123,11 +2124,11 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
if (i40e_vf_client_capable(pf, vf->vf_id) &&
- (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
- set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
+ set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
} else {
- clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
}
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2221,9 +2222,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
bool allmulti = false;
bool alluni = false;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2308,10 +2309,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_pair_info *qpi;
u16 vsi_id, vsi_queue_id = 0;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
struct i40e_vsi *vsi;
u16 num_qps_all = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2458,8 +2459,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
+ int aq_ret = 0;
u16 vsi_id;
- i40e_status aq_ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -2574,7 +2575,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
@@ -2632,7 +2633,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2783,7 +2784,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_eth_stats stats;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
struct i40e_vsi *vsi;
memset(&stats, 0, sizeof(struct i40e_eth_stats));
@@ -2926,7 +2927,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status ret = 0;
+ int ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -2998,7 +2999,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status ret = 0;
+ int ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3071,7 +3072,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
@@ -3142,7 +3143,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3187,21 +3188,21 @@ error_param:
}
/**
- * i40e_vc_iwarp_msg
+ * i40e_vc_rdma_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
 * called from the VF for the RDMA msgs
**/
-static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+ !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -3211,42 +3212,42 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
aq_ret);
}
/**
- * i40e_vc_iwarp_qvmap_msg
+ * i40e_vc_rdma_qvmap_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @config: config qvmap or release it
*
 * called from the VF for the RDMA msgs
**/
-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
+static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
- struct virtchnl_iwarp_qvlist_info *qvlist_info =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- i40e_status aq_ret = 0;
+ struct virtchnl_rdma_qvlist_info *qvlist_info =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+ int aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+ !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
if (config) {
- if (i40e_config_iwarp_qvlist(vf, qvlist_info))
+ if (i40e_config_rdma_qvlist(vf, qvlist_info))
aq_ret = I40E_ERR_PARAM;
} else {
- i40e_release_iwarp_qvlist(vf);
+ i40e_release_rdma_qvlist(vf);
}
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
aq_ret);
}
@@ -3263,7 +3264,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_key *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
@@ -3293,7 +3294,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
u16 i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3328,7 +3329,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int len = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3365,7 +3366,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_hena *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3389,8 +3390,8 @@ err:
**/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3415,8 +3416,8 @@ err:
**/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3615,8 +3616,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
if (ret)
dev_err(&pf->pdev->dev,
- "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
@@ -3642,7 +3643,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
struct hlist_node *node;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i, ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3718,8 +3719,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
if (ret) {
dev_err(&pf->pdev->dev,
- "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err;
}
@@ -3773,7 +3774,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_cloud_filter *cfilter = NULL;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i, ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3852,8 +3853,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
if (ret) {
dev_err(&pf->pdev->dev,
- "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -3882,7 +3883,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
int i, adq_request_qps = 0;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
u64 speed = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3994,7 +3995,7 @@ err:
static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -4112,14 +4113,14 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg);
break;
- case VIRTCHNL_OP_IWARP:
- ret = i40e_vc_iwarp_msg(vf, msg, msglen);
+ case VIRTCHNL_OP_RDMA:
+ ret = i40e_vc_rdma_msg(vf, msg, msglen);
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
+ ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
ret = i40e_vc_config_rss_key(vf, msg);
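
A side effect of dropping i40e_status is visible in the dev_err() calls
above: i40e_stat_str() is replaced by the generic printk "%pe" specifier,
which prints a symbolic error name from an error pointer. A minimal sketch
of the idiom (the function and message text here are illustrative; ERR_PTR()
comes from <linux/err.h>):

	static void example_report(struct i40e_pf *pf, int aq_ret)
	{
		/* ERR_PTR() turns the negative errno back into an error
		 * pointer; with CONFIG_SYMBOLIC_ERRNAME=y "%pe" prints
		 * e.g. "-EIO", otherwise it falls back to the decimal value.
		 */
		dev_err(&pf->pdev->dev, "admin queue call failed, err %pe\n",
			ERR_PTR(aq_ret));
	}
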
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 358bbdb58795..895b8feb2567 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -34,7 +34,7 @@ enum i40e_queue_ctrl {
enum i40e_vf_states {
I40E_VF_STATE_INIT = 0,
I40E_VF_STATE_ACTIVE,
- I40E_VF_STATE_IWARPENA,
+ I40E_VF_STATE_RDMAENA,
I40E_VF_STATE_DISABLED,
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
@@ -46,7 +46,7 @@ enum i40e_vf_states {
enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
I40E_VIRTCHNL_VF_CAP_L2,
- I40E_VIRTCHNL_VF_CAP_IWARP,
+ I40E_VIRTCHNL_VF_CAP_RDMA,
};
/* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI.
@@ -108,7 +108,7 @@ struct i40e_vf {
u16 num_cloud_filters;
/* RDMA Client */
- struct virtchnl_iwarp_qvlist_info *qvlist_info;
+ struct virtchnl_rdma_qvlist_info *qvlist_info;
};
void i40e_free_vfs(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 2a9f1eeeb701..232bc61d9eee 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -30,6 +30,7 @@
#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
@@ -276,8 +277,8 @@ struct iavf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
- int num_iwarp_msix;
- int iwarp_base_vector;
+ int num_rdma_msix;
+ int rdma_base_vector;
u32 client_pending;
struct iavf_client_instance *cinst;
struct msix_entry *msix_entries;
@@ -384,7 +385,7 @@ struct iavf_adapter {
enum virtchnl_ops current_op;
#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_cap_flags & \
- VIRTCHNL_VF_OFFLOAD_IWARP : \
+ VIRTCHNL_VF_OFFLOAD_RDMA : \
0)
#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c
index 0c77e4171808..93c903c02c64 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.c
@@ -127,7 +127,7 @@ void iavf_notify_client_open(struct iavf_vsi *vsi)
}
/**
- * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * iavf_client_release_qvlist - send a message to the PF to release rdma qv map
* @ldev: pointer to L2 context.
*
* Return 0 on success or < 0 on error
@@ -141,12 +141,12 @@ static int iavf_client_release_qvlist(struct iavf_info *ldev)
return -EAGAIN;
err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
IAVF_SUCCESS, NULL, 0, NULL);
if (err)
dev_err(&adapter->pdev->dev,
- "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+ "Unable to send RDMA vector release message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
return err;
@@ -215,9 +215,9 @@ iavf_client_add_instance(struct iavf_adapter *adapter)
cinst->lan_info.params = params;
set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state);
- cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+ cinst->lan_info.msix_count = adapter->num_rdma_msix;
cinst->lan_info.msix_entries =
- &adapter->msix_entries[adapter->iwarp_base_vector];
+ &adapter->msix_entries[adapter->rdma_base_vector];
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
@@ -425,17 +425,17 @@ static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
if (adapter->aq_required)
return -EAGAIN;
- err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
+ err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA,
IAVF_SUCCESS, msg, len, NULL);
if (err)
- dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+ dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
return err;
}
/**
- * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @qvlist_info: queue and vector list
@@ -446,7 +446,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
struct iavf_client *client,
struct iavf_qvlist_info *qvlist_info)
{
- struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct virtchnl_rdma_qvlist_info *v_qvlist_info;
struct iavf_adapter *adapter = ldev->vf;
struct iavf_qv_info *qv_info;
enum iavf_status err;
@@ -463,23 +463,23 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
continue;
v_idx = qv_info->v_idx;
if ((v_idx >=
- (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
- (v_idx < adapter->iwarp_base_vector))
+ (adapter->rdma_base_vector + adapter->num_rdma_msix)) ||
+ (v_idx < adapter->rdma_base_vector))
return -EINVAL;
}
- v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
+ v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info;
msg_size = struct_size(v_qvlist_info, qv_info,
v_qvlist_info->num_vectors - 1);
- adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP);
err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS,
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS,
(u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
dev_err(&adapter->pdev->dev,
- "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+ "Unable to send RDMA vector config message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
goto out;
}
@@ -488,7 +488,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
for (i = 0; i < 5; i++) {
msleep(100);
if (!(adapter->client_pending &
- BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) {
err = 0;
break;
}
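
iavf_client_setup_qvlist() above is a small request/ack handshake over
virtchnl: mark the opcode as outstanding in adapter->client_pending, send
the message, then poll until the completion path (see the
iavf_virtchnl_completion() hunk later in this diff) clears the bit. A
condensed restatement of that flow -- the function below is illustrative
and its error handling simplified:

	static int example_send_qv_map(struct iavf_adapter *adapter,
				       struct virtchnl_rdma_qvlist_info *info,
				       u16 msg_size)
	{
		int i, err;

		/* 1) mark the op as outstanding before sending */
		adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP);

		err = iavf_aq_send_msg_to_pf(&adapter->hw,
					     VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP,
					     IAVF_SUCCESS, (u8 *)info,
					     msg_size, NULL);
		if (err)
			return err;

		/* 2) the PF's reply clears the bit; poll up to ~500 ms,
		 * matching the loop in iavf_client_setup_qvlist() above
		 */
		for (i = 0; i < 5; i++) {
			msleep(100);
			if (!(adapter->client_pending &
			      BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP)))
				return 0;
		}
		return -EBUSY;
	}
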
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h
index 9a7cf39ea75a..c5d51d7dc7cc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.h
@@ -159,7 +159,7 @@ struct iavf_client {
#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
u8 type;
-#define IAVF_CLIENT_IWARP 0
+#define IAVF_CLIENT_RDMA 0
struct iavf_client_ops *ops; /* client ops provided by the client */
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 34e46a23894f..16c490965b61 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -223,8 +223,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
return "IAVF_ERR_ADMIN_QUEUE_FULL";
case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
- case IAVF_ERR_BAD_IWARP_CQE:
- return "IAVF_ERR_BAD_IWARP_CQE";
+ case IAVF_ERR_BAD_RDMA_CQE:
+ return "IAVF_ERR_BAD_RDMA_CQE";
case IAVF_ERR_NVM_BLANK_MODE:
return "IAVF_ERR_NVM_BLANK_MODE";
case IAVF_ERR_NOT_IMPLEMENTED:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 4b09785d2147..3273aeb8fa67 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -105,7 +105,7 @@ int iavf_status_to_errno(enum iavf_status status)
case IAVF_ERR_SRQ_ENABLED:
case IAVF_ERR_ADMIN_QUEUE_ERROR:
case IAVF_ERR_ADMIN_QUEUE_FULL:
- case IAVF_ERR_BAD_IWARP_CQE:
+ case IAVF_ERR_BAD_RDMA_CQE:
case IAVF_ERR_NVM_BLANK_MODE:
case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
case IAVF_ERR_DIAG_TEST_FAILED:
@@ -4868,8 +4868,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
@@ -4957,7 +4955,6 @@ err_ioremap:
err_alloc_wq:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
@@ -5175,8 +5172,6 @@ static void iavf_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 2ea5c7c339bc..0e493ee9e9d1 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -64,7 +64,7 @@ enum iavf_status {
IAVF_ERR_BUF_TOO_SHORT = -55,
IAVF_ERR_ADMIN_QUEUE_FULL = -56,
IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
- IAVF_ERR_BAD_IWARP_CQE = -58,
+ IAVF_ERR_BAD_RDMA_CQE = -58,
IAVF_ERR_NVM_BLANK_MODE = -59,
IAVF_ERR_NOT_IMPLEMENTED = -60,
IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 365ca0c710c4..6d23338604bb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2298,7 +2298,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* Gobble zero-length replies from the PF. They indicate that
* a previous message was received OK, and the client doesn't
* care about that.
@@ -2307,9 +2307,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_notify_client_message(&adapter->vsi, msg, msglen);
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
adapter->client_pending &=
- ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+ ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP));
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9183d480b70b..f269952d207d 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -28,6 +28,7 @@ ice-y := ice_main.o \
ice_flow.o \
ice_idc.o \
ice_devlink.o \
+ ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o \
@@ -42,8 +43,8 @@ ice-$(CONFIG_PCI_IOV) += \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
-ice-$(CONFIG_TTY) += ice_gnss.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
+ice-$(CONFIG_ICE_GNSS) += ice_gnss.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 713069f809ec..b0e29e342401 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -39,7 +39,9 @@
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
+#include <linux/gnss.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
@@ -121,6 +123,8 @@
#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
+#define ICE_MAX_TSO_SIZE 131072
+
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
ICE_AQ_VSI_UP_TABLE_UP##i##_M)
@@ -352,7 +356,6 @@ struct ice_vsi {
struct ice_vf *vf; /* VF associated with this VSI */
- u16 ethtype; /* Ethernet protocol for pause frame */
u16 num_gfltr;
u16 num_bfltr;
@@ -565,9 +568,8 @@ struct ice_pf {
struct mutex adev_mutex; /* lock to protect aux device access */
u32 msg_enable;
struct ice_ptp ptp;
- struct tty_driver *ice_gnss_tty_driver;
- struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES];
- struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES];
+ struct gnss_serial *gnss_serial;
+ struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
@@ -889,7 +891,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
-int ice_vsi_cfg(struct ice_vsi *vsi);
+int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
@@ -907,6 +909,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
+void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
@@ -931,6 +934,8 @@ int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+int ice_load(struct ice_pf *pf);
+void ice_unload(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 958c1e435232..838d9b274d68 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1659,14 +1659,24 @@ struct ice_aqc_lldp_get_mib {
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* DCBX mode */
+#define ICE_AQ_LLDP_DCBX_M GENMASK(7, 6)
+#define ICE_AQ_LLDP_DCBX_NA 0
+#define ICE_AQ_LLDP_DCBX_CEE 1
+#define ICE_AQ_LLDP_DCBX_IEEE 2
+
+ u8 state;
+#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M BIT(0)
+#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0
+#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1
+
/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
* and in the LLDP MIB Change Event (0x0A01). They are valid for the
* Get LLDP MIB (0x0A00) response only.
*/
- u8 reserved1;
__le16 local_len;
__le16 remote_len;
- u8 reserved2[2];
+ u8 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
@@ -1677,6 +1687,9 @@ struct ice_aqc_lldp_set_mib_change {
u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+#define ICE_AQ_LLDP_MIB_PENDING_M BIT(1)
+#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0
+#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1
u8 reserved[15];
};
@@ -2329,6 +2342,7 @@ enum ice_adminq_opc {
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
+ ice_aqc_opc_lldp_execute_pending_mib = 0x0A0B,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
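
The new LLDP MIB fields above are declared as masks (GENMASK()/BIT()) so
consumers can use the <linux/bitfield.h> accessors rather than open-coded
shifts; the ice_dcb.c hunks below do exactly that with FIELD_GET(). A
minimal decoding sketch (the helper itself is illustrative):

	static bool example_mib_is_pending_ieee(struct ice_aqc_lldp_get_mib *mib)
	{
		u8 mode = FIELD_GET(ICE_AQ_LLDP_DCBX_M, mib->type);
		u8 state = FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, mib->state);

		return state == ICE_AQ_LLDP_MIB_CHANGE_PENDING &&
		       mode == ICE_AQ_LLDP_DCBX_IEEE;
	}
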
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 554095b25f44..1911d644dfa8 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -355,9 +355,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
- else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
- return XDP_PACKET_HEADROOM;
-
return 0;
}
@@ -495,7 +492,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
- u16 num_bufs = ICE_DESC_UNUSED(ring);
+ u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
int err;
ring->rx_buf_len = ring->vsi->rx_buf_len;
@@ -503,8 +500,10 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
+ __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->vsi->rx_buf_len);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
@@ -524,9 +523,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
} else {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
+ __xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->vsi->rx_buf_len);
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
@@ -536,6 +537,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
}
}
+ xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+ ring->xdp.data = NULL;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
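
Two related changes above: the Rx queue is now registered with
__xdp_rxq_info_reg() so the buffer/fragment size is recorded once at
registration time, and each ring gets a persistent xdp_buff initialized
once with xdp_init_buff(). That moves the invariant parts (frame_sz, rxq
pointer) out of the per-packet path, which then only has to reset the data
pointers. A sketch of the per-packet side this enables -- the function and
parameter names are illustrative:

	static void example_rx_frame(struct ice_rx_ring *ring,
				     unsigned char *hard_start,
				     unsigned int headroom, unsigned int size)
	{
		/* frame_sz and the rxq were fixed by xdp_init_buff() at
		 * ring setup; only the data pointers change per frame.
		 */
		xdp_prepare_buff(&ring->xdp, hard_start, headroom, size, true);
	}
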
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 3e08847505ce..c2fda4fa4188 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -208,6 +208,31 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e823
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E823-L or E823-C based, false if not.
+ */
+bool ice_is_e823(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -1088,8 +1113,10 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*hw->port_info), GFP_KERNEL);
+ if (!hw->port_info)
+ hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*hw->port_info),
+ GFP_KERNEL);
if (!hw->port_info) {
status = -ENOMEM;
goto err_unroll_cqinit;
@@ -1217,11 +1244,6 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
- if (hw->port_info) {
- devm_kfree(ice_hw_to_dev(hw), hw->port_info);
- hw->port_info = NULL;
- }
-
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
ice_destroy_all_ctrlq(hw);
@@ -5504,6 +5526,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
}
/**
+ * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
+ * @hw: pointer to HW struct
+ */
+int ice_lldp_execute_pending_mib(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_fw_supports_report_dflt_cfg
* @hw: pointer to the hardware structure
*
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 4c6a0b5c9304..8ba5f935a092 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -122,7 +122,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
- enum ice_fc_mode fc);
+ enum ice_fc_mode req_mode);
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
@@ -199,6 +199,7 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e823(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -221,6 +222,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 6be02f9b0b8c..c557dfc50aad 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -73,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
if (!ena_update)
cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+ else
+ cmd->command |= FIELD_PREP(ICE_AQ_LLDP_MIB_PENDING_M,
+ ICE_AQ_LLDP_MIB_PENDING_ENABLE);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -566,7 +569,7 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
* @tlv: Organization specific TLV
* @dcbcfg: Local store to update ETS REC data
*
- * Currently only IEEE 802.1Qaz TLV is supported, all others
+ * Currently IEEE 802.1Qaz and CEE DCBX TLV are supported, others
* will be returned
*/
static void
@@ -585,7 +588,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
ice_parse_cee_tlv(tlv, dcbcfg);
break;
default:
- break;
+ break; /* Other OUIs not supported */
}
}
@@ -964,6 +967,42 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
}
/**
+ * ice_get_dcb_cfg_from_mib_change
+ * @pi: port information structure
+ * @event: pointer to the admin queue receive event
+ *
+ * Set DCB configuration from received MIB Change event
+ */
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ struct ice_aqc_lldp_get_mib *mib;
+ u8 change_type, dcbx_mode;
+
+ mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+
+ change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
+ if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
+ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
+
+ dcbx_mode = FIELD_GET(ICE_AQ_LLDP_DCBX_M, mib->type);
+
+ switch (dcbx_mode) {
+ case ICE_AQ_LLDP_DCBX_IEEE:
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg);
+ break;
+
+ case ICE_AQ_LLDP_DCBX_CEE:
+ pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
+ ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *)
+ event->msg_buf, pi);
+ break;
+ }
+}
+
+/**
* ice_init_dcb
* @hw: pointer to the HW struct
* @enable_mib_change: enable MIB change event
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index 6abf28a14291..be34650a77d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -144,6 +144,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
int ice_get_dcb_cfg(struct ice_port_info *pi);
int ice_set_dcb_cfg(struct ice_port_info *pi);
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event);
int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
int
ice_query_port_ets(struct ice_port_info *pi,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 0a55c552189a..c6d4926f0fcf 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -862,7 +862,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
if (err)
goto dcb_init_err;
- return err;
+ return 0;
dcb_init_err:
dev_err(dev, "DCB init failed\n");
@@ -947,6 +947,16 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
/**
+ * ice_dcb_is_mib_change_pending - Check if MIB change is pending
+ * @state: MIB change state
+ */
+static bool ice_dcb_is_mib_change_pending(u8 state)
+{
+ return ICE_AQ_LLDP_MIB_CHANGE_PENDING ==
+ FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state);
+}
+
+/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
@@ -959,6 +969,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aqc_lldp_get_mib *mib;
struct ice_dcbx_cfg tmp_dcbx_cfg;
+ bool pending_handled = true;
bool need_reconfig = false;
struct ice_port_info *pi;
u8 mib_type;
@@ -975,41 +986,58 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pi = pf->hw.port_info;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+
/* Ignore if event is not for Nearest Bridge */
- mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M);
+ mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
+ /* A pending change event contains accurate config information, and
+	 * the FW setting has not been updated yet, so detect if change is
+ * pending to determine where to pull config information from
+ * (FW vs event)
+ */
+ if (ice_dcb_is_mib_change_pending(mib->state))
+ pending_handled = false;
+
/* Check MIB Type and return if event for Remote MIB update */
- mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
- ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
- ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
- &pi->qos_cfg.remote_dcbx_cfg);
- if (ret) {
- dev_err(dev, "Failed to get remote DCB config\n");
- return;
+ if (!pending_handled) {
+ ice_get_dcb_cfg_from_mib_change(pi, event);
+ } else {
+ ret =
+ ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
+ &pi->qos_cfg.remote_dcbx_cfg);
+ if (ret)
+ dev_dbg(dev, "Failed to get remote DCB config\n");
}
+ return;
}
+ /* At this point, a DCB change has definitely happened */
mutex_lock(&pf->tc_mutex);
/* store the old configuration */
- tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
/* Reset the old DCBX configuration data */
memset(&pi->qos_cfg.local_dcbx_cfg, 0,
sizeof(pi->qos_cfg.local_dcbx_cfg));
/* Get updated DCBX data from firmware */
- ret = ice_get_dcb_cfg(pf->hw.port_info);
- if (ret) {
- dev_err(dev, "Failed to get DCB config\n");
- goto out;
+ if (!pending_handled) {
+ ice_get_dcb_cfg_from_mib_change(pi, event);
+ } else {
+ ret = ice_get_dcb_cfg(pi);
+ if (ret) {
+ dev_err(dev, "Failed to get DCB config\n");
+ goto out;
+ }
}
/* No change detected in DCBX configs */
@@ -1036,11 +1064,17 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
+ /* Send Execute Pending MIB Change event if it is a Pending event */
+ if (!pending_handled) {
+ ice_lldp_execute_pending_mib(&pf->hw);
+ pending_handled = true;
+ }
+
rtnl_lock();
/* disable VSIs affected by DCB changes */
ice_dcb_ena_dis_vsi(pf, false, true);
- ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
goto unlock_rtnl;
@@ -1055,4 +1089,8 @@ unlock_rtnl:
rtnl_unlock();
out:
mutex_unlock(&pf->tc_mutex);
+
+ /* Send Execute Pending MIB Change event if it is a Pending event */
+ if (!pending_handled)
+ ice_lldp_execute_pending_mib(&pf->hw);
}
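+
+/* Condensed view of the pending-MIB handling added above (a sketch only;
+ * locking, the remote-MIB path and VSI reconfiguration are elided):
+ *
+ *    bool pending = ice_dcb_is_mib_change_pending(mib->state);
+ *
+ *    if (pending)
+ *        ice_get_dcb_cfg_from_mib_change(pi, event); // cfg rides in event
+ *    else
+ *        ice_get_dcb_cfg(pi);                        // cfg already in FW
+ *    ...
+ *    if (pending)
+ *        ice_lldp_execute_pending_mib(&pf->hw);      // tell FW to apply it
+ */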
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
new file mode 100644
index 000000000000..d71ed210f9c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -0,0 +1,1897 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice.h"
+#include "ice_ddp.h"
+
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost TCAM entries. The metadata label names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+#define ICE_TNL_PRE "TNL_"
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+ u32 seg_count;
+ u32 i;
+
+ if (len < struct_size(pkg, seg_offset, 1))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* pkg must have at least one segment */
+ seg_count = le32_to_cpu(pkg->seg_count);
+ if (seg_count < 1)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* make sure segment array fits in package length */
+ if (len < struct_size(pkg, seg_offset, seg_count))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* all segments must fit within length */
+ for (i = 0; i < seg_count; i++) {
+ u32 off = le32_to_cpu(pkg->seg_offset[i]);
+ struct ice_generic_seg_hdr *seg;
+
+ /* segment header must fit */
+ if (len < off + sizeof(*seg))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+ /* segment body must fit */
+ if (len < off + le32_to_cpu(seg->seg_size))
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
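+
+/* The length checks above rely on struct_size() from <linux/overflow.h>,
+ * which sizes a struct with a trailing flexible array while guarding the
+ * multiplication against overflow; a sketch:
+ *
+ *    // == sizeof(*pkg) + 3 * sizeof(pkg->seg_offset[0]),
+ *    // or SIZE_MAX if the math would overflow
+ *    size_t need = struct_size(pkg, seg_offset, 3);
+ */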
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on
+ * whether the segment was allocated or only the passed-in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+ if (hw->pkg_copy) {
+ devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
+ hw->pkg_copy = NULL;
+ hw->pkg_size = 0;
+ }
+ hw->seg = NULL;
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
+ else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
+
+ return ICE_DDP_PKG_SUCCESS;
+}
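+
+/* With the supported version pinned at ICE_PKG_SUPP_VER_MAJ.MNR == 1.3 (see
+ * ice_ddp.h below), and update/draft ignored, the check resolves as:
+ *
+ *    pkg 2.0 -> ICE_DDP_PKG_FILE_VERSION_TOO_HIGH
+ *    pkg 1.4 -> ICE_DDP_PKG_FILE_VERSION_TOO_HIGH
+ *    pkg 1.3 -> ICE_DDP_PKG_SUCCESS
+ *    pkg 1.2 -> ICE_DDP_PKG_FILE_VERSION_TOO_LOW
+ */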
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+ struct ice_buf_hdr *hdr;
+ u16 section_count;
+ u16 data_end;
+
+ hdr = (struct ice_buf_hdr *)buf->buf;
+ /* verify data */
+ section_count = le16_to_cpu(hdr->section_count);
+ if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+ return NULL;
+
+ data_end = le16_to_cpu(hdr->data_end);
+ if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ return hdr;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+ struct ice_nvm_table *nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count));
+
+ return (__force struct ice_buf_table *)(nvms->vers +
+ le32_to_cpu(nvms->table_count));
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state)
+{
+ if (ice_seg) {
+ state->buf_table = ice_find_buf_table(ice_seg);
+ if (!state->buf_table)
+ return NULL;
+
+ state->buf_idx = 0;
+ return ice_pkg_val_buf(state->buf_table->buf_array);
+ }
+
+ if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
+ return ice_pkg_val_buf(state->buf_table->buf_array +
+ state->buf_idx);
+ else
+ return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+static bool ice_pkg_advance_sect(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state)
+{
+ if (!ice_seg && !state->buf)
+ return false;
+
+ if (!ice_seg && state->buf)
+ if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
+ return true;
+
+ state->buf = ice_pkg_enum_buf(ice_seg, state);
+ if (!state->buf)
+ return false;
+
+ /* start of new buffer, reset section index */
+ state->sect_idx = 0;
+ return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type)
+{
+ u16 offset, size;
+
+ if (ice_seg)
+ state->type = sect_type;
+
+ if (!ice_pkg_advance_sect(ice_seg, state))
+ return NULL;
+
+ /* scan for next matching section */
+ while (state->buf->section_entry[state->sect_idx].type !=
+ cpu_to_le32(state->type))
+ if (!ice_pkg_advance_sect(NULL, state))
+ return NULL;
+
+ /* validate section */
+ offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+ if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+ return NULL;
+
+ size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+ return NULL;
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE)
+ return NULL;
+
+ state->sect_type =
+ le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
+
+ /* calc pointer to this section */
+ state->sect =
+ ((u8 *)state->buf) +
+ le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+
+ return state->sect;
+}
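+
+/* A minimal sketch of the two-phase calling convention (first call with a
+ * non-NULL segment, continuation calls with NULL; the type is latched on the
+ * first call, so 0 is acceptable afterwards):
+ *
+ *    struct ice_pkg_enum state = {};
+ *    void *sect;
+ *
+ *    sect = ice_pkg_enum_section(hw->seg, &state, ICE_SID_METADATA);
+ *    while (sect) {
+ *        // ... consume section ...
+ *        sect = ice_pkg_enum_section(NULL, &state, 0);
+ *    }
+ */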
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section
+ * handler function must return base offset + index to give the absolute
+ * offset for each entry. For example, if a section's header indicates a base
+ * offset of 10, and the index for the entry is 2, then the section handler
+ * function should set the offset to 10 + 2 = 12.
+ */
+static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state, u32 sect_type,
+ u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
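+
+/* The same two-phase pattern applies here, with the handler resolving entries
+ * inside each matching section; a sketch using ice_sw_fv_handler (defined
+ * just below):
+ *
+ *    struct ice_pkg_enum state = {};
+ *    struct ice_fv *fv;
+ *    u32 off;
+ *
+ *    fv = ice_pkg_enum_entry(hw->seg, &state, ICE_SID_FLD_VEC_SW, &off,
+ *                            ice_sw_fv_handler);
+ *    while (fv) {
+ *        // off is the absolute index into the field vector table
+ *        fv = ice_pkg_enum_entry(NULL, &state, 0, &off, ice_sw_fv_handler);
+ *    }
+ */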
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates offset field. "offset" is an index into the field vector table.
+ */
+static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section = section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= le16_to_cpu(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table need to add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = le16_to_cpu(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index for used profiles
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function will get the max profile index for used profiles
+ * and store the index number in struct ice_switch_info *switch_info
+ * in HW for later use.
+ */
+static int ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return -EINVAL;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* in a profile that is not used, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all the field vectors.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return 0;
+}
+
+/**
+ * ice_get_ddp_pkg_state - get DDP pkg state after download
+ * @hw: pointer to the HW struct
+ * @already_loaded: indicates if pkg was already loaded onto the device
+ */
+static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw,
+ bool already_loaded)
+{
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
+ if (already_loaded)
+ return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
+ else
+ return ICE_DDP_PKG_SUCCESS;
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
+ } else {
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX 0
+
+ /* setup Switch block input mask, which is 48-bits in two parts */
+ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+ wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
+ */
+static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section,
+ u32 index, u32 *offset)
+{
+ struct ice_marker_ptype_tcam_section *marker_ptype;
+
+ if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+ return NULL;
+
+ if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ marker_ptype = section;
+ if (index >= le16_to_cpu(marker_ptype->count))
+ return NULL;
+
+ return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true if entry needs to be enabled, or false if needs to be disabled
+ */
+static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
+{
+ if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+ hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+ hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+ hw->dvm_upd.count++;
+ }
+}
+
+/**
+ * ice_add_tunnel_hint
+ * @hw: pointer to the HW structure
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
+ */
+static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
+{
+ if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ u16 i;
+
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+ }
+}
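+
+/* Concretely: for the prefix "TNL_VXLAN_PF" (strlen == 12), the package label
+ * "TNL_VXLAN_PF3" puts '3' at label_name[12], exactly where the prefix's null
+ * terminator sits, so ('3' - '0') == 3 is compared against hw->pf_id.
+ */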
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *ice_label_enum_handler(u32 __always_unused sect_type,
+ void *section, u32 index, u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = section;
+ if (index >= le16_to_cpu(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type,
+ struct ice_pkg_enum *state, u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = le16_to_cpu(label->value);
+ return label->name;
+}
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = section;
+ if (index >= le16_to_cpu(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_seg structure.
+ */
+static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return -EINVAL;
+
+ do {
+ tcam = ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && le16_to_cpu(tcam->addr) == addr) {
+ *entry = tcam;
+ return 0;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return -EIO;
+}
+
+/**
+ * ice_is_init_pkg_successful - check if DDP init was successful
+ * @state: state of the DDP pkg after download
+ */
+bool ice_is_init_pkg_successful(enum ice_ddp_state state)
+{
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end =
+ cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry));
+ return bld;
+}
+
+static bool ice_is_gtp_u_profile(u16 prof_idx)
+{
+ return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
+ prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
+ prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
+}
+
+static bool ice_is_gtp_c_profile(u16 prof_idx)
+{
+ switch (prof_idx) {
+ case ICE_PROFID_IPV4_GTPC_TEID:
+ case ICE_PROFID_IPV4_GTPC_NO_TEID:
+ case ICE_PROFID_IPV6_GTPC_TEID:
+ case ICE_PROFID_IPV6_GTPC_NO_TEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ * @prof_idx: profile index to check
+ */
+static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
+ struct ice_fv *fv, u32 prof_idx)
+{
+ u16 i;
+
+ if (ice_is_gtp_c_profile(prof_idx))
+ return ICE_PROF_TUN_GTPC;
+
+ if (ice_is_gtp_u_profile(prof_idx))
+ return ICE_PROF_TUN_GTPU;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+ }
+
+ return ICE_PROF_NON_TUN;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ unsigned long *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ if (req_profs == ICE_PROF_ALL) {
+ bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
+ return;
+ }
+
+ memset(&state, 0, sizeof(state));
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv, offset);
+
+ if (req_profs & prof_type)
+ set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @lkups: list of protocol types
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and offset and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!lkups->n_val_words || !hw->seg)
+ return -EINVAL;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!test_bit((u16)offset, bm))
+ continue;
+
+ for (i = 0; i < lkups->n_val_words; i++) {
+ int j;
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id ==
+ lkups->fv_words[i].prot_id &&
+ fv->ew[j].off == lkups->fv_words[i].off)
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == lkups->n_val_words) {
+ fvl = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*fvl), GFP_KERNEL);
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ list_add(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (list_empty(fv_list)) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Required profiles not found in currently loaded DDP package");
+ return -EIO;
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+ list_del(&fvl->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvl);
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ bitmap_zero(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices, these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ set_bit(i, hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ devm_kfree(ice_hw_to_dev(hw), bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as all calls are made before
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will result in
+ * some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return -EINVAL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = le16_to_cpu(buf->section_count);
+ if (section_count > 0)
+ return -EIO;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return -EIO;
+ bld->reserved_section_table_entries += count;
+
+ data_end = le16_to_cpu(buf->data_end) +
+ flex_array_size(buf, section_entry, count);
+ buf->data_end = cpu_to_le16(data_end);
+
+ return 0;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = le16_to_cpu(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = le16_to_cpu(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
+ buf->section_entry[sect_count].size = cpu_to_le16(size);
+ buf->section_entry[sect_count].type = cpu_to_le32(type);
+
+ data_end += size;
+ buf->data_end = cpu_to_le16(data_end);
+
+ buf->section_count = cpu_to_le16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw,
+ u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return le16_to_cpu(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (!bld)
+ return NULL;
+
+ return &bld->buf;
+}
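+
+/* Taken together, the build helpers above are typically driven in this order
+ * (a sketch; the section type and size are placeholders, error handling is
+ * abbreviated, and ice_update_pkg() is defined later in this file):
+ *
+ *    struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
+ *    void *sect;
+ *
+ *    if (!bld)
+ *        return -ENOMEM;
+ *    if (ice_pkg_buf_reserve_section(bld, 1))
+ *        goto out;
+ *    sect = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, size);
+ *    if (!sect)
+ *        goto out;
+ *    // ... fill sect with Little Endian content ...
+ *    if (ice_pkg_buf_get_active_sections(bld))
+ *        ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ * out:
+ *    ice_pkg_buf_free(hw, bld);
+ */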
+
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_ENOSEC:
+ case ICE_AQ_RC_EBADSIG:
+ return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
+ case ICE_AQ_RC_ESVN:
+ return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
+ case ICE_AQ_RC_EBADMAN:
+ case ICE_AQ_RC_EBADBUF:
+ return ICE_DDP_PKG_LOAD_ERROR;
+ default:
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * 0 - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * -EALREADY - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static int ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access)
+{
+ int status;
+
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+ if (!status)
+ mutex_lock(&ice_global_cfg_lock_sw);
+ else if (status == -EALREADY)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Global config lock: No work to do\n");
+
+ return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+static void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+ mutex_unlock(&ice_global_cfg_lock_sw);
+ ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware. Metadata buffers are skipped, and the first metadata buffer
+ * found indicates that the rest of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
+ struct ice_buf *bufs, u32 count)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_buf_hdr *bh;
+ enum ice_aq_err err;
+ u32 offset, info, i;
+ int status;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
+ }
+
+ for (i = 0; i < count; i++) {
+ bool last = ((i + 1) == count);
+
+ if (!last) {
+ /* check next buffer for metadata flag */
+ bh = (struct ice_buf_hdr *)(bufs + i + 1);
+
+ /* A set metadata flag in the next buffer will signal
+ * that the current buffer will be the last buffer
+ * downloaded
+ */
+ if (le16_to_cpu(bh->section_count))
+ if (le32_to_cpu(bh->section_entry[0].type) &
+ ICE_METADATA_BUF)
+ last = true;
+ }
+
+ bh = (struct ice_buf_hdr *)(bufs + i);
+
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
+
+ /* Save AQ status from download package */
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Pkg download failed: err %d off %d inf %d\n",
+ status, offset, info);
+ err = hw->adminq.sq_last_status;
+ state = ice_map_aq_err_to_ddp_state(err);
+ break;
+ }
+
+ if (last)
+ break;
+ }
+
+ if (!status) {
+ status = ice_set_vlan_mode(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Failed to set VLAN mode: err %d\n", status);
+ }
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
+ struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+ enum ice_ddp_state state;
+
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ le32_to_cpu(ice_seg->hdr.seg_type),
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return state;
+}
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == -EIO) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == -EIO) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_update_pkg_no_lock
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ */
+int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status = 0;
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+ u32 offset, info;
+
+ status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_ICE)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
+ struct ice_generic_seg_hdr *seg;
+
+ seg = (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
+
+ if (le32_to_cpu(seg->seg_type) == seg_type)
+ return seg;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the HW structure.
+ */
+static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ if (!pkg_hdr)
+ return ICE_DDP_PKG_ERR;
+
+ seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ if (seg_hdr) {
+ struct ice_meta_sect *meta;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ /* Get package information from the Metadata Section */
+ meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
+ ICE_SID_METADATA);
+ if (!meta) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice metadata section in package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ hw->pkg_ver = meta->ver;
+ memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta->ver.major, meta->ver.minor, meta->ver.update,
+ meta->ver.draft, meta->name);
+
+ hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
+ memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id));
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft, seg_hdr->seg_id);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice segment in driver package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ u16 size;
+ u32 i;
+
+ size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
+ pkg_info = kzalloc(size, GFP_KERNEL);
+ if (!pkg_info)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
+ goto init_pkg_free_alloc;
+ }
+
+ for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT 4
+ char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+ u8 place = 0;
+
+ if (pkg_info->pkg_info[i].is_active) {
+ flags[place++] = 'A';
+ hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ le32_to_cpu(pkg_info->pkg_info[i].track_id);
+ memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name,
+ sizeof(pkg_info->pkg_info[i].name));
+ hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
+ }
+ if (pkg_info->pkg_info[i].is_active_at_boot)
+ flags[place++] = 'B';
+ if (pkg_info->pkg_info[i].is_modified)
+ flags[place++] = 'M';
+ if (pkg_info->pkg_info[i].is_in_nvm)
+ flags[place++] = 'N';
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i,
+ pkg_info->pkg_info[i].ver.major,
+ pkg_info->pkg_info[i].ver.minor,
+ pkg_info->pkg_info[i].ver.update,
+ pkg_info->pkg_info[i].ver.draft,
+ pkg_info->pkg_info[i].name, flags);
+ }
+
+init_pkg_free_alloc:
+ kfree(pkg_info);
+
+ return state;
+}
+
+/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with driver and NVM
+ */
+static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
+ struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_ddp_state state;
+ u16 size;
+ u32 i;
+
+ /* Check package version compatibility */
+ state = ice_chk_pkg_version(&hw->pkg_ver);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return state;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
+ pkg = kzalloc(size, GFP_KERNEL);
+ if (!pkg)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
+ state = ICE_DDP_PKG_LOAD_ERROR;
+ goto fw_ddp_compat_free_alloc;
+ }
+
+ for (i = 0; i < le32_to_cpu(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ state = ICE_DDP_PKG_FW_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT,
+ "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ kfree(pkg);
+ return state;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ memset(&hw->tnl, 0, sizeof(hw->tnl));
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name) {
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
+
+ /* check for a dvm mode entry */
+ else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+ ice_add_dvm_hint(hw, val, true);
+
+ /* check for a svm mode entry */
+ else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+ ice_add_dvm_hint(hw, val, false);
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry) {
+ hw->tnl.tbl[i].valid = true;
+ if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
+ hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
+ }
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+ for (i = 0; i < hw->dvm_upd.count; i++)
+ ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+ &hw->dvm_upd.tbl[i].boost_entry);
+}
+
+/**
+ * ice_fill_hw_ptype - fill the enabled PTYPE bit information
+ * @hw: pointer to the HW structure
+ */
+static void ice_fill_hw_ptype(struct ice_hw *hw)
+{
+ struct ice_marker_ptype_tcam_entry *tcam;
+ struct ice_seg *seg = hw->seg;
+ struct ice_pkg_enum state;
+
+ bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
+ if (!seg)
+ return;
+
+ memset(&state, 0, sizeof(state));
+
+ do {
+ tcam = ice_pkg_enum_entry(seg, &state,
+ ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ ice_marker_ptype_tcam_handler);
+ if (tcam &&
+ le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+ le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+ set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
+
+ seg = NULL;
+ } while (tcam);
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ bool already_loaded = false;
+ enum ice_ddp_state state;
+ struct ice_pkg_hdr *pkg;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ state);
+ return state;
+ }
+
+ /* initialize package info */
+ state = ice_init_pkg_info(hw, pkg);
+ if (state)
+ return state;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ state = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (state)
+ return state;
+
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
+ state = ice_download_pkg(hw, seg);
+ if (state == ICE_DDP_PKG_ALREADY_LOADED) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "package previously loaded - no work.\n");
+ already_loaded = true;
+ }
+
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+ state = ice_get_pkg_info(hw);
+ if (!state)
+ state = ice_get_ddp_pkg_state(hw, already_loaded);
+ }
+
+ if (ice_is_init_pkg_successful(state)) {
+ hw->seg = seg;
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
+ */
+ ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
+ ice_fill_hw_ptype(hw);
+ ice_get_prof_index_max(hw);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state);
+ }
+
+ return state;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the supplied package buffer is constant, or if
+ * the memory may disappear shortly after this function is called, such as
+ * when the buffer has been read from a file.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * The caller may immediately destroy the original package buffer, as the new
+ * copy is managed by this function and related routines.
+ */
+enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
+ u32 len)
+{
+ enum ice_ddp_state state;
+ u8 *buf_copy;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+
+ state = ice_init_pkg(hw, buf_copy, len);
+ if (!ice_is_init_pkg_successful(state)) {
+ /* Free the copy, since we failed to initialize the package */
+ devm_kfree(ice_hw_to_dev(hw), buf_copy);
+ } else {
+ /* Track the copied pkg so we can free it later */
+ hw->pkg_copy = buf_copy;
+ hw->pkg_size = len;
+ }
+
+ return state;
+}
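+
+/* How a PF driver reaches this entry point, sketched against the stock
+ * firmware loader (simplified; the ice probe path layers fallback package
+ * selection on top of this, and the file name shown is the default search
+ * path rather than a requirement):
+ *
+ *    const struct firmware *fw;
+ *    enum ice_ddp_state state;
+ *
+ *    if (request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev))
+ *        return;
+ *
+ *    // fw->data is const and released below, so the copying variant
+ *    // is required here
+ *    state = ice_copy_and_init_pkg(&pf->hw, fw->data, fw->size);
+ *    if (!ice_is_init_pkg_successful(state))
+ *        dev_err(dev, "DDP package load failed: %d\n", state);
+ *
+ *    release_firmware(fw);
+ */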
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
new file mode 100644
index 000000000000..37eadb3d27a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022, Intel Corporation. */
+
+#ifndef _ICE_DDP_H_
+#define _ICE_DDP_H_
+
+#include "ice_type.h"
+
+/* Minimum package version supported by the driver */
+#define ICE_PKG_SUPP_VER_MAJ 1
+#define ICE_PKG_SUPP_VER_MNR 3
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ 1
+#define ICE_PKG_FMT_VER_MNR 0
+#define ICE_PKG_FMT_VER_UPD 0
+#define ICE_PKG_FMT_VER_DFT 0
+
+#define ICE_PKG_CNT 4
+
+#define ICE_FV_OFFSET_INVAL 0x1FF
+
+/* Extraction Sequence (Field Vector) Table */
+struct ice_fv_word {
+ u8 prot_id;
+ u16 off; /* Offset within the protocol header */
+ u8 resvrd;
+} __packed;
+
+#define ICE_MAX_NUM_PROFILES 256
+
+#define ICE_MAX_FV_WORDS 48
+struct ice_fv {
+ struct ice_fv_word ew[ICE_MAX_FV_WORDS];
+};
+
+enum ice_ddp_state {
+ /* Indicates that this call to ice_init_pkg
+ * successfully loaded the requested DDP package
+ */
+ ICE_DDP_PKG_SUCCESS = 0,
+
+ /* Generic "already loaded" error; it is mapped later to one of the
+ * three more specific already-loaded states below
+ */
+ ICE_DDP_PKG_ALREADY_LOADED = -1,
+
+ /* Indicates that a DDP package of the same version has already been
+ * loaded onto the device by a previous call or by another PF
+ */
+ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
+
+ /* The device has a DDP package that is not supported by the driver */
+ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
+
+ /* The device has a compatible package
+ * (but different from the request) already loaded
+ */
+ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
+
+ /* The firmware loaded on the device is not compatible with
+ * the DDP package loaded
+ */
+ ICE_DDP_PKG_FW_MISMATCH = -5,
+
+ /* The DDP package file is invalid */
+ ICE_DDP_PKG_INVALID_FILE = -6,
+
+ /* The version of the DDP package provided is higher than
+ * the driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
+
+ /* The version of the DDP package provided is lower than the
+ * driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
+
+ /* The signature of the DDP package file provided is invalid */
+ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
+
+ /* The DDP package file security revision is too low and not
+ * supported by firmware
+ */
+ ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
+
+ /* An error occurred in firmware while loading the DDP package */
+ ICE_DDP_PKG_LOAD_ERROR = -11,
+
+ /* Other errors */
+ ICE_DDP_PKG_ERR = -12
+};
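
/* A loaded-and-usable package corresponds to ICE_DDP_PKG_SUCCESS or one of
 * the two benign already-loaded states (the generic ICE_DDP_PKG_ALREADY_LOADED
 * is remapped before callers act on it). A sketch of the success predicate,
 * mirroring how ice_is_init_pkg_successful() is consumed in this series; the
 * in-tree helper's body may differ:
 */
static inline bool ice_ddp_state_ok(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		return true;
	default:
		return false;
	}
}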
+
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+ struct ice_pkg_ver pkg_format_ver;
+ __le32 seg_count;
+ __le32 seg_offset[];
+};
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE 0x00000010
+ __le32 seg_type;
+ struct ice_pkg_ver seg_format_ver;
+ __le32 seg_size;
+ char seg_id[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+ struct {
+ __le16 device_id;
+ __le16 vendor_id;
+ } dev_vend_id;
+ __le32 id;
+};
+
+struct ice_device_id_entry {
+ union ice_device_id device;
+ union ice_device_id sub_device;
+};
+
+struct ice_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 device_table_count;
+ struct ice_device_id_entry device_table[];
+};
+
+struct ice_nvm_table {
+ __le32 table_count;
+ __le32 vers[];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE 4096
+ u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+ __le32 buf_count;
+ struct ice_buf buf_array[];
+};
+
+struct ice_run_time_cfg_seg {
+ struct ice_generic_seg_hdr hdr;
+ u8 rsvd[8];
+ struct ice_buf_table buf_table;
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+ struct ice_generic_seg_hdr hdr;
+ struct ice_pkg_ver pkg_ver;
+ __le32 rsvd;
+ char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF 12
+#define ICE_MAX_S_OFF 4095
+#define ICE_MIN_S_SZ 1
+#define ICE_MAX_S_SZ 4084
+
+/* section information */
+struct ice_section_entry {
+ __le32 type;
+ __le16 offset;
+ __le16 size;
+};
+
+#define ICE_MIN_S_COUNT 1
+#define ICE_MAX_S_COUNT 511
+#define ICE_MIN_S_DATA_END 12
+#define ICE_MAX_S_DATA_END 4096
+
+#define ICE_METADATA_BUF 0x80000000
+
+struct ice_buf_hdr {
+ __le16 section_count;
+ __le16 data_end;
+ struct ice_section_entry section_entry[];
+};
+
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \
+ ((ICE_PKG_BUF_SIZE - \
+ struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \
+ (ent_sz))
+
+/* ice package section IDs */
+#define ICE_SID_METADATA 1
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
+#define ICE_SID_XLT1_SW 12
+#define ICE_SID_XLT2_SW 13
+#define ICE_SID_PROFID_TCAM_SW 14
+#define ICE_SID_PROFID_REDIR_SW 15
+#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+
+struct ice_meta_sect {
+ struct ice_pkg_ver ver;
+#define ICE_META_SECT_NAME_SIZE 28
+ char name[ICE_META_SECT_NAME_SIZE];
+ __le32 track_id;
+};
+
+#define ICE_SID_CDID_REDIR_SW 18
+
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
+#define ICE_SID_XLT1_ACL 22
+#define ICE_SID_XLT2_ACL 23
+#define ICE_SID_PROFID_TCAM_ACL 24
+#define ICE_SID_PROFID_REDIR_ACL 25
+#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
+#define ICE_SID_XLT1_FD 32
+#define ICE_SID_XLT2_FD 33
+#define ICE_SID_PROFID_TCAM_FD 34
+#define ICE_SID_PROFID_REDIR_FD 35
+#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
+#define ICE_SID_XLT1_RSS 42
+#define ICE_SID_XLT2_RSS 43
+#define ICE_SID_PROFID_TCAM_RSS 44
+#define ICE_SID_PROFID_REDIR_RSS 45
+#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
+
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
+#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_METADATA_INIT 58
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
+
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
+#define ICE_SID_XLT1_PE 82
+#define ICE_SID_XLT2_PE 83
+#define ICE_SID_PROFID_TCAM_PE 84
+#define ICE_SID_PROFID_REDIR_PE 85
+#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST 0x80000010
+#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST 0x80000038
+
+/* Label ICE runtime configuration section IDs */
+#define ICE_SID_TX_5_LAYER_TOPO 0x10
+
+enum ice_block {
+ ICE_BLK_SW = 0,
+ ICE_BLK_ACL,
+ ICE_BLK_FD,
+ ICE_BLK_RSS,
+ ICE_BLK_PE,
+ ICE_BLK_COUNT
+};
+
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
+/* package labels */
+struct ice_label {
+ __le16 value;
+#define ICE_PKG_LABEL_SIZE 64
+ char name[ICE_PKG_LABEL_SIZE];
+};
+
+struct ice_label_section {
+ __le16 count;
+ struct ice_label label[];
+};
+
+#define ICE_MAX_LABELS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \
+ label, 1) - \
+ sizeof(struct ice_label), \
+ sizeof(struct ice_label))
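
/* Worked example of the buffer-capacity arithmetic above, assuming the sizes
 * implied by these definitions (no implicit padding):
 *   struct ice_buf_hdr with one section entry = 4 + 8 = 12 bytes
 *   label section header: struct_size(sec, label, 1) - sizeof(label)
 *                       = (2 + 66) - 66 = 2 bytes
 *   ICE_MAX_LABELS_IN_BUF = (4096 - 12 - 2) / 66 = 61 labels per buffer
 */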
+
+struct ice_sw_fv_section {
+ __le16 count;
+ __le16 base_offset;
+ struct ice_fv fv[];
+};
+
+struct ice_sw_fv_list_entry {
+ struct list_head list_entry;
+ u32 profile_id;
+ struct ice_fv *fv_ptr;
+};
+
+/* The BOOST TCAM stores the match packet header in reverse byte order, so its
+ * fields appear reversed; as a consequence, packet fields that are normally
+ * big endian are little endian here.
+ */
+struct ice_boost_key_value {
+#define ICE_BOOST_REMAINING_HV_KEY 15
+ u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
+ __le16 hv_dst_port_key;
+ __le16 hv_src_port_key;
+ u8 tcam_search_key;
+} __packed;
+
+struct ice_boost_key {
+ struct ice_boost_key_value key;
+ struct ice_boost_key_value key2;
+};
+
+/* package Boost TCAM entry */
+struct ice_boost_tcam_entry {
+ __le16 addr;
+ __le16 reserved;
+ /* break up the 40 bytes of key into different fields */
+ struct ice_boost_key key;
+ u8 boost_hit_index_group;
+ /* The following contains bitfields which are not on byte boundaries.
+ * These fields are currently unused by driver software.
+ */
+#define ICE_BOOST_BIT_FIELDS 43
+ u8 bit_fields[ICE_BOOST_BIT_FIELDS];
+};
+
+struct ice_boost_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_boost_tcam_entry tcam[];
+};
+
+#define ICE_MAX_BST_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \
+ tcam, 1) - \
+ sizeof(struct ice_boost_tcam_entry), \
+ sizeof(struct ice_boost_tcam_entry))
+
+/* package Marker Ptype TCAM entry */
+struct ice_marker_ptype_tcam_entry {
+#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
+ __le16 addr;
+ __le16 ptype;
+ u8 keys[20];
+};
+
+struct ice_marker_ptype_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_marker_ptype_tcam_entry tcam[];
+};
+
+#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF( \
+ struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \
+ 1) - \
+ sizeof(struct ice_marker_ptype_tcam_entry), \
+ sizeof(struct ice_marker_ptype_tcam_entry))
+
+struct ice_xlt1_section {
+ __le16 count;
+ __le16 offset;
+ u8 value[];
+};
+
+struct ice_xlt2_section {
+ __le16 count;
+ __le16 offset;
+ __le16 value[];
+};
+
+struct ice_prof_redir_section {
+ __le16 count;
+ __le16 offset;
+ u8 redir_value[];
+};
+
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
+struct ice_pkg_enum {
+ struct ice_buf_table *buf_table;
+ u32 buf_idx;
+
+ u32 type;
+ struct ice_buf_hdr *buf;
+ u32 sect_idx;
+ void *sect;
+ u32 sect_type;
+
+ u32 entry_idx;
+ void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
+
+int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd);
+int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd);
+
+void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
+
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
+
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
+
+struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr);
+
+int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+
+int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type);
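
/* The enumeration helpers share one calling convention: pass the segment on
 * the first call and NULL on continuation calls; a NULL return ends the walk.
 * A minimal sketch iterating boost TCAM sections, assuming a valid ice_seg:
 */
static inline void ice_walk_boost_sections(struct ice_seg *ice_seg)
{
	struct ice_boost_tcam_section *sect;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));
	sect = ice_pkg_enum_section(ice_seg, &state,
				    ICE_SID_RXPARSER_BOOST_TCAM);
	while (sect) {
		/* le16_to_cpu(sect->count) entries are valid in sect->tcam[] */
		sect = ice_pkg_enum_section(NULL, &state, 0);
	}
}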
+
+struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf);
+
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 0fae0186bd85..05f216af8c81 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -371,10 +371,7 @@ out_free_ctx:
/**
* ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
- * @devlink: pointer to the devlink instance to reload
- * @netns_change: if true, the network namespace is changing
- * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
- * @limit: limits on what reload should do, such as not resetting
+ * @pf: pointer to the pf instance
* @extack: netlink extended ACK structure
*
* Allow user to activate new Embedded Management Processor firmware by
@@ -387,12 +384,9 @@ out_free_ctx:
* any source.
*/
static int
-ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
- enum devlink_reload_action action,
- enum devlink_reload_limit limit,
+ice_devlink_reload_empr_start(struct ice_pf *pf,
struct netlink_ext_ack *extack)
{
- struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
u8 pending;
@@ -431,11 +425,51 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
}
/**
+ * ice_devlink_reload_down - prepare for reload
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Go to legacy mode before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ if (ice_is_adq_active(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Turn off ADQ before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ if (ice_has_vfs(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Remove all VFs before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ ice_unload(pf);
+ return 0;
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ return ice_devlink_reload_empr_start(pf, extack);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* ice_devlink_reload_empr_finish - Wait for EMP reset to finish
- * @devlink: pointer to the devlink instance reloading
- * @action: the action requested
- * @limit: limits imposed by userspace, such as not resetting
- * @actions_performed: on return, indicate what actions actually performed
+ * @pf: pointer to the pf instance
* @extack: netlink extended ACK structure
*
* Wait for driver to finish rebuilding after EMP reset is completed. This
@@ -443,17 +477,11 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
* for the driver's rebuild to complete.
*/
static int
-ice_devlink_reload_empr_finish(struct devlink *devlink,
- enum devlink_reload_action action,
- enum devlink_reload_limit limit,
- u32 *actions_performed,
+ice_devlink_reload_empr_finish(struct ice_pf *pf,
struct netlink_ext_ack *extack)
{
- struct ice_pf *pf = devlink_priv(devlink);
int err;
- *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
-
err = ice_wait_for_reset(pf, 60 * HZ);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
@@ -1192,12 +1220,43 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+/**
+ * ice_devlink_reload_up - do reload up after reinit
+ * @devlink: pointer to the devlink instance reloading
+ * @action: the action requested
+ * @limit: limits imposed by userspace, such as not resetting
+ * @actions_performed: on return, indicate what actions actually performed
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ return ice_load(pf);
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+ return ice_devlink_reload_empr_finish(pf, extack);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
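+
+/* With both actions registered in the ops below, the devlink core pairs
+ * these callbacks: .reload_down runs first, then .reload_up. For reference,
+ * the matching iproute2 invocations look roughly like this (device name
+ * illustrative):
+ *
+ *   # devlink dev reload pci/0000:18:00.0 action driver_reinit
+ *   # devlink dev reload pci/0000:18:00.0 action fw_activate
+ */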
+
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
- .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
- /* The ice driver currently does not support driver reinit */
- .reload_down = ice_devlink_reload_empr_start,
- .reload_up = ice_devlink_reload_empr_finish,
+ .reload_down = ice_devlink_reload_down,
+ .reload_up = ice_devlink_reload_up,
.port_split = ice_devlink_port_split,
.port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
@@ -1376,7 +1435,6 @@ void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
}
@@ -1411,25 +1469,9 @@ ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- union devlink_param_value value;
- int err;
- err = devlink_params_register(devlink, ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
- if (err)
- return err;
-
- value.vbool = false;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
- value);
-
- value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- value);
-
- return 0;
+ return devlink_params_register(devlink, ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
}
void ice_devlink_unregister_params(struct ice_pf *pf)
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index f9f15acae90a..f6dd3f8fd936 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -71,17 +71,17 @@ void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
if (!ice_is_switchdev_running(vf->pf))
return;
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
- vf->hw_lan_addr.addr);
+ vf->hw_lan_addr);
if (err) {
dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d, error %d\n",
- vf->hw_lan_addr.addr, vf->vf_id, err);
+ vf->hw_lan_addr, vf->vf_id, err);
return;
}
vf->num_mac++;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
}
}
@@ -237,7 +237,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
netif_napi_del(&vf->repr->q_vector->napi);
@@ -265,14 +265,14 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
GFP_KERNEL);
if (!vf->repr->dst) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
goto err;
}
if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
@@ -281,7 +281,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(vsi)) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
@@ -338,7 +338,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
vsi->vf->vf_id);
}
@@ -425,7 +425,13 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_SWITCHDEV_CTRL;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
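
/* Other ice_vsi_setup() callers in this series are converted the same way:
 * populate a stack ice_vsi_cfg_params rather than growing the argument list.
 * A hypothetical VF VSI caller might look like the sketch below; the .vf
 * field is an assumption based on the conversion pattern, not shown in this
 * hunk.
 *
 *	struct ice_vsi_cfg_params params = {};
 *
 *	params.type = ICE_VSI_VF;
 *	params.pi = pi;
 *	params.vf = vf;
 *	params.flags = ICE_VSI_FLAG_INIT;
 *
 *	vsi = ice_vsi_setup(pf, &params);
 */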
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a359f1610fc1..b360bd8f1599 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -656,7 +656,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_setup_rx_ring;
- status = ice_vsi_cfg(vsi);
+ status = ice_vsi_cfg_lan(vsi);
if (status)
goto err_setup_rx_ring;
@@ -664,7 +664,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_start_rx_ring;
- return status;
+ return 0;
err_start_rx_ring:
ice_vsi_free_rx_rings(vsi);
@@ -1950,8 +1950,7 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
ICE_PHY_TYPE_LOW_100G_CAUI4 |
ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |
ICE_PHY_TYPE_LOW_100G_AUI4 |
- ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 |
- ICE_PHY_TYPE_LOW_100GBASE_CP2;
+ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4;
phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |
ICE_PHY_TYPE_HIGH_100G_CAUI2 |
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC |
@@ -1964,15 +1963,27 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
100000baseCR4_Full);
}
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 |
- ICE_PHY_TYPE_LOW_100GBASE_SR2;
- if (phy_types_low & phy_type_mask_lo) {
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseCR2_Full);
+ }
+
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseSR4_Full);
}
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseSR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseSR2_Full);
+ }
+
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |
ICE_PHY_TYPE_LOW_100GBASE_DR;
if (phy_types_low & phy_type_mask_lo) {
@@ -1984,14 +1995,20 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |
ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4;
- phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4;
- if (phy_types_low & phy_type_mask_lo ||
- phy_types_high & phy_type_mask_hi) {
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR4_Full);
}
+
+ if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseKR2_Full);
+ }
+
}
#define TEST_SET_BITS_TIMEOUT 50
@@ -2242,17 +2259,15 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
100baseT_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseX_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseKX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 2500baseT_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
2500baseX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -2261,9 +2276,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseKR_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseSR_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseLR_Full))
@@ -2287,9 +2301,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseCR2_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseKR2_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseKR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseSR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -2299,7 +2312,13 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseLR4_ER4_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseKR4_Full))
+ 100000baseKR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseCR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseSR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseKR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
return adv_link_speed;
@@ -3027,8 +3046,6 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
- xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
- xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
xdp_rings[i].desc = NULL;
xdp_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&xdp_rings[i]);
@@ -3073,7 +3090,7 @@ process_rx:
/* allocate Rx buffers */
err = ice_alloc_rx_bufs(&rx_rings[i],
- ICE_DESC_UNUSED(&rx_rings[i]));
+ ICE_RX_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
if (err) {
while (i) {
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 4b3bb19e1d06..5ce413965930 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -6,23 +6,6 @@
#include "ice_flow.h"
#include "ice.h"
-/* For supporting double VLAN mode, it is necessary to enable or disable certain
- * boost tcam entries. The metadata label names that match the following
- * prefixes will be saved to allow enabling double VLAN mode.
- */
-#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
-#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
-
-/* To support tunneling entries by PF, the package will append the PF number to
- * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
- */
-#define ICE_TNL_PRE "TNL_"
-static const struct ice_tunnel_type_scan tnls[] = {
- { TNL_VXLAN, "TNL_VXLAN_PF" },
- { TNL_GENEVE, "TNL_GENEVE_PF" },
- { TNL_LAST, "" }
-};
-
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
/* SWITCH */
{
@@ -104,225 +87,6 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
}
/**
- * ice_pkg_val_buf
- * @buf: pointer to the ice buffer
- *
- * This helper function validates a buffer's header.
- */
-static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
-{
- struct ice_buf_hdr *hdr;
- u16 section_count;
- u16 data_end;
-
- hdr = (struct ice_buf_hdr *)buf->buf;
- /* verify data */
- section_count = le16_to_cpu(hdr->section_count);
- if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
- return NULL;
-
- data_end = le16_to_cpu(hdr->data_end);
- if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
- return NULL;
-
- return hdr;
-}
-
-/**
- * ice_find_buf_table
- * @ice_seg: pointer to the ice segment
- *
- * Returns the address of the buffer table within the ice segment.
- */
-static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
-{
- struct ice_nvm_table *nvms;
-
- nvms = (struct ice_nvm_table *)
- (ice_seg->device_table +
- le32_to_cpu(ice_seg->device_table_count));
-
- return (__force struct ice_buf_table *)
- (nvms->vers + le32_to_cpu(nvms->table_count));
-}
-
-/**
- * ice_pkg_enum_buf
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This function will enumerate all the buffers in the ice segment. The first
- * call is made with the ice_seg parameter non-NULL; on subsequent calls,
- * ice_seg is set to NULL which continues the enumeration. When the function
- * returns a NULL pointer, then the end of the buffers has been reached, or an
- * unexpected value has been detected (for example an invalid section count or
- * an invalid buffer end value).
- */
-static struct ice_buf_hdr *
-ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (ice_seg) {
- state->buf_table = ice_find_buf_table(ice_seg);
- if (!state->buf_table)
- return NULL;
-
- state->buf_idx = 0;
- return ice_pkg_val_buf(state->buf_table->buf_array);
- }
-
- if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
- return ice_pkg_val_buf(state->buf_table->buf_array +
- state->buf_idx);
- else
- return NULL;
-}
-
-/**
- * ice_pkg_advance_sect
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This helper function will advance the section within the ice segment,
- * also advancing the buffer if needed.
- */
-static bool
-ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (!ice_seg && !state->buf)
- return false;
-
- if (!ice_seg && state->buf)
- if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
- return true;
-
- state->buf = ice_pkg_enum_buf(ice_seg, state);
- if (!state->buf)
- return false;
-
- /* start of new buffer, reset section index */
- state->sect_idx = 0;
- return true;
-}
-
-/**
- * ice_pkg_enum_section
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- *
- * This function will enumerate all the sections of a particular type in the
- * ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the matching
- * sections has been reached.
- */
-static void *
-ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type)
-{
- u16 offset, size;
-
- if (ice_seg)
- state->type = sect_type;
-
- if (!ice_pkg_advance_sect(ice_seg, state))
- return NULL;
-
- /* scan for next matching section */
- while (state->buf->section_entry[state->sect_idx].type !=
- cpu_to_le32(state->type))
- if (!ice_pkg_advance_sect(NULL, state))
- return NULL;
-
- /* validate section */
- offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
- if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
- return NULL;
-
- size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
- if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
- return NULL;
-
- /* make sure the section fits in the buffer */
- if (offset + size > ICE_PKG_BUF_SIZE)
- return NULL;
-
- state->sect_type =
- le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
-
- /* calc pointer to this section */
- state->sect = ((u8 *)state->buf) +
- le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
-
- return state->sect;
-}
-
-/**
- * ice_pkg_enum_entry
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- * @offset: pointer to variable that receives the offset in the table (optional)
- * @handler: function that handles access to the entries of the given section type
- *
- * This function will enumerate all the entries in particular section type in
- * the ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the entries has
- * been reached.
- *
- * Since each section may have a different header and entry size, the handler
- * function is needed to determine the number and location of entries in each
- * section.
- *
- * The offset parameter is optional, but should be used for sections that
- * contain an offset for each section table. For such cases, the section handler
- * function must return the appropriate offset + index to give the absolute
- * offset for each entry. For example, if the base for a section's header
- * indicates a base offset of 10, and the index for the entry is 2, then
- * the section handler function should set the offset to 10 + 2 = 12.
- */
-static void *
-ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type, u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
-{
- void *entry;
-
- if (ice_seg) {
- if (!handler)
- return NULL;
-
- if (!ice_pkg_enum_section(ice_seg, state, sect_type))
- return NULL;
-
- state->entry_idx = 0;
- state->handler = handler;
- } else {
- state->entry_idx++;
- }
-
- if (!state->handler)
- return NULL;
-
- /* get entry */
- entry = state->handler(state->sect_type, state->sect, state->entry_idx,
- offset);
- if (!entry) {
- /* end of a section, look for another section of this type */
- if (!ice_pkg_enum_section(NULL, state, 0))
- return NULL;
-
- state->entry_idx = 0;
- entry = state->handler(state->sect_type, state->sect,
- state->entry_idx, offset);
- }
-
- return entry;
-}
-
-/**
* ice_hw_ptype_ena - check if the PTYPE is enabled or not
* @hw: pointer to the HW structure
* @ptype: the hardware PTYPE
@@ -333,312 +97,6 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
test_bit(ptype, hw->hw_ptype);
}
-/**
- * ice_marker_ptype_tcam_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the Marker PType TCAM entry to be returned
- * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual Marker PType TCAM entries.
- */
-static void *
-ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
- u32 *offset)
-{
- struct ice_marker_ptype_tcam_section *marker_ptype;
-
- if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
- return NULL;
-
- if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- marker_ptype = section;
- if (index >= le16_to_cpu(marker_ptype->count))
- return NULL;
-
- return marker_ptype->tcam + index;
-}
-
-/**
- * ice_fill_hw_ptype - fill the enabled PTYPE bit information
- * @hw: pointer to the HW structure
- */
-static void ice_fill_hw_ptype(struct ice_hw *hw)
-{
- struct ice_marker_ptype_tcam_entry *tcam;
- struct ice_seg *seg = hw->seg;
- struct ice_pkg_enum state;
-
- bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
- if (!seg)
- return;
-
- memset(&state, 0, sizeof(state));
-
- do {
- tcam = ice_pkg_enum_entry(seg, &state,
- ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
- ice_marker_ptype_tcam_handler);
- if (tcam &&
- le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
- le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
- set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
-
- seg = NULL;
- } while (tcam);
-}
-
-/**
- * ice_boost_tcam_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the boost TCAM entry to be returned
- * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual boost TCAM entries.
- */
-static void *
-ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_boost_tcam_section *boost;
-
- if (!section)
- return NULL;
-
- if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
- return NULL;
-
- /* cppcheck-suppress nullPointer */
- if (index > ICE_MAX_BST_TCAMS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- boost = section;
- if (index >= le16_to_cpu(boost->count))
- return NULL;
-
- return boost->tcam + index;
-}
-
-/**
- * ice_find_boost_entry
- * @ice_seg: pointer to the ice segment (non-NULL)
- * @addr: Boost TCAM address of entry to search for
- * @entry: returns pointer to the entry
- *
- * Finds a particular Boost TCAM entry and returns a pointer to that entry
- * if it is found. The ice_seg parameter must not be NULL since the first call
- * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
- */
-static int
-ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
- struct ice_boost_tcam_entry **entry)
-{
- struct ice_boost_tcam_entry *tcam;
- struct ice_pkg_enum state;
-
- memset(&state, 0, sizeof(state));
-
- if (!ice_seg)
- return -EINVAL;
-
- do {
- tcam = ice_pkg_enum_entry(ice_seg, &state,
- ICE_SID_RXPARSER_BOOST_TCAM, NULL,
- ice_boost_tcam_handler);
- if (tcam && le16_to_cpu(tcam->addr) == addr) {
- *entry = tcam;
- return 0;
- }
-
- ice_seg = NULL;
- } while (tcam);
-
- *entry = NULL;
- return -EIO;
-}
-
-/**
- * ice_label_enum_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the label entry to be returned
- * @offset: pointer to receive absolute offset, always zero for label sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual label entries.
- */
-static void *
-ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
- u32 *offset)
-{
- struct ice_label_section *labels;
-
- if (!section)
- return NULL;
-
- /* cppcheck-suppress nullPointer */
- if (index > ICE_MAX_LABELS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- labels = section;
- if (index >= le16_to_cpu(labels->count))
- return NULL;
-
- return labels->label + index;
-}
-
-/**
- * ice_enum_labels
- * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
- * @type: the section type that will contain the label (0 on subsequent calls)
- * @state: ice_pkg_enum structure that will hold the state of the enumeration
- * @value: pointer to a value that will return the label's value if found
- *
- * Enumerates a list of labels in the package. The caller will call
- * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
- * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
- * the end of the list has been reached.
- */
-static char *
-ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
- u16 *value)
-{
- struct ice_label *label;
-
- /* Check for valid label section on first call */
- if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
- return NULL;
-
- label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
- ice_label_enum_handler);
- if (!label)
- return NULL;
-
- *value = le16_to_cpu(label->value);
- return label->name;
-}
-
-/**
- * ice_add_tunnel_hint
- * @hw: pointer to the HW structure
- * @label_name: label text
- * @val: value of the tunnel port boost entry
- */
-static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
-{
- if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
- u16 i;
-
- for (i = 0; tnls[i].type != TNL_LAST; i++) {
- size_t len = strlen(tnls[i].label_prefix);
-
- /* Look for matching label start, before continuing */
- if (strncmp(label_name, tnls[i].label_prefix, len))
- continue;
-
- /* Make sure this label matches our PF. Note that the PF
- * character ('0' - '7') will be located where our
- * prefix string's null terminator is located.
- */
- if ((label_name[len] - '0') == hw->pf_id) {
- hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
- hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].boost_addr = val;
- hw->tnl.tbl[hw->tnl.count].port = 0;
- hw->tnl.count++;
- break;
- }
- }
- }
-}
-
-/**
- * ice_add_dvm_hint
- * @hw: pointer to the HW structure
- * @val: value of the boost entry
- * @enable: true if entry needs to be enabled, or false if needs to be disabled
- */
-static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
-{
- if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
- hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
- hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
- hw->dvm_upd.count++;
- }
-}
-
-/**
- * ice_init_pkg_hints
- * @hw: pointer to the HW structure
- * @ice_seg: pointer to the segment of the package scan (non-NULL)
- *
- * This function will scan the package and save off relevant information
- * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
- * since the first call to ice_enum_labels requires a pointer to an actual
- * ice_seg structure.
- */
-static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_pkg_enum state;
- char *label_name;
- u16 val;
- int i;
-
- memset(&hw->tnl, 0, sizeof(hw->tnl));
- memset(&state, 0, sizeof(state));
-
- if (!ice_seg)
- return;
-
- label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
- &val);
-
- while (label_name) {
- if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
- /* check for a tunnel entry */
- ice_add_tunnel_hint(hw, label_name, val);
-
- /* check for a dvm mode entry */
- else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
- ice_add_dvm_hint(hw, val, true);
-
- /* check for a svm mode entry */
- else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
- ice_add_dvm_hint(hw, val, false);
-
- label_name = ice_enum_labels(NULL, 0, &state, &val);
- }
-
- /* Cache the appropriate boost TCAM entry pointers for tunnels */
- for (i = 0; i < hw->tnl.count; i++) {
- ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
- &hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry) {
- hw->tnl.tbl[i].valid = true;
- if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
- hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
- }
- }
-
- /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
- for (i = 0; i < hw->dvm_upd.count; i++)
- ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
- &hw->dvm_upd.tbl[i].boost_entry);
-}
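
/* To illustrate the matching above, with hw->pf_id == 2 the RXPARSER TMEM
 * labels classify as follows (label names per the package conventions):
 *
 *   "TNL_VXLAN_PF2"          -> tunnel hint, type TNL_VXLAN (PF digit matches)
 *   "TNL_VXLAN_PF0"          -> skipped, wrong PF
 *   "BOOST_MAC_VLAN_DVM_..." -> DVM hint, enable = true
 *   "BOOST_MAC_VLAN_SVM_..." -> DVM hint, enable = false
 */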
-
/* Key creation */
#define ICE_DC_KEY 0x1 /* don't care */
@@ -810,51 +268,6 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
}
/**
- * ice_acquire_global_cfg_lock
- * @hw: pointer to the HW structure
- * @access: access type (read or write)
- *
- * This function will request ownership of the global config lock for reading
- * or writing of the package. When attempting to obtain write access, the
- * caller must check for the following two return values:
- *
- * 0 - Means the caller has acquired the global config lock
- * and can perform writing of the package.
- * -EALREADY - Indicates another driver has already written the
- * package or has found that no update was necessary; in
- * this case, the caller can just skip performing any
- * update of the package.
- */
-static int
-ice_acquire_global_cfg_lock(struct ice_hw *hw,
- enum ice_aq_res_access_type access)
-{
- int status;
-
- status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
- ICE_GLOBAL_CFG_LOCK_TIMEOUT);
-
- if (!status)
- mutex_lock(&ice_global_cfg_lock_sw);
- else if (status == -EALREADY)
- ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
-
- return status;
-}
-
-/**
- * ice_release_global_cfg_lock
- * @hw: pointer to the HW structure
- *
- * This function will release the global config lock.
- */
-static void ice_release_global_cfg_lock(struct ice_hw *hw)
-{
- mutex_unlock(&ice_global_cfg_lock_sw);
- ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
-}
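
/* Caller-side sketch of the write-path contract documented above; it mirrors
 * how ice_dwnld_cfg_bufs() later in this patch consumes the lock
 * (ice_map_aq_err_to_ddp_state() is defined further below):
 */
	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status == -EALREADY)
		/* another PF already downloaded the package */
		return ICE_DDP_PKG_ALREADY_LOADED;
	if (status)
		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);

	/* ... download configuration buffers ... */

	ice_release_global_cfg_lock(hw);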
-
-/**
* ice_acquire_change_lock
* @hw: pointer to the HW structure
* @access: access type (read or write)
@@ -880,1325 +293,6 @@ void ice_release_change_lock(struct ice_hw *hw)
}
/**
- * ice_aq_download_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer to transfer
- * @buf_size: the size of the package buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Download Package (0x0C40)
- */
-static int
-ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == -EIO) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_aq_upload_section
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer which will receive the section
- * @buf_size: the size of the package buffer
- * @cd: pointer to command details structure or NULL
- *
- * Upload Section (0x0C41)
- */
-int
-ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
-}
-
-/**
- * ice_aq_update_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package cmd buffer
- * @buf_size: the size of the package cmd buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Update Package (0x0C42)
- */
-static int
-ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
- bool last_buf, u32 *error_offset, u32 *error_info,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == -EIO) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_find_seg_in_pkg
- * @hw: pointer to the hardware structure
- * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_ICE)
- * @pkg_hdr: pointer to the package header to be searched
- *
- * This function searches a package file for a particular segment type. On
- * success it returns a pointer to the segment header, otherwise it will
- * return NULL.
- */
-static struct ice_generic_seg_hdr *
-ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
-{
- u32 i;
-
- ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
- pkg_hdr->pkg_format_ver.update,
- pkg_hdr->pkg_format_ver.draft);
-
- /* Search all package segments for the requested segment type */
- for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
- struct ice_generic_seg_hdr *seg;
-
- seg = (struct ice_generic_seg_hdr *)
- ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
-
- if (le32_to_cpu(seg->seg_type) == seg_type)
- return seg;
- }
-
- return NULL;
-}
-
-/**
- * ice_update_pkg_no_lock
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- */
-static int
-ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- int status = 0;
- u32 i;
-
- for (i = 0; i < count; i++) {
- struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
- bool last = ((i + 1) == count);
- u32 offset, info;
-
- status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
- last, &offset, &info, NULL);
-
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
- status, offset, info);
- break;
- }
- }
-
- return status;
-}
-
-/**
- * ice_update_pkg
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
- */
-static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- int status;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
-
- status = ice_update_pkg_no_lock(hw, bufs, count);
-
- ice_release_change_lock(hw);
-
- return status;
-}
-
-static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
-{
- switch (aq_err) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
- return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
- case ICE_AQ_RC_ESVN:
- return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
- return ICE_DDP_PKG_LOAD_ERROR;
- default:
- return ICE_DDP_PKG_ERR;
- }
-}
-
-/**
- * ice_dwnld_cfg_bufs
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains global config lock and downloads the package configuration buffers
- * to the firmware. Metadata buffers are skipped, and the first metadata buffer
- * found indicates that the rest of the buffers are all metadata buffers.
- */
-static enum ice_ddp_state
-ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_buf_hdr *bh;
- enum ice_aq_err err;
- u32 offset, info, i;
- int status;
-
- if (!bufs || !count)
- return ICE_DDP_PKG_ERR;
-
- /* If the first buffer's first section has its metadata bit set
- * then there are no buffers to be downloaded, and the operation is
- * considered a success.
- */
- bh = (struct ice_buf_hdr *)bufs;
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return ICE_DDP_PKG_SUCCESS;
-
- status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
- if (status) {
- if (status == -EALREADY)
- return ICE_DDP_PKG_ALREADY_LOADED;
- return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
- }
-
- for (i = 0; i < count; i++) {
- bool last = ((i + 1) == count);
-
- if (!last) {
- /* check next buffer for metadata flag */
- bh = (struct ice_buf_hdr *)(bufs + i + 1);
-
- /* A set metadata flag in the next buffer will signal
- * that the current buffer will be the last buffer
- * downloaded
- */
- if (le16_to_cpu(bh->section_count))
- if (le32_to_cpu(bh->section_entry[0].type) &
- ICE_METADATA_BUF)
- last = true;
- }
-
- bh = (struct ice_buf_hdr *)(bufs + i);
-
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
-
- /* Save AQ status from download package */
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
- status, offset, info);
- err = hw->adminq.sq_last_status;
- state = ice_map_aq_err_to_ddp_state(err);
- break;
- }
-
- if (last)
- break;
- }
-
- if (!status) {
- status = ice_set_vlan_mode(hw);
- if (status)
- ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
- status);
- }
-
- ice_release_global_cfg_lock(hw);
-
- return state;
-}
-
-/**
- * ice_aq_get_pkg_info_list
- * @hw: pointer to the hardware structure
- * @pkg_info: the buffer which will receive the information list
- * @buf_size: the size of the pkg_info information buffer
- * @cd: pointer to command details structure or NULL
- *
- * Get Package Info List (0x0C43)
- */
-static int
-ice_aq_get_pkg_info_list(struct ice_hw *hw,
- struct ice_aqc_get_pkg_info_resp *pkg_info,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
-
- return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
-}
-
-/**
- * ice_download_pkg
- * @hw: pointer to the hardware structure
- * @ice_seg: pointer to the segment of the package to be downloaded
- *
- * Handles the download of a complete package.
- */
-static enum ice_ddp_state
-ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_buf_table *ice_buf_tbl;
- int status;
-
- ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_format_ver.major,
- ice_seg->hdr.seg_format_ver.minor,
- ice_seg->hdr.seg_format_ver.update,
- ice_seg->hdr.seg_format_ver.draft);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
- le32_to_cpu(ice_seg->hdr.seg_type),
- le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
-
- ice_buf_tbl = ice_find_buf_table(ice_seg);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
- le32_to_cpu(ice_buf_tbl->buf_count));
-
- status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- le32_to_cpu(ice_buf_tbl->buf_count));
-
- ice_post_pkg_dwnld_vlan_mode_cfg(hw);
-
- return status;
-}
-
-/**
- * ice_init_pkg_info
- * @hw: pointer to the hardware structure
- * @pkg_hdr: pointer to the driver's package hdr
- *
- * Saves off the package details into the HW structure.
- */
-static enum ice_ddp_state
-ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
-{
- struct ice_generic_seg_hdr *seg_hdr;
-
- if (!pkg_hdr)
- return ICE_DDP_PKG_ERR;
-
- seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
- if (seg_hdr) {
- struct ice_meta_sect *meta;
- struct ice_pkg_enum state;
-
- memset(&state, 0, sizeof(state));
-
- /* Get package information from the Metadata Section */
- meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
- ICE_SID_METADATA);
- if (!meta) {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
- return ICE_DDP_PKG_INVALID_FILE;
- }
-
- hw->pkg_ver = meta->ver;
- memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
- meta->ver.major, meta->ver.minor, meta->ver.update,
- meta->ver.draft, meta->name);
-
- hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
- memcpy(hw->ice_seg_id, seg_hdr->seg_id,
- sizeof(hw->ice_seg_id));
-
- ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_format_ver.major,
- seg_hdr->seg_format_ver.minor,
- seg_hdr->seg_format_ver.update,
- seg_hdr->seg_format_ver.draft,
- seg_hdr->seg_id);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
- return ICE_DDP_PKG_INVALID_FILE;
- }
-
- return ICE_DDP_PKG_SUCCESS;
-}
-
-/**
- * ice_get_pkg_info
- * @hw: pointer to the hardware structure
- *
- * Store details of the package currently loaded in HW into the HW structure.
- */
-static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
-{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_aqc_get_pkg_info_resp *pkg_info;
- u16 size;
- u32 i;
-
- size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
- pkg_info = kzalloc(size, GFP_KERNEL);
- if (!pkg_info)
- return ICE_DDP_PKG_ERR;
-
- if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
- state = ICE_DDP_PKG_ERR;
- goto init_pkg_free_alloc;
- }
-
- for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
-#define ICE_PKG_FLAG_COUNT 4
- char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
- u8 place = 0;
-
- if (pkg_info->pkg_info[i].is_active) {
- flags[place++] = 'A';
- hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
- hw->active_track_id =
- le32_to_cpu(pkg_info->pkg_info[i].track_id);
- memcpy(hw->active_pkg_name,
- pkg_info->pkg_info[i].name,
- sizeof(pkg_info->pkg_info[i].name));
- hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
- }
- if (pkg_info->pkg_info[i].is_active_at_boot)
- flags[place++] = 'B';
- if (pkg_info->pkg_info[i].is_modified)
- flags[place++] = 'M';
- if (pkg_info->pkg_info[i].is_in_nvm)
- flags[place++] = 'N';
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
- i, pkg_info->pkg_info[i].ver.major,
- pkg_info->pkg_info[i].ver.minor,
- pkg_info->pkg_info[i].ver.update,
- pkg_info->pkg_info[i].ver.draft,
- pkg_info->pkg_info[i].name, flags);
- }
-
-init_pkg_free_alloc:
- kfree(pkg_info);
-
- return state;
-}
-
-/**
- * ice_verify_pkg - verify package
- * @pkg: pointer to the package buffer
- * @len: size of the package buffer
- *
- * Verifies various attributes of the package file, including length, format
- * version, and the requirement of at least one segment.
- */
-static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
-{
- u32 seg_count;
- u32 i;
-
- if (len < struct_size(pkg, seg_offset, 1))
- return ICE_DDP_PKG_INVALID_FILE;
-
- if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
- return ICE_DDP_PKG_INVALID_FILE;
-
- /* pkg must have at least one segment */
- seg_count = le32_to_cpu(pkg->seg_count);
- if (seg_count < 1)
- return ICE_DDP_PKG_INVALID_FILE;
-
- /* make sure segment array fits in package length */
- if (len < struct_size(pkg, seg_offset, seg_count))
- return ICE_DDP_PKG_INVALID_FILE;
-
- /* all segments must fit within length */
- for (i = 0; i < seg_count; i++) {
- u32 off = le32_to_cpu(pkg->seg_offset[i]);
- struct ice_generic_seg_hdr *seg;
-
- /* segment header must fit */
- if (len < off + sizeof(*seg))
- return ICE_DDP_PKG_INVALID_FILE;
-
- seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
-
- /* segment body must fit */
- if (len < off + le32_to_cpu(seg->seg_size))
- return ICE_DDP_PKG_INVALID_FILE;
- }
-
- return ICE_DDP_PKG_SUCCESS;
-}
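The checks above compose a standard nested bounds-validation pattern: prove the fixed header fits, then the whole offset table, then each segment header, then each segment body. A minimal standalone sketch of the same pattern, using hypothetical host-endian types rather than the driver's little-endian structs (overflow hardening omitted for brevity):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct seg_hdr { uint32_t seg_type; uint32_t seg_size; };
	struct pkg_hdr { uint32_t seg_count; uint32_t seg_offset[]; };

	static bool pkg_layout_ok(const uint8_t *buf, uint32_t len)
	{
		const struct pkg_hdr *pkg = (const struct pkg_hdr *)buf;
		uint32_t i;

		/* fixed header plus at least one offset entry must fit */
		if (len < sizeof(*pkg) + sizeof(uint32_t))
			return false;
		/* the full offset table must fit */
		if (pkg->seg_count < 1 ||
		    len < sizeof(*pkg) + (size_t)pkg->seg_count * sizeof(uint32_t))
			return false;
		for (i = 0; i < pkg->seg_count; i++) {
			uint32_t off = pkg->seg_offset[i];
			const struct seg_hdr *seg;

			if (len < off + sizeof(*seg))	/* header must fit */
				return false;
			seg = (const struct seg_hdr *)(buf + off);
			if (len < off + seg->seg_size)	/* body must fit */
				return false;
		}
		return true;
	}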
-
-/**
- * ice_free_seg - free package segment pointer
- * @hw: pointer to the hardware structure
- *
- * Frees the package segment pointer in the proper manner, depending on whether
- * the segment was allocated or the passed-in pointer was merely stored.
- */
-void ice_free_seg(struct ice_hw *hw)
-{
- if (hw->pkg_copy) {
- devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
- hw->pkg_copy = NULL;
- hw->pkg_size = 0;
- }
- hw->seg = NULL;
-}
-
-/**
- * ice_init_pkg_regs - initialize additional package registers
- * @hw: pointer to the hardware structure
- */
-static void ice_init_pkg_regs(struct ice_hw *hw)
-{
-#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
-#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
-#define ICE_SW_BLK_IDX 0
-
- /* setup Switch block input mask, which is 48 bits in two parts */
- wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
- wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
-}
-
-/**
- * ice_chk_pkg_version - check package version for compatibility with driver
- * @pkg_ver: pointer to a version structure to check
- *
- * Check to make sure that the package about to be downloaded is compatible with
- * the driver. To be compatible, the major and minor components of the package
- * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
- * definitions.
- */
-static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
-{
- if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
- (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
- pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
- return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
- else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
- (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
- pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
- return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
-
- return ICE_DDP_PKG_SUCCESS;
-}
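This is a lexicographic check on (major, minor) only; the update and draft fields do not affect compatibility. Restated as a standalone helper (illustrative, not driver code):

	enum ver_cmp { VER_OK, VER_TOO_HIGH, VER_TOO_LOW };

	static enum ver_cmp chk_major_minor(unsigned int maj, unsigned int min,
					    unsigned int supp_maj,
					    unsigned int supp_min)
	{
		if (maj > supp_maj || (maj == supp_maj && min > supp_min))
			return VER_TOO_HIGH;
		if (maj < supp_maj || (maj == supp_maj && min < supp_min))
			return VER_TOO_LOW;
		/* only maj == supp_maj && min == supp_min remains */
		return VER_OK;
	}

With the supported pair defined as 1.3 (see ICE_PKG_SUPP_VER_MAJ/MNR below), any 1.3.x.y package passes regardless of x and y.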
-
-/**
- * ice_chk_pkg_compat
- * @hw: pointer to the hardware structure
- * @ospkg: pointer to the package hdr
- * @seg: pointer to the package segment hdr
- *
- * This function checks the package version compatibility with driver and NVM
- */
-static enum ice_ddp_state
-ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
- struct ice_seg **seg)
-{
- struct ice_aqc_get_pkg_info_resp *pkg;
- enum ice_ddp_state state;
- u16 size;
- u32 i;
-
- /* Check package version compatibility */
- state = ice_chk_pkg_version(&hw->pkg_ver);
- if (state) {
- ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
- return state;
- }
-
- /* find ICE segment in given package */
- *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
- ospkg);
- if (!*seg) {
- ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_DDP_PKG_INVALID_FILE;
- }
-
- /* Check if FW is compatible with the OS package */
- size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
- pkg = kzalloc(size, GFP_KERNEL);
- if (!pkg)
- return ICE_DDP_PKG_ERR;
-
- if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
- state = ICE_DDP_PKG_LOAD_ERROR;
- goto fw_ddp_compat_free_alloc;
- }
-
- for (i = 0; i < le32_to_cpu(pkg->count); i++) {
- /* loop till we find the NVM package */
- if (!pkg->pkg_info[i].is_in_nvm)
- continue;
- if ((*seg)->hdr.seg_format_ver.major !=
- pkg->pkg_info[i].ver.major ||
- (*seg)->hdr.seg_format_ver.minor >
- pkg->pkg_info[i].ver.minor) {
- state = ICE_DDP_PKG_FW_MISMATCH;
- ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
- }
- /* done processing NVM package so break */
- break;
- }
-fw_ddp_compat_free_alloc:
- kfree(pkg);
- return state;
-}
-
-/**
- * ice_sw_fv_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the field vector entry to be returned
- * @offset: ptr to variable that receives the offset in the field vector table
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * This function treats the given section as being of type ice_sw_fv_section
- * and enumerates its offset field. "offset" is an index into the field
- * vector table.
- */
-static void *
-ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_sw_fv_section *fv_section = section;
-
- if (!section || sect_type != ICE_SID_FLD_VEC_SW)
- return NULL;
- if (index >= le16_to_cpu(fv_section->count))
- return NULL;
- if (offset)
- /* "index" passed in to this function is relative to a given
- * 4k block. To get to the true index into the field vector
- * table need to add the relative index to the base_offset
- * field of this section
- */
- *offset = le16_to_cpu(fv_section->base_offset) + index;
- return fv_section->fv + index;
-}
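Concretely, with hypothetical numbers: if a section carries base_offset 256 and the enumerator asks for entry index 5, the handler returns the sixth ice_fv in that section and reports *offset as 256 + 5 = 261, the entry's absolute position (and hence profile ID) in the global field vector table.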
-
-/**
- * ice_get_prof_index_max - get the max profile index for used profile
- * @hw: pointer to the HW struct
- *
- * Calling this function will get the max profile index among the profiles
- * in use and store that index in struct ice_switch_info *switch_info
- * in HW for later use.
- */
-static int ice_get_prof_index_max(struct ice_hw *hw)
-{
- u16 prof_index = 0, j, max_prof_index = 0;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- bool flag = false;
- struct ice_fv *fv;
- u32 offset;
-
- memset(&state, 0, sizeof(state));
-
- if (!hw->seg)
- return -EINVAL;
-
- ice_seg = hw->seg;
-
- do {
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* in a profile that is not used, the prot_id is set to 0xff
- * and the off is set to 0x1ff for all the field vectors.
- */
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
- fv->ew[j].off != ICE_FV_OFFSET_INVAL)
- flag = true;
- if (flag && prof_index > max_prof_index)
- max_prof_index = prof_index;
-
- prof_index++;
- flag = false;
- } while (fv);
-
- hw->switch_info->max_used_prof_index = max_prof_index;
-
- return 0;
-}
-
-/**
- * ice_get_ddp_pkg_state - get DDP pkg state after download
- * @hw: pointer to the HW struct
- * @already_loaded: indicates if pkg was already loaded onto the device
- */
-static enum ice_ddp_state
-ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
-{
- if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
- hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
- hw->pkg_ver.update == hw->active_pkg_ver.update &&
- hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
- !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
- if (already_loaded)
- return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
- else
- return ICE_DDP_PKG_SUCCESS;
- } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
- hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
- } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
- } else {
- return ICE_DDP_PKG_ERR;
- }
-}
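Read as a decision table (a hedged summary of the branches above): an exact version-and-name match yields SUCCESS, or SAME_VERSION_ALREADY_LOADED if the download had already been done; otherwise, an active major.minor that differs from the supported pair yields ALREADY_LOADED_NOT_SUPPORTED, while a matching major.minor with a different update, draft, or name yields COMPATIBLE_ALREADY_LOADED.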
-
-/**
- * ice_init_pkg - initialize/download package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function initializes a package. The package contains HW tables
- * required to do packet processing. First, the function extracts package
- * information such as version. Then it finds the ice configuration segment
- * within the package; this function then saves a copy of the segment pointer
- * within the supplied package buffer. Next, the function will cache any hints
- * from the package, followed by downloading the package itself. Note that if
- * a previous PF driver has already downloaded the package successfully, then
- * the current driver will not have to download the package again.
- *
- * The local package contents will be used to query default behavior and to
- * update specific sections of the HW's version of the package (e.g. to update
- * the parse graph to understand new protocols).
- *
- * This function stores a pointer to the package buffer memory, and it is
- * expected that the supplied buffer will not be freed immediately. If the
- * package buffer needs to be freed, such as when read from a file, use
- * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
- * case.
- */
-enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
-{
- bool already_loaded = false;
- enum ice_ddp_state state;
- struct ice_pkg_hdr *pkg;
- struct ice_seg *seg;
-
- if (!buf || !len)
- return ICE_DDP_PKG_ERR;
-
- pkg = (struct ice_pkg_hdr *)buf;
- state = ice_verify_pkg(pkg, len);
- if (state) {
- ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- state);
- return state;
- }
-
- /* initialize package info */
- state = ice_init_pkg_info(hw, pkg);
- if (state)
- return state;
-
- /* before downloading the package, check package version for
- * compatibility with driver
- */
- state = ice_chk_pkg_compat(hw, pkg, &seg);
- if (state)
- return state;
-
- /* initialize package hints and then download package */
- ice_init_pkg_hints(hw, seg);
- state = ice_download_pkg(hw, seg);
- if (state == ICE_DDP_PKG_ALREADY_LOADED) {
- ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
- already_loaded = true;
- }
-
- /* Get information on the package currently loaded in HW, then make sure
- * the driver is compatible with this version.
- */
- if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
- state = ice_get_pkg_info(hw);
- if (!state)
- state = ice_get_ddp_pkg_state(hw, already_loaded);
- }
-
- if (ice_is_init_pkg_successful(state)) {
- hw->seg = seg;
- /* on successful package download update other required
- * registers to support the package and fill HW tables
- * with package content.
- */
- ice_init_pkg_regs(hw);
- ice_fill_blk_tbls(hw);
- ice_fill_hw_ptype(hw);
- ice_get_prof_index_max(hw);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
- state);
- }
-
- return state;
-}
-
-/**
- * ice_copy_and_init_pkg - initialize/download a copy of the package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function copies the package buffer, and then calls ice_init_pkg() to
- * initialize the copied package contents.
- *
- * The copying is necessary if the package buffer supplied is constant, or if
- * the memory may disappear shortly after calling this function.
- *
- * If the package buffer resides in the data segment and can be modified, the
- * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
- *
- * However, if the package buffer needs to be copied first, such as when being
- * read from a file, the caller should use ice_copy_and_init_pkg().
- *
- * Because a copy is made, the caller is free to immediately destroy the
- * original package buffer; the copy is managed by this function and
- * related routines.
- */
-enum ice_ddp_state
-ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
-{
- enum ice_ddp_state state;
- u8 *buf_copy;
-
- if (!buf || !len)
- return ICE_DDP_PKG_ERR;
-
- buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
-
- state = ice_init_pkg(hw, buf_copy, len);
- if (!ice_is_init_pkg_successful(state)) {
- /* Free the copy, since we failed to initialize the package */
- devm_kfree(ice_hw_to_dev(hw), buf_copy);
- } else {
- /* Track the copied pkg so we can free it later */
- hw->pkg_copy = buf_copy;
- hw->pkg_size = len;
- }
-
- return state;
-}
-
-/**
- * ice_is_init_pkg_successful - check if DDP init was successful
- * @state: state of the DDP pkg after download
- */
-bool ice_is_init_pkg_successful(enum ice_ddp_state state)
-{
- switch (state) {
- case ICE_DDP_PKG_SUCCESS:
- case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
- case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
- return true;
- default:
- return false;
- }
-}
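Taken together, a caller that owns only a transient firmware image is expected to pair these two as follows (a sketch of the call pattern, not code from this patch; the firmware object is assumed to come from request_firmware()):

	static void load_ddp_sketch(struct ice_hw *hw, const struct firmware *fw)
	{
		enum ice_ddp_state state;

		/* copies fw->data, so fw may be released right after */
		state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
		if (!ice_is_init_pkg_successful(state))
			pr_err("DDP package load failed, state %d\n", state);
	}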
-
-/**
- * ice_pkg_buf_alloc
- * @hw: pointer to the HW structure
- *
- * Allocates a package buffer and returns a pointer to the buffer header.
- * Note: all package contents must be in Little Endian form.
- */
-static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
-{
- struct ice_buf_build *bld;
- struct ice_buf_hdr *buf;
-
- bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
- if (!bld)
- return NULL;
-
- buf = (struct ice_buf_hdr *)bld;
- buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
- section_entry));
- return bld;
-}
-
-static bool ice_is_gtp_u_profile(u16 prof_idx)
-{
- return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
- prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
- prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
-}
-
-static bool ice_is_gtp_c_profile(u16 prof_idx)
-{
- switch (prof_idx) {
- case ICE_PROFID_IPV4_GTPC_TEID:
- case ICE_PROFID_IPV4_GTPC_NO_TEID:
- case ICE_PROFID_IPV6_GTPC_TEID:
- case ICE_PROFID_IPV6_GTPC_NO_TEID:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_get_sw_prof_type - determine switch profile type
- * @hw: pointer to the HW structure
- * @fv: pointer to the switch field vector
- * @prof_idx: profile index to check
- */
-static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
-{
- u16 i;
-
- if (ice_is_gtp_c_profile(prof_idx))
- return ICE_PROF_TUN_GTPC;
-
- if (ice_is_gtp_u_profile(prof_idx))
- return ICE_PROF_TUN_GTPU;
-
- for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
- /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
- fv->ew[i].off == ICE_VNI_OFFSET)
- return ICE_PROF_TUN_UDP;
-
- /* GRE tunnel will have GRE protocol */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
- return ICE_PROF_TUN_GRE;
- }
-
- return ICE_PROF_NON_TUN;
-}
-
-/**
- * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
- * @hw: pointer to hardware structure
- * @req_profs: type of profiles requested
- * @bm: pointer to memory for returning the bitmap of field vectors
- */
-void
-ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
- unsigned long *bm)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- if (req_profs == ICE_PROF_ALL) {
- bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
- return;
- }
-
- memset(&state, 0, sizeof(state));
- bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
- ice_seg = hw->seg;
- do {
- enum ice_prof_type prof_type;
- u32 offset;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- ice_seg = NULL;
-
- if (fv) {
- /* Determine field vector type */
- prof_type = ice_get_sw_prof_type(hw, fv, offset);
-
- if (req_profs & prof_type)
- set_bit((u16)offset, bm);
- }
- } while (fv);
-}
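A caller asking for, say, every UDP-tunnel profile would pair this with an on-stack bitmap (a usage sketch; the switch recipe code is the real consumer):

	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
	unsigned int prof;

	ice_get_sw_fv_bitmap(hw, ICE_PROF_TUN_UDP, fv_bitmap);
	for_each_set_bit(prof, fv_bitmap, ICE_MAX_NUM_PROFILES)
		;	/* each set bit is the ID of a matching profile */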
-
-/**
- * ice_get_sw_fv_list
- * @hw: pointer to the HW structure
- * @lkups: list of protocol types
- * @bm: bitmap of field vectors to consider
- * @fv_list: Head of a list
- *
- * Finds all the field vector entries from the switch block that contain
- * a given protocol ID and offset and returns a list of structures of type
- * "ice_sw_fv_list_entry". Every structure in the list has a field vector
- * definition and profile ID information.
- * NOTE: The caller of the function is responsible for freeing the memory
- * allocated for every list entry.
- */
-int
-ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
- unsigned long *bm, struct list_head *fv_list)
-{
- struct ice_sw_fv_list_entry *fvl;
- struct ice_sw_fv_list_entry *tmp;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
- u32 offset;
-
- memset(&state, 0, sizeof(state));
-
- if (!lkups->n_val_words || !hw->seg)
- return -EINVAL;
-
- ice_seg = hw->seg;
- do {
- u16 i;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* If field vector is not in the bitmap list, then skip this
- * profile.
- */
- if (!test_bit((u16)offset, bm))
- continue;
-
- for (i = 0; i < lkups->n_val_words; i++) {
- int j;
-
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id ==
- lkups->fv_words[i].prot_id &&
- fv->ew[j].off == lkups->fv_words[i].off)
- break;
- if (j >= hw->blk[ICE_BLK_SW].es.fvw)
- break;
- if (i + 1 == lkups->n_val_words) {
- fvl = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*fvl), GFP_KERNEL);
- if (!fvl)
- goto err;
- fvl->fv_ptr = fv;
- fvl->profile_id = offset;
- list_add(&fvl->list_entry, fv_list);
- break;
- }
- }
- } while (fv);
- if (list_empty(fv_list)) {
- dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package");
- return -EIO;
- }
-
- return 0;
-
-err:
- list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
- list_del(&fvl->list_entry);
- devm_kfree(ice_hw_to_dev(hw), fvl);
- }
-
- return -ENOMEM;
-}
-
-/**
- * ice_init_prof_result_bm - Initialize the profile result index bitmap
- * @hw: pointer to hardware structure
- */
-void ice_init_prof_result_bm(struct ice_hw *hw)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- memset(&state, 0, sizeof(state));
-
- if (!hw->seg)
- return;
-
- ice_seg = hw->seg;
- do {
- u32 off;
- u16 i;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &off, ice_sw_fv_handler);
- ice_seg = NULL;
- if (!fv)
- break;
-
- bitmap_zero(hw->switch_info->prof_res_bm[off],
- ICE_MAX_FV_WORDS);
-
- /* Determine empty field vector indices, these can be
- * used for recipe results. Skip index 0, since it is
- * always used for Switch ID.
- */
- for (i = 1; i < ICE_MAX_FV_WORDS; i++)
- if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
- fv->ew[i].off == ICE_FV_OFFSET_INVAL)
- set_bit(i, hw->switch_info->prof_res_bm[off]);
- } while (fv);
-}
-
-/**
- * ice_pkg_buf_free
- * @hw: pointer to the HW structure
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Frees a package buffer
- */
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
-{
- devm_kfree(ice_hw_to_dev(hw), bld);
-}
-
-/**
- * ice_pkg_buf_reserve_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @count: the number of sections to reserve
- *
- * Reserves one or more section table entries in a package buffer. This routine
- * can be called multiple times as long as all calls are made before
- * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
- * called, the number of sections that can be allocated can no longer be
- * increased; not using all reserved sections is fine, but this will result
- * in some wasted space in the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-static int
-ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
-{
- struct ice_buf_hdr *buf;
- u16 section_count;
- u16 data_end;
-
- if (!bld)
- return -EINVAL;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* already an active section, can't increase table size */
- section_count = le16_to_cpu(buf->section_count);
- if (section_count > 0)
- return -EIO;
-
- if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
- return -EIO;
- bld->reserved_section_table_entries += count;
-
- data_end = le16_to_cpu(buf->data_end) +
- flex_array_size(buf, section_entry, count);
- buf->data_end = cpu_to_le16(data_end);
-
- return 0;
-}
-
-/**
- * ice_pkg_buf_alloc_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- *
- * Reserves memory in the buffer for a section's content and updates the
- * buffer's status accordingly. This routine returns a pointer to the first
- * byte of the section start within the buffer, which is used to fill in the
- * section contents.
- * Note: all package contents must be in Little Endian form.
- */
-static void *
-ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
-{
- struct ice_buf_hdr *buf;
- u16 sect_count;
- u16 data_end;
-
- if (!bld || !type || !size)
- return NULL;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* check for enough space left in buffer */
- data_end = le16_to_cpu(buf->data_end);
-
- /* section start must align on 4 byte boundary */
- data_end = ALIGN(data_end, 4);
-
- if ((data_end + size) > ICE_MAX_S_DATA_END)
- return NULL;
-
- /* check for more available section table entries */
- sect_count = le16_to_cpu(buf->section_count);
- if (sect_count < bld->reserved_section_table_entries) {
- void *section_ptr = ((u8 *)buf) + data_end;
-
- buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
- buf->section_entry[sect_count].size = cpu_to_le16(size);
- buf->section_entry[sect_count].type = cpu_to_le32(type);
-
- data_end += size;
- buf->data_end = cpu_to_le16(data_end);
-
- buf->section_count = cpu_to_le16(sect_count + 1);
- return section_ptr;
- }
-
- /* no free section table entries */
- return NULL;
-}
-
-/**
- * ice_pkg_buf_alloc_single_section
- * @hw: pointer to the HW structure
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- * @section: returns pointer to the section
- *
- * Allocates a package buffer with a single section.
- * Note: all package contents must be in Little Endian form.
- */
-struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
- void **section)
-{
- struct ice_buf_build *buf;
-
- if (!section)
- return NULL;
-
- buf = ice_pkg_buf_alloc(hw);
- if (!buf)
- return NULL;
-
- if (ice_pkg_buf_reserve_section(buf, 1))
- goto ice_pkg_buf_alloc_single_section_err;
-
- *section = ice_pkg_buf_alloc_section(buf, type, size);
- if (!*section)
- goto ice_pkg_buf_alloc_single_section_err;
-
- return buf;
-
-ice_pkg_buf_alloc_single_section_err:
- ice_pkg_buf_free(hw, buf);
- return NULL;
-}
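These helpers compose in a fixed order: allocate the buffer, reserve every section table entry up front, then allocate section bodies, then hand the raw buffer to the update path. Roughly (a sketch with error handling trimmed; sect_type and sect_size stand in for a real section ID and length):

	struct ice_buf_build *bld;
	void *sect;

	bld = ice_pkg_buf_alloc(hw);			/* empty 4k buffer */
	ice_pkg_buf_reserve_section(bld, 1);		/* size section table */
	sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
	/* ... fill *sect with little-endian content ... */
	/* ice_pkg_buf(bld) yields the struct ice_buf to hand to firmware */
	ice_pkg_buf_free(hw, bld);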
-
-/**
- * ice_pkg_buf_get_active_sections
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Returns the number of active sections. Before using the package buffer
- * in an update package command, the caller should make sure that there is at
- * least one active section - otherwise, the buffer is not legal and should
- * not be used.
- * Note: all package contents must be in Little Endian form.
- */
-static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
-{
- struct ice_buf_hdr *buf;
-
- if (!bld)
- return 0;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
- return le16_to_cpu(buf->section_count);
-}
-
-/**
- * ice_pkg_buf
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Return a pointer to the buffer's header
- */
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
-{
- if (!bld)
- return NULL;
-
- return &bld->buf;
-}
-
-/**
* ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
* @port: returns open port
@@ -2297,10 +391,11 @@ ice_upd_dvm_boost_entry_err:
*/
int ice_set_dvm_boost_entries(struct ice_hw *hw)
{
- int status;
u16 i;
for (i = 0; i < hw->dvm_upd.count; i++) {
+ int status;
+
status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
if (status)
return status;
@@ -2757,7 +852,6 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
count++;
list_for_each_entry(tmp2, list2, list)
chk_count++;
- /* cppcheck-suppress knownConditionTrueFalse */
if (!count || count != chk_count)
return false;
@@ -5102,12 +3196,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *vsi_cur;
struct ice_vsig_prof *d, *t;
- int status;
/* remove TCAM entries */
list_for_each_entry_safe(d, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
list) {
+ int status;
+
status = ice_rem_prof_id(hw, blk, d);
if (status)
return status;
@@ -5158,12 +3253,13 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_prof *p, *t;
- int status;
list_for_each_entry_safe(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
list)
if (p->profile_cookie == hdl) {
+ int status;
+
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
return ice_rem_vsig(hw, blk, vsig, chg);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 9c530c86703e..7af7c8e9aa4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -6,75 +6,6 @@
#include "ice_type.h"
-/* Package minimal version supported */
-#define ICE_PKG_SUPP_VER_MAJ 1
-#define ICE_PKG_SUPP_VER_MNR 3
-
-/* Package format version */
-#define ICE_PKG_FMT_VER_MAJ 1
-#define ICE_PKG_FMT_VER_MNR 0
-#define ICE_PKG_FMT_VER_UPD 0
-#define ICE_PKG_FMT_VER_DFT 0
-
-#define ICE_PKG_CNT 4
-
-enum ice_ddp_state {
- /* Indicates that this call to ice_init_pkg
- * successfully loaded the requested DDP package
- */
- ICE_DDP_PKG_SUCCESS = 0,
-
- /* Generic error for already loaded errors, it is mapped later to
- * the more specific one (one of the next 3)
- */
- ICE_DDP_PKG_ALREADY_LOADED = -1,
-
- /* Indicates that a DDP package of the same version has already been
- * loaded onto the device by a previous call or by another PF
- */
- ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
-
- /* The device has a DDP package that is not supported by the driver */
- ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
-
- /* The device has a compatible package
- * (but different from the request) already loaded
- */
- ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
-
- /* The firmware loaded on the device is not compatible with
- * the DDP package loaded
- */
- ICE_DDP_PKG_FW_MISMATCH = -5,
-
- /* The DDP package file is invalid */
- ICE_DDP_PKG_INVALID_FILE = -6,
-
- /* The version of the DDP package provided is higher than
- * the driver supports
- */
- ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
-
- /* The version of the DDP package provided is lower than the
- * driver supports
- */
- ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
-
- /* The signature of the DDP package file provided is invalid */
- ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
-
- /* The DDP package file security revision is too low and not
- * supported by firmware
- */
- ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
-
- /* An error occurred in firmware while loading the DDP package */
- ICE_DDP_PKG_LOAD_ERROR = -11,
-
- /* Other errors */
- ICE_DDP_PKG_ERR = -12
-};
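Because the enum doubles as a user-visible result, callers typically translate it for logging; a richer mapping of this kind lives in the driver's probe path. A minimal sketch, illustrative only:

	static const char *ddp_state_str(enum ice_ddp_state state)
	{
		switch (state) {
		case ICE_DDP_PKG_SUCCESS:
			return "package loaded";
		case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
			return "same package version already loaded";
		case ICE_DDP_PKG_FW_MISMATCH:
			return "package does not match device firmware";
		case ICE_DDP_PKG_INVALID_FILE:
			return "invalid package file";
		default:
			return "package load error";
		}
	}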
-
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 974d14a83b2e..4f42e14ed3ae 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -3,205 +3,7 @@
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
-
-#define ICE_FV_OFFSET_INVAL 0x1FF
-
-/* Extraction Sequence (Field Vector) Table */
-struct ice_fv_word {
- u8 prot_id;
- u16 off; /* Offset within the protocol header */
- u8 resvrd;
-} __packed;
-
-#define ICE_MAX_NUM_PROFILES 256
-
-#define ICE_MAX_FV_WORDS 48
-struct ice_fv {
- struct ice_fv_word ew[ICE_MAX_FV_WORDS];
-};
-
-/* Package and segment headers and tables */
-struct ice_pkg_hdr {
- struct ice_pkg_ver pkg_format_ver;
- __le32 seg_count;
- __le32 seg_offset[];
-};
-
-/* generic segment */
-struct ice_generic_seg_hdr {
-#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_ICE 0x00000010
- __le32 seg_type;
- struct ice_pkg_ver seg_format_ver;
- __le32 seg_size;
- char seg_id[ICE_PKG_NAME_SIZE];
-};
-
-/* ice specific segment */
-
-union ice_device_id {
- struct {
- __le16 device_id;
- __le16 vendor_id;
- } dev_vend_id;
- __le32 id;
-};
-
-struct ice_device_id_entry {
- union ice_device_id device;
- union ice_device_id sub_device;
-};
-
-struct ice_seg {
- struct ice_generic_seg_hdr hdr;
- __le32 device_table_count;
- struct ice_device_id_entry device_table[];
-};
-
-struct ice_nvm_table {
- __le32 table_count;
- __le32 vers[];
-};
-
-struct ice_buf {
-#define ICE_PKG_BUF_SIZE 4096
- u8 buf[ICE_PKG_BUF_SIZE];
-};
-
-struct ice_buf_table {
- __le32 buf_count;
- struct ice_buf buf_array[];
-};
-
-/* global metadata specific segment */
-struct ice_global_metadata_seg {
- struct ice_generic_seg_hdr hdr;
- struct ice_pkg_ver pkg_ver;
- __le32 rsvd;
- char pkg_name[ICE_PKG_NAME_SIZE];
-};
-
-#define ICE_MIN_S_OFF 12
-#define ICE_MAX_S_OFF 4095
-#define ICE_MIN_S_SZ 1
-#define ICE_MAX_S_SZ 4084
-
-/* section information */
-struct ice_section_entry {
- __le32 type;
- __le16 offset;
- __le16 size;
-};
-
-#define ICE_MIN_S_COUNT 1
-#define ICE_MAX_S_COUNT 511
-#define ICE_MIN_S_DATA_END 12
-#define ICE_MAX_S_DATA_END 4096
-
-#define ICE_METADATA_BUF 0x80000000
-
-struct ice_buf_hdr {
- __le16 section_count;
- __le16 data_end;
- struct ice_section_entry section_entry[];
-};
-
-#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
- (ent_sz))
-
-/* ice package section IDs */
-#define ICE_SID_METADATA 1
-#define ICE_SID_XLT0_SW 10
-#define ICE_SID_XLT_KEY_BUILDER_SW 11
-#define ICE_SID_XLT1_SW 12
-#define ICE_SID_XLT2_SW 13
-#define ICE_SID_PROFID_TCAM_SW 14
-#define ICE_SID_PROFID_REDIR_SW 15
-#define ICE_SID_FLD_VEC_SW 16
-#define ICE_SID_CDID_KEY_BUILDER_SW 17
-
-struct ice_meta_sect {
- struct ice_pkg_ver ver;
-#define ICE_META_SECT_NAME_SIZE 28
- char name[ICE_META_SECT_NAME_SIZE];
- __le32 track_id;
-};
-
-#define ICE_SID_CDID_REDIR_SW 18
-
-#define ICE_SID_XLT0_ACL 20
-#define ICE_SID_XLT_KEY_BUILDER_ACL 21
-#define ICE_SID_XLT1_ACL 22
-#define ICE_SID_XLT2_ACL 23
-#define ICE_SID_PROFID_TCAM_ACL 24
-#define ICE_SID_PROFID_REDIR_ACL 25
-#define ICE_SID_FLD_VEC_ACL 26
-#define ICE_SID_CDID_KEY_BUILDER_ACL 27
-#define ICE_SID_CDID_REDIR_ACL 28
-
-#define ICE_SID_XLT0_FD 30
-#define ICE_SID_XLT_KEY_BUILDER_FD 31
-#define ICE_SID_XLT1_FD 32
-#define ICE_SID_XLT2_FD 33
-#define ICE_SID_PROFID_TCAM_FD 34
-#define ICE_SID_PROFID_REDIR_FD 35
-#define ICE_SID_FLD_VEC_FD 36
-#define ICE_SID_CDID_KEY_BUILDER_FD 37
-#define ICE_SID_CDID_REDIR_FD 38
-
-#define ICE_SID_XLT0_RSS 40
-#define ICE_SID_XLT_KEY_BUILDER_RSS 41
-#define ICE_SID_XLT1_RSS 42
-#define ICE_SID_XLT2_RSS 43
-#define ICE_SID_PROFID_TCAM_RSS 44
-#define ICE_SID_PROFID_REDIR_RSS 45
-#define ICE_SID_FLD_VEC_RSS 46
-#define ICE_SID_CDID_KEY_BUILDER_RSS 47
-#define ICE_SID_CDID_REDIR_RSS 48
-
-#define ICE_SID_RXPARSER_MARKER_PTYPE 55
-#define ICE_SID_RXPARSER_BOOST_TCAM 56
-#define ICE_SID_RXPARSER_METADATA_INIT 58
-#define ICE_SID_TXPARSER_BOOST_TCAM 66
-
-#define ICE_SID_XLT0_PE 80
-#define ICE_SID_XLT_KEY_BUILDER_PE 81
-#define ICE_SID_XLT1_PE 82
-#define ICE_SID_XLT2_PE 83
-#define ICE_SID_PROFID_TCAM_PE 84
-#define ICE_SID_PROFID_REDIR_PE 85
-#define ICE_SID_FLD_VEC_PE 86
-#define ICE_SID_CDID_KEY_BUILDER_PE 87
-#define ICE_SID_CDID_REDIR_PE 88
-
-/* Label Metadata section IDs */
-#define ICE_SID_LBL_FIRST 0x80000010
-#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
-/* The following define MUST be updated to reflect the last label section ID */
-#define ICE_SID_LBL_LAST 0x80000038
-
-enum ice_block {
- ICE_BLK_SW = 0,
- ICE_BLK_ACL,
- ICE_BLK_FD,
- ICE_BLK_RSS,
- ICE_BLK_PE,
- ICE_BLK_COUNT
-};
-
-enum ice_sect {
- ICE_XLT0 = 0,
- ICE_XLT_KB,
- ICE_XLT1,
- ICE_XLT2,
- ICE_PROF_TCAM,
- ICE_PROF_REDIR,
- ICE_VEC_TBL,
- ICE_CDID_KB,
- ICE_CDID_REDIR,
- ICE_SECT_COUNT
-};
+#include "ice_ddp.h"
/* Packet Type (PTYPE) values */
#define ICE_PTYPE_MAC_PAY 1
@@ -283,134 +85,6 @@ struct ice_ptype_attributes {
enum ice_ptype_attrib_type attrib;
};
-/* package labels */
-struct ice_label {
- __le16 value;
-#define ICE_PKG_LABEL_SIZE 64
- char name[ICE_PKG_LABEL_SIZE];
-};
-
-struct ice_label_section {
- __le16 count;
- struct ice_label label[];
-};
-
-#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- struct_size((struct ice_label_section *)0, label, 1) - \
- sizeof(struct ice_label), sizeof(struct ice_label))
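Worked through for labels, assuming no implicit padding (which holds here, since every member is byte-aligned): struct ice_label is 2 + 64 = 66 bytes, so the hd_sz term is struct_size(..., label, 1) - sizeof(struct ice_label) = 68 - 66 = 2; an ice_buf_hdr with one section entry is 12 bytes; hence ICE_MAX_LABELS_IN_BUF = (4096 - 12 - 2) / 66 = 61 labels per buffer.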
-
-struct ice_sw_fv_section {
- __le16 count;
- __le16 base_offset;
- struct ice_fv fv[];
-};
-
-struct ice_sw_fv_list_entry {
- struct list_head list_entry;
- u32 profile_id;
- struct ice_fv *fv_ptr;
-};
-
-/* The BOOST TCAM stores the match packet header in reverse order, meaning
- * the fields are reversed; in addition, this means that the normally big endian
- * fields of the packet are now little endian.
- */
-struct ice_boost_key_value {
-#define ICE_BOOST_REMAINING_HV_KEY 15
- u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
- __le16 hv_dst_port_key;
- __le16 hv_src_port_key;
- u8 tcam_search_key;
-} __packed;
-
-struct ice_boost_key {
- struct ice_boost_key_value key;
- struct ice_boost_key_value key2;
-};
-
-/* package Boost TCAM entry */
-struct ice_boost_tcam_entry {
- __le16 addr;
- __le16 reserved;
- /* break up the 40 bytes of key into different fields */
- struct ice_boost_key key;
- u8 boost_hit_index_group;
- /* The following contains bitfields which are not on byte boundaries.
- * These fields are currently unused by driver software.
- */
-#define ICE_BOOST_BIT_FIELDS 43
- u8 bit_fields[ICE_BOOST_BIT_FIELDS];
-};
-
-struct ice_boost_tcam_section {
- __le16 count;
- __le16 reserved;
- struct ice_boost_tcam_entry tcam[];
-};
-
-#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
- sizeof(struct ice_boost_tcam_entry), \
- sizeof(struct ice_boost_tcam_entry))
-
-/* package Marker Ptype TCAM entry */
-struct ice_marker_ptype_tcam_entry {
-#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
- __le16 addr;
- __le16 ptype;
- u8 keys[20];
-};
-
-struct ice_marker_ptype_tcam_section {
- __le16 count;
- __le16 reserved;
- struct ice_marker_ptype_tcam_entry tcam[];
-};
-
-#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
- ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
- sizeof(struct ice_marker_ptype_tcam_entry), \
- sizeof(struct ice_marker_ptype_tcam_entry))
-
-struct ice_xlt1_section {
- __le16 count;
- __le16 offset;
- u8 value[];
-};
-
-struct ice_xlt2_section {
- __le16 count;
- __le16 offset;
- __le16 value[];
-};
-
-struct ice_prof_redir_section {
- __le16 count;
- __le16 offset;
- u8 redir_value[];
-};
-
-/* package buffer building */
-
-struct ice_buf_build {
- struct ice_buf buf;
- u16 reserved_section_table_entries;
-};
-
-struct ice_pkg_enum {
- struct ice_buf_table *buf_table;
- u32 buf_idx;
-
- u32 type;
- struct ice_buf_hdr *buf;
- u32 sect_idx;
- void *sect;
- u32 sect_type;
-
- u32 entry_idx;
- void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
-};
-
/* Tunnel enabling */
enum ice_tunnel_type {
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 40e678cfb507..aff7a141c30d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -208,6 +208,11 @@ static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
void ice_fltr_remove_all(struct ice_vsi *vsi)
{
ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+ /* sync netdev filters if they exist */
+ if (vsi->netdev) {
+ __dev_uc_unsync(vsi->netdev, NULL);
+ __dev_mc_unsync(vsi->netdev, NULL);
+ }
}
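The NULL callback is deliberate: passing NULL to __dev_uc_unsync()/__dev_mc_unsync() drops the synced state of every address unconditionally instead of giving each address a veto. With a non-NULL callback the pattern would look like this (a sketch; is_mgmt_addr() is hypothetical):

	/* returning non-zero keeps the address synced */
	static int keep_mgmt_addr(struct net_device *netdev, const u8 *addr)
	{
		return is_mgmt_addr(addr) ? -EBUSY : 0;
	}

	__dev_uc_unsync(vsi->netdev, keep_mgmt_addr);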
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 43e199b5b513..8dec748bb53a 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -3,15 +3,18 @@
#include "ice.h"
#include "ice_lib.h"
-#include <linux/tty_driver.h>
/**
- * ice_gnss_do_write - Write data to internal GNSS
+ * ice_gnss_do_write - Write data to internal GNSS receiver
* @pf: board private structure
* @buf: command buffer
* @size: command buffer size
*
* Write UBX command data to the GNSS receiver
+ *
+ * Return:
+ * * number of bytes written - success
+ * * negative - error code
*/
static unsigned int
ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
@@ -82,6 +85,12 @@ static void ice_gnss_write_pending(struct kthread_work *work)
write_work);
struct ice_pf *pf = gnss->back;
+ if (!pf)
+ return;
+
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return;
+
if (!list_empty(&gnss->queue)) {
struct gnss_write_buf *write_buf = NULL;
unsigned int bytes;
@@ -102,16 +111,14 @@ static void ice_gnss_write_pending(struct kthread_work *work)
* ice_gnss_read - Read data from internal GNSS module
* @work: GNSS read work structure
*
- * Read the data from internal GNSS receiver, number of bytes read will be
- * returned in *read_data parameter.
+ * Read the data from the internal GNSS receiver and write it to gnss_dev.
*/
static void ice_gnss_read(struct kthread_work *work)
{
struct gnss_serial *gnss = container_of(work, struct gnss_serial,
read_work.work);
+ unsigned int i, bytes_read, data_len, count;
struct ice_aqc_link_topo_addr link_topo;
- unsigned int i, bytes_read, data_len;
- struct tty_port *port;
struct ice_pf *pf;
struct ice_hw *hw;
__be16 data_len_b;
@@ -120,14 +127,15 @@ static void ice_gnss_read(struct kthread_work *work)
int err = 0;
pf = gnss->back;
- if (!pf || !gnss->tty || !gnss->tty->port) {
+ if (!pf) {
err = -EFAULT;
goto exit;
}
- hw = &pf->hw;
- port = gnss->tty->port;
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return;
+ hw = &pf->hw;
buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
@@ -159,7 +167,6 @@ static void ice_gnss_read(struct kthread_work *work)
}
data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
- data_len = tty_buffer_request_room(port, data_len);
if (!data_len) {
err = -ENOMEM;
goto exit_buf;
@@ -179,12 +186,11 @@ static void ice_gnss_read(struct kthread_work *work)
goto exit_buf;
}
- /* Send the data to the tty layer for users to read. This doesn't
- * actually push the data through unless tty->low_latency is set.
- */
- tty_insert_flip_string(port, buf, i);
- tty_flip_buffer_push(port);
-
+ count = gnss_insert_raw(pf->gnss_dev, buf, i);
+ if (count != i)
+ dev_warn(ice_pf_to_dev(pf),
+ "gnss_insert_raw ret=%d size=%d\n",
+ count, i);
exit_buf:
free_page((unsigned long)buf);
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work,
@@ -195,11 +201,16 @@ exit:
}
/**
- * ice_gnss_struct_init - Initialize GNSS structure for the TTY
+ * ice_gnss_struct_init - Initialize GNSS receiver
* @pf: Board private structure
- * @index: TTY device index
+ *
+ * Initialize GNSS structures and workers.
+ *
+ * Return:
+ * * pointer to initialized gnss_serial struct - success
+ * * NULL - error
*/
-static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
+static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct kthread_worker *kworker;
@@ -209,17 +220,12 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
if (!gnss)
return NULL;
- mutex_init(&gnss->gnss_mutex);
- gnss->open_count = 0;
gnss->back = pf;
- pf->gnss_serial[index] = gnss;
+ pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
INIT_LIST_HEAD(&gnss->queue);
kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
- /* Allocate a kworker for handling work required for the GNSS TTY
- * writes.
- */
kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
@@ -232,140 +238,100 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
}
/**
- * ice_gnss_tty_open - Initialize GNSS structures on TTY device open
- * @tty: pointer to the tty_struct
- * @filp: pointer to the file
+ * ice_gnss_open - Open GNSS device
+ * @gdev: pointer to the gnss device struct
+ *
+ * Open GNSS device and start filling the read buffer for the consumer.
*
- * This routine is mandatory. If this routine is not filled in, the attempted
- * open will fail with ENODEV.
+ * Return:
+ * * 0 - success
+ * * negative - error code
*/
-static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp)
+static int ice_gnss_open(struct gnss_device *gdev)
{
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
struct gnss_serial *gnss;
- struct ice_pf *pf;
- pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return -EFAULT;
- /* Clear the pointer in case something fails */
- tty->driver_data = NULL;
-
- /* Get the serial object associated with this tty pointer */
- gnss = pf->gnss_serial[tty->index];
- if (!gnss) {
- /* Initialize GNSS struct on the first device open */
- gnss = ice_gnss_struct_init(pf, tty->index);
- if (!gnss)
- return -ENOMEM;
- }
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return -EFAULT;
- mutex_lock(&gnss->gnss_mutex);
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return -ENODEV;
- /* Save our structure within the tty structure */
- tty->driver_data = gnss;
- gnss->tty = tty;
- gnss->open_count++;
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0);
- mutex_unlock(&gnss->gnss_mutex);
-
return 0;
}
/**
- * ice_gnss_tty_close - Cleanup GNSS structures on tty device close
- * @tty: pointer to the tty_struct
- * @filp: pointer to the file
+ * ice_gnss_close - Close GNSS device
+ * @gdev: pointer to the gnss device struct
+ *
+ * Close GNSS device, cancel worker, stop filling the read buffer.
*/
-static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp)
+static void ice_gnss_close(struct gnss_device *gdev)
{
- struct gnss_serial *gnss = tty->driver_data;
- struct ice_pf *pf;
-
- if (!gnss)
- return;
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
+ struct gnss_serial *gnss;
- pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return;
- mutex_lock(&gnss->gnss_mutex);
-
- if (!gnss->open_count) {
- /* Port was never opened */
- dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n");
- goto exit;
- }
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return;
- gnss->open_count--;
- if (gnss->open_count <= 0) {
- /* Port is in shutdown state */
- kthread_cancel_delayed_work_sync(&gnss->read_work);
- }
-exit:
- mutex_unlock(&gnss->gnss_mutex);
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
}
/**
- * ice_gnss_tty_write - Write GNSS data
- * @tty: pointer to the tty_struct
+ * ice_gnss_write - Write to GNSS device
+ * @gdev: pointer to the gnss device struct
* @buf: pointer to the user data
- * @count: the number of characters queued to be sent to the HW
+ * @count: size of the buffer to be sent to the GNSS device
*
- * The write function call is called by the user when there is data to be sent
- * to the hardware. First the tty core receives the call, and then it passes the
- * data on to the tty driver's write function. The tty core also tells the tty
- * driver the size of the data being sent.
- * If any errors happen during the write call, a negative error value should be
- * returned instead of the number of characters queued to be written.
+ * Return:
+ * * number of written bytes - success
+ * * negative - error code
*/
static int
-ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
{
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
struct gnss_write_buf *write_buf;
struct gnss_serial *gnss;
unsigned char *cmd_buf;
- struct ice_pf *pf;
int err = count;
/* We cannot write a single byte using our I2C implementation. */
if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
return -EINVAL;
- gnss = tty->driver_data;
- if (!gnss)
- return -EFAULT;
-
- pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return -EFAULT;
- /* Only allow to write on TTY 0 */
- if (gnss != pf->gnss_serial[0])
- return -EIO;
-
- mutex_lock(&gnss->gnss_mutex);
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return -EFAULT;
- if (!gnss->open_count) {
- err = -EINVAL;
- goto exit;
- }
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return -ENODEV;
cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
- if (!cmd_buf) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!cmd_buf)
+ return -ENOMEM;
memcpy(cmd_buf, buf, count);
-
- /* Send the data out to a hardware port */
write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
if (!write_buf) {
kfree(cmd_buf);
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
write_buf->buf = cmd_buf;
@@ -373,141 +339,89 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
INIT_LIST_HEAD(&write_buf->queue);
list_add_tail(&write_buf->queue, &gnss->queue);
kthread_queue_work(gnss->kworker, &gnss->write_work);
-exit:
- mutex_unlock(&gnss->gnss_mutex);
+
return err;
}
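The write path is a small producer/consumer: ice_gnss_write() queues a buffer and kicks the kworker, and ice_gnss_write_pending() above drains the list, with the single kworker thread acting as the lone consumer. The generic shape of that pattern, as a sketch with illustrative types (a real user must also serialize list access, e.g. with a lock, if producers can race):

	struct work_item {
		struct list_head queue;
		void *buf;
		size_t size;
	};

	static void producer_enqueue(struct list_head *q,
				     struct kthread_worker *kw,
				     struct kthread_work *work,
				     struct work_item *item)
	{
		INIT_LIST_HEAD(&item->queue);
		list_add_tail(&item->queue, q);	/* publish the item */
		kthread_queue_work(kw, work);	/* wake the consumer */
	}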
+static const struct gnss_operations ice_gnss_ops = {
+ .open = ice_gnss_open,
+ .close = ice_gnss_close,
+ .write_raw = ice_gnss_write,
+};
+
/**
- * ice_gnss_tty_write_room - Returns the number of characters to be written.
- * @tty: pointer to the tty_struct
+ * ice_gnss_register - Register GNSS receiver
+ * @pf: Board private structure
+ *
+ * Allocate and register GNSS receiver in the Linux GNSS subsystem.
*
- * This routine returns the number of characters the tty driver will accept
- * for queuing to be written, or 0 if either the TTY is not open or the user
- * tries to write to a TTY other than the first.
+ * Return:
+ * * 0 - success
+ * * negative - error code
*/
-static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty)
+static int ice_gnss_register(struct ice_pf *pf)
{
- struct gnss_serial *gnss = tty->driver_data;
-
- /* Only allow to write on TTY 0 */
- if (!gnss || gnss != gnss->back->gnss_serial[0])
- return 0;
-
- mutex_lock(&gnss->gnss_mutex);
+ struct gnss_device *gdev;
+ int ret;
+
+ gdev = gnss_allocate_device(ice_pf_to_dev(pf));
+ if (!gdev) {
+ dev_err(ice_pf_to_dev(pf),
+ "gnss_allocate_device returns NULL\n");
+ return -ENOMEM;
+ }
- if (!gnss->open_count) {
- mutex_unlock(&gnss->gnss_mutex);
- return 0;
+ gdev->ops = &ice_gnss_ops;
+ gdev->type = GNSS_TYPE_UBX;
+ gnss_set_drvdata(gdev, pf);
+ ret = gnss_register_device(gdev);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf), "gnss_register_device err=%d\n",
+ ret);
+ gnss_put_device(gdev);
+ } else {
+ pf->gnss_dev = gdev;
}
- mutex_unlock(&gnss->gnss_mutex);
- return ICE_GNSS_TTY_WRITE_BUF;
+ return ret;
}
-static const struct tty_operations tty_gps_ops = {
- .open = ice_gnss_tty_open,
- .close = ice_gnss_tty_close,
- .write = ice_gnss_tty_write,
- .write_room = ice_gnss_tty_write_room,
-};
-
/**
- * ice_gnss_create_tty_driver - Create a TTY driver for GNSS
+ * ice_gnss_deregister - Deregister GNSS receiver
* @pf: Board private structure
+ *
+ * Deregister GNSS receiver from the Linux GNSS subsystem and
+ * release its resources.
*/
-static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
+static void ice_gnss_deregister(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- const int ICE_TTYDRV_NAME_MAX = 14;
- struct tty_driver *tty_driver;
- char *ttydrv_name;
- unsigned int i;
- int err;
-
- tty_driver = tty_alloc_driver(ICE_GNSS_TTY_MINOR_DEVICES,
- TTY_DRIVER_REAL_RAW);
- if (IS_ERR(tty_driver)) {
- dev_err(dev, "Failed to allocate memory for GNSS TTY\n");
- return NULL;
- }
-
- ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL);
- if (!ttydrv_name) {
- tty_driver_kref_put(tty_driver);
- return NULL;
+ if (pf->gnss_dev) {
+ gnss_deregister_device(pf->gnss_dev);
+ gnss_put_device(pf->gnss_dev);
+ pf->gnss_dev = NULL;
}
-
- snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_",
- (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn));
-
- /* Initialize the tty driver*/
- tty_driver->owner = THIS_MODULE;
- tty_driver->driver_name = dev_driver_string(dev);
- tty_driver->name = (const char *)ttydrv_name;
- tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- tty_driver->subtype = SERIAL_TYPE_NORMAL;
- tty_driver->init_termios = tty_std_termios;
- tty_driver->init_termios.c_iflag &= ~INLCR;
- tty_driver->init_termios.c_iflag |= IGNCR;
- tty_driver->init_termios.c_oflag &= ~OPOST;
- tty_driver->init_termios.c_lflag &= ~ICANON;
- tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX);
- /* baud rate 9600 */
- tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600);
- tty_driver->driver_state = pf;
- tty_set_operations(tty_driver, &tty_gps_ops);
-
- for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
- pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
- GFP_KERNEL);
- if (!pf->gnss_tty_port[i])
- goto err_out;
-
- pf->gnss_serial[i] = NULL;
-
- tty_port_init(pf->gnss_tty_port[i]);
- tty_port_link_device(pf->gnss_tty_port[i], tty_driver, i);
- }
-
- err = tty_register_driver(tty_driver);
- if (err) {
- dev_err(dev, "Failed to register TTY driver err=%d\n", err);
- goto err_out;
- }
-
- for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
- dev_info(dev, "%s%d registered\n", ttydrv_name, i);
-
- return tty_driver;
-
-err_out:
- while (i--) {
- tty_port_destroy(pf->gnss_tty_port[i]);
- kfree(pf->gnss_tty_port[i]);
- }
- kfree(ttydrv_name);
- tty_driver_kref_put(pf->ice_gnss_tty_driver);
-
- return NULL;
}
/**
- * ice_gnss_init - Initialize GNSS TTY support
+ * ice_gnss_init - Initialize GNSS support
* @pf: Board private structure
*/
void ice_gnss_init(struct ice_pf *pf)
{
- struct tty_driver *tty_driver;
+ int ret;
- tty_driver = ice_gnss_create_tty_driver(pf);
- if (!tty_driver)
+ pf->gnss_serial = ice_gnss_struct_init(pf);
+ if (!pf->gnss_serial)
return;
- pf->ice_gnss_tty_driver = tty_driver;
-
- set_bit(ICE_FLAG_GNSS, pf->flags);
- dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n");
+ ret = ice_gnss_register(pf);
+ if (!ret) {
+ set_bit(ICE_FLAG_GNSS, pf->flags);
+ dev_info(ice_pf_to_dev(pf), "GNSS init successful\n");
+ } else {
+ ice_gnss_exit(pf);
+ dev_err(ice_pf_to_dev(pf), "GNSS init failure\n");
+ }
}
/**
@@ -516,31 +430,20 @@ void ice_gnss_init(struct ice_pf *pf)
*/
void ice_gnss_exit(struct ice_pf *pf)
{
- unsigned int i;
+ ice_gnss_deregister(pf);
+ clear_bit(ICE_FLAG_GNSS, pf->flags);
- if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver)
- return;
-
- for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
- if (pf->gnss_tty_port[i]) {
- tty_port_destroy(pf->gnss_tty_port[i]);
- kfree(pf->gnss_tty_port[i]);
- }
+ if (pf->gnss_serial) {
+ struct gnss_serial *gnss = pf->gnss_serial;
- if (pf->gnss_serial[i]) {
- struct gnss_serial *gnss = pf->gnss_serial[i];
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ kthread_destroy_worker(gnss->kworker);
+ gnss->kworker = NULL;
- kthread_cancel_work_sync(&gnss->write_work);
- kthread_cancel_delayed_work_sync(&gnss->read_work);
- kfree(gnss);
- pf->gnss_serial[i] = NULL;
- }
+ kfree(gnss);
+ pf->gnss_serial = NULL;
}
-
- tty_unregister_driver(pf->ice_gnss_tty_driver);
- kfree(pf->ice_gnss_tty_driver->name);
- tty_driver_kref_put(pf->ice_gnss_tty_driver);
- pf->ice_gnss_tty_driver = NULL;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index f454dd1d9285..31db0701d13f 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -4,15 +4,8 @@
#ifndef _ICE_GNSS_H_
#define _ICE_GNSS_H_
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-
#define ICE_E810T_GNSS_I2C_BUS 0x2
#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
-/* Create 2 minor devices, both using the same GNSS module. First one is RW,
- * second one RO.
- */
-#define ICE_GNSS_TTY_MINOR_DEVICES 2
#define ICE_GNSS_TTY_WRITE_BUF 250
#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
#define ICE_MAX_I2C_WRITE_BYTES 4
@@ -36,13 +29,9 @@ struct gnss_write_buf {
unsigned char *buf;
};
-
/**
* struct gnss_serial - data used to initialize GNSS TTY port
* @back: back pointer to PF
- * @tty: pointer to the tty for this device
- * @open_count: number of times this port has been opened
- * @gnss_mutex: gnss_mutex used to protect GNSS serial operations
* @kworker: kwork thread for handling periodic work
* @read_work: read_work function for handling GNSS reads
* @write_work: write_work function for handling GNSS writes
@@ -50,16 +39,13 @@ struct gnss_write_buf {
*/
struct gnss_serial {
struct ice_pf *back;
- struct tty_struct *tty;
- int open_count;
- struct mutex gnss_mutex; /* protects GNSS serial structure */
struct kthread_worker *kworker;
struct kthread_delayed_work read_work;
struct kthread_work write_work;
struct list_head queue;
};
-#if IS_ENABLED(CONFIG_TTY)
+#if IS_ENABLED(CONFIG_ICE_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
bool ice_gnss_is_gps_present(struct ice_hw *hw);
@@ -70,5 +56,5 @@ static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
{
return false;
}
-#endif /* IS_ENABLED(CONFIG_TTY) */
+#endif /* IS_ENABLED(CONFIG_ICE_GNSS) */
#endif /* _ICE_GNSS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 895c32bcc8b5..e6bc2285071e 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -6,6 +6,8 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+static DEFINE_XARRAY_ALLOC1(ice_aux_id);
+
/**
* ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
* @pf: pointer to PF struct
@@ -246,6 +248,17 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)
}
/**
+ * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
+ * @pf: board private structure to initialize
+ */
+static void ice_free_rdma_qvector(struct ice_pf *pf)
+{
+ pf->num_avail_sw_msix -= pf->num_rdma_msix;
+ ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
+ ICE_RES_RDMA_VEC_ID);
+}
+
+/**
* ice_adev_release - function to be mapped to AUX dev's release op
* @dev: pointer to device to free
*/
@@ -331,12 +344,48 @@ int ice_init_rdma(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
int ret;
+ if (!ice_is_rdma_ena(pf)) {
+ dev_warn(dev, "RDMA is not supported on this device\n");
+ return 0;
+ }
+
+ ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
+ GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "Failed to allocate device ID for AUX driver\n");
+ return -ENOMEM;
+ }
+
/* Reserve vector resources */
ret = ice_reserve_rdma_qvector(pf);
if (ret < 0) {
dev_err(dev, "failed to reserve vectors for RDMA\n");
- return ret;
+ goto err_reserve_rdma_qvector;
}
pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
- return ice_plug_aux_dev(pf);
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ goto err_plug_aux_dev;
+ return 0;
+
+err_plug_aux_dev:
+ ice_free_rdma_qvector(pf);
+err_reserve_rdma_qvector:
+ pf->adev = NULL;
+ xa_erase(&ice_aux_id, pf->aux_idx);
+ return ret;
+}
+
+/**
+ * ice_deinit_rdma - deinitialize RDMA on PF
+ * @pf: ptr to ice_pf
+ */
+void ice_deinit_rdma(struct ice_pf *pf)
+{
+ if (!ice_is_rdma_ena(pf))
+ return;
+
+ ice_unplug_aux_dev(pf);
+ ice_free_rdma_qvector(pf);
+ xa_erase(&ice_aux_id, pf->aux_idx);
}
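The xarray hands out process-wide unique auxiliary-device IDs across PFs. The allocate/release pair in isolation (a sketch using the same kernel API):

	static DEFINE_XARRAY_ALLOC1(example_ids);	/* IDs start at 1 */

	static int example_get_id(u32 *id)
	{
		/* no payload stored; only a unique ID in [1, INT_MAX] */
		return xa_alloc(&example_ids, id, NULL,
				XA_LIMIT(1, INT_MAX), GFP_KERNEL);
	}

	static void example_put_id(u32 id)
	{
		xa_erase(&example_ids, id);
	}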
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a596e07b3ce9..781475480ff2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -166,14 +166,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
- * @vf: the VF associated with this VSI, if any
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
enum ice_vsi_type vsi_type = vsi->type;
struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = vsi->vf;
if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
return;
@@ -282,10 +282,10 @@ static int ice_get_free_slot(void *array, int size, int curr)
}
/**
- * ice_vsi_delete - delete a VSI from the switch
+ * ice_vsi_delete_from_hw - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
-void ice_vsi_delete(struct ice_vsi *vsi)
+static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
@@ -348,47 +348,144 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
}
/**
- * ice_vsi_clear - clean up and deallocate the provided VSI
+ * ice_vsi_free_stats - Free the ring statistics structures
+ * @vsi: VSI pointer
+ */
+static void ice_vsi_free_stats(struct ice_vsi *vsi)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ ice_for_each_alloc_txq(vsi, i) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+
+ ice_for_each_alloc_rxq(vsi, i) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat->rx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+}
+
+/**
+ * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
+ * @vsi: VSI which is having stats allocated
+ */
+static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
+{
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
+ struct ice_vsi_stats *vsi_stats;
+ struct ice_pf *pf = vsi->back;
+ u16 i;
+
+ vsi_stats = pf->vsi_stats[vsi->idx];
+ tx_ring_stats = vsi_stats->tx_ring_stats;
+ rx_ring_stats = vsi_stats->rx_ring_stats;
+
+ /* Allocate Tx ring stats */
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_tx_ring *ring;
+
+ ring = vsi->tx_rings[i];
+ ring_stats = tx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(tx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ /* Allocate Rx ring stats */
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_rx_ring *ring;
+
+ ring = vsi->rx_rings[i];
+ ring_stats = rx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(rx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_free_stats(vsi);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_free - clean up and deallocate the provided VSI
* @vsi: pointer to VSI being cleared
*
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
- *
- * Returns 0 on success, negative on failure
*/
-int ice_vsi_clear(struct ice_vsi *vsi)
+static void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
- if (!vsi)
- return 0;
-
- if (!vsi->back)
- return -EINVAL;
+ if (!vsi || !vsi->back)
+ return;
pf = vsi->back;
dev = ice_pf_to_dev(pf);
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
- return -EINVAL;
+ return;
}
mutex_lock(&pf->sw_mutex);
/* updates the PF for this cleared VSI */
pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
- pf->next_vsi = vsi->idx;
- if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
- pf->next_vsi = vsi->idx;
+ pf->next_vsi = vsi->idx;
+ ice_vsi_free_stats(vsi);
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
devm_kfree(dev, vsi);
+}
- return 0;
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+ ice_vsi_delete_from_hw(vsi);
+ ice_vsi_free(vsi);
}
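The ring-stats helpers moved earlier in this hunk publish per-ring pointers with WRITE_ONCE() and retire them with kfree_rcu(), so lockless readers on the datapath never dereference a freed object. A reduced sketch of that publish/retire pattern; the demo_* names are illustrative, not driver API:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_stats {
	u64 pkts;
	struct rcu_head rcu;	/* required by kfree_rcu() */
};

/* publish: RCU readers of *slot see either the old pointer or the
 * new one, never a torn value
 */
static int demo_publish(struct demo_stats **slot)
{
	struct demo_stats *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;
	WRITE_ONCE(*slot, s);
	return 0;
}

/* retire: defer the free until in-flight RCU readers are done */
static void demo_retire(struct demo_stats **slot)
{
	struct demo_stats *s = *slot;

	if (s) {
		kfree_rcu(s, rcu);
		WRITE_ONCE(*slot, NULL);
	}
}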
/**
@@ -461,6 +558,10 @@ static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
if (!pf->vsi_stats)
return -ENOENT;
+ if (pf->vsi_stats[vsi->idx])
+ /* realloc will happen in rebuild path */
+ return 0;
+
vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
if (!vsi_stat)
return -ENOMEM;
@@ -491,128 +592,93 @@ err_alloc_tx:
}
/**
- * ice_vsi_alloc - Allocates the next available struct VSI in the PF
- * @pf: board private structure
- * @vsi_type: type of VSI
+ * ice_vsi_alloc_def - set default values for already allocated VSI
+ * @vsi: ptr to VSI
* @ch: ptr to channel
- * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
- *
- * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL,
- * it may be NULL in the case there is no association with a VF. For
- * ICE_VSI_VF the VF pointer *must not* be NULL.
- *
- * returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
- struct ice_channel *ch, struct ice_vf *vf)
+static int
+ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_vsi *vsi = NULL;
-
- if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
- return NULL;
-
- /* Need to protect the allocation of the VSIs at the PF level */
- mutex_lock(&pf->sw_mutex);
-
- /* If we have already allocated our maximum number of VSIs,
- * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
- * is available to be populated
- */
- if (pf->next_vsi == ICE_NO_VSI) {
- dev_dbg(dev, "out of VSI slots!\n");
- goto unlock_pf;
+ if (vsi->type != ICE_VSI_CHNL) {
+ ice_vsi_set_num_qs(vsi);
+ if (ice_vsi_alloc_arrays(vsi))
+ return -ENOMEM;
}
- vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
- if (!vsi)
- goto unlock_pf;
-
- vsi->type = vsi_type;
- vsi->back = pf;
- set_bit(ICE_VSI_DOWN, vsi->state);
-
- if (vsi_type == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf);
- else if (vsi_type != ICE_VSI_CHNL)
- ice_vsi_set_num_qs(vsi, NULL);
-
switch (vsi->type) {
case ICE_VSI_SWITCHDEV_CTRL:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup eswitch MSIX irq handler for VSI */
vsi->irq_handler = ice_eswitch_msix_clean_rings;
break;
case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
case ICE_VSI_CTRL:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup ctrl VSI MSIX irq handler */
vsi->irq_handler = ice_msix_clean_ctrl_vsi;
-
- /* For the PF control VSI this is NULL, for the VF control VSI
- * this will be the first VF to allocate it.
- */
- vsi->vf = vf;
- break;
- case ICE_VSI_VF:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
- vsi->vf = vf;
break;
case ICE_VSI_CHNL:
if (!ch)
- goto err_rings;
+ return -EINVAL;
+
vsi->num_rxq = ch->num_rxq;
vsi->num_txq = ch->num_txq;
vsi->next_base_q = ch->base_q;
break;
+ case ICE_VSI_VF:
case ICE_VSI_LB:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
break;
default:
- dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
- goto unlock_pf;
+ ice_vsi_free_arrays(vsi);
+ return -EINVAL;
}
- if (vsi->type == ICE_VSI_CTRL && !vf) {
- /* Use the last VSI slot as the index for PF control VSI */
- vsi->idx = pf->num_alloc_vsi - 1;
- pf->ctrl_vsi_idx = vsi->idx;
- pf->vsi[vsi->idx] = vsi;
- } else {
- /* fill slot and make note of the index */
- vsi->idx = pf->next_vsi;
- pf->vsi[pf->next_vsi] = vsi;
+ return 0;
+}
+
+/**
+ * ice_vsi_alloc - Allocates the next available struct VSI in the PF
+ * @pf: board private structure
+ *
+ * Reserves a VSI index from the PF and allocates an empty VSI structure
+ * without a type. The VSI structure must later be initialized by calling
+ * ice_vsi_cfg().
+ *
+ * returns a pointer to a VSI on success, NULL on failure.
+ */
+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi = NULL;
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->sw_mutex);
+
+ /* If we have already allocated our maximum number of VSIs,
+ * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
+ * is available to be populated
+ */
+ if (pf->next_vsi == ICE_NO_VSI) {
+ dev_dbg(dev, "out of VSI slots!\n");
+ goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL && vf)
- vf->ctrl_vsi_idx = vsi->idx;
+ vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
+ if (!vsi)
+ goto unlock_pf;
- /* allocate memory for Tx/Rx ring stat pointers */
- if (ice_vsi_alloc_stat_arrays(vsi))
- goto err_rings;
+ vsi->back = pf;
+ set_bit(ICE_VSI_DOWN, vsi->state);
- goto unlock_pf;
+ /* fill slot and make note of the index */
+ vsi->idx = pf->next_vsi;
+ pf->vsi[pf->next_vsi] = vsi;
+
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
-err_rings:
- devm_kfree(dev, vsi);
- vsi = NULL;
unlock_pf:
mutex_unlock(&pf->sw_mutex);
return vsi;
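ice_vsi_alloc() is now a pure slot reservation: it claims pf->next_vsi under sw_mutex and returns a typeless shell, deferring all type-specific work to ice_vsi_cfg(). A hypothetical condensed caller, mirroring what ice_vsi_setup() does further down in this patch:

/* Hypothetical flow only; the real ice_vsi_setup() adds parameter
 * validation and PF-type follow-up work.
 */
static struct ice_vsi *demo_create(struct ice_pf *pf,
				   struct ice_vsi_cfg_params *params)
{
	struct ice_vsi *vsi = ice_vsi_alloc(pf); /* slot + zeroed struct */

	if (!vsi)
		return NULL;

	if (ice_vsi_cfg(vsi, params)) {	/* type-specific setup */
		ice_vsi_free(vsi);	/* release the slot again */
		return NULL;
	}
	return vsi;
}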
@@ -1177,12 +1243,15 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
- * @init_vsi: is this call creating a VSI
+ * @vsi_flags: VSI configuration flags
+ *
+ * Set ICE_VSI_FLAG_INIT to initialize a new VSI context, clear it to
+ * reconfigure an existing context.
*
* This initializes a VSI context depending on the VSI type to be added and
* passes it down to the add_vsi aq command to create a new VSI.
*/
-static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
+static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -1244,7 +1313,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
/* if updating VSI context, make sure to set valid_section:
* to indicate which section of VSI context being updated
*/
- if (!init_vsi)
+ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
}
@@ -1257,7 +1326,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (ret)
goto out;
- if (!init_vsi) /* means VSI being updated */
+ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
+ /* means VSI being updated */
/* must indicate which section of the VSI context is
* being modified
*/
@@ -1272,7 +1342,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
}
- if (init_vsi) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(dev, "Add VSI failed, err %d\n", ret);
@@ -1436,7 +1506,7 @@ static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
- * This should only be called after ice_vsi_alloc() which allocates the
+ * This should only be called after ice_vsi_alloc_def() which allocates the
* corresponding SW VSI structure and initializes num_queue_pairs for the
* newly allocated VSI.
*
@@ -1584,106 +1654,6 @@ err_out:
}
/**
- * ice_vsi_free_stats - Free the ring statistics structures
- * @vsi: VSI pointer
- */
-static void ice_vsi_free_stats(struct ice_vsi *vsi)
-{
- struct ice_vsi_stats *vsi_stat;
- struct ice_pf *pf = vsi->back;
- int i;
-
- if (vsi->type == ICE_VSI_CHNL)
- return;
- if (!pf->vsi_stats)
- return;
-
- vsi_stat = pf->vsi_stats[vsi->idx];
- if (!vsi_stat)
- return;
-
- ice_for_each_alloc_txq(vsi, i) {
- if (vsi_stat->tx_ring_stats[i]) {
- kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
- WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
- }
- }
-
- ice_for_each_alloc_rxq(vsi, i) {
- if (vsi_stat->rx_ring_stats[i]) {
- kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
- WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
- }
- }
-
- kfree(vsi_stat->tx_ring_stats);
- kfree(vsi_stat->rx_ring_stats);
- kfree(vsi_stat);
- pf->vsi_stats[vsi->idx] = NULL;
-}
-
-/**
- * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
- * @vsi: VSI which is having stats allocated
- */
-static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
-{
- struct ice_ring_stats **tx_ring_stats;
- struct ice_ring_stats **rx_ring_stats;
- struct ice_vsi_stats *vsi_stats;
- struct ice_pf *pf = vsi->back;
- u16 i;
-
- vsi_stats = pf->vsi_stats[vsi->idx];
- tx_ring_stats = vsi_stats->tx_ring_stats;
- rx_ring_stats = vsi_stats->rx_ring_stats;
-
- /* Allocate Tx ring stats */
- ice_for_each_alloc_txq(vsi, i) {
- struct ice_ring_stats *ring_stats;
- struct ice_tx_ring *ring;
-
- ring = vsi->tx_rings[i];
- ring_stats = tx_ring_stats[i];
-
- if (!ring_stats) {
- ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
- if (!ring_stats)
- goto err_out;
-
- WRITE_ONCE(tx_ring_stats[i], ring_stats);
- }
-
- ring->ring_stats = ring_stats;
- }
-
- /* Allocate Rx ring stats */
- ice_for_each_alloc_rxq(vsi, i) {
- struct ice_ring_stats *ring_stats;
- struct ice_rx_ring *ring;
-
- ring = vsi->rx_rings[i];
- ring_stats = rx_ring_stats[i];
-
- if (!ring_stats) {
- ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
- if (!ring_stats)
- goto err_out;
-
- WRITE_ONCE(rx_ring_stats[i], ring_stats);
- }
-
- ring->ring_stats = ring_stats;
- }
-
- return 0;
-
-err_out:
- ice_vsi_free_stats(vsi);
- return -ENOMEM;
-}
-
-/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -1992,8 +1962,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_2048;
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
@@ -2002,11 +1972,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
#endif
} else {
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-#if (PAGE_SIZE < 8192)
vsi->rx_buf_len = ICE_RXBUF_3072;
-#else
- vsi->rx_buf_len = ICE_RXBUF_2048;
-#endif
}
}
@@ -2645,54 +2611,97 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
}
/**
- * ice_vsi_setup - Set up a VSI by a given type
- * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @vsi_type: VSI type
- * @vf: pointer to VF to which this VSI connects. This field is used primarily
- * for the ICE_VSI_VF type. Other VSI types should pass NULL.
- * @ch: ptr to channel
- *
- * This allocates the sw VSI structure and its queue resources.
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
*
- * Returns pointer to the successfully allocated and configured VSI sw struct on
- * success, NULL on failure.
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
*/
-struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, struct ice_vf *vf,
- struct ice_channel *ch)
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+
+ /* No other VFs left that have control VSI. It is now safe to reclaim
+ * SW interrupts back to the common pool.
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
- struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_CHNL)
- vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
- else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
- else
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
+ /* configure VSI nodes based on number of queues and TC's */
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+ continue;
- if (!vsi) {
- dev_err(dev, "could not allocate VSI\n");
- return NULL;
+ if (vsi->type == ICE_VSI_CHNL) {
+ if (!vsi->alloc_txq && vsi->num_txq)
+ max_txqs[i] = vsi->num_txq;
+ else
+ max_txqs[i] = pf->num_lan_tx;
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
}
- vsi->port_info = pi;
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_cfg_def - configure default VSI based on the type
+ * @vsi: pointer to VSI
+ * @params: the parameters to configure this VSI with
+ */
+static int
+ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_pf *pf = vsi->back;
+ int ret;
+
vsi->vsw = pf->first_sw;
- if (vsi->type == ICE_VSI_PF)
- vsi->ethtype = ETH_P_PAUSE;
+
+ ret = ice_vsi_alloc_def(vsi, params->ch);
+ if (ret)
+ return ret;
+
+ /* allocate memory for Tx/Rx ring stat pointers */
+ if (ice_vsi_alloc_stat_arrays(vsi))
+ goto unroll_vsi_alloc;
ice_alloc_fd_res(vsi);
- if (vsi_type != ICE_VSI_CHNL) {
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto unroll_vsi_alloc;
- }
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_vsi_alloc_stat;
}
/* set RSS capabilities */
@@ -2702,7 +2711,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi, true);
+ ret = ice_vsi_init(vsi, params->flags);
if (ret)
goto unroll_get_qs;
@@ -2733,6 +2742,14 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto unroll_vector_base;
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ if (ret)
+ goto unroll_vector_base;
+ }
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
@@ -2797,30 +2814,156 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_vsi_init;
}
- /* configure VSI nodes based on number of queues and TC's */
- ice_for_each_traffic_class(i) {
- if (!(vsi->tc_cfg.ena_tc & BIT(i)))
- continue;
+ return 0;
- if (vsi->type == ICE_VSI_CHNL) {
- if (!vsi->alloc_txq && vsi->num_txq)
- max_txqs[i] = vsi->num_txq;
- else
- max_txqs[i] = pf->num_lan_tx;
+unroll_vector_base:
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+unroll_alloc_q_vector:
+ ice_vsi_free_q_vectors(vsi);
+unroll_vsi_init:
+ ice_vsi_delete_from_hw(vsi);
+unroll_get_qs:
+ ice_vsi_put_qs(vsi);
+unroll_vsi_alloc_stat:
+ ice_vsi_free_stats(vsi);
+unroll_vsi_alloc:
+ ice_vsi_free_arrays(vsi);
+ return ret;
+}
+
+/**
+ * ice_vsi_cfg - configure a previously allocated VSI
+ * @vsi: pointer to VSI
+ * @params: parameters used to configure this VSI
+ */
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+{
+ struct ice_pf *pf = vsi->back;
+ int ret;
+
+ if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
+ return -EINVAL;
+
+ vsi->type = params->type;
+ vsi->port_info = params->pi;
+
+ /* For VSIs which don't have a connected VF, this will be NULL */
+ vsi->vf = params->vf;
+
+ ret = ice_vsi_cfg_def(vsi, params);
+ if (ret)
+ return ret;
+
+ ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
+ if (ret)
+ ice_vsi_decfg(vsi);
+
+ if (vsi->type == ICE_VSI_CTRL) {
+ if (vsi->vf) {
+ WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
+ vsi->vf->ctrl_vsi_idx = vsi->idx;
} else {
- max_txqs[i] = vsi->alloc_txq;
+ WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
+ pf->ctrl_vsi_idx = vsi->idx;
}
}
- dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (ret) {
- dev_err(dev, "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
- goto unroll_clear_rings;
+ return ret;
+}
+
+/**
+ * ice_vsi_decfg - remove all VSI configuration
+ * @vsi: pointer to VSI
+ */
+void ice_vsi_decfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* The Rx rule only exists, and thus only needs removing, if
+ * the LLDP FW engine is currently stopped
+ */
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+
+ ice_fltr_remove_all(vsi);
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, err);
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ /* return value check can be skipped here; it always returns
+ * 0 if reset is in progress
+ */
+ ice_destroy_xdp_rings(vsi);
+
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_q_vectors(vsi);
+ ice_vsi_put_qs(vsi);
+ ice_vsi_free_arrays(vsi);
+
+ /* SR-IOV determines needed MSIX resources all at once instead of per
+ * VSI since, when VFs are spawned, we know how many VFs there are and how
+ * many interrupts each VF needs. SR-IOV MSIX resources are also
+ * cleared in the same manner.
+ */
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ ice_free_vf_ctrl_res(pf, vsi);
+ } else if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ vsi->base_vector = 0;
}
+ if (vsi->type == ICE_VSI_VF &&
+ vsi->agg_node && vsi->agg_node->valid)
+ vsi->agg_node->num_vsis--;
+ if (vsi->agg_node) {
+ vsi->agg_node->valid = false;
+ vsi->agg_node->agg_id = 0;
+ }
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @params: parameters to use when creating the VSI
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, NULL on failure.
+ */
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi;
+ int ret;
+
+ /* ice_vsi_setup can only initialize a new VSI, and we must have
+ * a port_info structure for it.
+ */
+ if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
+ WARN_ON(!params->pi))
+ return NULL;
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ dev_err(dev, "could not allocate VSI\n");
+ return NULL;
+ }
+
+ ret = ice_vsi_cfg(vsi, params);
+ if (ret)
+ goto err_vsi_cfg;
+
/* Add a switch rule to drop all Tx Flow Control Frames, with lookup
* type ETHERTYPE, from VSIs and prevent malicious VFs from sending
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
@@ -2830,34 +2973,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* be dropped so that VFs cannot send LLDP packets to reconfig DCB
* settings in the HW.
*/
- if (!ice_is_safe_mode(pf))
- if (vsi->type == ICE_VSI_PF) {
- ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
- ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
- }
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
+ ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
+ ice_cfg_sw_lldp(vsi, true, true);
+ }
if (!vsi->agg_node)
ice_set_agg_vsi(vsi);
+
return vsi;
-unroll_clear_rings:
- ice_vsi_clear_rings(vsi);
-unroll_vector_base:
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
-unroll_alloc_q_vector:
- ice_vsi_free_q_vectors(vsi);
-unroll_vsi_init:
- ice_vsi_free_stats(vsi);
- ice_vsi_delete(vsi);
-unroll_get_qs:
- ice_vsi_put_qs(vsi);
-unroll_vsi_alloc:
- if (vsi_type == ICE_VSI_VF)
+err_vsi_cfg:
+ if (params->type == ICE_VSI_VF)
ice_enable_lag(pf->lag);
- ice_vsi_clear(vsi);
+ ice_vsi_free(vsi);
return NULL;
}
@@ -3121,37 +3251,6 @@ void ice_napi_del(struct ice_vsi *vsi)
}
/**
- * ice_free_vf_ctrl_res - Free the VF control VSI resource
- * @pf: pointer to PF structure
- * @vsi: the VSI to free resources for
- *
- * Check if the VF control VSI resource is still in use. If no VF is using it
- * any more, release the VSI resource. Otherwise, leave it to be cleaned up
- * once no other VF uses it.
- */
-static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- rcu_read_lock();
- ice_for_each_vf_rcu(pf, bkt, vf) {
- if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- rcu_read_unlock();
- return;
- }
- }
- rcu_read_unlock();
-
- /* No other VFs left that have control VSI. It is now safe to reclaim
- * SW interrupts back to the common pool.
- */
- ice_free_res(pf->irq_tracker, vsi->base_vector,
- ICE_RES_VF_CTRL_VEC_ID);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
-}
-
-/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3160,7 +3259,6 @@ static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
int ice_vsi_release(struct ice_vsi *vsi)
{
struct ice_pf *pf;
- int err;
if (!vsi->back)
return -ENODEV;
@@ -3178,50 +3276,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
+
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
- /* Disable VSI and free resources */
- if (vsi->type != ICE_VSI_LB)
- ice_vsi_dis_irq(vsi);
ice_vsi_close(vsi);
-
- /* SR-IOV determines needed MSIX resources all at once instead of per
- * VSI since when VFs are spawned we know how many VFs there are and how
- * many interrupts each VF needs. SR-IOV MSIX resources are also
- * cleared in the same manner.
- */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
- ice_free_vf_ctrl_res(pf, vsi);
- } else if (vsi->type != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- }
-
- if (!ice_is_safe_mode(pf)) {
- if (vsi->type == ICE_VSI_PF) {
- ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
- ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, false);
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
- }
- }
-
- if (ice_is_vsi_dflt_vsi(vsi))
- ice_clear_dflt_vsi(vsi);
- ice_fltr_remove_all(vsi);
- ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
- err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
- if (err)
- dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
- vsi->vsi_num, err);
- ice_vsi_delete(vsi);
- ice_vsi_free_q_vectors(vsi);
+ ice_vsi_decfg(vsi);
if (vsi->netdev) {
if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
@@ -3235,19 +3297,12 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- if (vsi->type == ICE_VSI_VF &&
- vsi->agg_node && vsi->agg_node->valid)
- vsi->agg_node->num_vsis--;
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_stats(vsi);
- ice_vsi_put_qs(vsi);
-
/* retain SW VSI data structure since it is needed to unregister and
* free VSI netdev when PF is not in reset recovery pending state,
* for example during rmmod.
*/
if (!ice_is_reset_in_progress(pf->state))
- ice_vsi_clear(vsi);
+ ice_vsi_delete(vsi);
return 0;
}
@@ -3372,7 +3427,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
* @prev_txq: Number of Tx rings before ring reallocation
* @prev_rxq: Number of Rx rings before ring reallocation
*/
-static int
+static void
ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
{
struct ice_vsi_stats *vsi_stat;
@@ -3380,9 +3435,9 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
int i;
if (!prev_txq || !prev_rxq)
- return 0;
+ return;
if (vsi->type == ICE_VSI_CHNL)
- return 0;
+ return;
vsi_stat = pf->vsi_stats[vsi->idx];
@@ -3403,36 +3458,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
}
}
}
-
- return 0;
}
/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuilt
- * @init_vsi: is this an initialization or a reconfigure of the VSI
+ * @vsi_flags: flags used for VSI rebuild flow
+ *
+ * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
+ * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
*
* Returns 0 on success and negative value on failure
*/
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
- u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int ret, i, prev_txq, prev_rxq;
+ int ret, prev_txq, prev_rxq;
int prev_num_q_vectors = 0;
- enum ice_vsi_type vtype;
struct ice_pf *pf;
if (!vsi)
return -EINVAL;
+ params = ice_vsi_to_params(vsi);
+ params.flags = vsi_flags;
+
pf = vsi->back;
- vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- ice_vsi_init_vlan_ops(vsi);
-
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
if (!coalesce)
@@ -3443,188 +3498,32 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_txq = vsi->num_txq;
prev_rxq = vsi->num_rxq;
- ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
- ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi, &params);
if (ret)
- dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
- vsi->vsi_num, ret);
- ice_vsi_free_q_vectors(vsi);
-
- /* SR-IOV determines needed MSIX resources all at once instead of per
- * VSI since when VFs are spawned we know how many VFs there are and how
- * many interrupts each VF needs. SR-IOV MSIX resources are also
- * cleared in the same manner.
- */
- if (vtype != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- vsi->base_vector = 0;
- }
-
- if (ice_is_xdp_ena_vsi(vsi))
- /* return value check can be skipped here, it always returns
- * 0 if reset is in progress
- */
- ice_destroy_xdp_rings(vsi);
- ice_vsi_put_qs(vsi);
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi);
- if (vtype == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vsi->vf);
- else
- ice_vsi_set_num_qs(vsi, NULL);
-
- ret = ice_vsi_alloc_arrays(vsi);
- if (ret < 0)
- goto err_vsi;
-
- ice_vsi_get_qs(vsi);
-
- ice_alloc_fd_res(vsi);
- ice_vsi_set_tc_cfg(vsi);
-
- /* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_init(vsi, init_vsi);
- if (ret < 0)
- goto err_vsi;
-
- switch (vtype) {
- case ICE_VSI_CTRL:
- case ICE_VSI_SWITCHDEV_CTRL:
- case ICE_VSI_PF:
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_ring_stats(vsi);
- if (ret)
- goto err_vectors;
-
- ice_vsi_map_rings_to_vectors(vsi);
-
- vsi->stat_offsets_loaded = false;
- if (ice_is_xdp_ena_vsi(vsi)) {
- ret = ice_vsi_determine_xdp_res(vsi);
- if (ret)
- goto err_vectors;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
- if (ret)
- goto err_vectors;
- }
- /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
- if (vtype != ICE_VSI_CTRL)
- /* Do not exit if configuring RSS had an issue, at
- * least receive traffic on first queue. Hence no
- * need to capture return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss_lut_key(vsi);
-
- /* disable or enable CRC stripping */
- if (vsi->netdev)
- ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
- NETIF_F_RXFCS));
-
- break;
- case ICE_VSI_VF:
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_ring_stats(vsi);
- if (ret)
- goto err_vectors;
-
- vsi->stat_offsets_loaded = false;
- break;
- case ICE_VSI_CHNL:
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- ice_vsi_cfg_rss_lut_key(vsi);
- ice_vsi_set_rss_flow_fld(vsi);
- }
- break;
- default:
- break;
- }
-
- /* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++) {
- /* configure VSI nodes based on number of queues and TC's.
- * ADQ creates VSIs for each TC/Channel but doesn't
- * allocate queues instead it reconfigures the PF queues
- * as per the TC command. So max_txqs should point to the
- * PF Tx queues.
- */
- if (vtype == ICE_VSI_CHNL)
- max_txqs[i] = pf->num_lan_tx;
- else
- max_txqs[i] = vsi->alloc_txq;
-
- if (ice_is_xdp_ena_vsi(vsi))
- max_txqs[i] += vsi->num_xdp_txq;
- }
-
- if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- /* If MQPRIO is set, means channel code path, hence for main
- * VSI's, use TC as 1
- */
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
- else
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ goto err_vsi_cfg;
+ ret = ice_vsi_cfg_tc_lan(pf, vsi);
if (ret) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
- if (init_vsi) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = -EIO;
- goto err_vectors;
+ goto err_vsi_cfg_tc_lan;
} else {
+ kfree(coalesce);
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
- if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq))
- goto err_vectors;
+ ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
return 0;
-err_vectors:
- ice_vsi_free_q_vectors(vsi);
-err_rings:
- if (vsi->netdev) {
- vsi->current_netdev_flags = 0;
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-err_vsi:
- ice_vsi_clear(vsi);
- set_bit(ICE_RESET_FAILED, pf->state);
+err_vsi_cfg_tc_lan:
+ ice_vsi_decfg(vsi);
+err_vsi_cfg:
kfree(coalesce);
return ret;
}
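The rebuild path collapses the old per-type switch into decfg-then-cfg on the same VSI slot. A condensed, hypothetical shape of that flow; the real function above additionally snapshots coalesce settings and ring counts before tearing down, and schedules a PFR on a non-fatal TC config failure:

static int demo_rebuild(struct ice_vsi *vsi, u32 flags)
{
	struct ice_vsi_cfg_params params = ice_vsi_to_params(vsi);

	params.flags = flags;		/* ICE_VSI_FLAG_INIT or _NO_INIT */

	ice_vsi_decfg(vsi);		/* unwind SW + scheduler state */
	return ice_vsi_cfg(vsi, &params); /* reconfigure in the same slot */
}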
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index dcdf69a693e9..75221478f2dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -7,6 +7,47 @@
#include "ice.h"
#include "ice_vlan.h"
+/* Flags used for VSI configuration and rebuild */
+#define ICE_VSI_FLAG_INIT BIT(0)
+#define ICE_VSI_FLAG_NO_INIT 0
+
+/**
+ * struct ice_vsi_cfg_params - VSI configuration parameters
+ * @pi: pointer to the port_info instance for the VSI
+ * @ch: pointer to the channel structure for the VSI, may be NULL
+ * @vf: pointer to the VF associated with this VSI, may be NULL
+ * @type: the type of VSI to configure
+ * @flags: VSI flags used for rebuild and configuration
+ *
+ * Parameter structure used when configuring a new VSI.
+ */
+struct ice_vsi_cfg_params {
+ struct ice_port_info *pi;
+ struct ice_channel *ch;
+ struct ice_vf *vf;
+ enum ice_vsi_type type;
+ u32 flags;
+};
+
+/**
+ * ice_vsi_to_params - Get parameters for an existing VSI
+ * @vsi: the VSI to get parameters for
+ *
+ * Fill a parameter structure for reconfiguring a VSI with its current
+ * parameters, such as during a rebuild operation.
+ */
+static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi)
+{
+ struct ice_vsi_cfg_params params = {};
+
+ params.pi = vsi->port_info;
+ params.ch = vsi->ch;
+ params.vf = vsi->vf;
+ params.type = vsi->type;
+
+ return params;
+}
+
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -42,7 +83,6 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
void ice_vsi_delete(struct ice_vsi *vsi);
-int ice_vsi_clear(struct ice_vsi *vsi);
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
@@ -51,9 +91,7 @@ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, struct ice_vf *vf,
- struct ice_channel *ch);
+ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
void ice_napi_del(struct ice_vsi *vsi);
@@ -63,6 +101,7 @@ void ice_vsi_close(struct ice_vsi *vsi);
int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
+void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
@@ -70,7 +109,8 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
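The wrappers in ice_main.c below zero a stack ice_vsi_cfg_params and assign its fields one by one; with the struct now public, designated initializers are an equivalent, slightly terser idiom (illustrative, not part of the patch):

struct ice_vsi_cfg_params params = {
	.type  = ICE_VSI_PF,
	.pi    = pi,
	.flags = ICE_VSI_FLAG_INIT,	/* create the VSI context in FW */
};

vsi = ice_vsi_setup(pf, &params);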
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8ec24f6cf6be..567694bf098b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -22,6 +22,7 @@
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
+#include <net/xdp_sock_drv.h>
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -44,7 +45,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
-static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
@@ -564,7 +564,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
/* Disable VFs until reset is completed */
mutex_lock(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf)
- ice_set_vf_state_qs_dis(vf);
+ ice_set_vf_state_dis(vf);
mutex_unlock(&pf->vfs.table_lock);
if (ice_is_eswitch_mode_switchdev(pf)) {
@@ -2596,8 +2596,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->netdev = NULL;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
- xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
- xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
@@ -2889,6 +2887,18 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
}
/**
+ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: Pointer to VSI structure
+ */
+static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+{
+ if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ return ICE_RXBUF_1664;
+ else
+ return ICE_RXBUF_3072;
+}
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2898,13 +2908,16 @@ static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
bool if_running = netif_running(vsi->netdev);
int ret = 0, xdp_ring_err = 0;
- if (frame_size > vsi->rx_buf_len) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
- return -EOPNOTSUPP;
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (frame_size > ice_max_xdp_frame_size(vsi)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MTU is too large for linear frames and XDP prog does not support frags");
+ return -EOPNOTSUPP;
+ }
}
/* need to stop netdev while setting up the program for Rx rings */
@@ -2925,11 +2938,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ xdp_features_set_redirect_target(vsi->netdev, true);
/* reallocate Rx queues that are used for zero-copy */
xdp_ring_err = ice_realloc_zc_buf(vsi, true);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
@@ -3344,10 +3359,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
- * @netdev: netdev instance
+ * @vsi: the VSI associated with the new netdev
*/
-static void ice_set_ops(struct net_device *netdev)
+static void ice_set_ops(struct ice_vsi *vsi)
{
+ struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (ice_is_safe_mode(pf)) {
@@ -3359,6 +3375,13 @@ static void ice_set_ops(struct net_device *netdev)
netdev->netdev_ops = &ice_netdev_ops;
netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
ice_set_ethtool_ops(netdev);
+
+ if (vsi->type != ICE_VSI_PF)
+ return;
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
}
/**
@@ -3447,53 +3470,8 @@ static void ice_set_netdev_features(struct net_device *netdev)
* be changed at runtime
*/
netdev->hw_features |= NETIF_F_RXFCS;
-}
-
-/**
- * ice_cfg_netdev - Allocate, configure and register a netdev
- * @vsi: the VSI associated with the new netdev
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_cfg_netdev(struct ice_vsi *vsi)
-{
- struct ice_netdev_priv *np;
- struct net_device *netdev;
- u8 mac_addr[ETH_ALEN];
-
- netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
- vsi->alloc_rxq);
- if (!netdev)
- return -ENOMEM;
-
- set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- vsi->netdev = netdev;
- np = netdev_priv(netdev);
- np->vsi = vsi;
-
- ice_set_netdev_features(netdev);
-
- ice_set_ops(netdev);
- if (vsi->type == ICE_VSI_PF) {
- SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
- ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
- eth_hw_addr_set(netdev, mac_addr);
- ether_addr_copy(netdev->perm_addr, mac_addr);
- }
-
- netdev->priv_flags |= IFF_UNICAST_FLT;
-
- /* Setup netdev TC information */
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
-
- /* setup watchdog timeout value to be 5 second */
- netdev->watchdog_timeo = 5 * HZ;
-
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = ICE_MAX_MTU;
-
- return 0;
+ netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
/**
@@ -3521,14 +3499,27 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_PF;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_channel *ch)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_CHNL;
+ params.pi = pi;
+ params.ch = ch;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3542,7 +3533,13 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_CTRL;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3556,7 +3553,13 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_LB;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3716,20 +3719,6 @@ static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
}
/**
- * ice_tc_indir_block_remove - clean indirect TC block notifications
- * @pf: PF structure
- */
-static void ice_tc_indir_block_remove(struct ice_pf *pf)
-{
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (!pf_vsi)
- return;
-
- ice_tc_indir_block_unregister(pf_vsi);
-}
-
-/**
* ice_tc_indir_block_register - Register TC indirect block notifications
* @vsi: VSI struct which has the netdev
*
@@ -3749,78 +3738,6 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
}
/**
- * ice_setup_pf_sw - Setup the HW switch on startup or after reset
- * @pf: board private structure
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_setup_pf_sw(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- bool dvm = ice_is_dvm_ena(&pf->hw);
- struct ice_vsi *vsi;
- int status;
-
- if (ice_is_reset_in_progress(pf->state))
- return -EBUSY;
-
- status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
- if (status)
- return -EIO;
-
- vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
- if (!vsi)
- return -ENOMEM;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- status = ice_cfg_netdev(vsi);
- if (status)
- goto unroll_vsi_setup;
- /* netdev has to be configured before setting frame size */
- ice_vsi_cfg_frame_size(vsi);
-
- /* init indirect block notifications */
- status = ice_tc_indir_block_register(vsi);
- if (status) {
- dev_err(dev, "Failed to register netdev notifier\n");
- goto unroll_cfg_netdev;
- }
-
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- /* registering the NAPI handler requires both the queues and
- * netdev to be created, which are done in ice_pf_vsi_setup()
- * and ice_cfg_netdev() respectively
- */
- ice_napi_add(vsi);
-
- status = ice_init_mac_fltr(pf);
- if (status)
- goto unroll_napi_add;
-
- return 0;
-
-unroll_napi_add:
- ice_tc_indir_block_unregister(vsi);
-unroll_cfg_netdev:
- if (vsi) {
- ice_napi_del(vsi);
- if (vsi->netdev) {
- clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
- }
-
-unroll_vsi_setup:
- ice_vsi_release(vsi);
- return status;
-}
-
-/**
* ice_get_avail_q_count - Get count of queues in use
* @pf_qmap: bitmap to get queue use count from
* @lock: pointer to a mutex that protects access to pf_qmap
@@ -4249,13 +4166,13 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, false);
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, false);
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
done:
@@ -4518,6 +4435,23 @@ err_vsi_open:
return err;
}
+static void ice_deinit_fdir(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_manage_fdir(vsi, false);
+ ice_vsi_release(vsi);
+ if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[pf->ctrl_vsi_idx] = NULL;
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+
+ mutex_destroy(&pf->hw.fdir_fltr_lock);
+}
+
/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
@@ -4618,116 +4552,171 @@ static void ice_print_wake_reason(struct ice_pf *pf)
/**
* ice_register_netdev - register netdev
- * @pf: pointer to the PF struct
+ * @vsi: pointer to the VSI struct
*/
-static int ice_register_netdev(struct ice_pf *pf)
+static int ice_register_netdev(struct ice_vsi *vsi)
{
- struct ice_vsi *vsi;
- int err = 0;
+ int err;
- vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->netdev)
return -EIO;
err = register_netdev(vsi->netdev);
if (err)
- goto err_register_netdev;
+ return err;
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
return 0;
-err_register_netdev:
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- return err;
+}
+
+static void ice_unregister_netdev(struct ice_vsi *vsi)
+{
+ if (!vsi || !vsi->netdev)
+ return;
+
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
/**
- * ice_probe - Device initialization routine
- * @pdev: PCI device information struct
- * @ent: entry in ice_pci_tbl
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative value on failure
*/
-static int
-ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+static int ice_cfg_netdev(struct ice_vsi *vsi)
{
- struct device *dev = &pdev->dev;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
- struct ice_hw *hw;
- int i, err;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
- if (pdev->is_virtfn) {
- dev_err(dev, "can't probe a virtual function\n");
- return -EINVAL;
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+ ice_set_ops(vsi);
+
+ if (vsi->type == ICE_VSI_PF) {
+ SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
+ ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+ eth_hw_addr_set(netdev, mac_addr);
}
- /* this driver uses devres, see
- * Documentation/driver-api/driver-model/devres.rst
- */
- err = pcim_enable_device(pdev);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Setup netdev TC information */
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
+ netdev->max_mtu = ICE_MAX_MTU;
+
+ return 0;
+}
+
+static void ice_decfg_netdev(struct ice_vsi *vsi)
+{
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+}
+
+static int ice_start_eth(struct ice_vsi *vsi)
+{
+ int err;
+
+ err = ice_init_mac_fltr(vsi->back);
if (err)
return err;
- err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
- if (err) {
- dev_err(dev, "BAR0 I/O map error %d\n", err);
- return err;
- }
+ rtnl_lock();
+ err = ice_vsi_open(vsi);
+ rtnl_unlock();
- pf = ice_allocate_pf(dev);
- if (!pf)
- return -ENOMEM;
+ return err;
+}
- /* initialize Auxiliary index to invalid value */
- pf->aux_idx = -1;
+static int ice_init_eth(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ int err;
- /* set up for high or low DMA */
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(dev, "DMA configuration failed: 0x%x\n", err);
+ if (!vsi)
+ return -EINVAL;
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
+ if (err)
return err;
- }
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- pci_enable_pcie_error_reporting(pdev);
- pci_set_master(pdev);
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
- pf->pdev = pdev;
- pci_set_drvdata(pdev, pf);
- set_bit(ICE_DOWN, pf->state);
- /* Disable service task until DOWN bit is cleared */
- set_bit(ICE_SERVICE_DIS, pf->state);
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create_pf_port;
- hw = &pf->hw;
- hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
- pci_save_state(pdev);
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
- hw->back = pf;
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
- hw->bus.device = PCI_SLOT(pdev->devfn);
- hw->bus.func = PCI_FUNC(pdev->devfn);
- ice_set_ctrlq_len(hw);
+ err = ice_register_netdev(vsi);
+ if (err)
+ goto err_register_netdev;
- pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+ err = ice_tc_indir_block_register(vsi);
+ if (err)
+ goto err_tc_indir_block_register;
-#ifndef CONFIG_DYNAMIC_DEBUG
- if (debug < -1)
- hw->debug_mask = debug;
-#endif
+ ice_napi_add(vsi);
+
+ return 0;
+
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
+ return err;
+}
+
+static void ice_deinit_eth(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_close(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_decfg_netdev(vsi);
+}
+
+static int ice_init_dev(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err;
err = ice_init_hw(hw);
if (err) {
dev_err(dev, "ice_init_hw failed: %d\n", err);
- err = -EIO;
- goto err_exit_unroll;
+ return err;
}
ice_init_feature_support(pf);
@@ -4750,62 +4739,31 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf_unroll;
+ goto err_init_pf;
}
- ice_devlink_init_regions(pf);
-
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
- i = 0;
if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
- pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.udp_tunnel_nic.tables[0].n_entries =
pf->hw.tnl.valid_count[TNL_VXLAN];
- pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
UDP_TUNNEL_TYPE_VXLAN;
- i++;
}
if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
- pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.udp_tunnel_nic.tables[1].n_entries =
pf->hw.tnl.valid_count[TNL_GENEVE];
- pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
UDP_TUNNEL_TYPE_GENEVE;
- i++;
- }
-
- pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
- if (!pf->num_alloc_vsi) {
- err = -EIO;
- goto err_init_pf_unroll;
- }
- if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
- dev_warn(&pf->pdev->dev,
- "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
- pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
- pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
- }
-
- pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
- GFP_KERNEL);
- if (!pf->vsi) {
- err = -ENOMEM;
- goto err_init_pf_unroll;
- }
-
- pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
- sizeof(*pf->vsi_stats), GFP_KERNEL);
- if (!pf->vsi_stats) {
- err = -ENOMEM;
- goto err_init_vsi_unroll;
}
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_vsi_stats_unroll;
+ goto err_init_interrupt_scheme;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4816,49 +4774,94 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_init_interrupt_unroll;
+ goto err_req_irq_msix_misc;
}
- /* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
- if (!pf->first_sw) {
- err = -ENOMEM;
- goto err_msix_misc_unroll;
- }
+ return 0;
- if (hw->evb_veb)
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
- else
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
+err_req_irq_msix_misc:
+ ice_clear_interrupt_scheme(pf);
+err_init_interrupt_scheme:
+ ice_deinit_pf(pf);
+err_init_pf:
+ ice_deinit_hw(hw);
+ return err;
+}
- pf->first_sw->pf = pf;
+static void ice_deinit_dev(struct ice_pf *pf)
+{
+ ice_free_irq_msix_misc(pf);
+ ice_clear_interrupt_scheme(pf);
+ ice_deinit_pf(pf);
+ ice_deinit_hw(&pf->hw);
+}
- /* record the sw_id available for later use */
- pf->first_sw->sw_id = hw->port_info->sw_id;
+static void ice_init_features(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
- err = ice_setup_pf_sw(pf);
- if (err) {
- dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
- goto err_alloc_sw_unroll;
- }
+ if (ice_is_safe_mode(pf))
+ return;
- clear_bit(ICE_SERVICE_DIS, pf->state);
+ /* initialize DDP driven features */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_init(pf);
- /* tell the firmware we are up */
- err = ice_send_version(pf);
- if (err) {
- dev_err(dev, "probe failed sending driver version %s. error: %d\n",
- UTS_RELEASE, err);
- goto err_send_version_unroll;
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
+ /* Note: Flow director init failure is non-fatal to load */
+ if (ice_init_fdir(pf))
+ dev_err(dev, "could not initialize flow director\n");
+
+ /* Note: DCB init failure is non-fatal to load */
+ if (ice_init_pf_dcb(pf, false)) {
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ ice_cfg_lldp_mib_change(&pf->hw, true);
}
- /* since everything is good, start the service timer */
- mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ if (ice_init_lag(pf))
+ dev_warn(dev, "Failed to init link aggregation support\n");
+}
+
+static void ice_deinit_features(struct ice_pf *pf)
+{
+ ice_deinit_lag(pf);
+ if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ ice_cfg_lldp_mib_change(&pf->hw, false);
+ ice_deinit_fdir(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_release(pf);
+}
+
+static void ice_init_wakeup(struct ice_pf *pf)
+{
+ /* Save wakeup reason register for later use */
+ pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
+
+ /* check for a power management event */
+ ice_print_wake_reason(pf);
+
+ /* clear wake status, all bits */
+ wr32(&pf->hw, PFPM_WUS, U32_MAX);
+
+ /* Disable WoL at init, wait for user to enable */
+ device_set_wakeup_enable(ice_pf_to_dev(pf), false);
+}
+
+static int ice_init_link(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_send_version_unroll;
+ return err;
}
/* not a fatal error if this fails */
@@ -4894,123 +4897,350 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
}
- ice_verify_cacheline_size(pf);
+ return err;
+}
- /* Save wakeup reason register for later use */
- pf->wakeup_reason = rd32(hw, PFPM_WUS);
+static int ice_init_pf_sw(struct ice_pf *pf)
+{
+ bool dvm = ice_is_dvm_ena(&pf->hw);
+ struct ice_vsi *vsi;
+ int err;
- /* check for a power management event */
- ice_print_wake_reason(pf);
+ /* create switch struct for the switch element created by FW on boot */
+ pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
+ if (!pf->first_sw)
+ return -ENOMEM;
- /* clear wake status, all bits */
- wr32(hw, PFPM_WUS, U32_MAX);
+ if (pf->hw.evb_veb)
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
- /* Disable WoL at init, wait for user to enable */
- device_set_wakeup_enable(dev, false);
+ pf->first_sw->pf = pf;
- if (ice_is_safe_mode(pf)) {
- ice_set_safe_mode_vlan_cfg(pf);
- goto probe_done;
+ /* record the sw_id available for later use */
+ pf->first_sw->sw_id = pf->hw.port_info->sw_id;
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_aq_set_port_params;
+
+ vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+ if (!vsi) {
+ err = -ENOMEM;
+ goto err_pf_vsi_setup;
}
- /* initialize DDP driven features */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_init(pf);
+ return 0;
- if (ice_is_feature_supported(pf, ICE_F_GNSS))
- ice_gnss_init(pf);
+err_pf_vsi_setup:
+err_aq_set_port_params:
+ kfree(pf->first_sw);
+ return err;
+}
- /* Note: Flow director init failure is non-fatal to load */
- if (ice_init_fdir(pf))
- dev_err(dev, "could not initialize flow director\n");
+static void ice_deinit_pf_sw(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
- /* Note: DCB init failure is non-fatal to load */
- if (ice_init_pf_dcb(pf, false)) {
- clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
- clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
- } else {
- ice_cfg_lldp_mib_change(&pf->hw, true);
+ if (!vsi)
+ return;
+
+ ice_vsi_release(vsi);
+ kfree(pf->first_sw);
+}
+
+static int ice_alloc_vsis(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
+ if (!pf->num_alloc_vsi)
+ return -EIO;
+
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
}
- if (ice_init_lag(pf))
- dev_warn(dev, "Failed to init link aggregation support\n");
+ pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+ GFP_KERNEL);
+ if (!pf->vsi)
+ return -ENOMEM;
- /* print PCI link speed and width */
- pcie_print_link_status(pf->pdev);
+ pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+ sizeof(*pf->vsi_stats), GFP_KERNEL);
+ if (!pf->vsi_stats) {
+ devm_kfree(dev, pf->vsi);
+ return -ENOMEM;
+ }
-probe_done:
- err = ice_devlink_create_pf_port(pf);
+ return 0;
+}
+
+static void ice_dealloc_vsis(struct ice_pf *pf)
+{
+ devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
+ pf->vsi_stats = NULL;
+
+ pf->num_alloc_vsi = 0;
+ devm_kfree(ice_pf_to_dev(pf), pf->vsi);
+ pf->vsi = NULL;
+}
+
+static int ice_init_devlink(struct ice_pf *pf)
+{
+ int err;
+
+ err = ice_devlink_register_params(pf);
if (err)
- goto err_create_pf_port;
+ return err;
- vsi = ice_get_main_vsi(pf);
- if (!vsi || !vsi->netdev) {
- err = -EINVAL;
- goto err_netdev_reg;
- }
+ ice_devlink_init_regions(pf);
+ ice_devlink_register(pf);
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+ return 0;
+}
+
+static void ice_deinit_devlink(struct ice_pf *pf)
+{
+ ice_devlink_unregister(pf);
+ ice_devlink_destroy_regions(pf);
+ ice_devlink_unregister_params(pf);
+}
+
+static int ice_init(struct ice_pf *pf)
+{
+ int err;
- err = ice_register_netdev(pf);
+ err = ice_init_dev(pf);
if (err)
- goto err_netdev_reg;
+ return err;
- err = ice_devlink_register_params(pf);
+ err = ice_alloc_vsis(pf);
+ if (err)
+ goto err_alloc_vsis;
+
+ err = ice_init_pf_sw(pf);
if (err)
- goto err_netdev_reg;
+ goto err_init_pf_sw;
+
+ ice_init_wakeup(pf);
+
+ err = ice_init_link(pf);
+ if (err)
+ goto err_init_link;
+
+ err = ice_send_version(pf);
+ if (err)
+ goto err_init_link;
+
+ ice_verify_cacheline_size(pf);
+
+ if (ice_is_safe_mode(pf))
+ ice_set_safe_mode_vlan_cfg(pf);
+ else
+ /* print PCI link speed and width */
+ pcie_print_link_status(pf->pdev);
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_rdma_ena(pf)) {
- pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
- if (pf->aux_idx < 0) {
- dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- err = -ENOMEM;
- goto err_devlink_reg_param;
- }
+ clear_bit(ICE_SERVICE_DIS, pf->state);
- err = ice_init_rdma(pf);
- if (err) {
- dev_err(dev, "Failed to initialize RDMA: %d\n", err);
- err = -EIO;
- goto err_init_aux_unroll;
- }
- } else {
- dev_warn(dev, "RDMA is not supported on this device\n");
- }
+ /* since everything is good, start the service timer */
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
- ice_devlink_register(pf);
return 0;
-err_init_aux_unroll:
- pf->adev = NULL;
- ida_free(&ice_aux_ida, pf->aux_idx);
-err_devlink_reg_param:
- ice_devlink_unregister_params(pf);
-err_netdev_reg:
- ice_devlink_destroy_pf_port(pf);
-err_create_pf_port:
-err_send_version_unroll:
- ice_vsi_release_all(pf);
-err_alloc_sw_unroll:
+err_init_link:
+ ice_deinit_pf_sw(pf);
+err_init_pf_sw:
+ ice_dealloc_vsis(pf);
+err_alloc_vsis:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+static void ice_deinit(struct ice_pf *pf)
+{
set_bit(ICE_SERVICE_DIS, pf->state);
set_bit(ICE_DOWN, pf->state);
- devm_kfree(dev, pf->first_sw);
-err_msix_misc_unroll:
- ice_free_irq_msix_misc(pf);
-err_init_interrupt_unroll:
- ice_clear_interrupt_scheme(pf);
-err_init_vsi_stats_unroll:
- devm_kfree(dev, pf->vsi_stats);
- pf->vsi_stats = NULL;
-err_init_vsi_unroll:
- devm_kfree(dev, pf->vsi);
-err_init_pf_unroll:
- ice_deinit_pf(pf);
- ice_devlink_destroy_regions(pf);
- ice_deinit_hw(hw);
-err_exit_unroll:
- pci_disable_pcie_error_reporting(pdev);
+
+ ice_deinit_pf_sw(pf);
+ ice_dealloc_vsis(pf);
+ ice_deinit_dev(pf);
+}
+
+/**
+ * ice_load - load PF by initializing HW and starting the main VSI
+ * @pf: pointer to the PF instance
+ */
+int ice_load(struct ice_pf *pf)
+{
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *vsi;
+ int err;
+
+ err = ice_reset(&pf->hw, ICE_RESET_PFR);
+ if (err)
+ return err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ vsi = ice_get_main_vsi(pf);
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ err = ice_vsi_cfg(vsi, &params);
+ if (err)
+ goto err_vsi_cfg;
+
+ err = ice_start_eth(ice_get_main_vsi(pf));
+ if (err)
+ goto err_start_eth;
+
+ err = ice_init_rdma(pf);
+ if (err)
+ goto err_init_rdma;
+
+ ice_init_features(pf);
+ ice_service_task_restart(pf);
+
+ clear_bit(ICE_DOWN, pf->state);
+
+ return 0;
+
+err_init_rdma:
+ ice_vsi_close(ice_get_main_vsi(pf));
+err_start_eth:
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+/**
+ * ice_unload - unload PF by stopping the main VSI and deinitializing HW
+ * @pf: pointer to the PF instance
+ */
+void ice_unload(struct ice_pf *pf)
+{
+ ice_deinit_features(pf);
+ ice_deinit_rdma(pf);
+ ice_vsi_close(ice_get_main_vsi(pf));
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ ice_deinit_dev(pf);
+}
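
ice_load() and ice_unload() factor the bring-up/teardown so it can run without a full PCI probe/remove cycle. As a hedged illustration only (the hook names below are hypothetical, not the actual ice devlink code), a devlink reload implementation could pair them like so:

    /* Sketch under the assumption that devlink reload plumbing exists */
    static int example_reload_down(struct ice_pf *pf)
    {
    	ice_unload(pf);		/* stop the main VSI, deinit HW */
    	return 0;
    }

    static int example_reload_up(struct ice_pf *pf)
    {
    	return ice_load(pf);	/* PFR, re-init HW, restart the main VSI */
    }
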
+
+/**
+ * ice_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ice_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ if (pdev->is_virtfn) {
+ dev_err(dev, "can't probe a virtual function\n");
+ return -EINVAL;
+ }
+
+ /* this driver uses devres, see
+ * Documentation/driver-api/driver-model/devres.rst
+ */
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
+ if (err) {
+ dev_err(dev, "BAR0 I/O map error %d\n", err);
+ return err;
+ }
+
+ pf = ice_allocate_pf(dev);
+ if (!pf)
+ return -ENOMEM;
+
+ /* initialize Auxiliary index to invalid value */
+ pf->aux_idx = -1;
+
+ /* set up for high or low DMA */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(dev, "DMA configuration failed: 0x%x\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ pf->pdev = pdev;
+ pci_set_drvdata(pdev, pf);
+ set_bit(ICE_DOWN, pf->state);
+ /* Disable service task until DOWN bit is cleared */
+ set_bit(ICE_SERVICE_DIS, pf->state);
+
+ hw = &pf->hw;
+ hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+ pci_save_state(pdev);
+
+ hw->back = pf;
+ hw->port_info = NULL;
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+ ice_set_ctrlq_len(hw);
+
+ pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (debug < -1)
+ hw->debug_mask = debug;
+#endif
+
+ err = ice_init(pf);
+ if (err)
+ goto err_init;
+
+ err = ice_init_eth(pf);
+ if (err)
+ goto err_init_eth;
+
+ err = ice_init_rdma(pf);
+ if (err)
+ goto err_init_rdma;
+
+ err = ice_init_devlink(pf);
+ if (err)
+ goto err_init_devlink;
+
+ ice_init_features(pf);
+
+ return 0;
+
+err_init_devlink:
+ ice_deinit_rdma(pf);
+err_init_rdma:
+ ice_deinit_eth(pf);
+err_init_eth:
+ ice_deinit(pf);
+err_init:
pci_disable_device(pdev);
return err;
}
@@ -5085,52 +5315,33 @@ static void ice_remove(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
int i;
- ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
msleep(100);
}
- ice_tc_indir_block_remove(pf);
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
ice_service_task_stop(pf);
-
ice_aq_cancel_waiting_tasks(pf);
- ice_unplug_aux_dev(pf);
- if (pf->aux_idx >= 0)
- ida_free(&ice_aux_ida, pf->aux_idx);
- ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
- ice_deinit_lag(pf);
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_release(pf);
- if (ice_is_feature_supported(pf, ICE_F_GNSS))
- ice_gnss_exit(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_setup_mc_magic_wake(pf);
+ ice_deinit_features(pf);
+ ice_deinit_devlink(pf);
+ ice_deinit_rdma(pf);
+ ice_deinit_eth(pf);
+ ice_deinit(pf);
+
ice_vsi_release_all(pf);
- mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
- ice_devlink_destroy_pf_port(pf);
+
+ ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
- ice_free_irq_msix_misc(pf);
- ice_for_each_vsi(pf, i) {
- if (!pf->vsi[i])
- continue;
- ice_vsi_free_q_vectors(pf->vsi[i]);
- }
- devm_kfree(&pdev->dev, pf->vsi_stats);
- pf->vsi_stats = NULL;
- ice_deinit_pf(pf);
- ice_devlink_destroy_regions(pf);
- ice_deinit_hw(&pf->hw);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
@@ -5138,8 +5349,6 @@ static void ice_remove(struct pci_dev *pdev)
*/
ice_reset(&pf->hw, ICE_RESET_PFR);
pci_wait_for_pending_transaction(pdev);
- ice_clear_interrupt_scheme(pf);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -6173,24 +6382,21 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg - Setup the VSI
+ * ice_vsi_cfg_lan - Setup the VSI lan related config
* @vsi: the VSI being configured
*
* Return 0 on success and negative value on error
*/
-int ice_vsi_cfg(struct ice_vsi *vsi)
+int ice_vsi_cfg_lan(struct ice_vsi *vsi)
{
int err;
- if (vsi->netdev) {
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_set_rx_mode(vsi->netdev);
- if (vsi->type != ICE_VSI_LB) {
- err = ice_vsi_vlan_setup(vsi);
-
- if (err)
- return err;
- }
+ err = ice_vsi_vlan_setup(vsi);
+ if (err)
+ return err;
}
ice_vsi_cfg_dcb_rings(vsi);
@@ -6371,7 +6577,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev) {
+ vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
@@ -6382,7 +6588,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
* set the baseline so counters are ready when interface is up
*/
ice_update_eth_stats(vsi);
- ice_service_task_schedule(pf);
+
+ if (vsi->type == ICE_VSI_PF)
+ ice_service_task_schedule(pf);
return 0;
}
@@ -6395,7 +6603,7 @@ int ice_up(struct ice_vsi *vsi)
{
int err;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (!err)
err = ice_up_complete(vsi);
@@ -6963,7 +7171,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (err)
goto err_setup_rx;
@@ -7017,7 +7225,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (err)
goto err_setup_rx;
@@ -7102,7 +7310,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
continue;
/* rebuild the VSI */
- err = ice_vsi_rebuild(vsi, true);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
if (err) {
dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
@@ -7358,18 +7566,6 @@ clear_recovery:
}
/**
- * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
- * @vsi: Pointer to VSI structure
- */
-static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
-{
- if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
- else
- return ICE_RXBUF_3072;
-}
-
-/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -7381,6 +7577,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct bpf_prog *prog;
u8 count = 0;
int err = 0;
@@ -7389,7 +7586,8 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
- if (ice_is_xdp_ena_vsi(vsi)) {
+ prog = vsi->xdp_prog;
+ if (prog && !prog->aux->xdp_has_frags) {
int frame_size = ice_max_xdp_frame_size(vsi);
if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
@@ -7397,6 +7595,12 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
frame_size - ICE_ETH_PKT_HDR_PAD);
return -EINVAL;
}
+ } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
+ if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
+ netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
+ ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
+ return -EINVAL;
+ }
}
/* if a reset is in progress, wait for some time for it to complete */
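
The frame-size check above is skipped for programs that declare multi-buffer support, i.e. those loaded with BPF_F_XDP_HAS_FRAGS so that prog->aux->xdp_has_frags is set. A minimal sketch of such a program; with libbpf, placing it in the "xdp.frags" section sets that flag at load time:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* assumed build: clang -O2 -g -target bpf -c prog.c -o prog.o */
    SEC("xdp.frags")
    int xdp_mb_pass(struct xdp_md *ctx)
    {
    	return XDP_PASS;	/* frags-aware, so the MTU clamp is bypassed */
    }

    char _license[] SEC("license") = "GPL";
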
@@ -8447,12 +8651,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
/* clear the VSI from scheduler tree */
ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
- /* Delete VSI from FW */
+ /* Delete VSI from FW, PF and HW VSI arrays */
ice_vsi_delete(ch->ch_vsi);
- /* Delete VSI from PF and HW VSI arrays */
- ice_vsi_clear(ch->ch_vsi);
-
/* free the channel */
kfree(ch);
}
@@ -8511,7 +8712,7 @@ static int ice_rebuild_channels(struct ice_pf *pf)
type = vsi->type;
/* rebuild ADQ VSI */
- err = ice_vsi_rebuild(vsi, true);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
if (err) {
dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
ice_vsi_type_str(type), vsi->idx, err);
@@ -8743,14 +8944,14 @@ config_tcf:
cur_rxq = vsi->num_rxq;
/* proceed with rebuild main VSI using correct number of queues */
- ret = ice_vsi_rebuild(vsi, false);
+ ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
if (ret) {
/* fallback to current number of queues */
dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
vsi->req_txq = cur_txq;
vsi->req_rxq = cur_rxq;
clear_bit(ICE_RESET_FAILED, pf->state);
- if (ice_vsi_rebuild(vsi, false)) {
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
dev_err(dev, "Rebuild of main VSI failed again\n");
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index c262dc886e6a..f6f52a248066 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -662,7 +662,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
/* Verify that the simple checksum is zero */
for (i = 0; i < sizeof(*tmp); i++)
- /* cppcheck-suppress objectIndex */
sum += ((u8 *)tmp)[i];
if (sum) {
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index d63161d73eb1..ac6f06f9a2ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -680,6 +680,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
+ bool link_up;
int err;
u8 idx;
@@ -695,11 +696,14 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
if (err)
return false;
+ /* Drop packets if the link went down */
+ link_up = ptp_port->link_up;
+
for_each_set_bit(idx, tx->in_use, tx->len) {
struct skb_shared_hwtstamps shhwtstamps = {};
u8 phy_idx = idx + tx->offset;
u64 raw_tstamp = 0, tstamp;
- bool drop_ts = false;
+ bool drop_ts = !link_up;
struct sk_buff *skb;
/* Drop packets which have waited for more than 2 seconds */
@@ -728,7 +732,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
- if (err)
+ if (err && !drop_ts)
continue;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
@@ -1770,6 +1774,38 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
}
/**
+ * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ */
+static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_perout_channel clk_cfg = {0};
+ int err;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ clk_cfg.gpio_pin = PPS_PIN_INDEX;
+ clk_cfg.period = NSEC_PER_SEC;
+ clk_cfg.ena = !!on;
+
+ err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
+ TIME_SYNC_PIN_INDEX, rq->extts.flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
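
Both request types handled here are reachable from userspace through the PTP character device: the PTP_ENABLE_PPS ioctl is delivered to the driver as PTP_CLK_REQ_PPS, and PTP_EXTTS_REQUEST as PTP_CLK_REQ_EXTTS. A minimal sketch (the /dev/ptp0 index is an assumption; PTP_ENABLE_PPS additionally requires CAP_SYS_TIME):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/ptp_clock.h>

    int main(void)
    {
    	struct ptp_extts_request req;
    	int fd = open("/dev/ptp0", O_RDWR);

    	if (fd < 0)
    		return 1;

    	/* PTP_CLK_REQ_PPS: start the 1 PPS output on PPS_PIN_INDEX */
    	if (ioctl(fd, PTP_ENABLE_PPS, 1))
    		perror("PTP_ENABLE_PPS");

    	/* PTP_CLK_REQ_EXTTS: timestamp rising edges on channel 0 */
    	memset(&req, 0, sizeof(req));
    	req.index = 0;
    	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
    	if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
    		perror("PTP_EXTTS_REQUEST");

    	close(fd);
    	return 0;
    }
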
+
+/**
* ice_ptp_gettimex64 - Get the time of the clock
* @info: the driver's PTP info structure
* @ts: timespec64 structure to hold the current time value
@@ -2221,6 +2257,19 @@ ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
+ * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+static void
+ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ info->pps = 1;
+ info->n_per_out = 0;
+ info->n_ext_ts = 1;
+}
+
+/**
* ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
* @pf: Board private structure
* @info: PTP info to fill
@@ -2258,6 +2307,23 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
+ * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E823 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E823
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ info->enable = ice_ptp_gpio_enable_e823;
+ ice_ptp_setup_pins_e823(pf, info);
+}
+
+/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
*/
@@ -2269,7 +2335,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
dev_driver_string(dev), dev_name(dev));
info->owner = THIS_MODULE;
- info->max_adj = 999999999;
+ info->max_adj = 100000000;
info->adjtime = ice_ptp_adjtime;
info->adjfine = ice_ptp_adjfine;
info->gettimex64 = ice_ptp_gettimex64;
@@ -2277,6 +2343,8 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
if (ice_is_e810(&pf->hw))
ice_ptp_set_funcs_e810(pf, info);
+ else if (ice_is_e823(&pf->hw))
+ ice_ptp_set_funcs_e823(pf, info);
else
ice_ptp_set_funcs_e822(pf, info);
}
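
max_adj is the largest frequency offset, in parts per billion, that the PTP core will accept before invoking adjfine, so the tightened value rejects requests beyond roughly +/-10%. A hedged userspace sketch using the dynamic POSIX clock API (the device path is an assumption):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/timex.h>

    #ifndef FD_TO_CLOCKID
    #define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)
    #endif

    int main(void)
    {
    	/* request +1 ppm (1000 ppb); timex freq is in 2^-16 ppm units */
    	struct timex tx = { .modes = ADJ_FREQUENCY, .freq = 1 << 16 };
    	int fd = open("/dev/ptp0", O_RDWR);

    	if (fd < 0)
    		return 1;
    	/* offsets beyond +/-max_adj ppb are rejected with -ERANGE */
    	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
    		perror("clock_adjtime");
    	close(fd);
    	return 0;
    }
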
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 6d08b397df2a..4eca8d195ef0 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1063,7 +1063,6 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
u16 max_child_nodes, num_added = 0;
- /* cppcheck-suppress unusedVariable */
u32 temp;
status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
@@ -1655,12 +1654,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
- int status;
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
+ int status;
+
if (!parent)
return -EIO;
@@ -1756,13 +1756,14 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
- int status;
if (!pi)
return -EINVAL;
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
+ int status;
+
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 3ba1408c56a9..96a64c25e2ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -41,21 +41,6 @@ static void ice_free_vf_entries(struct ice_pf *pf)
}
/**
- * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
- * @vf: invalidate this VF's VSI after freeing it
- */
-static void ice_vf_vsi_release(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- ice_vsi_release(vsi);
- ice_vf_invalidate_vsi(vf);
-}
-
-/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
@@ -248,11 +233,16 @@ void ice_free_vfs(struct ice_pf *pf)
*/
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
+ params.type = ICE_VSI_VF;
+ params.pi = ice_vf_get_port_info(vf);
+ params.vf = vf;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ vsi = ice_vsi_setup(pf, &params);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
@@ -583,51 +573,19 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
*/
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
- struct ice_vsi_vlan_ops *vlan_ops;
struct ice_pf *pf = vf->pf;
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
- struct device *dev;
int err;
vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
- dev = ice_pf_to_dev(pf);
vsi = ice_vf_vsi_setup(vf);
if (!vsi)
return -ENOMEM;
- err = ice_vsi_add_vlan_zero(vsi);
- if (err) {
- dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_rx_filtering(vsi);
- if (err) {
- dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- eth_broadcast_addr(broadcast);
- err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
- vf->vf_id, err);
- goto release_vsi;
- }
-
- err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
- if (err) {
- dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
- vf->vf_id);
+ err = ice_vf_init_host_cfg(vf, vsi);
+ if (err)
goto release_vsi;
- }
-
- vf->num_mac = 1;
return 0;
@@ -697,6 +655,21 @@ static void ice_sriov_free_vf(struct ice_vf *vf)
}
/**
+ * ice_sriov_clear_reset_state - clears VF Reset status register
+ * @vf: the vf to configure
+ */
+static void ice_sriov_clear_reset_state(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+
+ /* Clear the reset status register so that VF immediately sees that
+ * the device is resetting, even if hardware hasn't yet gotten around
+ * to clearing VFGEN_RSTAT for us.
+ */
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
+}
+
+/**
* ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
* @vf: the vf to configure
*/
@@ -799,23 +772,19 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
}
/**
- * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
- * @vf: VF to release and setup the VSI for
+ * ice_sriov_create_vsi - Create a new VSI for a VF
+ * @vf: VF to create the VSI for
*
- * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
- * configuration change, etc.).
+ * This is called by ice_vf_recreate_vsi to create the new VSI after the old
+ * VSI has been released.
*/
-static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
+static int ice_sriov_create_vsi(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
- ice_vf_vsi_release(vf);
- if (!ice_vf_vsi_setup(vf)) {
- dev_err(ice_pf_to_dev(pf),
- "Failed to release and setup the VF%u's VSI\n",
- vf->vf_id);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
return -ENOMEM;
- }
return 0;
}
@@ -826,8 +795,6 @@ static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
*/
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
- ice_vf_rebuild_host_cfg(vf);
- ice_vf_set_initialized(vf);
ice_ena_vf_mappings(vf);
wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
@@ -835,11 +802,13 @@ static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
static const struct ice_vf_ops ice_sriov_vf_ops = {
.reset_type = ICE_VF_RESET,
.free = ice_sriov_free_vf,
+ .clear_reset_state = ice_sriov_clear_reset_state,
.clear_mbx_register = ice_sriov_clear_mbx_register,
.trigger_reset_register = ice_sriov_trigger_reset_register,
.poll_reset_status = ice_sriov_poll_reset_status,
.clear_reset_trigger = ice_sriov_clear_reset_trigger,
- .vsi_rebuild = ice_sriov_vsi_rebuild,
+ .irq_close = NULL,
+ .create_vsi = ice_sriov_create_vsi,
.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};
@@ -879,21 +848,9 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
/* set sriov vf ops for VFs created during SRIOV flow */
vf->vf_ops = &ice_sriov_vf_ops;
- vf->vf_sw_id = pf->first_sw;
- /* assign default capabilities */
- vf->spoofchk = true;
- vf->num_vf_qs = pf->vfs.num_qps_per;
- ice_vc_set_default_allowlist(vf);
-
- /* ctrl_vsi_idx will be set to a valid value only when VF
- * creates its first fdir rule.
- */
- ice_vf_ctrl_invalidate_vsi(vf);
- ice_vf_fdir_init(vf);
-
- ice_virtchnl_set_dflt_ops(vf);
+ ice_initialize_vf_entry(vf);
- mutex_init(&vf->cfg_lock);
+ vf->vf_sw_id = pf->first_sw;
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
@@ -1285,7 +1242,7 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
goto out_put_vf;
ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr);
/* VF configuration for VLAN and applicable QoS */
ivi->vlan = ice_vf_get_port_vlan_id(vf);
@@ -1333,8 +1290,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return -EINVAL;
/* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
- ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
+ if (ether_addr_equal(vf->dev_lan_addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr, mac)) {
ret = 0;
goto out_put_vf;
}
@@ -1348,8 +1305,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* VF is notified of its new MAC via the PF's response to the
* VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- ether_addr_copy(vf->dev_lan_addr.addr, mac);
- ether_addr_copy(vf->hw_lan_addr.addr, mac);
+ ether_addr_copy(vf->dev_lan_addr, mac);
+ ether_addr_copy(vf->hw_lan_addr, mac);
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
@@ -1750,7 +1707,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dev_lan_addr.addr,
+ vf->dev_lan_addr,
test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
? "on" : "off");
}
@@ -1794,7 +1751,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
- vf->dev_lan_addr.addr);
+ vf->dev_lan_addr);
}
}
mutex_unlock(&pf->vfs.table_lock);
@@ -1884,7 +1841,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
if (pf_vsi)
dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dev_lan_addr.addr[0],
+ &vf->dev_lan_addr[0],
pf_vsi->netdev->dev_addr);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 95f392ab9670..6b48cbc049c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -792,7 +792,7 @@ static struct ice_vsi *
ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
{
struct ice_rx_ring *ring = NULL;
- struct ice_vsi *ch_vsi = NULL;
+ struct ice_vsi *dest_vsi = NULL;
struct ice_pf *pf = vsi->back;
struct device *dev;
u32 tc_class;
@@ -810,7 +810,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
return ERR_PTR(-EOPNOTSUPP);
}
/* Locate ADQ VSI depending on hw_tc number */
- ch_vsi = vsi->tc_map_vsi[tc_class];
+ dest_vsi = vsi->tc_map_vsi[tc_class];
break;
case ICE_FWD_TO_Q:
/* Locate the Rx queue */
@@ -824,7 +824,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
/* Determine destination VSI even though the action is
* FWD_TO_QUEUE, because QUEUE is associated with VSI
*/
- ch_vsi = tc_fltr->dest_vsi;
+ dest_vsi = tc_fltr->dest_vsi;
break;
default:
dev_err(dev,
@@ -832,13 +832,13 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
tc_fltr->action.fltr_act);
return ERR_PTR(-EINVAL);
}
- /* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */
- if (!ch_vsi) {
+ /* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
+ if (!dest_vsi) {
dev_err(dev,
"Unable to add filter because specified destination VSI doesn't exist\n");
return ERR_PTR(-EINVAL);
}
- return ch_vsi;
+ return dest_vsi;
}
/**
@@ -860,7 +860,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 flags = tc_fltr->flags;
- struct ice_vsi *ch_vsi;
+ struct ice_vsi *dest_vsi;
struct device *dev;
u16 lkups_cnt = 0;
u16 l4_proto = 0;
@@ -883,9 +883,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
}
/* validate forwarding action VSI and queue */
- ch_vsi = ice_tc_forward_action(vsi, tc_fltr);
- if (IS_ERR(ch_vsi))
- return PTR_ERR(ch_vsi);
+ if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
+ dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
+ if (IS_ERR(dest_vsi))
+ return PTR_ERR(dest_vsi);
+ }
lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
@@ -904,7 +906,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
switch (tc_fltr->action.fltr_act) {
case ICE_FWD_TO_VSI:
- rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
@@ -915,7 +917,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
case ICE_FWD_TO_Q:
/* HW queue number in global space */
rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
- rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
@@ -923,14 +925,15 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.queue,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
- default:
- rule_info.sw_act.flag |= ICE_FLTR_TX;
- /* In case of Tx (LOOKUP_TX), src needs to be src VSI */
- rule_info.sw_act.src = vsi->idx;
- /* 'Rx' is false, direction of rule(LOOKUPTRX) */
- rule_info.rx = false;
+ case ICE_DROP_PACKET:
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto exit;
}
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
@@ -953,11 +956,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
- tc_fltr->dest_vsi = ch_vsi;
+ tc_fltr->dest_vsi = dest_vsi;
/* keep track of advanced switch filter for
* destination VSI
*/
- ch_vsi->num_chnl_fltr++;
+ dest_vsi->num_chnl_fltr++;
/* keeps track of channel filters for PF VSI */
if (vsi->type == ICE_VSI_PF &&
@@ -978,6 +981,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
rule_added.rule_id);
break;
+ case ICE_DROP_PACKET:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
+ lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
+ break;
default:
break;
}
@@ -1712,6 +1719,9 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
case FLOW_ACTION_RX_QUEUE_MAPPING:
/* forward to queue */
return ice_tc_forward_to_queue(vsi, fltr, act);
+ case FLOW_ACTION_DROP:
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+ return 0;
default:
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index d916d1e92aa3..8d5e22ac7023 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -211,4 +211,14 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
+{
+ switch (fltr_act) {
+ case ICE_FWD_TO_VSI:
+ case ICE_FWD_TO_Q:
+ return true;
+ default:
+ return false;
+ }
+}
#endif /* _ICE_TC_LIB_H_ */
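
The new helper lets ice_add_tc_flower_adv_fltr() resolve a destination VSI only for forwarding actions; ICE_DROP_PACKET takes the default branch and skips the lookup entirely. A self-contained model of the resulting truth table (the enum values here are illustrative stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    enum ice_sw_fwd_act_type { ICE_FWD_TO_VSI, ICE_FWD_TO_Q, ICE_DROP_PACKET };

    static bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
    {
    	switch (fltr_act) {
    	case ICE_FWD_TO_VSI:
    	case ICE_FWD_TO_Q:
    		return true;
    	default:
    		return false;
    	}
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       ice_is_forward_action(ICE_FWD_TO_VSI),	/* 1 */
    	       ice_is_forward_action(ICE_FWD_TO_Q),	/* 1 */
    	       ice_is_forward_action(ICE_DROP_PACKET));	/* 0 */
    	return 0;
    }
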
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 086f0b3ab68d..dfd22862e926 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -85,7 +85,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
ICE_TX_DESC_CMD_RE;
- tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->type = ICE_TX_BUF_DUMMY;
tx_buf->raw_buf = raw_packet;
tx_desc->cmd_type_offset_bsz =
@@ -112,27 +112,29 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
- if (tx_buf->skb) {
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
- devm_kfree(ring->dev, tx_buf->raw_buf);
- else if (ice_ring_is_xdp(ring))
- page_frag_free(tx_buf->raw_buf);
- else
- dev_kfree_skb_any(tx_buf->skb);
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buf, len)) {
+ if (dma_unmap_len(tx_buf, len))
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_DUMMY:
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_SKB:
+ dev_kfree_skb_any(tx_buf->skb);
+ break;
+ case ICE_TX_BUF_XDP_TX:
+ page_frag_free(tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame(tx_buf->xdpf);
+ break;
}
tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* tx_buf must be completely set up in the transmit path */
}
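
Tx buffers now carry an explicit ICE_TX_BUF_* tag rather than inferring ownership from which pointer happens to be non-NULL, so each buffer kind gets an unambiguous release path. A minimal, self-contained model of the tagged-cleanup idea (names are illustrative, not ice symbols):

    #include <stdio.h>
    #include <stdlib.h>

    enum buf_type { BUF_EMPTY, BUF_SKB, BUF_XDP };

    struct buf {
    	enum buf_type type;
    	void *data;
    };

    static void buf_free(struct buf *b)
    {
    	switch (b->type) {
    	case BUF_SKB:
    	case BUF_XDP:
    		free(b->data);	/* each tag selects its own release path */
    		break;
    	case BUF_EMPTY:
    		break;
    	}
    	b->type = BUF_EMPTY;	/* mark consumed, like ICE_TX_BUF_EMPTY */
    	b->data = NULL;
    }

    int main(void)
    {
    	struct buf b = { .type = BUF_SKB, .data = malloc(16) };

    	buf_free(&b);
    	printf("type=%d\n", b.type);
    	return 0;
    }
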
@@ -174,8 +176,6 @@ tx_skip_free:
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
- tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
if (!tx_ring->netdev)
return;
@@ -267,7 +267,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
DMA_TO_DEVICE);
/* clear tx_buf data */
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* unmap remaining buffers */
@@ -382,6 +382,7 @@ err:
*/
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct device *dev = rx_ring->dev;
u32 size;
u16 i;
@@ -390,16 +391,16 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
-
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
+ if (xdp->data) {
+ xdp_return_buff(xdp);
+ xdp->data = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
@@ -437,6 +438,7 @@ rx_skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->first_desc = 0;
rx_ring->next_to_use = 0;
}
@@ -506,6 +508,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->first_desc = 0;
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
@@ -523,8 +526,16 @@ err:
return -ENOMEM;
}
+/**
+ * ice_rx_frame_truesize - calculate the truesize of an Rx frame
+ * @rx_ring: ptr to Rx ring
+ * @size: size of the frame data
+ *
+ * Calculate the truesize, taking into account the PAGE_SIZE of the
+ * underlying arch.
+ */
static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
{
unsigned int truesize;
@@ -545,34 +556,39 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused s
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @rx_buf: Rx buffer to store the XDP action
*
- * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ * Stores the resulting verdict, any of ICE_XDP_{PASS, CONSUMED, TX, REDIR},
+ * in rx_buf->act.
*/
-static int
+static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ struct ice_rx_buf *rx_buf)
{
- int err;
+ unsigned int ret = ICE_XDP_PASS;
u32 act;
+ if (!xdp_prog)
+ goto exit;
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- return ICE_XDP_PASS;
+ break;
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
- if (err == ICE_XDP_CONSUMED)
+ if (ret == ICE_XDP_CONSUMED)
goto out_failure;
- return err;
+ break;
case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
+ if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
goto out_failure;
- return ICE_XDP_REDIR;
+ ret = ICE_XDP_REDIR;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
@@ -581,8 +597,31 @@ out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- return ICE_XDP_CONSUMED;
+ ret = ICE_XDP_CONSUMED;
}
+exit:
+ rx_buf->act = ret;
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring, ret);
+}
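
The verdict recorded in rx_buf->act is simply whatever the attached program returned. A minimal sketch of a program exercising two of the branches above (the 128-byte threshold is arbitrary):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_drop_big(struct xdp_md *ctx)
    {
    	void *data = (void *)(long)ctx->data;
    	void *data_end = (void *)(long)ctx->data_end;

    	/* frames of 128+ bytes -> XDP_DROP (recorded as ICE_XDP_CONSUMED) */
    	if (data + 128 <= data_end)
    		return XDP_DROP;
    	return XDP_PASS;	/* recorded as ICE_XDP_PASS */
    }

    char _license[] SEC("license") = "GPL";
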
+
+/**
+ * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdpf: XDP frame that will be converted to XDP buff
+ * @xdp_ring: XDP ring for transmission
+ */
+static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
+ struct ice_tx_ring *xdp_ring)
+{
+ struct xdp_buff xdp;
+
+ xdp.data_hard_start = (void *)xdpf;
+ xdp.data = xdpf->data;
+ xdp.data_end = xdp.data + xdpf->len;
+ xdp.frame_sz = xdpf->frame_sz;
+ xdp.flags = xdpf->flags;
+
+ return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
}
/**
@@ -605,6 +644,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *xdp_ring;
+ struct ice_tx_buf *tx_buf;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -627,16 +667,18 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
}
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
for (i = 0; i < n; i++) {
- struct xdp_frame *xdpf = frames[i];
+ const struct xdp_frame *xdpf = frames[i];
int err;
- err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ err = ice_xmit_xdp_ring(xdpf, xdp_ring);
if (err != ICE_XDP_TX)
break;
nxmit++;
}
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
@@ -706,7 +748,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -783,7 +825,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
- * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
@@ -791,7 +832,7 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
* page freed
*/
static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -802,7 +843,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
+ if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -824,33 +865,44 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
}
/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
* @rx_ring: Rx descriptor ring to transact packets on
+ * @xdp: xdp buff to place the data into
* @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
* @size: packet length from rx_desc
*
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
+ * This function will add the data contained in rx_buf->page to the xdp buf.
+ * It will just attach the page as a frag.
*/
-static void
-ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct sk_buff *skb, unsigned int size)
+static int
+ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct ice_rx_buf *rx_buf, const unsigned int size)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!size)
- return;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, truesize);
+ return 0;
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(xdp);
+ }
- /* page is being used so we must update the page offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ rx_buf->page_offset, size);
+ sinfo->xdp_frags_size += size;
+
+ if (page_is_pfmemalloc(rx_buf->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ return 0;
}
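
Once frags are attached this way, ctx->data/ctx->data_end in a BPF program still describe only the head buffer; a frags-aware program needs bpf_xdp_get_buff_len() to see the full frame assembled here. A hedged sketch (the 3000-byte cutoff is arbitrary):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp.frags")
    int xdp_len_police(struct xdp_md *ctx)
    {
    	/* total length including frags added by ice_add_xdp_frag() */
    	__u64 len = bpf_xdp_get_buff_len(ctx);

    	return len > 3000 ? XDP_DROP : XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
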
/**
@@ -886,19 +938,18 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
- * @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
- int *rx_buf_pgcnt)
+ const unsigned int ntc)
{
struct ice_rx_buf *rx_buf;
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- *rx_buf_pgcnt =
+ rx_buf = &rx_ring->rx_buf[ntc];
+ rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buf->page);
#else
@@ -922,26 +973,25 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
*
- * This function builds an skb around an existing Rx buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead.
+ * This function builds an skb around an existing XDP buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead. The driver
+ * has already combined frags (if any) into skb_shared_info.
*/
static struct sk_buff *
-ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
u8 metasize = xdp->data - xdp->data_meta;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(xdp->data_end -
- xdp->data_hard_start);
-#endif
+ struct skb_shared_info *sinfo = NULL;
+ unsigned int nr_frags;
struct sk_buff *skb;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
* likely have a consumer accessing first few bytes of meta
@@ -949,7 +999,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = napi_build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
return NULL;
@@ -964,8 +1014,11 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
if (metasize)
skb_metadata_set(skb, metasize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
@@ -981,24 +1034,30 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
+ struct skb_shared_info *sinfo = NULL;
+ struct ice_rx_buf *rx_buf;
+ unsigned int nr_frags = 0;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(xdp->data_meta);
+ net_prefetch(xdp->data);
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- ICE_RX_HDR_SIZE + metasize,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
+ rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
skb_record_rx_queue(skb, rx_ring->q_index);
/* Determine available headroom for copy */
headlen = size;
@@ -1006,32 +1065,42 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
- ALIGN(headlen + metasize, sizeof(long)));
-
- if (metasize) {
- skb_metadata_set(skb, metasize);
- __skb_pull(skb, metasize);
- }
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
+ sizeof(long)));
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
if (size) {
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ /* besides adding a partial frag here, we are going to add
+ * frags from xdp_buff; make sure there is enough space for
+ * them
+ */
+ if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_add_rx_frag(skb, 0, rx_buf->page,
- rx_buf->page_offset + headlen, size, truesize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ rx_buf->page_offset + headlen, size,
+ xdp->frame_sz);
} else {
- /* buffer is unused, reset bias back to rx_buf; data was copied
- * onto skb's linear part so there's no need for adjusting
- * page offset and we can reuse this buffer as-is
+ /* buffer is unused, change the act that should be taken later
+ * on; data was copied onto skb's linear part so there's no
+ * need for adjusting page offset and we can reuse this buffer
+ * as-is
*/
- rx_buf->pagecnt_bias++;
+ rx_buf->act = ICE_SKB_CONSUMED;
+ }
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+
+ memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
+ sizeof(skb_frag_t) * nr_frags);
+
+ xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
}
return skb;
@@ -1041,26 +1110,17 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
- * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
- * This function will update next_to_clean and then clean up the contents
- * of the rx_buf. It will either recycle the buffer or unmap it and free
- * the associated resources.
+ * This function will clean up the contents of the rx_buf. It will either
+ * recycle the buffer or unmap it and free the associated resources.
*/
static void
-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- int rx_buf_pgcnt)
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- u16 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
+ if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@@ -1076,27 +1136,6 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
}
/**
- * ice_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- *
- * If the buffer is an EOP buffer, this function exits returning false,
- * otherwise return true indicating that this is in fact a non-EOP buffer.
- */
-static bool
-ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
-{
- /* if we are the last buffer then there is nothing else to do */
-#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
- return false;
-
- rx_ring->ring_stats->rx_stats.non_eop_descs++;
-
- return true;
-}
-
-/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1110,39 +1149,42 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*/
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct ice_tx_ring *xdp_ring = NULL;
- unsigned int xdp_res, xdp_xmit = 0;
- struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
- struct xdp_buff xdp;
+ u32 ntc = rx_ring->next_to_clean;
+ u32 cnt = rx_ring->count;
+ u32 cached_ntc = ntc;
+ u32 xdp_xmit = 0;
+ u32 cached_ntu;
bool failure;
+ u32 first;
/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
- xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (xdp_prog)
+ if (xdp_prog) {
xdp_ring = rx_ring->xdp_ring;
+ cached_ntu = xdp_ring->next_to_use;
+ }
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
- unsigned char *hard_start;
+ struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- int rx_buf_pgcnt;
u16 vlan_tag = 0;
u16 rx_ptype;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
- rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
@@ -1166,8 +1208,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
ctrl_vsi->vf)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- ice_put_rx_buf(rx_ring, NULL, 0);
- cleaned_count++;
+ if (++ntc == cnt)
+ ntc = 0;
continue;
}
@@ -1175,65 +1217,56 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
+ rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
- if (!size) {
- xdp.data = NULL;
- xdp.data_end = NULL;
- xdp.data_hard_start = NULL;
- xdp.data_meta = NULL;
- goto construct_skb;
- }
+ if (!xdp->data) {
+ void *hard_start;
- hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
- offset;
- xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
#if (PAGE_SIZE > 4096)
- /* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+ /* At larger PAGE_SIZE, frame_sz depends on len size */
+ xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
+ xdp_buff_clear_frags_flag(xdp);
+ } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ break;
+ }
+ if (++ntc == cnt)
+ ntc = 0;
- if (!xdp_prog)
- goto construct_skb;
+ /* skip if it is NOP desc */
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
- if (!xdp_res)
+ ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+ if (rx_buf->act == ICE_XDP_PASS)
goto construct_skb;
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
- } else {
- rx_buf->pagecnt_bias++;
- }
- total_rx_bytes += size;
+ total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
continue;
construct_skb:
- if (skb) {
- ice_add_rx_frag(rx_ring, rx_buf, skb, size);
- } else if (likely(xdp.data)) {
- if (ice_ring_uses_build_skb(rx_ring))
- skb = ice_build_skb(rx_ring, rx_buf, &xdp);
- else
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
- }
+ if (likely(ice_ring_uses_build_skb(rx_ring)))
+ skb = ice_build_skb(rx_ring, xdp);
+ else
+ skb = ice_construct_skb(rx_ring, xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
- if (rx_buf)
- rx_buf->pagecnt_bias++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ rx_buf->act = ICE_XDP_CONSUMED;
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring,
+ ICE_XDP_CONSUMED);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
break;
}
-
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
- cleaned_count++;
-
- /* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc))
- continue;
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1245,10 +1278,8 @@ construct_skb:
vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
- if (eth_skb_pad(skb)) {
- skb = NULL;
+ if (eth_skb_pad(skb))
continue;
- }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1262,18 +1293,34 @@ construct_skb:
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag);
- skb = NULL;
/* update budget accounting */
total_rx_pkts++;
}
+ first = rx_ring->first_desc;
+ while (cached_ntc != first) {
+ struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+
+ if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ xdp_xmit |= buf->act;
+ } else if (buf->act & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (buf->act == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+ if (++cached_ntc >= cnt)
+ cached_ntc = 0;
+ }
+ rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
- failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+ failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
- if (xdp_prog)
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
- rx_ring->skb = skb;
+ if (xdp_xmit)
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
if (rx_ring->ring_stats)
ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
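The NAPI loop above only records a verdict in rx_buf->act; the walk from cached_ntc to first_desc afterwards is what actually applies it, once per buffer, with wrap-around at the ring size. A minimal standalone sketch of that deferred-action pattern (names and values are hypothetical; only the dispatch and the index math mirror the driver):

#include <stdio.h>

enum act { ACT_NONE, ACT_PASS, ACT_CONSUMED, ACT_TX };

struct buf { enum act act; int pg_off; int bias; };

/* replay recorded actions from slot "from" up to (excluding) "to" */
static void flush_acts(struct buf *ring, unsigned int cnt,
		       unsigned int from, unsigned int to, int truesize)
{
	while (from != to) {
		struct buf *b = &ring[from];

		if (b->act == ACT_TX)
			b->pg_off += truesize;	/* page half given to XDP Tx */
		else if (b->act == ACT_CONSUMED)
			b->bias++;		/* hand the buffer back */
		else if (b->act == ACT_PASS)
			b->pg_off += truesize;	/* page half owned by skb */

		if (++from == cnt)
			from = 0;
	}
}

int main(void)
{
	struct buf ring[4] = { [3] = { .act = ACT_CONSUMED } };

	flush_acts(ring, 4, 3, 1, 2048);	/* wraps over slots 3 and 0 */
	printf("%d\n", ring[3].bias);		/* 1 */
	return 0;
}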
@@ -1682,6 +1729,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
DMA_TO_DEVICE);
tx_buf = &tx_ring->tx_buf[i];
+ tx_buf->type = ICE_TX_BUF_FRAG;
}
/* record SW timestamp if HW timestamp is not available */
@@ -1996,7 +2044,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (err < 0)
return err;
- /* cppcheck-suppress unreadVariable */
protocol = vlan_get_protocol(skb);
if (eth_p_mpls(protocol))
@@ -2033,8 +2080,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/* reset pointers to inner headers */
-
- /* cppcheck-suppress unreadVariable */
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
@@ -2300,6 +2345,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ice_trace(xmit_frame_ring, tx_ring, skb);
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto out_drop;
+
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
@@ -2328,6 +2376,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buf[tx_ring->next_to_use];
first->skb = skb;
+ first->type = ICE_TX_BUF_SKB;
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
first->gso_segs = 1;
first->tx_flags = 0;
@@ -2500,11 +2549,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ if (tx_buf->type == ICE_TX_BUF_DUMMY)
devm_kfree(tx_ring->dev, tx_buf->raw_buf);
/* clear next_to_watch to prevent false hangs */
- tx_buf->raw_buf = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
tx_buf->tx_flags = 0;
tx_buf->next_to_watch = NULL;
dma_unmap_len_set(tx_buf, len, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 4fd0e5d0a313..fff0efe28373 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -9,10 +9,12 @@
#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1664 1664
#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
+#define ICE_MAX_FRAME_LEGACY_RX 8320
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
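The new 8320-byte ceiling lines up with chaining ICE_MAX_CHAINED_RX_BUFS buffers of the new 1664-byte size; a compile-time check of that relationship (re-derived here as an assumption, the patch itself does not state it):

#include <assert.h>

#define ICE_RXBUF_1664			1664
#define ICE_MAX_CHAINED_RX_BUFS		5
#define ICE_MAX_FRAME_LEGACY_RX		8320

static_assert(ICE_MAX_CHAINED_RX_BUFS * ICE_RXBUF_1664 ==
	      ICE_MAX_FRAME_LEGACY_RX, "8320 == 5 * 1664");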
@@ -110,15 +112,16 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
+#define ICE_RX_DESC_UNUSED(R) \
+ ((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->first_desc - (R)->next_to_use - 1)
+
#define ICE_RING_QUARTER(R) ((R)->count >> 2)
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
-/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
- * freed instead of returned like skb packets.
- */
-#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
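ICE_RX_DESC_UNUSED above counts up to first_desc rather than next_to_clean, so descriptors belonging to a multi-buffer frame that has not yet seen its EOP are never handed back to hardware. The same wrap-around math as a standalone function (field values hypothetical):

#include <stdio.h>

struct ring { unsigned int count, next_to_use, first_desc; };

/* slots the producer may refill without touching the pending frame */
static unsigned int rx_desc_unused(const struct ring *r)
{
	return (r->first_desc > r->next_to_use ? 0 : r->count) +
	       r->first_desc - r->next_to_use - 1;
}

int main(void)
{
	struct ring r = { .count = 512, .next_to_use = 510, .first_desc = 4 };

	printf("%u\n", rx_desc_unused(&r));	/* 5 */
	return 0;
}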
@@ -134,6 +137,7 @@ static inline int ice_skb_pad(void)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
#define ICE_XDP_EXIT BIT(3)
+#define ICE_SKB_CONSUMED ICE_XDP_CONSUMED
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -142,15 +146,44 @@ static inline int ice_skb_pad(void)
#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+/**
+ * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
+ * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
+ * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
+ * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
+ * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
+ * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
+ * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
+ * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
+ */
+enum ice_tx_buf_type {
+ ICE_TX_BUF_EMPTY = 0U,
+ ICE_TX_BUF_DUMMY,
+ ICE_TX_BUF_FRAG,
+ ICE_TX_BUF_SKB,
+ ICE_TX_BUF_XDP_TX,
+ ICE_TX_BUF_XDP_XMIT,
+ ICE_TX_BUF_XSK_TX,
+};
+
struct ice_tx_buf {
- struct ice_tx_desc *next_to_watch;
union {
- struct sk_buff *skb;
- void *raw_buf; /* used for XDP */
+ struct ice_tx_desc *next_to_watch;
+ u32 rs_idx;
+ };
+ union {
+ void *raw_buf; /* used for XDP_TX and FDir rules */
+ struct sk_buff *skb; /* used for .ndo_start_xmit() */
+ struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */
+ struct xdp_buff *xdp; /* used for XDP_TX ZC */
};
unsigned int bytecount;
- unsigned short gso_segs;
- u32 tx_flags;
+ union {
+ unsigned int gso_segs;
+ unsigned int nr_frags; /* used for mbuf XDP */
+ };
+ u32 type:16; /* &ice_tx_buf_type */
+ u32 tx_flags:16;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
};
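With the DUMMY flag bit retired, the buffer type rides in a 16-bit field that shares a 32-bit word with the (also shrunk) tx_flags, so tagging every buffer costs no extra space. A compile-time sketch of that packing (simplified mirror of the layout; holds on common ABIs where adjacent 16-bit bitfields share one 32-bit unit):

#include <assert.h>
#include <stdint.h>

struct tail {
	uint32_t type : 16;	/* stands in for enum ice_tx_buf_type */
	uint32_t tx_flags : 16;
};

static_assert(sizeof(struct tail) == sizeof(uint32_t),
	      "type tag and tx_flags pack into a single 32-bit word");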
@@ -170,7 +203,9 @@ struct ice_rx_buf {
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
- u16 pagecnt_bias;
+ unsigned int pgcnt;
+ unsigned int act;
+ unsigned int pagecnt_bias;
};
struct ice_q_stats {
@@ -273,42 +308,44 @@ struct ice_rx_ring {
struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
+ u16 q_index; /* Queue number of ring */
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 next_to_alloc;
+ /* CL2 - 2nd cacheline starts here */
union {
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
- /* CL2 - 2nd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
+ struct xdp_buff xdp;
/* CL3 - 3rd cacheline starts here */
- u16 q_index; /* Queue number of ring */
-
- u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
+ struct bpf_prog *xdp_prog;
+ u16 rx_offset;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
- u16 rx_offset;
- u16 rx_buf_len;
+ u16 first_desc;
/* stats structs */
struct ice_ring_stats *ring_stats;
struct rcu_head rcu; /* to avoid race on free */
- /* CL4 - 3rd cacheline starts here */
+ /* CL4 - 4th cacheline starts here */
struct ice_channel *ch;
- struct bpf_prog *xdp_prog;
struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
- struct sk_buff *skb;
dma_addr_t dma; /* physical address of ring */
u64 cached_phctime;
+ u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
u8 flags;
+ /* CL5 - 5th cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
struct ice_tx_ring {
@@ -326,12 +363,11 @@ struct ice_tx_ring {
struct xsk_buff_pool *xsk_pool;
u16 next_to_use;
u16 next_to_clean;
- u16 next_rs;
- u16 next_dd;
u16 q_handle; /* Queue handle per TC */
u16 reg_idx; /* HW register index of the ring */
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
+ u16 xdp_tx_active;
/* stats structs */
struct ice_ring_stats *ring_stats;
/* CL3 - 3rd cacheline starts here */
@@ -342,7 +378,6 @@ struct ice_tx_ring {
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
/* CL4 - 4th cacheline starts here */
- u16 xdp_tx_active;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
@@ -431,7 +466,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 25f04266c668..7bc5aa340c7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -221,128 +221,217 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
}
/**
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @dev: device for DMA mapping
+ * @tx_buf: Tx buffer to clean
+ * @bq: XDP bulk flush struct
+ */
+static void
+ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
+ struct xdp_frame_bulk *bq)
+{
+ dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_XDP_TX:
+ page_frag_free(tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame_bulk(tx_buf->xdpf, bq);
+ break;
+ }
+
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+}
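The release path now dispatches on the type tag instead of probing pointers for NULL. A user-space reduction of the same tagged-union cleanup (all names hypothetical; free() stands in for the driver's per-type release helpers):

#include <stdlib.h>

enum buf_type { BUF_EMPTY, BUF_XDP_TX, BUF_XDP_XMIT };

struct tx_buf {
	enum buf_type type;
	void *payload;		/* stands in for the raw_buf/xdpf union */
};

static void clean_buf(struct tx_buf *b)
{
	switch (b->type) {
	case BUF_XDP_TX:
		free(b->payload);	/* page_frag_free() in the driver */
		break;
	case BUF_XDP_XMIT:
		free(b->payload);	/* xdp_return_frame_bulk() there */
		break;
	default:
		break;			/* BUF_EMPTY: nothing to release */
	}

	b->type = BUF_EMPTY;		/* slot may be reused */
}

int main(void)
{
	struct tx_buf b = { .type = BUF_XDP_TX, .payload = malloc(64) };

	clean_buf(&b);
	return 0;
}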
+
+/**
* ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
* @xdp_ring: XDP ring to clean
*/
-static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
- unsigned int total_bytes = 0, total_pkts = 0;
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- u16 ntc = xdp_ring->next_to_clean;
- struct ice_tx_desc *next_dd_desc;
- u16 next_dd = xdp_ring->next_dd;
- struct ice_tx_buf *tx_buf;
- int i;
+ int total_bytes = 0, total_pkts = 0;
+ struct device *dev = xdp_ring->dev;
+ u32 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u32 cnt = xdp_ring->count;
+ struct xdp_frame_bulk bq;
+ u32 frags, xdp_tx = 0;
+ u32 ready_frames = 0;
+ u32 idx;
+ u32 ret;
+
+ idx = xdp_ring->tx_buf[ntc].rs_idx;
+ tx_desc = ICE_TX_DESC(xdp_ring, idx);
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if (idx >= ntc)
+ ready_frames = idx - ntc + 1;
+ else
+ ready_frames = idx + cnt - ntc + 1;
+ }
- next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
- if (!(next_dd_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- return;
+ if (unlikely(!ready_frames))
+ return 0;
+ ret = ready_frames;
+
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock(); /* xdp_return_frame_bulk() */
- for (i = 0; i < tx_thresh; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
+ while (ready_frames) {
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+ struct ice_tx_buf *head = tx_buf;
+ /* bytecount holds size of head + frags */
total_bytes += tx_buf->bytecount;
- /* normally tx_buf->gso_segs was taken but at this point
- * it's always 1 for us
- */
+ frags = tx_buf->nr_frags;
total_pkts++;
-
- page_frag_free(tx_buf->raw_buf);
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- tx_buf->raw_buf = NULL;
+ /* count head + frags */
+ ready_frames -= frags + 1;
+ xdp_tx++;
ntc++;
- if (ntc >= xdp_ring->count)
+ if (ntc == cnt)
ntc = 0;
+
+ for (int i = 0; i < frags; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ ice_clean_xdp_tx_buf(dev, tx_buf, &bq);
+ ntc++;
+ if (ntc == cnt)
+ ntc = 0;
+ }
+
+ ice_clean_xdp_tx_buf(dev, head, &bq);
}
- next_dd_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
- if (xdp_ring->next_dd > xdp_ring->count)
- xdp_ring->next_dd = tx_thresh - 1;
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
+ tx_desc->cmd_type_offset_bsz = 0;
xdp_ring->next_to_clean = ntc;
+ xdp_ring->xdp_tx_active -= xdp_tx;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+
+ return ret;
}
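ready_frames is the wrap-aware distance, inclusive on both ends, from next_to_clean to the descriptor whose rs_idx was stashed by ice_finalize_xdp_rx(). The arithmetic as a runnable check:

#include <stdio.h>

/* frames completed between ntc and the RS descriptor on a ring of cnt */
static unsigned int ready_frames(unsigned int rs_idx, unsigned int ntc,
				 unsigned int cnt)
{
	return rs_idx >= ntc ? rs_idx - ntc + 1 : rs_idx + cnt - ntc + 1;
}

int main(void)
{
	printf("%u\n", ready_frames(7, 3, 512));	/* 5, no wrap */
	printf("%u\n", ready_frames(1, 510, 512));	/* 4, wraps */
	return 0;
}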
/**
- * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
- * @data: packet data pointer
- * @size: packet data size
+ * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdp: XDP buffer to be placed onto Tx descriptors
* @xdp_ring: XDP ring for transmission
+ * @frame: whether this comes from .ndo_xdp_xmit()
*/
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
+ bool frame)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- u16 i = xdp_ring->next_to_use;
+ struct skb_shared_info *sinfo = NULL;
+ u32 size = xdp->data_end - xdp->data;
+ struct device *dev = xdp_ring->dev;
+ u32 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_head;
struct ice_tx_buf *tx_buf;
- dma_addr_t dma;
+ u32 cnt = xdp_ring->count;
+ void *data = xdp->data;
+ u32 nr_frags = 0;
+ u32 free_space;
+ u32 frag = 0;
+
+ free_space = ICE_DESC_UNUSED(xdp_ring);
+ if (free_space < ICE_RING_QUARTER(xdp_ring))
+ free_space += ice_clean_xdp_irq(xdp_ring);
+
+ if (unlikely(!free_space))
+ goto busy;
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ if (free_space < nr_frags + 1)
+ goto busy;
+ }
- if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
- ice_clean_xdp_irq(xdp_ring);
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_head = &xdp_ring->tx_buf[ntu];
+ tx_buf = tx_head;
- if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ICE_XDP_CONSUMED;
- }
+ for (;;) {
+ dma_addr_t dma;
- dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(xdp_ring->dev, dma))
- return ICE_XDP_CONSUMED;
+ dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_unmap;
- tx_buf = &xdp_ring->tx_buf[i];
- tx_buf->bytecount = size;
- tx_buf->gso_segs = 1;
- tx_buf->raw_buf = data;
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
- /* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, size);
- dma_unmap_addr_set(tx_buf, dma, dma);
+ if (frame) {
+ tx_buf->type = ICE_TX_BUF_FRAG;
+ } else {
+ tx_buf->type = ICE_TX_BUF_XDP_TX;
+ tx_buf->raw_buf = data;
+ }
- tx_desc = ICE_TX_DESC(xdp_ring, i);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
- size, 0);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
- xdp_ring->xdp_tx_active++;
- i++;
- if (i == xdp_ring->count) {
- i = 0;
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = tx_thresh - 1;
+ ntu++;
+ if (ntu == cnt)
+ ntu = 0;
+
+ if (frag == nr_frags)
+ break;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_buf = &xdp_ring->tx_buf[ntu];
+
+ data = skb_frag_address(&sinfo->frags[frag]);
+ size = skb_frag_size(&sinfo->frags[frag]);
+ frag++;
}
- xdp_ring->next_to_use = i;
- if (i > xdp_ring->next_rs) {
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
+ /* store info about bytecount and frag count in first desc */
+ tx_head->bytecount = xdp_get_buff_len(xdp);
+ tx_head->nr_frags = nr_frags;
+
+ if (frame) {
+ tx_head->type = ICE_TX_BUF_XDP_XMIT;
+ tx_head->xdpf = xdp->data_hard_start;
}
+ /* set EOP on the last descriptor of the frame */
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
+
+ xdp_ring->xdp_tx_active++;
+ xdp_ring->next_to_use = ntu;
+
return ICE_XDP_TX;
-}
-/**
- * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
- * @xdp: XDP buffer
- * @xdp_ring: XDP Tx ring
- *
- * Returns negative on failure, 0 on success.
- */
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
-{
- struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+dma_unmap:
+ for (;;) {
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ if (tx_buf == tx_head)
+ break;
+
+ if (!ntu)
+ ntu += cnt;
+ ntu--;
+ }
+ return ICE_XDP_CONSUMED;
- if (unlikely(!xdpf))
- return ICE_XDP_CONSUMED;
+busy:
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ return ICE_XDP_CONSUMED;
}
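When a DMA mapping fails mid-frame, the descriptors produced so far must be unwound; the dma_unmap label walks backwards from the failing slot to the head, wrapping below zero. The index walk in isolation (printf stands in for dma_unmap_page()):

#include <stdio.h>

/* visit slots from "ntu" back to "head", inclusive, on a ring of cnt */
static void unwind(unsigned int ntu, unsigned int head, unsigned int cnt)
{
	for (;;) {
		printf("unmap slot %u\n", ntu);
		if (ntu == head)
			break;
		if (!ntu)
			ntu += cnt;
		ntu--;
	}
}

int main(void)
{
	unwind(1, 510, 512);	/* visits 1, 0, 511, 510 */
	return 0;
}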
/**
@@ -354,14 +443,21 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
* should be called when a batch of packets has been processed in the
* napi loop.
*/
-void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
+ u32 first_idx)
{
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
+
if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
+ /* store the index of the descriptor with the RS bit set in the
+ * first ice_tx_buf of the given NAPI batch
+ */
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index c7d2954dc9ea..115969ecdf7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,6 +6,36 @@
#include "ice.h"
/**
+ * ice_set_rx_bufs_act - propagate Rx buffer action to frags
+ * @xdp: XDP buffer representing frame (linear and frags part)
+ * @rx_ring: Rx ring struct
+ * @act: action to store onto Rx buffers related to XDP buffer parts
+ *
+ * Set the action that should be taken before putting back the Rx buffers,
+ * for every frag from the first up to the one before last. The last frag is
+ * handled by the caller of this function, as it is the EOP frag that is
+ * currently being processed. This function should only be called when the
+ * XDP buffer contains frags.
+ */
+static inline void
+ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
+ const unsigned int act)
+{
+ const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 first = rx_ring->first_desc;
+ u32 nr_frags = sinfo->nr_frags;
+ u32 cnt = rx_ring->count;
+ struct ice_rx_buf *buf;
+
+ for (int i = 0; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[first];
+ buf->act = act;
+
+ if (++first == cnt)
+ first = 0;
+ }
+}
+
+/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
@@ -21,6 +51,28 @@ ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
+/**
+ * ice_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise return true indicating that this is in fact a non-EOP buffer.
+ */
+static inline bool
+ice_is_non_eop(const struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
+ if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
+ return false;
+
+ rx_ring->ring_stats->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
@@ -70,9 +122,28 @@ static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
-void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ *
+ * Returns the index of the descriptor on which the RS bit was set
+ */
+static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
+{
+ u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+
+ return rs_idx;
+}
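"One behind current NTU" is the slot of the most recently produced descriptor; the ternary handles the wrap at slot zero. Standalone:

#include <stdio.h>

/* index of the last produced descriptor on a ring of cnt slots */
static unsigned int prev_ntu(unsigned int ntu, unsigned int cnt)
{
	return ntu ? ntu - 1 : cnt - 1;
}

int main(void)
{
	printf("%u %u\n", prev_ntu(5, 512), prev_ntu(0, 512));	/* 4 511 */
	return 0;
}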
+
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
+ bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 375eb6493f0f..0e57bd1b85fd 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -237,16 +237,49 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
*/
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
+ /* Close any IRQ mapping now */
+ if (vf->vf_ops->irq_close)
+ vf->vf_ops->irq_close(vf);
+
ice_vf_clear_counters(vf);
vf->vf_ops->clear_reset_trigger(vf);
}
/**
+ * ice_vf_recreate_vsi - Release and re-create the VF's VSI
+ * @vf: VF to recreate the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VVF, VFLR, host
+ * VF configuration change, etc.)
+ *
+ * It releases the old VSI and then creates a new one.
+ */
+static int ice_vf_recreate_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int err;
+
+ ice_vf_vsi_release(vf);
+
+ err = vf->vf_ops->create_vsi(vf);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to recreate the VF%u's VSI, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_rebuild_vsi - rebuild the VF's VSI
* @vf: VF to rebuild the VSI for
*
* This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
* host, PFR, CORER, etc.).
+ *
+ * It reprograms the VSI configuration back into hardware.
*/
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
@@ -256,7 +289,7 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
if (WARN_ON(!vsi))
return -EINVAL;
- if (ice_vsi_rebuild(vsi, true)) {
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
vf->vf_id);
return -EIO;
@@ -271,6 +304,21 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
}
/**
+ * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
+ * @vf: the VF being reset
+ *
+ * Perform reset tasks which must occur after the VSI has been re-created or
+ * rebuilt during a VF reset.
+ */
+static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_rebuild_host_cfg(vf);
+ ice_vf_set_initialized(vf);
+
+ vf->vf_ops->post_vsi_rebuild(vf);
+}
+
+/**
* ice_is_any_vf_in_unicast_promisc - check if any VF(s)
* are in unicast promiscuous mode
* @pf: PF structure for accessing VF(s)
@@ -495,7 +543,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
- vf->vf_ops->post_vsi_rebuild(vf);
+ ice_vf_post_vsi_rebuild(vf);
mutex_unlock(&vf->cfg_lock);
}
@@ -639,14 +687,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_vf_pre_vsi_rebuild(vf);
- if (vf->vf_ops->vsi_rebuild(vf)) {
+ if (ice_vf_recreate_vsi(vf)) {
dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
vf->vf_id);
err = -EFAULT;
goto out_unlock;
}
- vf->vf_ops->post_vsi_rebuild(vf);
+ ice_vf_post_vsi_rebuild(vf);
vsi = ice_get_vf_vsi(vf);
if (WARN_ON(!vsi)) {
err = -EINVAL;
@@ -673,7 +721,7 @@ out_unlock:
* ice_set_vf_state_qs_dis - Set VF queues state to disabled
* @vf: pointer to the VF structure
*/
-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
/* Clear Rx/Tx enabled queues flag */
bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
@@ -681,9 +729,45 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
+/**
+ * ice_set_vf_state_dis - Set VF state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_dis(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ vf->vf_ops->clear_reset_state(vf);
+}
+
/* Private functions only accessed from other virtualization files */
/**
+ * ice_initialize_vf_entry - Initialize a VF entry
+ * @vf: pointer to the VF structure
+ */
+void ice_initialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vfs *vfs;
+
+ vfs = &pf->vfs;
+
+ /* assign default capabilities */
+ vf->spoofchk = true;
+ vf->num_vf_qs = vfs->num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+ ice_virtchnl_set_dflt_ops(vf);
+
+ /* ctrl_vsi_idx will be set to a valid value only when iAVF
+ * creates its first fdir rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
+
+ mutex_init(&vf->cfg_lock);
+}
+
+/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
*/
@@ -924,18 +1008,18 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
vf->num_mac++;
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
if (status) {
dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
- &vf->hw_lan_addr.addr[0], vf->vf_id,
+ &vf->hw_lan_addr[0], vf->vf_id,
status);
return status;
}
vf->num_mac++;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
}
return 0;
@@ -1115,11 +1199,16 @@ void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
*/
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
+ params.type = ICE_VSI_CTRL;
+ params.pi = ice_vf_get_port_info(vf);
+ params.vf = vf;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ vsi = ice_vsi_setup(pf, &params);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
@@ -1129,6 +1218,60 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
}
/**
+ * ice_vf_init_host_cfg - Initialize host admin configuration
+ * @vf: VF to initialize
+ * @vsi: the VSI created at initialization
+ *
+ * Initialize the VF host configuration. Called during VF creation to set up
+ * VLAN 0, add the VF VSI broadcast filter, and set up spoof checking. It
+ * should only be called during VF creation.
+ */
+int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ eth_broadcast_addr(broadcast);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ vf->num_mac = 1;
+
+ err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
+ if (err) {
+ dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
* @vf: VF to remove access to VSI for
*/
@@ -1139,6 +1282,24 @@ void ice_vf_invalidate_vsi(struct ice_vf *vf)
}
/**
+ * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
+ * @vf: pointer to the VF structure
+ *
+ * Release the VSI associated with this VF and then invalidate the VSI
+ * indexes.
+ */
+void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vsi_release(vsi);
+ ice_vf_invalidate_vsi(vf);
+}
+
+/**
* ice_vf_set_initialized - VF is ready for VIRTCHNL communication
* @vf: VF to set in initialized state
*
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 52bd9a3816bf..ef30f05b5d02 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -56,11 +56,13 @@ struct ice_mdd_vf_events {
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
void (*free)(struct ice_vf *vf);
+ void (*clear_reset_state)(struct ice_vf *vf);
void (*clear_mbx_register)(struct ice_vf *vf);
void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
bool (*poll_reset_status)(struct ice_vf *vf);
void (*clear_reset_trigger)(struct ice_vf *vf);
- int (*vsi_rebuild)(struct ice_vf *vf);
+ void (*irq_close)(struct ice_vf *vf);
+ int (*create_vsi)(struct ice_vf *vf);
void (*post_vsi_rebuild)(struct ice_vf *vf);
};
@@ -96,8 +98,8 @@ struct ice_vf {
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dev_lan_addr;
- struct virtchnl_ether_addr hw_lan_addr;
+ u8 dev_lan_addr[ETH_ALEN];
+ u8 hw_lan_addr[ETH_ALEN];
struct ice_time_mac legacy_last_added_umac;
DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
@@ -213,7 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void
ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
@@ -259,7 +261,7 @@ static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
return -EOPNOTSUPP;
}
-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+static inline void ice_set_vf_state_dis(struct ice_vf *vf)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 9c8ef2b01f0f..6f3293b793b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -23,6 +23,7 @@
#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
#endif
+void ice_initialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
@@ -35,7 +36,9 @@ void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi);
void ice_vf_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_vsi_release(struct ice_vf *vf);
void ice_vf_set_initialized(struct ice_vf *vf);
#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index dab3cd5d300e..e24e3f5017ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -507,7 +507,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
- vf->hw_lan_addr.addr);
+ vf->hw_lan_addr);
/* match guest capabilities */
vf->driver_caps = vfres->vf_cap_flags;
@@ -1802,10 +1802,10 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
* was correctly specified over VIRTCHNL
*/
if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
- is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
+ is_zero_ether_addr(vf->hw_lan_addr)) ||
ice_is_vc_addr_primary(vc_ether_addr)) {
- ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
- ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
+ ether_addr_copy(vf->dev_lan_addr, mac_addr);
+ ether_addr_copy(vf->hw_lan_addr, mac_addr);
}
/* hardware and device MACs are already set, but its possible that the
@@ -1836,7 +1836,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
int ret;
/* device MAC already added */
- if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
+ if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
@@ -1891,8 +1891,8 @@ ice_update_legacy_cached_mac(struct ice_vf *vf,
ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
return;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);
}
/**
@@ -1906,15 +1906,15 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
u8 *mac_addr = vc_ether_addr->addr;
if (!is_valid_ether_addr(mac_addr) ||
- !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ !ether_addr_equal(vf->dev_lan_addr, mac_addr))
return;
/* allow the device MAC to be repopulated in the add flow and don't
- * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
+ * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant
* to be persistent on VM reboot and across driver unload/load, which
* won't work if we clear the hardware MAC here
*/
- eth_zero_addr(vf->dev_lan_addr.addr);
+ eth_zero_addr(vf->dev_lan_addr);
ice_update_legacy_cached_mac(vf, vc_ether_addr);
}
@@ -1934,7 +1934,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
int status;
if (!ice_can_vf_change_mac(vf) &&
- ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ ether_addr_equal(vf->dev_lan_addr, mac_addr))
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
@@ -3733,7 +3733,7 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
int result;
if (!is_unicast_ether_addr(mac_addr) ||
- ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+ ether_addr_equal(mac_addr, vf->hw_lan_addr))
continue;
if (vf->pf_set_mac) {
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index c6a58343d81d..e6ef6b303222 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -113,7 +113,7 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
- if (!pf->vsi[vf->lan_vsi_idx])
+ if (!ice_get_vf_vsi(vf))
return -EINVAL;
return 0;
@@ -494,7 +494,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
vf_prof = fdir->fdir_prof[flow];
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
return;
@@ -572,7 +572,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi)
return -EINVAL;
@@ -1205,7 +1205,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 374b7f10b549..31565bbafa22 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -598,6 +598,112 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
/**
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
+ */
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
+ struct ice_tx_buf *tx_buf;
+ u16 completed_frames = 0;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
+
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if (last_rs >= ntc)
+ completed_frames = last_rs - ntc + 1;
+ else
+ completed_frames = last_rs + cnt - ntc + 1;
+ }
+
+ if (!completed_frames)
+ return;
+
+ if (likely(!xdp_ring->xdp_tx_active)) {
+ xsk_frames = completed_frames;
+ goto skip;
+ }
+
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < completed_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+ xsk_buff_free(tx_buf->xdp);
+ xdp_ring->xdp_tx_active--;
+ } else {
+ xsk_frames++;
+ }
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+skip:
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += completed_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+}
+
+/**
+ * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
+ * @xdp: XDP buffer to xmit
+ * @xdp_ring: XDP ring to produce descriptor onto
+ *
+ * Note that this function works directly on the xdp_buff; there is no need
+ * to convert it to an xdp_frame. The xdp_buff pointer is stored in the
+ * ice_tx_buf so that the cleaning side will be able to xsk_buff_free() it.
+ *
+ * Returns ICE_XDP_TX for a successfully produced descriptor, or
+ * ICE_XDP_CONSUMED if there was not enough space on the XDP ring
+ */
+static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
+ struct ice_tx_ring *xdp_ring)
+{
+ u32 size = xdp->data_end - xdp->data;
+ u32 ntu = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) {
+ ice_clean_xdp_irq_zc(xdp_ring);
+ if (!ICE_DESC_UNUSED(xdp_ring)) {
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+ }
+
+ dma = xsk_buff_xdp_get_dma(xdp);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ tx_buf->xdp = xdp;
+ tx_buf->type = ICE_TX_BUF_XSK_TX;
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, size, 0);
+ xdp_ring->xdp_tx_active++;
+
+ if (++ntu == xdp_ring->count)
+ ntu = 0;
+ xdp_ring->next_to_use = ntu;
+
+ return ICE_XDP_TX;
+}
+
+/**
* ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
@@ -630,7 +736,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
break;
case XDP_TX:
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
@@ -760,7 +866,7 @@ construct_skb:
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
@@ -776,78 +882,6 @@ construct_skb:
}
/**
- * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
- * @xdp_ring: XDP Tx ring
- * @tx_buf: Tx buffer to clean
- */
-static void
-ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
-{
- page_frag_free(tx_buf->raw_buf);
- xdp_ring->xdp_tx_active--;
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
-/**
- * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
- * @xdp_ring: XDP Tx ring
- */
-static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
-{
- u16 ntc = xdp_ring->next_to_clean;
- struct ice_tx_desc *tx_desc;
- u16 cnt = xdp_ring->count;
- struct ice_tx_buf *tx_buf;
- u16 completed_frames = 0;
- u16 xsk_frames = 0;
- u16 last_rs;
- int i;
-
- last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
- tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
- if ((tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
- if (last_rs >= ntc)
- completed_frames = last_rs - ntc + 1;
- else
- completed_frames = last_rs + cnt - ntc + 1;
- }
-
- if (!completed_frames)
- return;
-
- if (likely(!xdp_ring->xdp_tx_active)) {
- xsk_frames = completed_frames;
- goto skip;
- }
-
- ntc = xdp_ring->next_to_clean;
- for (i = 0; i < completed_frames; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
-
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
-
- ntc++;
- if (ntc >= xdp_ring->count)
- ntc = 0;
- }
-skip:
- tx_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_to_clean += completed_frames;
- if (xdp_ring->next_to_clean >= cnt)
- xdp_ring->next_to_clean -= cnt;
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
-}
-
-/**
* ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
* @xdp_ring: XDP ring to produce the HW Tx descriptor on
* @desc: AF_XDP descriptor to pull the DMA address and length from
@@ -921,20 +955,6 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d
}
/**
- * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
- * @xdp_ring: XDP ring to produce the HW Tx descriptors on
- */
-static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
-{
- u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
- struct ice_tx_desc *tx_desc;
-
- tx_desc = ICE_TX_DESC(xdp_ring, ntu);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-}
-
-/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
*
@@ -1068,12 +1088,12 @@ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
while (ntc != ntu) {
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_buf->raw_buf)
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- else
+ if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+ xsk_buff_free(tx_buf->xdp);
+ } else {
xsk_frames++;
-
- tx_buf->raw_buf = NULL;
+ }
ntc++;
if (ntc >= xdp_ring->count)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b5b443883da9..03bc1e8af575 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2835,6 +2835,22 @@ static int igb_offload_txtime(struct igb_adapter *adapter,
return 0;
}
+static int igb_tc_query_caps(struct igb_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->broken_mqprio = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static LIST_HEAD(igb_block_cb_list);
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -2843,6 +2859,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
struct igb_adapter *adapter = netdev_priv(dev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return igb_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_CBS:
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
@@ -2896,8 +2914,14 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
bpf_prog_put(old_prog);
/* bpf is just replaced, RXQ and MTU are already setup */
- if (!need_reset)
+ if (!need_reset) {
return 0;
+ } else {
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+ }
if (running)
igb_open(dev);
@@ -3210,8 +3234,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_pci_reg;
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -3333,6 +3355,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -3648,7 +3671,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -3859,8 +3881,6 @@ static void igb_remove(struct pci_dev *pdev)
kfree(adapter->shadow_vfta);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index a15927e77272..a1d815af507d 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -396,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
rd32(IGC_MPC);
}
+bool igc_is_device_id_i225(struct igc_hw *hw)
+{
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I225_LM:
+ case IGC_DEV_ID_I225_V:
+ case IGC_DEV_ID_I225_I:
+ case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_K2:
+ case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I225_IT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool igc_is_device_id_i226(struct igc_hw *hw)
+{
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I226_LM:
+ case IGC_DEV_ID_I226_V:
+ case IGC_DEV_ID_I226_K:
+ case IGC_DEV_ID_I226_IT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static struct igc_mac_operations igc_mac_ops_base = {
.init_hw = igc_init_hw_base,
.check_for_link = igc_check_for_copper_link,
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index ce530f5fd7bd..7a992befca24 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -7,6 +7,8 @@
/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
void igc_power_down_phy_copper_base(struct igc_hw *hw);
+bool igc_is_device_id_i225(struct igc_hw *hw);
+bool igc_is_device_id_i226(struct igc_hw *hw);
/* Transmit Descriptor - Advanced */
union igc_adv_tx_desc {
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index e9747ec5ac0b..9dec3563ce3a 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -524,6 +524,7 @@
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
+#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 1dd2a7fee8d4..2928a6c73692 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5978,6 +5978,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
const struct tc_taprio_qopt_offload *qopt)
{
int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ struct igc_hw *hw = &adapter->hw;
struct timespec64 now;
size_t n;
@@ -5990,8 +5991,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
* in the future, it will hold all the packets until that
* time, causing a lot of TX Hangs, so to avoid that, we
* reject schedules that would start in the future.
+ * Note: this limitation no longer applies to i226.
*/
- if (!is_base_time_past(qopt->base_time, &now))
+ if (!is_base_time_past(qopt->base_time, &now) &&
+ igc_is_device_id_i225(hw))
return false;
for (n = 0; n < qopt->num_entries; n++) {
@@ -6061,6 +6064,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
bool queue_configured[IGC_MAX_TX_QUEUES] = { };
+ struct igc_hw *hw = &adapter->hw;
u32 start_time = 0, end_time = 0;
size_t n;
int i;
@@ -6073,7 +6077,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (qopt->base_time < 0)
return -ERANGE;
- if (adapter->base_time)
+ if (igc_is_device_id_i225(hw) && adapter->base_time)
return -EALREADY;
if (!validate_schedule(adapter, qopt))
@@ -6221,12 +6225,35 @@ static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
+static int igc_tc_query_caps(struct igc_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->broken_mqprio = true;
+
+ if (hw->mac.type == igc_i225)
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return igc_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_TAPRIO:
return igc_tsn_enable_qbv_scheduling(adapter, type_data);
@@ -6451,8 +6478,6 @@ static int igc_probe(struct pci_dev *pdev,
if (err)
goto err_pci_reg;
- pci_enable_pcie_error_reporting(pdev);
-
err = pci_enable_ptm(pdev, NULL);
if (err < 0)
dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
@@ -6550,6 +6575,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->mpls_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= netdev->vlan_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -6657,7 +6685,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -6705,8 +6732,6 @@ static void igc_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
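Taken together, the igc_main.c hunks gate the "no future base time" rule to i225 and let i226 accept future base times. A standalone sketch of that rule, compilable as plain C with illustrative values (base_time_acceptable and is_i225 are hypothetical names, not driver symbols):

#include <stdbool.h>
#include <stdio.h>

static bool base_time_acceptable(long long base_time_ns,
                                 long long now_ns, bool is_i225)
{
        bool in_past = base_time_ns <= now_ns;

        /* Only i225 must reject future base times (TX hang risk). */
        return in_past || !is_i225;
}

int main(void)
{
        printf("i225, future base: %d\n",
               base_time_acceptable(2000, 1000, true));   /* 0: rejected */
        printf("i226, future base: %d\n",
               base_time_acceptable(2000, 1000, false));  /* 1: accepted */
        return 0;
}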
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index bb10d7b65232..a386c8d61dbf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
+#include "igc_hw.h"
#include "igc_tsn.h"
static bool is_any_launchtime(struct igc_adapter *adapter)
@@ -92,7 +93,8 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
- IGC_TQAVCTRL_ENHANCED_QAV);
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -117,20 +119,10 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
ktime_t base_time, systim;
int i;
- cycle = adapter->cycle_time;
- base_time = adapter->base_time;
-
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
- tqavctrl = rd32(IGC_TQAVCTRL);
- tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
- wr32(IGC_TQAVCTRL, tqavctrl);
-
- wr32(IGC_QBVCYCLET_S, cycle);
- wr32(IGC_QBVCYCLET, cycle);
-
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
@@ -233,21 +225,46 @@ skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
+ tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+
+ cycle = adapter->cycle_time;
+ base_time = adapter->base_time;
+
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
systim = ktime_set(sec, nsec);
-
if (ktime_compare(systim, base_time) > 0) {
- s64 n;
+ s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
- n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+ } else {
+ /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
+ * has to be configured before the cycle time and base time.
+ * Tx won't hang if a GCL is already running,
+ * so in this case we don't need to set FutScdDis.
+ */
+ if (igc_is_device_id_i226(hw) &&
+ !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
+ tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;
}
- baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ wr32(IGC_QBVCYCLET_S, cycle);
+ wr32(IGC_QBVCYCLET, cycle);
+ baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
wr32(IGC_BASET_H, baset_h);
+
+ /* On i226, a future base time is only supported when the FutScdDis
+ * bit is enabled, and it only takes effect on re-configuration.
+ * So initialize the base time to zero to create a "re-configuration"
+ * scenario, and only then set the desired base time.
+ */
+ if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS)
+ wr32(IGC_BASET_L, 0);
wr32(IGC_BASET_L, baset_l);
return 0;
@@ -274,17 +291,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
- int err;
+ struct igc_hw *hw = &adapter->hw;
- if (netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) {
schedule_work(&adapter->reset_task);
return 0;
}
- err = igc_tsn_enable_offload(adapter);
- if (err < 0)
- return err;
+ igc_tsn_reset(adapter);
- adapter->flags = igc_tsn_new_flags(adapter);
return 0;
}
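When the schedule's base time is already in the past, the enable path rolls it forward by a whole number of cycles so the programmed value lands just ahead of the current time. A standalone sketch of that arithmetic with illustrative numbers (roll_forward is a hypothetical name; the kernel code uses div64_s64 for the division):

#include <stdio.h>

static long long roll_forward(long long base_time, long long now,
                              long long cycle)
{
        if (now > base_time) {
                long long n = (now - base_time) / cycle;

                base_time += (n + 1) * cycle;
        }
        return base_time;
}

int main(void)
{
        /* base 100, cycle 50, now 260 -> n = 3, new base = 300 */
        printf("%lld\n", roll_forward(100, 260, 50));
        return 0;
}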
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index aeeb34e64610..e27af72aada8 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -29,6 +29,11 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
if (old_prog)
bpf_prog_put(old_prog);
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+
if (if_running)
igc_open(dev);
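This set-or-clear idiom, advertising NDO_XMIT redirect-target support only while a program is attached, recurs across the drivers in this series. A minimal kernel-context sketch, assuming only the xdp_features helpers shown in the patch (my_setup_prog is a hypothetical driver hook):

#include <linux/netdevice.h>

/* Hypothetical hook keeping the advertised redirect-target bit in sync
 * with program attach/detach, mirroring the igc change above.
 */
static void my_setup_prog(struct net_device *dev, struct bpf_prog *prog)
{
        if (prog)
                xdp_features_set_redirect_target(dev, true);
        else
                xdp_features_clear_redirect_target(dev);
}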
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 38c4609bd429..878dd8dff528 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3292,13 +3292,14 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
+ bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
u32 links_reg, links_orig;
u32 i;
/* If Crosstalk fix enabled do the sanity check of making sure
* the SFP+ cage is full.
*/
- if (ixgbe_need_crosstalk_fix(hw)) {
+ if (crosstalk_fix_active) {
u32 sfp_cage_full;
switch (hw->mac.type) {
@@ -3346,10 +3347,24 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
}
} else {
- if (links_reg & IXGBE_LINKS_UP)
+ if (links_reg & IXGBE_LINKS_UP) {
+ if (crosstalk_fix_active) {
+ /* Check the link state again after a delay
+ * to filter out spurious link up
+ * notifications.
+ */
+ mdelay(5);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (!(links_reg & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ return 0;
+ }
+ }
*link_up = true;
- else
+ } else {
*link_up = false;
+ }
}
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
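The crosstalk-fix path above trusts a link-up reading only if it is still up after a 5 ms delay. A standalone sketch of the de-bounce, simulating a spurious blip with a fake register (all names and the bit value are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define LINKS_UP 0x40000000u  /* illustrative bit position */

/* Simulated register: first read reports up, second read reports down,
 * mimicking a spurious link-up blip.
 */
static unsigned int fake_links[] = { LINKS_UP, 0 };
static int reads;

static unsigned int read_links(void) { return fake_links[reads++]; }

static bool link_is_up(bool crosstalk_fix_active)
{
        if (!(read_links() & LINKS_UP))
                return false;
        if (crosstalk_fix_active) {
                /* Re-read after a delay (mdelay(5) in the patch). */
                if (!(read_links() & LINKS_UP))
                        return false; /* blip filtered out */
        }
        return true;
}

int main(void)
{
        printf("%d\n", link_is_up(true)); /* 0: spurious blip rejected */
        return 0;
}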
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 53a969e34883..13a6fca31004 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -557,8 +557,10 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
/**
* ixgbe_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: netlink extack pointer used to report the failure reason
**/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = xs->xso.real_dev;
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -570,23 +572,22 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
int i;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
- netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
- xs->id.proto);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");
return -EINVAL;
}
if (xs->props.mode != XFRM_MODE_TRANSPORT) {
- netdev_err(dev, "Unsupported mode for ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
return -EINVAL;
}
if (ixgbe_ipsec_check_mgmt_ip(xs)) {
- netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
+ NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- netdev_err(dev, "Unsupported ipsec offload type\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
return -EINVAL;
}
@@ -594,14 +595,14 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
struct rx_sa rsa;
if (xs->calg) {
- netdev_err(dev, "Compression offload not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
/* find the first unused index */
ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Rx table!\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
@@ -616,7 +617,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* get the key and salt */
ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
}
@@ -676,7 +677,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
/* no match and no empty slot */
- netdev_err(dev, "No space for SA in Rx IP SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx IP SA table");
memset(&rsa, 0, sizeof(rsa));
return -ENOSPC;
}
@@ -711,7 +712,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* find the first unused index */
ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Tx table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");
return ret;
}
sa_idx = (u16)ret;
@@ -725,7 +726,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
return ret;
}
@@ -950,7 +951,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
- err = ixgbe_ipsec_add_sa(xs);
+ err = ixgbe_ipsec_add_sa(xs, NULL);
if (err)
goto err_aead;
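The conversion from netdev_err() to NL_SET_ERR_MSG_MOD() routes the failure reason back to the netlink caller; a NULL extack, as passed from the VF mailbox path above, is tolerated because the macro checks the pointer first. A minimal kernel-context sketch of the idiom (my_validate_offload is hypothetical):

#include <linux/errno.h>
#include <linux/in.h>
#include <linux/netlink.h>

/* Hypothetical validation helper; only NL_SET_ERR_MSG_MOD is from the
 * patch. Safe to call with extack == NULL.
 */
static int my_validate_offload(u8 proto, struct netlink_ext_ack *extack)
{
        if (proto != IPPROTO_ESP && proto != IPPROTO_AH) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");
                return -EINVAL;
        }
        return 0;
}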
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4507fba8747a..773c35fecace 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6647,7 +6647,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
goto err;
- rx_ring->xdp_prog = adapter->xdp_prog;
+ WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog);
return 0;
err:
@@ -8943,7 +8943,8 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
int regnum = addr;
if (devad != MDIO_DEVAD_NONE)
- regnum |= (devad << 16) | MII_ADDR_C45;
+ return mdiobus_c45_read(adapter->mii_bus, prtad,
+ devad, regnum);
return mdiobus_read(adapter->mii_bus, prtad, regnum);
}
@@ -8966,7 +8967,8 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
int regnum = addr;
if (devad != MDIO_DEVAD_NONE)
- regnum |= (devad << 16) | MII_ADDR_C45;
+ return mdiobus_c45_write(adapter->mii_bus, prtad, devad,
+ regnum, value);
return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
}
@@ -10303,14 +10305,15 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
synchronize_rcu();
err = ixgbe_setup_tc(dev, adapter->hw_tcs);
- if (err) {
- rcu_assign_pointer(adapter->xdp_prog, old_prog);
+ if (err)
return -EINVAL;
- }
+ if (!prog)
+ xdp_features_clear_redirect_target(dev);
} else {
- for (i = 0; i < adapter->num_rx_queues; i++)
- (void)xchg(&adapter->rx_ring[i]->xdp_prog,
- adapter->xdp_prog);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ WRITE_ONCE(adapter->rx_ring[i]->xdp_prog,
+ adapter->xdp_prog);
+ }
}
if (old_prog)
@@ -10326,6 +10329,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
+ xdp_features_set_redirect_target(dev, true);
}
return 0;
@@ -10814,8 +10818,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -11023,6 +11025,9 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
@@ -11243,7 +11248,6 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -11332,8 +11336,6 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
if (disable_dev)
pci_disable_device(pdev);
}
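The xchg()-to-WRITE_ONCE() change publishes the new XDP program pointer with a marked store; the datapath is expected to sample it with a matching READ_ONCE(). A minimal kernel-context sketch of the pairing (struct names are illustrative):

#include <linux/compiler.h>

struct prog;
struct ring { struct prog *xdp_prog; };

/* Writer side: publish the new program pointer (control path). */
static void ring_set_prog(struct ring *r, struct prog *p)
{
        WRITE_ONCE(r->xdp_prog, p);
}

/* Reader side: sample the pointer once per packet (hot path). */
static struct prog *ring_get_prog(struct ring *r)
{
        return READ_ONCE(r->xdp_prog);
}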
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 123dca9ce468..689470c1e8ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -680,14 +680,14 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
}
/**
- * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags
+ * ixgbe_mii_bus_read_generic_c22 - Read a clause 22 register with gssr flags
* @hw: pointer to hardware structure
* @addr: address
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
- int regnum, u32 gssr)
+static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+ int regnum, u32 gssr)
{
u32 hwaddr, cmd;
s32 data;
@@ -696,31 +696,52 @@ static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
return -EBUSY;
hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
- if (regnum & MII_ADDR_C45) {
- hwaddr |= regnum & GENMASK(21, 0);
- cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- } else {
- hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
- cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
- IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
- }
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
+ IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
data = ixgbe_msca_cmd(hw, cmd);
if (data < 0)
goto mii_bus_read_done;
- /* For a clause 45 access the address cycle just completed, we still
- * need to do the read command, otherwise just get the data
- */
- if (!(regnum & MII_ADDR_C45))
- goto do_mii_bus_read;
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
+
+mii_bus_read_done:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return data;
+}
+
+/**
+ * ixgbe_mii_bus_read_generic_c45 - Read a clause 45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+ int devad, int regnum, u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 data;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ hwaddr |= devad << 16 | regnum;
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
+
+ data = ixgbe_msca_cmd(hw, cmd);
+ if (data < 0)
+ goto mii_bus_read_done;
cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
data = ixgbe_msca_cmd(hw, cmd);
if (data < 0)
goto mii_bus_read_done;
-do_mii_bus_read:
data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
@@ -730,15 +751,15 @@ mii_bus_read_done:
}
/**
- * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags
+ * ixgbe_mii_bus_write_generic_c22 - Write a clause 22 register with gssr flags
* @hw: pointer to hardware structure
* @addr: address
* @regnum: register number
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
- int regnum, u16 val, u32 gssr)
+static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+ int regnum, u16 val, u32 gssr)
{
u32 hwaddr, cmd;
s32 err;
@@ -749,20 +770,43 @@ static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
- if (regnum & MII_ADDR_C45) {
- hwaddr |= regnum & GENMASK(21, 0);
- cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- } else {
- hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
- cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
- IXGBE_MSCA_MDI_COMMAND;
- }
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ err = ixgbe_msca_cmd(hw, cmd);
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return err;
+}
+
+/**
+ * ixgbe_mii_bus_write_generic_c45 - Write a clause 45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @devad: device address to write
+ * @regnum: register number
+ * @val: value to write
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+ int devad, int regnum, u16 val,
+ u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 err;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ hwaddr |= devad << 16 | regnum;
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- /* For clause 45 this is an address cycle, for clause 22 this is the
- * entire transaction
- */
err = ixgbe_msca_cmd(hw, cmd);
- if (err < 0 || !(regnum & MII_ADDR_C45))
+ if (err < 0)
goto mii_bus_write_done;
cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
@@ -774,70 +818,144 @@ mii_bus_write_done:
}
/**
- * ixgbe_mii_bus_read - Read a clause 22/45 register
+ * ixgbe_mii_bus_read_c22 - Read a clause 22 register
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ **/
+static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_read_c45 - Read a clause 45 register
* @bus: pointer to mii_bus structure which points to our driver private
+ * @devad: device address to read
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
+static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+ int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_write_c22 - Write a clause 22 register
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
- return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+ return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr);
}
/**
- * ixgbe_mii_bus_write - Write a clause 22/45 register
+ * ixgbe_mii_bus_write_c45 - Write a clause 45 register
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
+ * @devad: device address to write
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
+static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+ int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
- return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+ return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val,
+ gssr);
}
/**
- * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
+ * ixgbe_x550em_a_mii_bus_read_c22 - Read a clause 22 register on x550em_a
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
- int regnum)
+static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+ int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_read_c45 - Read a clause 45 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ **/
+static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+ int devad, int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_write_c22 - Write a clause 22 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+ int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
- return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+ return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr);
}
/**
- * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
+ * ixgbe_x550em_a_mii_bus_write_c45 - Write a clause 45 register on x550em_a
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
+ * @devad: device address to write
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
- int regnum, u16 val)
+static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+ int devad, int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
- return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+ return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val,
+ gssr);
}
/**
@@ -909,8 +1027,11 @@ out:
**/
s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
- s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
- s32 (*read)(struct mii_bus *bus, int addr, int regnum);
+ s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+ s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+ u16 val);
+ s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
@@ -929,12 +1050,16 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
if (!ixgbe_x550em_a_has_mii(hw))
return 0;
- read = &ixgbe_x550em_a_mii_bus_read;
- write = &ixgbe_x550em_a_mii_bus_write;
+ read_c22 = ixgbe_x550em_a_mii_bus_read_c22;
+ write_c22 = ixgbe_x550em_a_mii_bus_write_c22;
+ read_c45 = ixgbe_x550em_a_mii_bus_read_c45;
+ write_c45 = ixgbe_x550em_a_mii_bus_write_c45;
break;
default:
- read = &ixgbe_mii_bus_read;
- write = &ixgbe_mii_bus_write;
+ read_c22 = ixgbe_mii_bus_read_c22;
+ write_c22 = ixgbe_mii_bus_write_c22;
+ read_c45 = ixgbe_mii_bus_read_c45;
+ write_c45 = ixgbe_mii_bus_write_c45;
break;
}
@@ -942,8 +1067,10 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
if (!bus)
return -ENOMEM;
- bus->read = read;
- bus->write = write;
+ bus->read = read_c22;
+ bus->write = write_c22;
+ bus->read_c45 = read_c45;
+ bus->write_c45 = write_c45;
/* Use the position of the device in the PCI hierarchy as the id */
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
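The whole ixgbe_phy.c rework replaces the MII_ADDR_C45 flag multiplexing with the dedicated read_c45/write_c45 mii_bus methods introduced for this series. A condensed kernel-context sketch of the registration shape (the my_* callbacks are hypothetical; only their signatures follow the patch):

#include <linux/phy.h>

/* Hypothetical per-driver accessors; signatures match the split API. */
static int my_read_c22(struct mii_bus *bus, int addr, int regnum);
static int my_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val);
static int my_read_c45(struct mii_bus *bus, int addr, int devad, int regnum);
static int my_write_c45(struct mii_bus *bus, int addr, int devad, int regnum,
                        u16 val);

static int my_register_mdio(struct device *dev, struct mii_bus *bus)
{
        bus->read      = my_read_c22;
        bus->write     = my_write_c22;
        bus->read_c45  = my_read_c45;
        bus->write_c45 = my_write_c45;

        return devm_mdiobus_register(dev, bus);
}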
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index c1cf540d162a..66cf17f19408 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -257,8 +257,10 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: netlink extack pointer used to report the failure reason
**/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
+static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
@@ -270,18 +272,17 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
ipsec = adapter->ipsec;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
- netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
- xs->id.proto);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
return -EINVAL;
}
if (xs->props.mode != XFRM_MODE_TRANSPORT) {
- netdev_err(dev, "Unsupported mode for ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- netdev_err(dev, "Unsupported ipsec offload type\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
return -EINVAL;
}
@@ -289,14 +290,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
struct rx_sa rsa;
if (xs->calg) {
- netdev_err(dev, "Compression offload not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
/* find the first unused index */
ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Rx table!\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
@@ -311,7 +312,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
/* get the key and salt */
ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
}
@@ -350,7 +351,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
/* find the first unused index */
ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Tx table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");
return ret;
}
sa_idx = (u16)ret;
@@ -364,7 +365,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
return ret;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ea0a230c1153..a44e4bd56142 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4634,6 +4634,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
/* MTU range: 68 - 1504 or 9710 */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index ef878973b859..8662543ca5c8 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -146,9 +146,6 @@ static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
u32 val;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
return ret;
@@ -177,9 +174,6 @@ static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id,
struct orion_mdio_dev *dev = bus->priv;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
return ret;
@@ -204,21 +198,17 @@ static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
.poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
};
-static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
- int regnum)
+static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum)
{
struct orion_mdio_dev *dev = bus->priv;
- u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
int ret;
- if (!(regnum & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
if (ret < 0)
return ret;
- writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG);
writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
(dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
MVMDIO_XSMI_READ_OPERATION,
@@ -237,21 +227,17 @@ static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0);
}
-static int orion_mdio_xsmi_write(struct mii_bus *bus, int mii_id,
- int regnum, u16 value)
+static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum, u16 value)
{
struct orion_mdio_dev *dev = bus->priv;
- u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
int ret;
- if (!(regnum & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
if (ret < 0)
return ret;
- writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG);
writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
(dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
MVMDIO_XSMI_WRITE_OPERATION | value,
@@ -302,8 +288,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->write = orion_mdio_smi_write;
break;
case BUS_TYPE_XSMI:
- bus->read = orion_mdio_xsmi_read;
- bus->write = orion_mdio_xsmi_write;
+ bus->read_c45 = orion_mdio_xsmi_read_c45;
+ bus->write_c45 = orion_mdio_xsmi_write_c45;
break;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f8925cac61e4..0e39d199ff06 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -38,7 +38,7 @@
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
-#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
/* Registers */
@@ -5612,6 +5612,12 @@ static int mvneta_probe(struct platform_device *pdev)
NETIF_F_TSO | NETIF_F_RXCSUM;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ if (!pp->bm_priv)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 4da45c5abba5..9b4ecbe4f36d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6866,6 +6866,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->vlan_features |= features;
netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index d2584ebb7a70..5727d67e0259 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -195,6 +195,9 @@ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
+M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \
+M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
+ cpt_flt_eng_info_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -297,6 +300,8 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
+ msg_req, nix_inline_ipsec_cfg) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -1196,7 +1201,7 @@ struct nix_inline_ipsec_cfg {
u32 cpt_credit;
struct {
u8 egrp;
- u8 opcode;
+ u16 opcode;
u16 param1;
u16 param2;
} gen_cfg;
@@ -1205,6 +1210,8 @@ struct nix_inline_ipsec_cfg {
u8 cpt_slot;
} inst_qsel;
u8 enable;
+ u16 bpid;
+ u32 credit_th;
};
/* Per NIX LF inline IPSec configuration */
@@ -1609,6 +1616,8 @@ struct cpt_lf_alloc_req_msg {
u16 sso_pf_func;
u16 eng_grpmsk;
int blkaddr;
+ u8 ctx_ilen_valid : 1;
+ u8 ctx_ilen : 7;
};
#define CPT_INLINE_INBOUND 0
@@ -1692,6 +1701,28 @@ struct cpt_inst_lmtst_req {
u64 rsvd;
};
+/* Mailbox message format to request a CPT LF reset */
+struct cpt_lf_rst_req {
+ struct mbox_msghdr hdr;
+ u32 slot;
+ u32 rsvd;
+};
+
+/* Mailbox message format to query CPT faulted engines */
+struct cpt_flt_eng_info_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ bool reset;
+ u32 rsvd;
+};
+
+struct cpt_flt_eng_info_rsp {
+ struct mbox_msghdr hdr;
+ u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
+ u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+ u64 rsvd;
+};
+
struct sdp_node_info {
/* Node to which this PF belongs */
u8 node_id;
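The M() entries above extend an X-macro table: the same list is expanded several times to generate message IDs, handler names, and request/response types. A standalone sketch of the underlying X-macro technique, with two illustrative entries rather than the real mailbox table:

#include <stdio.h>

#define MY_MESSAGES                              \
M(READY,    0x001, ready)                        \
M(LF_RESET, 0xA08, lf_reset)

enum {
#define M(_name, _id, _fn) MBOX_MSG_##_name = _id,
MY_MESSAGES
#undef M
};

static const char *msg_name(int id)
{
        switch (id) {
#define M(_name, _id, _fn) case _id: return #_name;
        MY_MESSAGES
#undef M
        }
        return "unknown";
}

int main(void)
{
        printf("0x%x -> %s\n", MBOX_MSG_LF_RESET, msg_name(0xA08));
        return 0;
}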
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3f5e09b77d4b..8683ce57ed3f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1164,8 +1164,16 @@ cpt:
goto nix_err;
}
+ err = rvu_cpt_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
+ goto mcs_err;
+ }
+
return 0;
+mcs_err:
+ rvu_mcs_exit(rvu);
nix_err:
rvu_nix_freemem(rvu);
npa_err:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 7f0a64731c67..389663a13d1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -108,6 +108,8 @@ struct rvu_block {
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
struct rvu *rvu;
+ u64 cpt_flt_eng_map[3];
+ u64 cpt_rcvrd_eng_map[3];
};
struct nix_mcast {
@@ -459,6 +461,7 @@ struct rvu {
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
+ struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
int nix_blkaddr[MAX_NIX_BLKS];
@@ -510,6 +513,7 @@ struct rvu {
struct ptp *ptp;
int mcs_blk_cnt;
+ int cpt_pf_num;
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
@@ -524,6 +528,8 @@ struct rvu {
struct list_head mcs_intrq_head;
/* mcs interrupt queue lock */
spinlock_t mcs_intrq_lock;
+ /* CPT interrupt lock */
+ spinlock_t cpt_intr_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -546,6 +552,17 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
+static inline void rvu_bar2_sel_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ /* HW requires a read back of the RVU_AF_BAR2_SEL register to ensure
+ * the write operation has completed.
+ */
+ rvu_write64(rvu, block, offset, val);
+ rvu_read64(rvu, block, offset);
+ /* Barrier to ensure read completes before accessing LF registers */
+ mb();
+}
+
/* Silicon revisions */
static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
{
@@ -865,11 +882,15 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu);
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
int slot);
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+int rvu_cpt_init(struct rvu *rvu);
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K NIX */
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
+
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
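rvu_bar2_sel_write64() is the classic posted-write flush: reading back the register forces the write to complete before the barrier, so later LF accesses observe the new BAR2 selection. A minimal kernel-context sketch of the generic idiom (select_and_flush and the offset are illustrative):

#include <linux/io.h>
#include <linux/types.h>

static void select_and_flush(void __iomem *regs, u64 val)
{
        writeq(val, regs + 0x10);   /* posted write may still be in flight */
        readq(regs + 0x10);         /* read back forces completion */
        mb();                       /* order against subsequent accesses */
}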
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7dbbc115cde4..4ad9ff025c96 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -538,3 +538,21 @@ void rvu_program_channels(struct rvu *rvu)
rvu_lbk_set_channels(rvu);
rvu_rpm_set_channels(rvu);
}
+
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ int blkaddr = nix_hw->blkaddr;
+ u64 cfg;
+
+ /* Set AF vWQE timer interval to an LF-configurable range of
+ * 6.4us to 1.632ms.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);
+
+ /* Enable NIX RX stream and global conditional clock to
+ * avoid multiple frees of NPA buffers.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
+ cfg |= BIT_ULL(1) | BIT_ULL(2);
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 38bbae5d9ae0..f047185f38e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -17,7 +17,7 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
-#define CPT_CTX_ILEN 2ULL
+#define CPT_CTX_ILEN 1ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@@ -37,34 +37,68 @@
(_rsp)->free_sts_##etype = free_sts; \
})
-static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
struct rvu_block *block = ptr;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- u64 reg0, reg1, reg2;
-
- reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
- reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
- if (!is_rvu_otx2(rvu)) {
- reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
- dev_err_ratelimited(rvu->dev,
- "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
- reg0, reg1, reg2);
- } else {
- dev_err_ratelimited(rvu->dev,
- "Received CPTAF FLT irq : 0x%llx, 0x%llx",
- reg0, reg1);
+ u64 reg, val;
+ int i, eng;
+ u8 grp;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
+ dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
+
+ i = -1;
+ while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
+ switch (vec) {
+ case 0:
+ eng = i;
+ break;
+ case 1:
+ eng = i + 64;
+ break;
+ case 2:
+ eng = i + 128;
+ break;
+ }
+ grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
+ /* Disable and re-enable the engine that triggered the fault */
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
+
+ spin_lock(&rvu->cpt_intr_lock);
+ block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
+ val = val & 0x3;
+ if (val == 0x1 || val == 0x2)
+ block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
+ spin_unlock(&rvu->cpt_intr_lock);
}
-
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
- if (!is_rvu_otx2(rvu))
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
return IRQ_HANDLED;
}
+static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
+}
+
static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
{
struct rvu_block *block = ptr;
@@ -119,8 +153,10 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
int i;
/* Disable all CPT AF interrupts */
- for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
+
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
@@ -151,7 +187,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
/* Disable all CPT AF interrupts */
for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
@@ -172,16 +208,31 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
+ irq_handler_t flt_fn;
int i, ret;
for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
+
+ switch (i) {
+ case CPT_10K_AF_INT_VEC_FLT0:
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
+ break;
+ case CPT_10K_AF_INT_VEC_FLT1:
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
+ break;
+ case CPT_10K_AF_INT_VEC_FLT2:
+ flt_fn = rvu_cpt_af_flt2_intr_handler;
+ break;
+ }
ret = rvu_cpt_do_register_interrupt(block, off + i,
- rvu_cpt_af_flt_intr_handler,
- &rvu->irq_name[(off + i) * NAME_SIZE]);
+ flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ if (i == CPT_10K_AF_INT_VEC_FLT2)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
+ else
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
}
ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
@@ -208,8 +259,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
+ irq_handler_t flt_fn;
int i, offs, ret = 0;
- char irq_name[16];
if (!is_block_implemented(rvu->hw, blkaddr))
return 0;
@@ -226,13 +277,20 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
return cpt_10k_register_interrupts(block, offs);
for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
- snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
+ switch (i) {
+ case CPT_AF_INT_VEC_FLT0:
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
+ break;
+ case CPT_AF_INT_VEC_FLT1:
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
+ break;
+ }
ret = rvu_cpt_do_register_interrupt(block, offs + i,
- rvu_cpt_af_flt_intr_handler,
- irq_name);
+ flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
if (ret)
goto err;
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
}
ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
@@ -290,7 +348,7 @@ static int get_cpt_pf_num(struct rvu *rvu)
static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@@ -302,7 +360,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@@ -371,8 +429,12 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
/* Set CPT LF group and priority */
val = (u64)req->eng_grpmsk << 48 | 1;
- if (!is_rvu_otx2(rvu))
- val |= (CPT_CTX_ILEN << 17);
+ if (!is_rvu_otx2(rvu)) {
+ if (req->ctx_ilen_valid)
+ val |= (req->ctx_ilen << 17);
+ else
+ val |= (CPT_CTX_ILEN << 17);
+ }
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
@@ -762,10 +824,21 @@ int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
- int blkaddr)
+ int blkaddr, struct cpt_rxc_time_cfg_req *save)
{
u64 dfrg_reg;
+ if (save) {
+ /* Save older config */
+ dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
+ save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
+ save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
+ save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
+
+ save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ }
+
dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
@@ -790,7 +863,7 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
- cpt_rxc_time_cfg(rvu, req, blkaddr);
+ cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
return 0;
}
@@ -801,9 +874,67 @@ int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}
+int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+ u64 ctl, ctl2;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+ ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+
+ ret = rvu_lf_reset(rvu, block, cptlf);
+ if (ret)
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
+ struct cpt_flt_eng_info_rsp *rsp)
+{
+ struct rvu_block *block;
+ unsigned long flags;
+ int blkaddr, vec;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ block = &rvu->hw->block[blkaddr];
+ for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+ spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
+ rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
+ rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
+ if (req->reset) {
+ block->cpt_flt_eng_map[vec] = 0x0;
+ block->cpt_rcvrd_eng_map[vec] = 0x0;
+ }
+ spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
+ }
+ return 0;
+}
+
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
- struct cpt_rxc_time_cfg_req req;
+ struct cpt_rxc_time_cfg_req req, prev;
int timeout = 2000;
u64 reg;
@@ -819,7 +950,7 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
req.active_thres = 1;
req.active_limit = 1;
- cpt_rxc_time_cfg(rvu, &req, blkaddr);
+ cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
@@ -845,70 +976,68 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+
+ /* Restore config */
+ cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
}
-#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
-#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
-#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
-#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
+#define INFLIGHT GENMASK_ULL(8, 0)
+#define GRB_CNT GENMASK_ULL(39, 32)
+#define GWB_CNT GENMASK_ULL(47, 40)
+#define XQ_XOR GENMASK_ULL(63, 63)
+#define DQPTR GENMASK_ULL(19, 0)
+#define NQPTR GENMASK_ULL(51, 32)
static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
- int i = 0, hard_lp_ctr = 100000;
- u64 inprog, grp_ptr;
- u16 nq_ptr, dq_ptr;
+ int timeout = 1000000;
+ u64 inprog, inst_ptr;
+ u64 qsize, pending;
+ int i = 0;
/* Disable instructions enqueuing */
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
- /* Disable executions in the LF's queue */
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- inprog &= ~BIT_ULL(16);
+ inprog |= BIT_ULL(16);
rvu_write64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
- /* Wait for CPT queue to become execution-quiescent */
+ qsize = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF;
do {
- inprog = rvu_read64(rvu, blkaddr,
- CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- if (INPROG_GRB_PARTIAL(inprog)) {
- i = 0;
- hard_lp_ctr--;
- } else {
- i++;
- }
-
- grp_ptr = rvu_read64(rvu, blkaddr,
- CPT_AF_BAR2_ALIASX(slot,
- CPT_LF_Q_GRP_PTR));
- nq_ptr = (grp_ptr >> 32) & 0x7FFF;
- dq_ptr = grp_ptr & 0x7FFF;
-
- } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
+ inst_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR));
+ pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
+ FIELD_GET(NQPTR, inst_ptr) -
+ FIELD_GET(DQPTR, inst_ptr);
+ udelay(1);
+ timeout--;
+ } while ((pending != 0) && (timeout != 0));
- if (hard_lp_ctr == 0)
- dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+ if (timeout == 0)
+ dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");
- i = 0;
- hard_lp_ctr = 100000;
+ timeout = 1000000;
+ /* Wait for CPT queue to become execution-quiescent */
do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- if ((INPROG_INFLIGHT(inprog) == 0) &&
- (INPROG_GWB(inprog) < 40) &&
- ((INPROG_GRB(inprog) == 0) ||
- (INPROG_GRB((inprog)) == 40))) {
+ if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
+ (FIELD_GET(GRB_CNT, inprog) == 0)) {
i++;
} else {
i = 0;
- hard_lp_ctr--;
+ timeout--;
}
- } while (hard_lp_ctr && (i < 10));
+ } while ((timeout != 0) && (i < 10));
- if (hard_lp_ctr == 0)
- dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+ if (timeout == 0)
+ dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n");
+ /* Wait for 2 us to flush all queue writes to memory */
+ udelay(2);
}
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
@@ -918,18 +1047,15 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
cpt_rxc_teardown(rvu, blkaddr);
+ mutex_lock(&rvu->alias_lock);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
cpt_lf_disable_iqueue(rvu, blkaddr, slot);
- /* Set group drop to help clear out hardware */
- reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- reg |= BIT_ULL(17);
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
-
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ mutex_unlock(&rvu->alias_lock);
return 0;
}
@@ -940,7 +1066,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
int nix_blkaddr)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
struct cpt_inst_lmtst_req *req;
dma_addr_t res_daddr;
int timeout = 3000;
@@ -1064,7 +1190,7 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
for (i = 0; i < max_ctx_entries; i++) {
cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
@@ -1077,10 +1203,19 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
reg);
}
}
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
unlock:
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
+
+int rvu_cpt_init(struct rvu *rvu)
+{
+ /* Retrieve CPT PF number */
+ rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ spin_lock_init(&rvu->cpt_intr_lock);
+
+ return 0;
+}
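The new drain loop in cpt_lf_disable_iqueue() computes outstanding instructions from the queue pointers: the queue holds qsize chunks of 40 instructions, and the XQ_XOR bit flags that the enqueue pointer has wrapped past the dequeue pointer. A standalone sketch of that arithmetic with worked values (the chunk-of-40 reading is inferred from the code; names are illustrative):

#include <stdio.h>

static long long pending(unsigned int xq_xor, unsigned int qsize,
                         unsigned int nqptr, unsigned int dqptr)
{
        return (long long)xq_xor * qsize * 40 + nqptr - dqptr;
}

int main(void)
{
        /* No wrap: NQ=100, DQ=60 -> 40 instructions outstanding. */
        printf("%lld\n", pending(0, 2, 100, 60));
        /* Wrapped: qsize=2 (80 slots), NQ=10, DQ=60 -> 80+10-60 = 30. */
        printf("%lld\n", pending(1, 2, 10, 60));
        return 0;
}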
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 6b8747ebc08c..26e639e57dae 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2058,6 +2058,13 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int err, restore_tx_en = 0;
u64 cfg;
+ if (!is_rvu_otx2(rvu)) {
+ /* Skip SMQ flush if pkt count is zero */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
+ if (!cfg)
+ return 0;
+ }
+
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -4309,6 +4316,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+ if (!is_rvu_otx2(rvu))
+ rvu_nix_block_cn10k_init(rvu, nix_hw);
+
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
if (err)
@@ -4731,6 +4741,10 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+#define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
+#define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
+#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
+
static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
int blkaddr)
{
@@ -4767,14 +4781,23 @@ static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *r
val);
/* Set CPT credit */
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
- req->cpt_credit);
+ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+ if ((val & 0x3FFFFF) != 0x3FFFFF)
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF - val);
+
+ val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
+ val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
+ val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
} else {
rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
0x0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
- 0x3FFFFF);
+ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+ if ((val & 0x3FFFFF) != 0x3FFFFF)
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF - val);
}
}
@@ -4792,6 +4815,30 @@ int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_inline_ipsec_cfg *rsp)
+
+{
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
+ rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
+ rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
+ rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
+ rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
+
+ val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
+ rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
+ rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
+ rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
+
+ return 0;
+}
+
int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
struct nix_inline_ipsec_lf_cfg *req,
struct msg_rsp *rsp)
@@ -4835,6 +4882,7 @@ int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
return 0;
}
+
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
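The CPT credit rework above stops writing raw magic values and instead packs the credit count, backpressure ID and threshold into a single 64-bit register word, which the new read-back mbox handler then decodes. A minimal sketch of that FIELD_PREP/FIELD_GET round trip, reusing the same GENMASK_ULL field definitions (the helper names here are illustrative, not part of the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define CPT_INST_CREDIT_TH   GENMASK_ULL(53, 32)
#define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
#define CPT_INST_CREDIT_CNT  GENMASK_ULL(21, 0)

/* Pack the three CPT credit fields into one register word. */
static u64 cpt_credit_pack(u32 credit, u16 bpid, u32 th)
{
	return FIELD_PREP(CPT_INST_CREDIT_CNT, credit) |
	       FIELD_PREP(CPT_INST_CREDIT_BPID, bpid) |
	       FIELD_PREP(CPT_INST_CREDIT_TH, th);
}

/* Unpack them again, as the read-back handler does. */
static void cpt_credit_unpack(u64 val, u32 *credit, u16 *bpid, u32 *th)
{
	*credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
	*bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
	*th = FIELD_GET(CPT_INST_CREDIT_TH, val);
}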
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index f69102d20c90..20ebb9c95c73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -200,10 +200,8 @@ void npc_config_secret_key(struct rvu *rvu, int blkaddr)
struct rvu_hwinfo *hw = rvu->hw;
u8 intf;
- if (!hwcap->npc_hash_extract) {
- dev_info(rvu->dev, "HW does not support secret key configuration\n");
+ if (!hwcap->npc_hash_extract)
return;
- }
for (intf = 0; intf < hw->npc_intfs; intf++) {
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
@@ -221,10 +219,8 @@ void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
struct rvu_hwinfo *hw = rvu->hw;
u8 intf;
- if (!hwcap->npc_hash_extract) {
- dev_dbg(rvu->dev, "Field hash extract feature is not supported\n");
+ if (!hwcap->npc_hash_extract)
return;
- }
for (intf = 0; intf < hw->npc_intfs; intf++) {
npc_program_mkex_hash_rx(rvu, blkaddr, intf);
@@ -1853,19 +1849,13 @@ int rvu_npc_exact_init(struct rvu *rvu)
/* Check exact match feature is supported */
npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
- if (!(npc_const3 & BIT_ULL(62))) {
- dev_info(rvu->dev, "%s: No support for exact match support\n",
- __func__);
+ if (!(npc_const3 & BIT_ULL(62)))
return 0;
- }
/* Check if kex profile has enabled EXACT match nibble */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
- if (!(cfg & NPC_EXACT_NIBBLE_HIT)) {
- dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n",
- __func__);
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT))
return 0;
- }
/* Set capability to true */
rvu->hw->cap.npc_exact_match_enabled = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 0e0d536645ac..1729b22580ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -189,6 +189,7 @@
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_VWQE_TIMER (0x00F8)
#define NIX_AF_RX_MCAST_BASE (0x0100)
#define NIX_AF_RX_MCAST_CFG (0x0110)
#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
@@ -426,6 +427,7 @@
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
+#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
@@ -545,6 +547,8 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_SIZE 0x100
+#define CPT_LF_Q_INST_PTR 0x110
#define CPT_LF_Q_GRP_PTR 0x120
#define CPT_LF_CTX_FLUSH 0x510
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index c1ea60bc2630..179433d0a54a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2512,10 +2512,13 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
/* Network stack and XDP shared same rx queues.
* Use separate tx queues for XDP and network stack.
*/
- if (pf->xdp_prog)
+ if (pf->xdp_prog) {
pf->hw.xdp_queues = pf->hw.rx_queues;
- else
+ xdp_features_set_redirect_target(dev, false);
+ } else {
pf->hw.xdp_queues = 0;
+ xdp_features_clear_redirect_target(dev);
+ }
pf->hw.tot_tx_queues += pf->hw.xdp_queues;
@@ -2878,6 +2881,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
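The otx2 hunks above adopt the new XDP feature advertisement API from this series: base features are declared once at probe, and the redirect-target bit is toggled as an XDP program is attached or detached (XDP_TX queues only exist while a program is loaded). A hedged sketch of that pattern; the helper signatures are the netdev core's, while example_* names are placeholders:

#include <linux/netdevice.h>

/* At probe time: advertise what the datapath can always do. */
static void example_declare_xdp_features(struct net_device *dev)
{
	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
}

/* On program (un)install: the device is a valid redirect target
 * only while a program (and thus its XDP TX queues) is present.
 */
static void example_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
	if (prog)
		xdp_features_set_redirect_target(dev, false);
	else
		xdp_features_clear_redirect_target(dev);
}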
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index cf456d62677f..87fff539d39d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -965,7 +965,7 @@ static int pxa168_init_phy(struct net_device *dev)
if (dev->phydev)
return 0;
- phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ phy = mdiobus_scan_c22(pep->smi_bus, pep->phy_addr);
if (IS_ERR(phy))
return PTR_ERR(phy);
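This one-line pxa168 conversion is part of the tree-wide C22/C45 MDIO split mentioned in the pull: the MII_ADDR_C45 flag muxing is gone, and buses expose distinct accessors for each clause instead. A minimal sketch of how a bus now wires up both access methods (callback bodies elided; the example_* names are placeholders):

#include <linux/phy.h>

static int example_read_c22(struct mii_bus *bus, int addr, int reg)
{
	/* issue a plain Clause 22 read frame */
	return 0;
}

static int example_read_c45(struct mii_bus *bus, int addr, int devad, int reg)
{
	/* Clause 45: address cycle for (devad, reg), then the read */
	return 0;
}

static void example_register_ops(struct mii_bus *bus)
{
	bus->read = example_read_c22;
	bus->read_c45 = example_read_c45;
	/* write / write_c45 are wired up the same way. Buses that only
	 * implement C22 leave the _c45 hooks NULL, and callers scan with
	 * mdiobus_scan_c22() as pxa168 does above.
	 */
}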
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e3123723522e..14be6ea51b88 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -51,6 +51,7 @@ static const struct mtk_reg_map mtk_reg_map = {
.delay_irq = 0x0a0c,
.irq_status = 0x0a20,
.irq_mask = 0x0a28,
+ .adma_rx_dbg0 = 0x0a38,
.int_grp = 0x0a50,
},
.qdma = {
@@ -82,6 +83,8 @@ static const struct mtk_reg_map mtk_reg_map = {
[0] = 0x2800,
[1] = 0x2c00,
},
+ .pse_iq_sta = 0x0110,
+ .pse_oq_sta = 0x0118,
};
static const struct mtk_reg_map mt7628_reg_map = {
@@ -112,6 +115,7 @@ static const struct mtk_reg_map mt7986_reg_map = {
.delay_irq = 0x620c,
.irq_status = 0x6220,
.irq_mask = 0x6228,
+ .adma_rx_dbg0 = 0x6238,
.int_grp = 0x6250,
},
.qdma = {
@@ -143,6 +147,8 @@ static const struct mtk_reg_map mt7986_reg_map = {
[0] = 0x4800,
[1] = 0x4c00,
},
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
};
/* strings used by ethtool */
@@ -215,8 +221,8 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
return -ETIMEDOUT;
}
-static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
- u32 write_data)
+static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
+ u32 write_data)
{
int ret;
@@ -224,35 +230,13 @@ static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
if (ret < 0)
return ret;
- if (phy_reg & MII_ADDR_C45) {
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C45 |
- PHY_IAC_CMD_C45_ADDR |
- PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
- PHY_IAC_ADDR(phy_addr) |
- PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
- MTK_PHY_IAC);
-
- ret = mtk_mdio_busy_wait(eth);
- if (ret < 0)
- return ret;
-
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C45 |
- PHY_IAC_CMD_WRITE |
- PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
- PHY_IAC_ADDR(phy_addr) |
- PHY_IAC_DATA(write_data),
- MTK_PHY_IAC);
- } else {
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C22 |
- PHY_IAC_CMD_WRITE |
- PHY_IAC_REG(phy_reg) |
- PHY_IAC_ADDR(phy_addr) |
- PHY_IAC_DATA(write_data),
- MTK_PHY_IAC);
- }
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
@@ -261,7 +245,8 @@ static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
return 0;
}
-static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
+static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
+ u32 devad, u32 phy_reg, u32 write_data)
{
int ret;
@@ -269,33 +254,47 @@ static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
if (ret < 0)
return ret;
- if (phy_reg & MII_ADDR_C45) {
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C45 |
- PHY_IAC_CMD_C45_ADDR |
- PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
- PHY_IAC_ADDR(phy_addr) |
- PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
- MTK_PHY_IAC);
-
- ret = mtk_mdio_busy_wait(eth);
- if (ret < 0)
- return ret;
-
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C45 |
- PHY_IAC_CMD_C45_READ |
- PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
- PHY_IAC_ADDR(phy_addr),
- MTK_PHY_IAC);
- } else {
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C22 |
- PHY_IAC_CMD_C22_READ |
- PHY_IAC_REG(phy_reg) |
- PHY_IAC_ADDR(phy_addr),
- MTK_PHY_IAC);
- }
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(phy_reg),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_C22_READ |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
@@ -304,19 +303,70 @@ static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
-static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
- int phy_reg, u16 val)
+static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
+ u32 devad, u32 phy_reg)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(phy_reg),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_READ |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
+}
+
+static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 val)
{
struct mtk_eth *eth = bus->priv;
- return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
+ return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
}
-static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
+ int devad, int phy_reg, u16 val)
{
struct mtk_eth *eth = bus->priv;
- return _mtk_mdio_read(eth, phy_addr, phy_reg);
+ return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
+}
+
+static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
+}
+
+static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
+ int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
}
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
@@ -760,9 +810,10 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
eth->mii_bus->name = "mdio";
- eth->mii_bus->read = mtk_mdio_read;
- eth->mii_bus->write = mtk_mdio_write;
- eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
+ eth->mii_bus->read = mtk_mdio_read_c22;
+ eth->mii_bus->write = mtk_mdio_write_c22;
+ eth->mii_bus->read_c45 = mtk_mdio_read_c45;
+ eth->mii_bus->write_c45 = mtk_mdio_write_c45;
eth->mii_bus->priv = eth;
eth->mii_bus->parent = eth->dev;
@@ -2988,14 +3039,29 @@ static void mtk_dma_free(struct mtk_eth *eth)
kfree(eth->scratch_head);
}
+static bool mtk_hw_reset_check(struct mtk_eth *eth)
+{
+ u32 val = mtk_r32(eth, MTK_INT_STATUS2);
+
+ return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
+ (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
+ (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
+}
+
static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ if (test_bit(MTK_RESETTING, &eth->state))
+ return;
+
+ if (!mtk_hw_reset_check(eth))
+ return;
+
eth->netdev[mac->id]->stats.tx_errors++;
- netif_err(eth, tx_err, dev,
- "transmit timed out\n");
+ netif_err(eth, tx_err, dev, "transmit timed out\n");
+
schedule_work(&eth->pending_work);
}
@@ -3475,22 +3541,188 @@ static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}
-static int mtk_hw_init(struct mtk_eth *eth)
+static void mtk_hw_reset(struct mtk_eth *eth)
+{
+ u32 val;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+ val = RSTCTRL_PPE0_V2;
+ } else {
+ val = RSTCTRL_PPE0;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+
+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+}
+
+static u32 mtk_hw_reset_read(struct mtk_eth *eth)
+{
+ u32 val;
+
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
+ return val;
+}
+
+static void mtk_hw_warm_reset(struct mtk_eth *eth)
+{
+ u32 rst_mask, val;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
+ RSTCTRL_FE);
+ if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
+ val & RSTCTRL_FE, 1, 1000)) {
+ dev_err(eth->dev, "warm reset failed\n");
+ mtk_hw_reset(eth);
+ return;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
+ else
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ rst_mask |= RSTCTRL_PPE1;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (!(val & rst_mask))
+ dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
+ val, rst_mask);
+
+ rst_mask |= RSTCTRL_FE;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (val & rst_mask)
+ dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
+ val, rst_mask);
+}
+
+static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
+{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
+ bool oq_hang, cdm1_busy, adma_busy;
+ bool wtx_busy, cdm_full, oq_free;
+ u32 wdidx, val, gdm1_fc, gdm2_fc;
+ bool qfsm_hang, qfwd_hang;
+ bool ret = false;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return false;
+
+ /* WDMA sanity checks */
+ wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
+ wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
+ cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
+
+ oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
+
+ if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
+ if (++eth->reset.wdma_hang_count > 2) {
+ eth->reset.wdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* QDMA sanity checks */
+ qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
+ qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
+
+ gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
+ gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
+ gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
+ gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
+ gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
+ gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
+
+ if (qfsm_hang && qfwd_hang &&
+ ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
+ (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
+ if (++eth->reset.qdma_hang_count > 2) {
+ eth->reset.qdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* ADMA sanity checks */
+ oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
+ cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
+ adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
+ !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
+
+ if (oq_hang && cdm1_busy && adma_busy) {
+ if (++eth->reset.adma_hang_count > 2) {
+ eth->reset.adma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ eth->reset.wdma_hang_count = 0;
+ eth->reset.qdma_hang_count = 0;
+ eth->reset.adma_hang_count = 0;
+out:
+ eth->reset.wdidx = wdidx;
+
+ return ret;
+}
+
+static void mtk_hw_reset_monitor_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
+ reset.monitor_work);
+
+ if (test_bit(MTK_RESETTING, &eth->state))
+ goto out;
+
+ /* DMA stuck checks */
+ if (mtk_hw_check_dma_hang(eth))
+ schedule_work(&eth->pending_work);
+
+out:
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
+}
+
+static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
ETHSYS_DMA_AG_MAP_PPE;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
- if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+ if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
return 0;
- pm_runtime_enable(eth->dev);
- pm_runtime_get_sync(eth->dev);
+ if (!reset) {
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);
- ret = mtk_clk_enable(eth);
- if (ret)
- goto err_disable_pm;
+ ret = mtk_clk_enable(eth);
+ if (ret)
+ goto err_disable_pm;
+ }
if (eth->ethsys)
regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
@@ -3514,22 +3746,14 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
- val = RSTCTRL_PPE0_V2;
- } else {
- val = RSTCTRL_PPE0;
- }
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- val |= RSTCTRL_PPE1;
+ msleep(100);
- ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+ if (reset)
+ mtk_hw_warm_reset(eth);
+ else
+ mtk_hw_reset(eth);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
- 0x3ffffff);
-
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3631,8 +3855,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
err_disable_pm:
- pm_runtime_put_sync(eth->dev);
- pm_runtime_disable(eth->dev);
+ if (!reset) {
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+ }
return ret;
}
@@ -3711,52 +3937,86 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
+static void mtk_prepare_for_reset(struct mtk_eth *eth)
+{
+ u32 val;
+ int i;
+
+ /* disable FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
+
+ /* adjust PPE configurations to prepare for reset */
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_prepare_reset(eth->ppe[i]);
+
+ /* disable NETSYS interrupts */
+ mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
+
+ /* force the GMAC links down */
+ for (i = 0; i < 2; i++) {
+ val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
+ mtk_w32(eth, val, MTK_MAC_MCR(i));
+ }
+}
+
static void mtk_pending_work(struct work_struct *work)
{
struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
- int err, i;
unsigned long restart = 0;
+ u32 val;
+ int i;
rtnl_lock();
-
- dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
set_bit(MTK_RESETTING, &eth->state);
+ mtk_prepare_for_reset(eth);
+ mtk_wed_fe_reset();
+ /* Run the reset preliminary configuration again in order to avoid
+ * any possible race during the FE reset, since the reset can run
+ * with the RTNL lock released.
+ */
+ mtk_prepare_for_reset(eth);
+
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i])
+ if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
+
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
- dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
- /* restart underlying hardware such as power, clock, pin mux
- * and the connected phy
- */
- mtk_hw_deinit(eth);
+ usleep_range(15000, 16000);
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
eth->dev->pins->default_state);
- mtk_hw_init(eth);
+ mtk_hw_init(eth, true);
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
continue;
- err = mtk_open(eth->netdev[i]);
- if (err) {
+
+ if (mtk_open(eth->netdev[i])) {
netif_alert(eth, ifup, eth->netdev[i],
- "Driver up/down cycle failed, closing device.\n");
+ "Driver up/down cycle failed\n");
dev_close(eth->netdev[i]);
}
}
- dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+ /* enable FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val &= ~MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
clear_bit(MTK_RESETTING, &eth->state);
+ mtk_wed_fe_reset_complete();
+
rtnl_unlock();
}
@@ -3801,6 +4061,7 @@ static int mtk_cleanup(struct mtk_eth *eth)
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
+ cancel_delayed_work_sync(&eth->reset.monitor_work);
return 0;
}
@@ -4190,6 +4451,12 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
register_netdevice_notifier(&mac->device_notifier);
}
+ if (mtk_page_pool_enabled(eth))
+ eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
return 0;
free_netdev:
@@ -4255,6 +4522,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
+ INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
@@ -4368,7 +4636,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
- err = mtk_hw_init(eth);
+ err = mtk_hw_init(eth, false);
if (err)
goto err_wed_exit;
@@ -4457,6 +4725,8 @@ static int mtk_probe(struct platform_device *pdev)
netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
return 0;
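The mtk_hw_warm_reset() path added above relies on readx_poll_timeout_atomic() to wait for the FE reset bit to latch before sequencing the remaining reset bits. A small sketch of that polling idiom, assuming a regmap-backed status read like the driver's mtk_hw_reset_read(); the register offset is illustrative only:

#include <linux/iopoll.h>
#include <linux/regmap.h>

static u32 example_reset_read(struct regmap *map)
{
	u32 val;

	regmap_read(map, 0x34 /* illustrative RSTCTRL offset */, &val);
	return val;
}

static int example_wait_reset_latched(struct regmap *map, u32 bit)
{
	u32 val;

	/* Poll every 1us, give up after 1000us; safe in atomic context. */
	return readx_poll_timeout_atomic(example_reset_read, map, val,
					 val & bit, 1, 1000);
}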
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 2d9186d32bc0..afc9d52e79bf 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -77,12 +77,24 @@
#define MTK_HW_LRO_REPLACE_DELTA 1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
+/* Frame Engine Global Configuration */
+#define MTK_FE_GLO_CFG 0x00
+#define MTK_FE_LINK_DOWN_P3 BIT(11)
+#define MTK_FE_LINK_DOWN_P4 BIT(12)
+
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
#define RST_GL_PSE BIT(0)
/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2 0x08
+#define MTK_FE_INT_ENABLE 0x0c
+#define MTK_FE_INT_FQ_EMPTY BIT(8)
+#define MTK_FE_INT_TSO_FAIL BIT(12)
+#define MTK_FE_INT_TSO_ILLEGAL BIT(13)
+#define MTK_FE_INT_TSO_ALIGN BIT(14)
+#define MTK_FE_INT_RFIFO_OV BIT(18)
+#define MTK_FE_INT_RFIFO_UF BIT(19)
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
@@ -272,6 +284,8 @@
#define MTK_RX_DONE_INT_V2 BIT(14)
+#define MTK_CDM_TXFIFO_RDY BIT(7)
+
/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT BIT(0)
@@ -562,6 +576,17 @@
#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+#define MTK_FE_CDM1_FSM 0x220
+#define MTK_FE_CDM2_FSM 0x224
+#define MTK_FE_CDM3_FSM 0x238
+#define MTK_FE_CDM4_FSM 0x298
+#define MTK_FE_CDM5_FSM 0x318
+#define MTK_FE_CDM6_FSM 0x328
+#define MTK_FE_GDM1_FSM 0x228
+#define MTK_FE_GDM2_FSM 0x22C
+
+#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -958,6 +983,7 @@ struct mtk_reg_map {
u32 delay_irq; /* delay interrupt */
u32 irq_status; /* interrupt status */
u32 irq_mask; /* interrupt mask */
+ u32 adma_rx_dbg0;
u32 int_grp;
} pdma;
struct {
@@ -986,6 +1012,8 @@ struct mtk_reg_map {
u32 gdma_to_ppe;
u32 ppe_base;
u32 wdma_base[2];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
};
/* struct mtk_eth_data - This is the structure holding all differences
@@ -1028,6 +1056,8 @@ struct mtk_soc_data {
} txrx;
};
+#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
+
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
@@ -1154,6 +1184,14 @@ struct mtk_eth {
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
+
+ struct {
+ struct delayed_work monitor_work;
+ u32 wdidx;
+ u8 wdma_hang_count;
+ u8 qdma_hang_count;
+ u8 adma_hang_count;
+ } reset;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 1ff024f42444..6883eb34cd8b 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -729,6 +729,33 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+{
+ if (!ppe)
+ return -EINVAL;
+
+ /* disable KA */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
+ usleep_range(10000, 11000);
+
+ /* set KA timer to maximum */
+ ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
+
+ /* set KA tick select */
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ usleep_range(10000, 11000);
+
+ /* disable scan mode */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
+ usleep_range(10000, 11000);
+
+ return mtk_ppe_wait_busy(ppe);
+}
+
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version, int index)
{
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index b5e432031340..5e8bc48252b1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -308,6 +308,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
index 59596d823d8b..0fdb983b0a88 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -58,6 +58,12 @@
#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
+#define MTK_PPE_TB_TICK_SEL BIT(24)
+
+#define MTK_PPE_BIND_LMT1 0x230
+#define MTK_PPE_NTU_KEEPALIVE GENMASK(23, 16)
+
+#define MTK_PPE_KEEPALIVE 0x234
enum {
MTK_PPE_SCAN_MODE_DISABLED,
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 7050351250b7..02c03325911f 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1378,9 +1378,6 @@ static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
unsigned int val, data;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mtk_star_mdio_rwok_clear(priv);
val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
@@ -1407,9 +1404,6 @@ static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
struct mtk_star_priv *priv = mii->priv;
unsigned int val;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mtk_star_mdio_rwok_clear(priv);
val = data;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index a6271449617f..95d890870984 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -206,6 +206,48 @@ mtk_wed_wo_reset(struct mtk_wed_device *dev)
iounmap(reg);
}
+void mtk_wed_fe_reset(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+ int err;
+
+ if (!dev || !dev->wlan.reset)
+ continue;
+
+ /* reset callback blocks until WLAN reset is completed */
+ err = dev->wlan.reset(dev);
+ if (err)
+ dev_err(dev->dev, "wlan reset failed: %d\n", err);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_fe_reset_complete(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev || !dev->wlan.reset_complete)
+ continue;
+
+ dev->wlan.reset_complete(dev);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
@@ -745,7 +787,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
ring->desc_size = sizeof(*ring->desc);
ring->size = size;
- memset(ring->desc, 0, size);
return 0;
}
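mtk_wed_fe_reset() / mtk_wed_fe_reset_complete() above fan the frame-engine reset out to WLAN drivers through optional callbacks, iterating the hardware list under a mutex so registration cannot race the reset. A reduced sketch of that handshake shape (types and names hypothetical, modelled on the WED code):

#include <linux/kernel.h>
#include <linux/mutex.h>

struct example_client {
	int (*reset)(struct example_client *c);           /* blocking */
	void (*reset_complete)(struct example_client *c);
};

static DEFINE_MUTEX(example_lock);
static struct example_client *example_clients[2];

static void example_fe_reset(void)
{
	int i;

	mutex_lock(&example_lock);
	for (i = 0; i < ARRAY_SIZE(example_clients); i++) {
		struct example_client *c = example_clients[i];

		/* reset callback blocks until the client is quiesced */
		if (c && c->reset && c->reset(c))
			pr_err("client %d reset failed\n", i);
	}
	mutex_unlock(&example_lock);
}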
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index e012b8a82133..43ab77eaf683 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -128,6 +128,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void mtk_wed_exit(void);
int mtk_wed_flow_add(int index);
void mtk_wed_flow_remove(int index);
+void mtk_wed_fe_reset(void);
+void mtk_wed_fe_reset_complete(void);
#else
static inline void
mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
@@ -147,6 +149,13 @@ static inline void mtk_wed_flow_remove(int index)
{
}
+static inline void mtk_wed_fe_reset(void)
+{
+}
+
+static inline void mtk_wed_fe_reset_complete(void)
+{
+}
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index a0a39643caf7..69fba29055e9 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -138,7 +138,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
int n_buf = 0;
- spin_lock_bh(&q->lock);
while (q->queued < q->n_desc) {
struct mtk_wed_wo_queue_entry *entry;
dma_addr_t addr;
@@ -172,7 +171,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
q->queued++;
n_buf++;
}
- spin_unlock_bh(&q->lock);
return n_buf;
}
@@ -260,7 +258,6 @@ mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
int n_desc, int buf_size, int index,
struct mtk_wed_wo_queue_regs *regs)
{
- spin_lock_init(&q->lock);
q->regs = *regs;
q->n_desc = n_desc;
q->buf_size = buf_size;
@@ -292,7 +289,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
struct page *page;
int i;
- spin_lock_bh(&q->lock);
for (i = 0; i < q->n_desc; i++) {
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
@@ -301,7 +297,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
skb_free_frag(entry->buf);
entry->buf = NULL;
}
- spin_unlock_bh(&q->lock);
if (!q->cache.va)
return;
@@ -316,7 +311,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
struct page *page;
- spin_lock_bh(&q->lock);
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
@@ -325,7 +319,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
skb_free_frag(buf);
}
- spin_unlock_bh(&q->lock);
if (!q->cache.va)
return;
@@ -351,8 +344,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
int ret = 0, index;
u32 ctrl;
- spin_lock_bh(&q->lock);
-
q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
index = (q->head + 1) % q->n_desc;
if (q->tail == index) {
@@ -383,8 +374,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
mtk_wed_wo_queue_kick(wo, q, q->head);
mtk_wed_wo_kickout(wo);
out:
- spin_unlock_bh(&q->lock);
-
dev_kfree_skb(skb);
return ret;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index c8fb85795864..dbcf42ce9173 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -211,7 +211,6 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo_queue_regs regs;
struct page_frag_cache cache;
- spinlock_t lock;
struct mtk_wed_wo_queue_desc *desc;
dma_addr_t desc_dma;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 98b5ffb4d729..9e3b76182088 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -58,9 +58,7 @@ u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
return hi | lo;
}
-void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
- struct skb_shared_hwtstamps *hwts,
- u64 timestamp)
+u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp)
{
unsigned int seq;
u64 nsec;
@@ -70,8 +68,15 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
} while (read_seqretry(&mdev->clock_lock, seq));
+ return ns_to_ktime(nsec);
+}
+
+void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ struct skb_shared_hwtstamps *hwts,
+ u64 timestamp)
+{
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
- hwts->hwtstamp = ns_to_ktime(nsec);
+ hwts->hwtstamp = mlx4_en_get_hwtstamp(mdev, timestamp);
}
/**
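The refactor above splits the seqlock-protected timecounter conversion out of mlx4_en_fill_hwtstamps() so the new XDP metadata path can reuse it without touching skb_shared_hwtstamps. The read-side idiom, sketched on its own with the kernel's timecounter/seqlock APIs (the struct is illustrative):

#include <linux/ktime.h>
#include <linux/seqlock.h>
#include <linux/timecounter.h>

struct example_clock {
	seqlock_t lock;
	struct timecounter tc;
};

/* Lockless read: retry if a writer updated the timecounter meanwhile. */
static ktime_t example_cycles_to_ktime(struct example_clock *clk, u64 cycles)
{
	unsigned int seq;
	u64 nsec;

	do {
		seq = read_seqbegin(&clk->lock);
		nsec = timecounter_cyc2time(&clk->tc, cycles);
	} while (read_seqretry(&clk->lock, seq));

	return ns_to_ktime(nsec);
}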
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8800d3f1f55c..e11bc0ac880e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2889,6 +2889,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_bpf = mlx4_xdp,
};
+static const struct xdp_metadata_ops mlx4_xdp_metadata_ops = {
+ .xmo_rx_timestamp = mlx4_en_xdp_rx_timestamp,
+ .xmo_rx_hash = mlx4_en_xdp_rx_hash,
+};
+
struct mlx4_en_bond {
struct work_struct work;
struct mlx4_en_priv *priv;
@@ -3310,6 +3315,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->netdev_ops = &mlx4_netdev_ops_master;
else
dev->netdev_ops = &mlx4_netdev_ops;
+ dev->xdp_metadata_ops = &mlx4_xdp_metadata_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -3410,6 +3416,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8f762fc170b3..0869d4fff17b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -661,9 +661,41 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif
+struct mlx4_en_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_dev *mdev;
+ struct mlx4_en_rx_ring *ring;
+ struct net_device *dev;
+};
+
+int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
+ return -EOPNOTSUPP;
+
+ *timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
+ mlx4_en_get_cqe_ts(_ctx->cqe));
+ return 0;
+}
+
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
+ return -EOPNOTSUPP;
+
+ *hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
+ return 0;
+}
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_xdp_buff mxbuf = {};
int factor = priv->cqe_factor;
struct mlx4_en_rx_ring *ring;
struct bpf_prog *xdp_prog;
@@ -671,7 +703,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
bool doorbell_pending;
bool xdp_redir_flush;
struct mlx4_cqe *cqe;
- struct xdp_buff xdp;
int polled = 0;
int index;
@@ -681,7 +712,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring = priv->rx_ring[cq_ring];
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
- xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
+ xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
xdp_redir_flush = false;
@@ -776,24 +807,28 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp_prepare_buff(&xdp, va - frags[0].page_offset,
- frags[0].page_offset, length, false);
- orig_data = xdp.data;
-
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
- length = xdp.data_end - xdp.data;
- if (xdp.data != orig_data) {
- frags[0].page_offset = xdp.data -
- xdp.data_hard_start;
- va = xdp.data;
+ xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
+ frags[0].page_offset, length, true);
+ orig_data = mxbuf.xdp.data;
+ mxbuf.cqe = cqe;
+ mxbuf.mdev = priv->mdev;
+ mxbuf.ring = ring;
+ mxbuf.dev = dev;
+
+ act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);
+
+ length = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ if (mxbuf.xdp.data != orig_data) {
+ frags[0].page_offset = mxbuf.xdp.data -
+ mxbuf.xdp.data_hard_start;
+ va = mxbuf.xdp.data;
}
switch (act) {
case XDP_PASS:
break;
case XDP_REDIRECT:
- if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
+ if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
ring->xdp_redirect++;
xdp_redir_flush = true;
frags[0].page = NULL;
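The mlx4 XDP hints support above works by embedding the xdp_buff as the first member of a driver context struct, so the xdp_metadata_ops implementations can cast the opaque xdp_md pointer back to per-CQE driver state. A reduced sketch of that container pattern (field names illustrative; the xmo_rx_hash signature matches this series):

#include <linux/errno.h>
#include <net/xdp.h>

struct example_xdp_buff {
	struct xdp_buff xdp; /* must stay first: xdp_md aliases it */
	u32 rx_hash;
	bool hash_valid;
};

/* .xmo_rx_hash implementation: recover driver state from ctx. */
static int example_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
	const struct example_xdp_buff *_ctx = (const void *)ctx;

	if (!_ctx->hash_valid)
		return -EOPNOTSUPP;

	*hash = _ctx->rx_hash;
	return 0;
}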
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c5758637b7be..2f79378fbf6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -699,32 +699,32 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
} else {
inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
- memset(((void *)(inl + 1)) + skb->len, 0,
+ memset(inl->data + skb->len, 0,
MIN_PKT_LEN - skb->len);
}
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen, fragptr,
+ memcpy(inl->data + hlen, fragptr,
skb_frag_size(&shinfo->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
if (hlen <= spc) {
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (hlen < spc) {
- memcpy(((void *)(inl + 1)) + hlen,
+ memcpy(inl->data + hlen,
fragptr, spc - hlen);
fragptr += spc - hlen;
}
- inl = (void *) (inl + 1) + spc;
- memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+ inl = (void *)inl->data + spc;
+ memcpy(inl->data, fragptr, skb->len - spc);
} else {
- skb_copy_from_linear_data(skb, inl + 1, spc);
- inl = (void *) (inl + 1) + spc;
- skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+ skb_copy_from_linear_data(skb, inl->data, spc);
+ inl = (void *)inl->data + spc;
+ skb_copy_from_linear_data_offset(skb, spc, inl->data,
hlen - spc);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen - spc,
+ memcpy(inl->data + hlen - spc,
fragptr,
skb_frag_size(&shinfo->frags[0]));
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3ae246391549..277738c50c56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -265,29 +265,29 @@ static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
value.vu32 = 1UL << log_num_mac;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = enable_64b_cqe_eqe;
- devlink_param_driverinit_value_set(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
value.vbool = enable_4k_uar;
- devlink_param_driverinit_value_set(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- value);
+ devl_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
value.vbool = false;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
}
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
@@ -3910,37 +3910,37 @@ static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
union devlink_param_value saved_value;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ &saved_value);
if (!err && mlx4_internal_err_reset != saved_value.vbool) {
mlx4_internal_err_reset = saved_value.vbool;
/* Notify on value changed on runtime configuration mode */
- devlink_param_value_changed(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
+ devl_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
}
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
if (!err)
log_num_mac = order_base_2(saved_value.vu32);
- err = devlink_param_driverinit_value_get(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ &saved_value);
if (!err)
enable_64b_cqe_eqe = saved_value.vbool;
- err = devlink_param_driverinit_value_get(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ &saved_value);
if (!err)
enable_4k_uar = saved_value.vbool;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ &saved_value);
if (!err && crdump->snapshot_enable != saved_value.vbool) {
crdump->snapshot_enable = saved_value.vbool;
- devlink_param_value_changed(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
+ devl_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
}
}
@@ -4021,8 +4021,8 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
- ret = devlink_params_register(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ ret = devl_params_register(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
if (ret)
goto err_devlink_unregister;
mlx4_devlink_set_params_init_values(devlink);
@@ -4031,14 +4031,13 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_params_unregister;
pci_save_state(pdev);
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devl_unlock(devlink);
devlink_register(devlink);
return 0;
err_params_unregister:
- devlink_params_unregister(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ devl_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
@@ -4181,8 +4180,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
mlx4_pci_disable_device(dev);
- devlink_params_unregister(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ devl_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
devl_unlock(devlink);
devlink_free(devlink);
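The devlink_param_* to devl_param_* conversion above switches mlx4 to the variants that assume the devlink instance lock is already held, which it is across mlx4_init_one(). A hedged sketch of the locked registration flow, assuming the devl_params_register() API introduced alongside this series (error handling trimmed):

#include <net/devlink.h>

/* Register params while holding the instance lock ourselves. */
static int example_register_params(struct devlink *devlink,
				   const struct devlink_param *params,
				   size_t n)
{
	int err;

	devl_lock(devlink);
	err = devl_params_register(devlink, params, n);
	devl_unlock(devlink);

	return err;
}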
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3d4226ddba5e..544e09b97483 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -796,10 +796,15 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
int mlx4_en_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
+struct xdp_md;
+int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp);
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash);
+
/*
* Functions for time stamping
*/
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
+u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp);
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 26685fd0fdaa..bb1d7b039a7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -85,7 +85,7 @@ config MLX5_BRIDGE
config MLX5_CLS_ACT
bool "MLX5 TC classifier action support"
- depends on MLX5_ESWITCH && NET_CLS_ACT
+ depends on MLX5_ESWITCH && NET_CLS_ACT && NET_TC_SKB_EXT
default y
help
mlx5 ConnectX offloads support for TC classifier action (NET_CLS_ACT),
@@ -100,7 +100,7 @@ config MLX5_CLS_ACT
config MLX5_TC_CT
bool "MLX5 TC connection tracking offload support"
- depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT
default y
help
Say Y here if you want to support offloading connection tracking rules
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index cd4a1ab0ea78..8d4e25cc54ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -47,7 +47,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
- en/tc/post_meter.o
+ en/tc/post_meter.o en/tc/act_stats.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
@@ -97,7 +97,7 @@ mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
- en_accel/ipsec_offload.o
+ en_accel/ipsec_offload.o lib/ipsec_fs_roce.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c837103a9ee3..b00e33ed05e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -47,6 +47,25 @@
#define CREATE_TRACE_POINTS
#include "diag/cmd_tracepoint.h"
+struct mlx5_ifc_mbox_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
enum {
CMD_IF_REV = 5,
};
@@ -70,6 +89,27 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
+static u16 in_to_opcode(void *in)
+{
+ return MLX5_GET(mbox_in, in, opcode);
+}
+
+/* Returns true for opcodes that might be triggered very frequently and throttle
+ * the command interface. Limit their use of command slots.
+ */
+static bool mlx5_cmd_is_throttle_opcode(u16 op)
+{
+ switch (op) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_SYNC_CRYPTO:
+ return true;
+ }
+ return false;
+}
+
static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
@@ -91,6 +131,7 @@ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
ent->context = context;
ent->cmd = cmd;
ent->page_queue = page_queue;
+ ent->op = in_to_opcode(in->first.data);
refcount_set(&ent->refcnt, 1);
return ent;
@@ -483,6 +524,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
case MLX5_CMD_OP_SAVE_VHCA_STATE:
case MLX5_CMD_OP_LOAD_VHCA_STATE:
+ case MLX5_CMD_OP_SYNC_CRYPTO:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -ENOLINK;
@@ -685,6 +727,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
default: return "unknown command opcode";
}
}
@@ -752,25 +795,6 @@ static int cmd_status_to_err(u8 status)
}
}
-struct mlx5_ifc_mbox_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_mbox_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
-
- u8 reserved_at_40[0x40];
-};
-
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
@@ -788,11 +812,12 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
u16 opcode, op_mod;
u16 uid;
- opcode = MLX5_GET(mbox_in, in, opcode);
+ opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
- if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
+ if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
+ opcode != MLX5_CMD_OP_CREATE_UCTX)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
}
@@ -800,7 +825,7 @@ int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
{
/* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
if (err == -ENXIO) {
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u16 opcode = in_to_opcode(in);
u32 syndrome;
u8 status;
@@ -829,9 +854,9 @@ static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
{
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
- u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
struct mlx5_cmd_mailbox *next = msg->next;
int n = mlx5_calc_cmd_blocks(msg);
+ u16 op = ent->op;
int data_only;
u32 offset = 0;
int dump_len;
@@ -883,11 +908,6 @@ static void dump_command(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- return MLX5_GET(mbox_in, in->first.data, opcode);
-}
-
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
static void cb_timeout_handler(struct work_struct *work)
@@ -905,13 +925,13 @@ static void cb_timeout_handler(struct work_struct *work)
/* Maybe got handled by eq recover ? */
if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
goto out; /* phew, already handled */
}
ent->ret = -ETIMEDOUT;
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
- ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ ent->idx, mlx5_command_str(ent->op), ent->op);
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
out:
@@ -985,7 +1005,6 @@ static void cmd_work_handler(struct work_struct *work)
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
- ent->op = be32_to_cpu(lay->in[0]) >> 16;
if (ent->in->next)
lay->in_ptr = cpu_to_be64(ent->in->next->dma);
lay->inlen = cpu_to_be32(ent->in->len);
@@ -1098,12 +1117,12 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
*/
if (wait_for_completion_timeout(&ent->done, timeout)) {
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
return;
}
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
ent->ret = -ETIMEDOUT;
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
@@ -1130,12 +1149,10 @@ out_err:
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
- mlx5_command_str(msg_to_opcode(ent->in)),
- msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
- mlx5_command_str(msg_to_opcode(ent->in)),
- msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1169,7 +1186,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
u8 status = 0;
int err = 0;
s64 ds;
- u16 op;
if (callback && page_queue)
return -EINVAL;
@@ -1209,9 +1225,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
ds = ent->ts2 - ent->ts1;
- op = MLX5_GET(mbox_in, in->first.data, opcode);
- if (op < MLX5_CMD_OP_MAX) {
- stats = &cmd->stats[op];
+ if (ent->op < MLX5_CMD_OP_MAX) {
+ stats = &cmd->stats[ent->op];
spin_lock_irq(&stats->lock);
stats->sum += ds;
++stats->n;
@@ -1219,7 +1234,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
}
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
+ mlx5_command_str(ent->op), ds);
out_free:
status = ent->status;
@@ -1816,7 +1831,7 @@ cache_miss:
static int is_manage_pages(void *in)
{
- return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+ return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
/* Notes:
@@ -1827,8 +1842,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size, mlx5_cmd_cbk_t callback, void *context,
bool force_polling)
{
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
struct mlx5_cmd_msg *inb, *outb;
+ u16 opcode = in_to_opcode(in);
+ bool throttle_op;
int pages_queue;
gfp_t gfp;
u8 token;
@@ -1837,13 +1853,21 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
return -ENXIO;
+ throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
+ if (throttle_op) {
+ /* atomic context may not sleep */
+ if (callback)
+ return -EINVAL;
+ down(&dev->cmd.throttle_sem);
+ }
+
pages_queue = is_manage_pages(in);
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
inb = alloc_msg(dev, in_size, gfp);
if (IS_ERR(inb)) {
err = PTR_ERR(inb);
- return err;
+ goto out_up;
}
token = alloc_token(&dev->cmd);
@@ -1877,6 +1901,9 @@ out_out:
mlx5_free_cmd_msg(dev, outb);
out_in:
free_msg(dev, inb);
+out_up:
+ if (throttle_op)
+ up(&dev->cmd.throttle_sem);
return err;
}
@@ -1950,8 +1977,8 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
+ u16 opcode = in_to_opcode(in);
return cmd_status_err(dev, err, opcode, op_mod, out);
}
@@ -1996,8 +2023,8 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size)
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
+ u16 opcode = in_to_opcode(in);
err = cmd_status_err(dev, err, opcode, op_mod, out);
return mlx5_cmd_check(dev, err, in, out);
@@ -2049,7 +2076,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
- work->opcode = MLX5_GET(mbox_in, in, opcode);
+ work->opcode = in_to_opcode(in);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
@@ -2220,6 +2247,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
sema_init(&cmd->sem, cmd->max_reg_cmds);
sema_init(&cmd->pages_sem, 1);
+ sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2));
cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
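
The throttle semaphore caps heavyweight commands at half the command queue (DIV_ROUND_UP(max_reg_cmds, 2)), and cmd_exec() rejects throttled opcodes from callback (atomic) context because down() may sleep. A minimal userspace sketch of the same pattern, with POSIX semaphores standing in for the kernel's struct semaphore and hypothetical is_throttle_opcode()/do_exec() helpers:

    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_REG_CMDS 32

    static sem_t throttle_sem;

    /* Hypothetical stand-ins for mlx5_cmd_is_throttle_opcode() and the
     * real command execution path. */
    static bool is_throttle_opcode(int opcode) { return opcode == 0x1; }
    static int do_exec(int opcode) { (void)opcode; return 0; }

    static int exec_cmd(int opcode, bool atomic)
    {
        bool throttled = is_throttle_opcode(opcode);
        int err;

        if (throttled) {
            if (atomic)              /* atomic context may not sleep */
                return -1;
            sem_wait(&throttle_sem); /* blocks once the cap is reached */
        }
        err = do_exec(opcode);
        if (throttled)
            sem_post(&throttle_sem);
        return err;
    }

    int main(void)
    {
        /* mirrors sema_init(&cmd->throttle_sem, DIV_ROUND_UP(max_reg_cmds, 2)) */
        sem_init(&throttle_sem, 0, (MAX_REG_CMDS + 1) / 2);
        printf("err=%d\n", exec_cmd(0x1, false));
        sem_destroy(&throttle_sem);
        return 0;
    }
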
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 0571e40c6ee5..445fe30c3d0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -59,6 +59,9 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev)
if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
return false;
+ if (mlx5_core_is_management_pf(dev))
+ return false;
+
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return false;
@@ -111,9 +114,9 @@ static bool is_eth_enabled(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
- &val);
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ &val);
return err ? false : val.vbool;
}
@@ -144,9 +147,9 @@ static bool is_vnet_enabled(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
- &val);
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ &val);
return err ? false : val.vbool;
}
@@ -198,6 +201,9 @@ bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return false;
+ if (mlx5_core_is_management_pf(dev))
+ return false;
+
if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
return false;
@@ -215,9 +221,9 @@ static bool is_ib_enabled(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
- &val);
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &val);
return err ? false : val.vbool;
}
@@ -343,7 +349,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
- priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -372,10 +377,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
/* Note that it is not the PCI driver that
* mlx5_core_dev is connected to, but the auxiliary driver.
- *
- * Here we can race of module unload with devlink
- * reload, but we don't need to take extra lock because
- * we are holding global mlx5_intf_mutex.
*/
if (!adev->dev.driver)
continue;
@@ -391,12 +392,11 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
- priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
mutex_unlock(&mlx5_intf_mutex);
return ret;
}
-void mlx5_detach_device(struct mlx5_core_dev *dev)
+void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
@@ -406,7 +406,6 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
- priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -426,7 +425,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
adrv = to_auxiliary_drv(adev->dev.driver);
- if (adrv->suspend) {
+ if (adrv->suspend && suspend) {
adrv->suspend(adev, pm);
continue;
}
@@ -435,7 +434,6 @@ skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
- priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -534,22 +532,16 @@ del_adev:
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- int err = 0;
lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
- priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
- goto out;
-
- err = add_drivers(dev);
+ return 0;
-out:
- priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
- return err;
+ return add_drivers(dev);
}
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 5bd83c0275f8..c5d2fdcabd56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -7,6 +7,7 @@
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
+#include "lag/lag.h"
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
@@ -104,7 +105,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, true);
err = mlx5_health_wait_pci_up(dev);
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
@@ -156,13 +157,18 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (mlx5_core_is_mp_slave(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi-port slave");
+ return -EOPNOTSUPP;
+ }
+
if (pci_num_vf(pdev)) {
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
}
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, false);
break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
@@ -263,9 +269,10 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap_event_ctx trap_event_ctx;
enum devlink_trap_action action_orig;
struct mlx5_devlink_trap *dl_trap;
- int err = 0;
+ int err;
if (is_mdev_switchdev_mode(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
@@ -275,26 +282,25 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP)
+ return -EOPNOTSUPP;
if (action == dl_trap->trap.action)
- goto out;
+ return 0;
action_orig = dl_trap->trap.action;
dl_trap->trap.action = action;
+ trap_event_ctx.trap = &dl_trap->trap;
+ trap_event_ctx.err = 0;
err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
- &dl_trap->trap);
- if (err)
+ &trap_event_ctx);
+ if (err == NOTIFY_BAD)
dl_trap->trap.action = action_orig;
-out:
- return err;
+
+ return trap_event_ctx.err;
}
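
The rework above stops overloading the notifier chain's return value: a handler reports NOTIFY_BAD to trigger the rollback and stashes the precise errno in the context struct. A sketch of that pattern with hypothetical names, using the kernel's blocking notifier API from linux/notifier.h:

    #include <linux/notifier.h>
    #include <linux/errno.h>

    struct trap_event_ctx {
        int err;    /* set by a handler, read back by the caller */
    };

    static BLOCKING_NOTIFIER_HEAD(trap_chain);

    static int trap_handler(struct notifier_block *nb, unsigned long event,
                            void *data)
    {
        struct trap_event_ctx *ctx = data;

        if (event != 0) {            /* hypothetical: only event 0 supported */
            ctx->err = -EOPNOTSUPP;
            return NOTIFY_BAD;       /* tells the caller to roll back */
        }
        return NOTIFY_OK;
    }

    static int fire_trap_event(unsigned long event)
    {
        struct trap_event_ctx ctx = { .err = 0 };

        if (blocking_notifier_call_chain(&trap_chain, event, &ctx) == NOTIFY_BAD)
            return ctx.err;          /* rollback done, report handler's errno */
        return 0;
    }
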
static const struct devlink_ops mlx5_devlink_ops = {
@@ -396,70 +402,6 @@ void mlx5_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
-static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- char *value = val.vstr;
- int err = 0;
-
- if (!strcmp(value, "dmfs")) {
- return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
-
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
-
- if (!smfs_cap) {
- err = -EOPNOTSUPP;
- NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported by current device");
- }
-
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
- NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
- }
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int mlx5_devlink_fs_mode_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- enum mlx5_flow_steering_mode mode;
-
- if (!strcmp(ctx->val.vstr, "smfs"))
- mode = MLX5_FLOW_STEERING_MODE_SMFS;
- else
- mode = MLX5_FLOW_STEERING_MODE_DMFS;
- dev->priv.steering->mode = mode;
-
- return 0;
-}
-
-static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
- strcpy(ctx->val.vstr, "smfs");
- else
- strcpy(ctx->val.vstr, "dmfs");
- return 0;
-}
-
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -496,68 +438,54 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
return 0;
}
-static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_ESWITCH_MANAGER(dev))
return -EOPNOTSUPP;
- return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool);
+ if (ctx->val.vbool)
+ return mlx5_lag_mpesw_enable(dev);
+
+ mlx5_lag_mpesw_disable(dev);
+ return 0;
}
-static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_ESWITCH_MANAGER(dev))
return -EOPNOTSUPP;
- ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+ ctx->val.vbool = mlx5_lag_is_mpesw(dev);
return 0;
}
-static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
+static int mlx5_devlink_esw_multiport_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- u8 esw_mode;
if (!MLX5_ESWITCH_MANAGER(dev)) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
return -EOPNOTSUPP;
}
- esw_mode = mlx5_eswitch_mode(dev);
- if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+
+ if (mlx5_eswitch_mode(dev) != MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
- "E-Switch must either disabled or non switchdev mode");
+ "E-Switch must be in switchdev mode");
return -EBUSY;
}
- return 0;
-}
-
-#endif
-
-static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
return 0;
}
-static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
- return 0;
-}
+#endif
static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
@@ -567,11 +495,6 @@ static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
}
static const struct devlink_param mlx5_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
- "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
- mlx5_devlink_fs_mode_validate),
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
#ifdef CONFIG_MLX5_ESWITCH
@@ -580,16 +503,13 @@ static const struct devlink_param mlx5_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL,
mlx5_devlink_large_group_num_validate),
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
- "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
+ "esw_multiport", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_esw_port_metadata_get,
- mlx5_devlink_esw_port_metadata_set,
- mlx5_devlink_esw_port_metadata_validate),
+ mlx5_devlink_esw_multiport_get,
+ mlx5_devlink_esw_multiport_set,
+ mlx5_devlink_esw_multiport_validate),
#endif
- DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_enable_remote_dev_reset_get,
- mlx5_devlink_enable_remote_dev_reset_set, NULL),
DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_eq_depth_validate),
DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
@@ -602,33 +522,34 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
union devlink_param_value value;
value.vbool = MLX5_CAP_GEN(dev, roce);
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
#ifdef CONFIG_MLX5_ESWITCH
value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
- value);
+ devl_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ value);
#endif
value.vu32 = MLX5_COMP_EQ_SIZE;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ value);
value.vu32 = MLX5_NUM_ASYNC_EQE;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ value);
}
-static const struct devlink_param enable_eth_param =
+static const struct devlink_param mlx5_devlink_eth_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, NULL);
+ NULL, NULL, NULL),
+};
-static int mlx5_devlink_eth_param_register(struct devlink *devlink)
+static int mlx5_devlink_eth_params_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
@@ -637,25 +558,27 @@ static int mlx5_devlink_eth_param_register(struct devlink *devlink)
if (!mlx5_eth_supported(dev))
return 0;
- err = devlink_param_register(devlink, &enable_eth_param);
+ err = devl_params_register(devlink, mlx5_devlink_eth_params,
+ ARRAY_SIZE(mlx5_devlink_eth_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ value);
return 0;
}
-static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_eth_params_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!mlx5_eth_supported(dev))
return;
- devlink_param_unregister(devlink, &enable_eth_param);
+ devl_params_unregister(devlink, mlx5_devlink_eth_params,
+ ARRAY_SIZE(mlx5_devlink_eth_params));
}
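
Every conversion in this file follows the same shape: a lone struct devlink_param becomes a one-element array so it can go through devl_params_register()/devl_params_unregister(), which (per the devl_ prefix convention) expect the devlink instance lock to be held. A condensed sketch, assuming a caller outside devlink's own locked callbacks:

    static const struct devlink_param my_params[] = {
        DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                              NULL, NULL, NULL),
    };

    static int my_params_register(struct devlink *devlink)
    {
        int err;

        devl_lock(devlink);          /* devl_* APIs assume this lock is held */
        err = devl_params_register(devlink, my_params, ARRAY_SIZE(my_params));
        devl_unlock(devlink);
        return err;
    }
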
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
@@ -670,11 +593,12 @@ static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
return 0;
}
-static const struct devlink_param enable_rdma_param =
+static const struct devlink_param mlx5_devlink_rdma_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, mlx5_devlink_enable_rdma_validate);
+ NULL, NULL, mlx5_devlink_enable_rdma_validate),
+};
-static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
+static int mlx5_devlink_rdma_params_register(struct devlink *devlink)
{
union devlink_param_value value;
int err;
@@ -682,30 +606,33 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return 0;
- err = devlink_param_register(devlink, &enable_rdma_param);
+ err = devl_params_register(devlink, mlx5_devlink_rdma_params,
+ ARRAY_SIZE(mlx5_devlink_rdma_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
return 0;
}
-static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_rdma_params_unregister(struct devlink *devlink)
{
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
- devlink_param_unregister(devlink, &enable_rdma_param);
+ devl_params_unregister(devlink, mlx5_devlink_rdma_params,
+ ARRAY_SIZE(mlx5_devlink_rdma_params));
}
-static const struct devlink_param enable_vnet_param =
+static const struct devlink_param mlx5_devlink_vnet_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, NULL);
+ NULL, NULL, NULL),
+};
-static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
+static int mlx5_devlink_vnet_params_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
@@ -714,56 +641,58 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
if (!mlx5_vnet_supported(dev))
return 0;
- err = devlink_param_register(devlink, &enable_vnet_param);
+ err = devl_params_register(devlink, mlx5_devlink_vnet_params,
+ ARRAY_SIZE(mlx5_devlink_vnet_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ value);
return 0;
}
-static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_vnet_params_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!mlx5_vnet_supported(dev))
return;
- devlink_param_unregister(devlink, &enable_vnet_param);
+ devl_params_unregister(devlink, mlx5_devlink_vnet_params,
+ ARRAY_SIZE(mlx5_devlink_vnet_params));
}
static int mlx5_devlink_auxdev_params_register(struct devlink *devlink)
{
int err;
- err = mlx5_devlink_eth_param_register(devlink);
+ err = mlx5_devlink_eth_params_register(devlink);
if (err)
return err;
- err = mlx5_devlink_rdma_param_register(devlink);
+ err = mlx5_devlink_rdma_params_register(devlink);
if (err)
goto rdma_err;
- err = mlx5_devlink_vnet_param_register(devlink);
+ err = mlx5_devlink_vnet_params_register(devlink);
if (err)
goto vnet_err;
return 0;
vnet_err:
- mlx5_devlink_rdma_param_unregister(devlink);
+ mlx5_devlink_rdma_params_unregister(devlink);
rdma_err:
- mlx5_devlink_eth_param_unregister(devlink);
+ mlx5_devlink_eth_params_unregister(devlink);
return err;
}
static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
{
- mlx5_devlink_vnet_param_unregister(devlink);
- mlx5_devlink_rdma_param_unregister(devlink);
- mlx5_devlink_eth_param_unregister(devlink);
+ mlx5_devlink_vnet_params_unregister(devlink);
+ mlx5_devlink_rdma_params_unregister(devlink);
+ mlx5_devlink_eth_params_unregister(devlink);
}
static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
@@ -791,11 +720,12 @@ static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
return 0;
}
-static const struct devlink_param max_uc_list_param =
+static const struct devlink_param mlx5_devlink_max_uc_list_params[] = {
DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, mlx5_devlink_max_uc_list_validate);
+ NULL, NULL, mlx5_devlink_max_uc_list_validate),
+};
-static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
+static int mlx5_devlink_max_uc_list_params_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
@@ -804,26 +734,28 @@ static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
return 0;
- err = devlink_param_register(devlink, &max_uc_list_param);
+ err = devl_params_register(devlink, mlx5_devlink_max_uc_list_params,
+ ARRAY_SIZE(mlx5_devlink_max_uc_list_params));
if (err)
return err;
value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
return 0;
}
static void
-mlx5_devlink_max_uc_list_param_unregister(struct devlink *devlink)
+mlx5_devlink_max_uc_list_params_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
return;
- devlink_param_unregister(devlink, &max_uc_list_param);
+ devl_params_unregister(devlink, mlx5_devlink_max_uc_list_params,
+ ARRAY_SIZE(mlx5_devlink_max_uc_list_params));
}
#define MLX5_TRAP_DROP(_id, _group_id) \
@@ -869,13 +801,12 @@ void mlx5_devlink_traps_unregister(struct devlink *devlink)
ARRAY_SIZE(mlx5_trap_groups_arr));
}
-int mlx5_devlink_register(struct devlink *devlink)
+int mlx5_devlink_params_register(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
- err = devlink_params_register(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ err = devl_params_register(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
if (err)
return err;
@@ -885,27 +816,24 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto auxdev_reg_err;
- err = mlx5_devlink_max_uc_list_param_register(devlink);
+ err = mlx5_devlink_max_uc_list_params_register(devlink);
if (err)
goto max_uc_list_err;
- if (!mlx5_core_is_mp_slave(dev))
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
-
return 0;
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
- devlink_params_unregister(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ devl_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
return err;
}
-void mlx5_devlink_unregister(struct devlink *devlink)
+void mlx5_devlink_params_unregister(struct devlink *devlink)
{
- mlx5_devlink_max_uc_list_param_unregister(devlink);
+ mlx5_devlink_max_uc_list_params_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
- devlink_params_unregister(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ devl_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index fd033df24856..212b12424146 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -11,6 +11,7 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
};
struct mlx5_trap_ctx {
@@ -24,6 +25,11 @@ struct mlx5_devlink_trap {
struct list_head list;
};
+struct mlx5_devlink_trap_event_ctx {
+ struct mlx5_trap_ctx *trap;
+ int err;
+};
+
struct mlx5_core_dev;
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
struct devlink_port *dl_port);
@@ -35,7 +41,7 @@ void mlx5_devlink_traps_unregister(struct devlink *devlink);
struct devlink *mlx5_devlink_alloc(struct device *dev);
void mlx5_devlink_free(struct devlink *devlink);
-int mlx5_devlink_register(struct devlink *devlink);
-void mlx5_devlink_unregister(struct devlink *devlink);
+int mlx5_devlink_params_register(struct devlink *devlink);
+void mlx5_devlink_params_unregister(struct devlink *devlink);
#endif /* __MLX5_DEVLINK_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 2732128e7a6e..6d73127b7217 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -275,6 +275,10 @@ const char *parse_fs_dst(struct trace_seq *p,
fs_dest_range_field_to_str(dst->range.field),
dst->range.min, dst->range.max);
break;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ trace_seq_printf(p, "flow_table_type=%u id:%u\n", dst->ft->type,
+ dst->ft->id);
+ break;
case MLX5_FLOW_DESTINATION_TYPE_NONE:
trace_seq_printf(p, "none\n");
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 5b05b884b5fb..f40497823e65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -234,6 +234,8 @@ static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer)
int i;
for (i = 0; i < num_string_db; i++) {
+ if (!string_db_size_out[i])
+ continue;
tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL);
if (!tracer->str_db.buffer[i])
goto free_strings_db;
@@ -279,6 +281,8 @@ static void mlx5_tracer_read_strings_db(struct work_struct *work)
}
for (i = 0; i < num_string_db; i++) {
+ if (!tracer->str_db.size_out[i])
+ continue;
offset = 0;
MLX5_SET(mtrc_stdb, in, string_db_index, i);
num_of_reads = tracer->str_db.size_out[i] /
@@ -385,6 +389,8 @@ static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer
str_ptr = tracer_event->string_event.string_param;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ if (!tracer->str_db.size_out[i])
+ continue;
if (str_ptr > tracer->str_db.base_address_out[i] &&
str_ptr < tracer->str_db.base_address_out[i] +
tracer->str_db.size_out[i]) {
@@ -460,6 +466,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id);
tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost);
+ tracer_event->out = trace;
switch (tracer_event->event_id) {
case TRACER_EVENT_TYPE_TIMESTAMP:
@@ -582,6 +589,26 @@ void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
mlx5_tracer_clean_message(str_frmt);
}
+static int mlx5_tracer_handle_raw_string(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+
+ cur_string = mlx5_tracer_message_insert(tracer, tracer_event);
+ if (!cur_string)
+ return -1;
+
+ cur_string->event_id = tracer_event->event_id;
+ cur_string->timestamp = tracer_event->string_event.timestamp;
+ cur_string->lost = tracer_event->lost_event;
+ cur_string->string = "0x%08x%08x";
+ cur_string->num_of_params = 2;
+ cur_string->params[0] = upper_32_bits(*tracer_event->out);
+ cur_string->params[1] = lower_32_bits(*tracer_event->out);
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ return 0;
+}
+
static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
struct tracer_event *tracer_event)
{
@@ -590,7 +617,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
if (tracer_event->string_event.tdsn == 0) {
cur_string = mlx5_tracer_get_string(tracer, tracer_event);
if (!cur_string)
- return -1;
+ return mlx5_tracer_handle_raw_string(tracer, tracer_event);
cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
cur_string->last_param_num = 0;
@@ -603,9 +630,9 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
} else {
cur_string = mlx5_tracer_message_get(tracer, tracer_event);
if (!cur_string) {
- pr_debug("%s Got string event for unknown string tdsm: %d\n",
+ pr_debug("%s Got string event for unknown string tmsn: %d\n",
__func__, tracer_event->string_event.tmsn);
- return -1;
+ return mlx5_tracer_handle_raw_string(tracer, tracer_event);
}
cur_string->last_param_num += 1;
if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
@@ -931,6 +958,14 @@ unlock:
return err;
}
+static void mlx5_fw_tracer_update_db(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, update_db_work);
+
+ mlx5_fw_tracer_reload(tracer);
+}
+
/* Create software resources (buffers, etc.) */
struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
{
@@ -958,6 +993,8 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change);
INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db);
INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces);
+ INIT_WORK(&tracer->update_db_work, mlx5_fw_tracer_update_db);
+ mutex_init(&tracer->state_lock);
err = mlx5_query_mtrc_caps(tracer);
@@ -1004,11 +1041,15 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
if (IS_ERR_OR_NULL(tracer))
return 0;
- dev = tracer->dev;
-
if (!tracer->str_db.loaded)
queue_work(tracer->work_queue, &tracer->read_fw_strings_work);
+ mutex_lock(&tracer->state_lock);
+ if (test_and_set_bit(MLX5_TRACER_STATE_UP, &tracer->state))
+ goto unlock;
+
+ dev = tracer->dev;
+
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
@@ -1029,6 +1070,8 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
goto err_notifier_unregister;
}
+unlock:
+ mutex_unlock(&tracer->state_lock);
return 0;
err_notifier_unregister:
@@ -1038,6 +1081,7 @@ err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
+ mutex_unlock(&tracer->state_lock);
return err;
}
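
The new state_lock plus the UP bit make init and cleanup idempotent, which matters because update_db_work triggers mlx5_fw_tracer_reload() and can re-enter these paths while a devlink reload does the same. The guard reduces to this shape (bring_up_resources() is a hypothetical placeholder for the PD/mkey setup):

    static int tracer_init_once(struct mlx5_fw_tracer *tracer)
    {
        int err = 0;

        mutex_lock(&tracer->state_lock);
        if (test_and_set_bit(MLX5_TRACER_STATE_UP, &tracer->state))
            goto unlock;                  /* a second caller sees UP and bails */

        err = bring_up_resources(tracer); /* hypothetical: PD, mkey, start */
        if (err)
            clear_bit(MLX5_TRACER_STATE_UP, &tracer->state);
    unlock:
        mutex_unlock(&tracer->state_lock);
        return err;
    }
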
@@ -1047,17 +1091,27 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
if (IS_ERR_OR_NULL(tracer))
return;
+ mutex_lock(&tracer->state_lock);
+ if (!test_and_clear_bit(MLX5_TRACER_STATE_UP, &tracer->state))
+ goto unlock;
+
mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
tracer->owner);
mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb);
cancel_work_sync(&tracer->ownership_change_work);
cancel_work_sync(&tracer->handle_traces_work);
+ /* It is valid to get here from update_db_work. Hence, don't wait for
+ * update_db_work to finish.
+ */
+ cancel_work(&tracer->update_db_work);
if (tracer->owner)
mlx5_fw_tracer_ownership_release(tracer);
mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey);
mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
+unlock:
+ mutex_unlock(&tracer->state_lock);
}
/* Free software resources (buffers, etc.) */
@@ -1074,6 +1128,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
+ mutex_destroy(&tracer->state_lock);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
@@ -1083,6 +1138,8 @@ static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
struct mlx5_core_dev *dev;
int err;
+ if (test_and_set_bit(MLX5_TRACER_RECREATE_DB, &tracer->state))
+ return 0;
cancel_work_sync(&tracer->read_fw_strings_work);
mlx5_fw_tracer_clean_ready_list(tracer);
mlx5_fw_tracer_clean_print_hash(tracer);
@@ -1093,17 +1150,18 @@ static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
err = mlx5_query_mtrc_caps(tracer);
if (err) {
mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
- return err;
+ goto out;
}
err = mlx5_fw_tracer_allocate_strings_db(tracer);
if (err) {
mlx5_core_warn(dev, "FWTracer: Allocate strings DB failed %d\n", err);
- return err;
+ goto out;
}
mlx5_fw_tracer_init_saved_traces_array(tracer);
-
- return 0;
+out:
+ clear_bit(MLX5_TRACER_RECREATE_DB, &tracer->state);
+ return err;
}
int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
@@ -1143,6 +1201,9 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
queue_work(tracer->work_queue, &tracer->handle_traces_work);
break;
+ case MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE:
+ queue_work(tracer->work_queue, &tracer->update_db_work);
+ break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
eqe->sub_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 4762b55b0b0e..5c548bb74f07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -63,6 +63,11 @@ struct mlx5_fw_trace_data {
char msg[TRACE_STR_MSG];
};
+enum mlx5_fw_tracer_state {
+ MLX5_TRACER_STATE_UP = BIT(0),
+ MLX5_TRACER_RECREATE_DB = BIT(1),
+};
+
struct mlx5_fw_tracer {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -104,6 +109,9 @@ struct mlx5_fw_tracer {
struct work_struct handle_traces_work;
struct hlist_head hash[MESSAGE_HASH_SIZE];
struct list_head ready_strings_list;
+ struct work_struct update_db_work;
+ struct mutex state_lock; /* Synchronize update work with reload flows */
+ unsigned long state;
};
struct tracer_string_format {
@@ -158,6 +166,7 @@ struct tracer_event {
struct tracer_string_event string_event;
struct tracer_timestamp_event timestamp_event;
};
+ u64 *out;
};
struct mlx5_ifc_tracer_event_bits {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index cdc87ecae5d3..9a3878f9e582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -75,6 +75,10 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev))
return 0;
+ /* A management PF doesn't have a peer PF */
+ if (mlx5_core_is_management_pf(dev))
+ return 0;
+
return mlx5_host_pf_init(dev);
}
@@ -85,6 +89,10 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev))
return;
+ /* A management PF doesn't have a peer PF */
+ if (mlx5_core_is_management_pf(dev))
+ return;
+
mlx5_host_pf_cleanup(dev);
err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2d77fb8a8a01..88460b7796e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -247,7 +247,7 @@ struct mlx5e_rx_wqe_ll {
};
struct mlx5e_rx_wqe_cyc {
- struct mlx5_wqe_data_seg data[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
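
ISO C does not allow a flexible array member to be the only member of a struct, which `data[0]` only papered over; DECLARE_FLEX_ARRAY() (from linux/stddef.h) keeps the declaration legal by pairing the array with a zero-sized struct inside an anonymous wrapper, roughly:

    /* Approximate expansion of the macro; sizeof(struct mlx5e_rx_wqe_cyc)
     * remains 0, as before. */
    struct mlx5e_rx_wqe_cyc {
        struct {
            struct { } __empty_data;          /* zero-sized placeholder */
            struct mlx5_wqe_data_seg data[];  /* the real flex array */
        };
    };
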
struct mlx5e_umr_wqe {
@@ -454,6 +454,7 @@ struct mlx5e_txqsq {
struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
+ struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
/* control path */
@@ -626,10 +627,11 @@ struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt,
+ u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt);
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
@@ -968,6 +970,12 @@ struct mlx5e_priv {
struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
+ struct dentry *dfs_root;
+};
+
+struct mlx5e_dev {
+ struct mlx5e_priv *priv;
+ struct devlink_port dl_port;
};
struct mlx5e_rx_handlers {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 83adaabf59f5..c6b6e290fd79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -4,6 +4,31 @@
#include "en/devlink.h"
#include "eswitch.h"
+static const struct devlink_ops mlx5e_devlink_ops = {
+};
+
+struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_dev *mlx5e_dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc_ns(&mlx5e_devlink_ops, sizeof(*mlx5e_dev),
+ devlink_net(priv_to_devlink(mdev)), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+ devlink_register(devlink);
+ return devlink_priv(devlink);
+}
+
+void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx5e_dev);
+
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
+
static void
mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
@@ -14,51 +39,36 @@ mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_i
memcpy(ppid->id, &parent_id, sizeof(parent_id));
}
-int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
+int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev,
+ struct mlx5_core_dev *mdev)
{
- struct devlink *devlink = priv_to_devlink(priv->mdev);
+ struct devlink *devlink = priv_to_devlink(mlx5e_dev);
struct devlink_port_attrs attrs = {};
struct netdev_phys_item_id ppid = {};
- struct devlink_port *dl_port;
unsigned int dl_port_index;
- int ret;
- if (mlx5_core_is_pf(priv->mdev)) {
+ if (mlx5_core_is_pf(mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = mlx5_get_dev_index(priv->mdev);
- if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
- mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
+ attrs.phys.port_number = mlx5_get_dev_index(mdev);
+ if (MLX5_ESWITCH_MANAGER(mdev)) {
+ mlx5e_devlink_get_port_parent_id(mdev, &ppid);
memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
attrs.switch_id.id_len = ppid.id_len;
}
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev,
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(mdev,
MLX5_VPORT_UPLINK);
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, 0);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(mdev, 0);
}
- dl_port = mlx5e_devlink_get_dl_port(priv);
- memset(dl_port, 0, sizeof(*dl_port));
- devlink_port_attrs_set(dl_port, &attrs);
+ devlink_port_attrs_set(&mlx5e_dev->dl_port, &attrs);
- if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
- devl_lock(devlink);
- ret = devl_port_register(devlink, dl_port, dl_port_index);
- if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
- devl_unlock(devlink);
-
- return ret;
+ return devlink_port_register(devlink, &mlx5e_dev->dl_port,
+ dl_port_index);
}
-void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
+void mlx5e_devlink_port_unregister(struct mlx5e_dev *mlx5e_dev)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
- struct devlink *devlink = priv_to_devlink(priv->mdev);
-
- if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
- devl_lock(devlink);
- devl_port_unregister(dl_port);
- if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
- devl_unlock(devlink);
+ devlink_port_unregister(&mlx5e_dev->dl_port);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
index 4f238d4fff55..d5ec4461f300 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -7,13 +7,11 @@
#include <net/devlink.h>
#include "en.h"
-int mlx5e_devlink_port_register(struct mlx5e_priv *priv);
-void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
-
-static inline struct devlink_port *
-mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv)
-{
- return &priv->mdev->mlx5e_res.dl_port;
-}
+struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
+ struct mlx5_core_dev *mdev);
+void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev);
+int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev,
+ struct mlx5_core_dev *mdev);
+void mlx5e_devlink_port_unregister(struct mlx5e_dev *mlx5e_dev);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 379c6dc9a3be..e5a44b0b9616 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -87,6 +87,7 @@ enum {
MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
@@ -145,7 +146,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
- bool state_destroy);
+ bool state_destroy,
+ struct dentry *dfs_root);
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
@@ -189,6 +191,8 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
__be16 proto, u16 vid);
void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs);
+
#define fs_err(fs, fmt, ...) \
mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
index 17325c5d6516..cf60f0a3ff23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
@@ -47,6 +47,7 @@ void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)
void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)
{
+ WARN_ON(!hash_empty(tbl->hlist));
mutex_destroy(&tbl->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 4ad19c981294..a21bd1179477 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -411,9 +411,14 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
{
enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 log_wqe_size, log_stride_size;
- return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
- mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
+ log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ WARN(log_wqe_size < log_stride_size,
+ "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
+ log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
+ return log_wqe_size - log_stride_size;
}
u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
@@ -580,11 +585,16 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
u16 max_mtu_pkts;
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
+ mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
+ page_shift, umr_mode);
return -EOPNOTSUPP;
+ }
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
+ mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
return -EINVAL;
+ }
/* Current RQ length is too big for the given frame size, the
* needed number of WQEs exceeds the maximum.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 89510cac46c2..505ba41195b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -287,6 +287,78 @@ int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in)
return err;
}
+int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
+ u8 pool_idx, void *out, int size_out)
+{
+ u32 in[MLX5_ST_SZ_DW(sbpr_reg)] = {};
+
+ MLX5_SET(sbpr_reg, in, desc, desc);
+ MLX5_SET(sbpr_reg, in, dir, dir);
+ MLX5_SET(sbpr_reg, in, pool, pool_idx);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, size_out, MLX5_REG_SBPR, 0, 0);
+}
+
+int mlx5e_port_set_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
+ u8 pool_idx, u32 infi_size, u32 size)
+{
+ u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(sbpr_reg)] = {};
+
+ MLX5_SET(sbpr_reg, in, desc, desc);
+ MLX5_SET(sbpr_reg, in, dir, dir);
+ MLX5_SET(sbpr_reg, in, pool, pool_idx);
+ MLX5_SET(sbpr_reg, in, infi_size, infi_size);
+ MLX5_SET(sbpr_reg, in, size, size);
+ MLX5_SET(sbpr_reg, in, mode, 1);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_SBPR, 0, 1);
+}
+
+static int mlx5e_port_query_sbcm(struct mlx5_core_dev *mdev, u32 desc,
+ u8 pg_buff_idx, u8 dir, void *out,
+ int size_out)
+{
+ u32 in[MLX5_ST_SZ_DW(sbcm_reg)] = {};
+
+ MLX5_SET(sbcm_reg, in, desc, desc);
+ MLX5_SET(sbcm_reg, in, local_port, 1);
+ MLX5_SET(sbcm_reg, in, pg_buff, pg_buff_idx);
+ MLX5_SET(sbcm_reg, in, dir, dir);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, size_out, MLX5_REG_SBCM, 0, 0);
+}
+
+int mlx5e_port_set_sbcm(struct mlx5_core_dev *mdev, u32 desc, u8 pg_buff_idx,
+ u8 dir, u8 infi_size, u32 max_buff, u8 pool_idx)
+{
+ u32 out[MLX5_ST_SZ_DW(sbcm_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(sbcm_reg)] = {};
+ u32 min_buff;
+ int err;
+ u8 exc;
+
+ err = mlx5e_port_query_sbcm(mdev, desc, pg_buff_idx, dir, out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ exc = MLX5_GET(sbcm_reg, out, exc);
+ min_buff = MLX5_GET(sbcm_reg, out, min_buff);
+
+ MLX5_SET(sbcm_reg, in, desc, desc);
+ MLX5_SET(sbcm_reg, in, local_port, 1);
+ MLX5_SET(sbcm_reg, in, pg_buff, pg_buff_idx);
+ MLX5_SET(sbcm_reg, in, dir, dir);
+ MLX5_SET(sbcm_reg, in, exc, exc);
+ MLX5_SET(sbcm_reg, in, min_buff, min_buff);
+ MLX5_SET(sbcm_reg, in, infi_max, infi_size);
+ MLX5_SET(sbcm_reg, in, max_buff, max_buff);
+ MLX5_SET(sbcm_reg, in, pool, pool_idx);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_SBCM, 0, 1);
+}
+
/* buffer[i]: buffer that priority i is mapped to */
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 7a7defe60792..3f474e370828 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -57,6 +57,12 @@ u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
+int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
+ u8 pool_idx, void *out, int size_out);
+int mlx5e_port_set_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
+ u8 pool_idx, u32 infi_size, u32 size);
+int mlx5e_port_set_sbcm(struct mlx5_core_dev *mdev, u32 desc, u8 pg_buff_idx,
+ u8 dir, u8 infi_size, u32 max_buff, u8 pool_idx);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index c9d5d8d93994..7ac1ad9c46de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -73,6 +73,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
port_buffer->buffer[i].lossy);
}
+ port_buffer->headroom_size = total_used;
port_buffer->port_buffer_size =
MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
port_buffer->spare_buffer_size =
@@ -86,16 +87,204 @@ out:
return err;
}
+struct mlx5e_buffer_pool {
+ u32 infi_size;
+ u32 size;
+ u32 buff_occupancy;
+};
+
+static int mlx5e_port_query_pool(struct mlx5_core_dev *mdev,
+ struct mlx5e_buffer_pool *buffer_pool,
+ u32 desc, u8 dir, u8 pool_idx)
+{
+ u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {};
+ int err;
+
+ err = mlx5e_port_query_sbpr(mdev, desc, dir, pool_idx, out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ buffer_pool->size = MLX5_GET(sbpr_reg, out, size);
+ buffer_pool->infi_size = MLX5_GET(sbpr_reg, out, infi_size);
+ buffer_pool->buff_occupancy = MLX5_GET(sbpr_reg, out, buff_occupancy);
+
+ return err;
+}
+
+enum {
+ MLX5_INGRESS_DIR = 0,
+ MLX5_EGRESS_DIR = 1,
+};
+
+enum {
+ MLX5_LOSSY_POOL = 0,
+ MLX5_LOSSLESS_POOL = 1,
+};
+
+/* No limit on usage of shared buffer pool (max_buff=0) */
+#define MLX5_SB_POOL_NO_THRESHOLD 0
+/* Shared buffer pool usage threshold when calculated
+ * dynamically in alpha units. alpha=13 is equivalent to
+ * HW_alpha of [(1/128) * 2 ^ (alpha-1)] = 32, where HW_alpha
+ * equates to the following portion of the shared buffer pool:
+ * [32 / (1 + n * 32)], where n is the number of buffers
+ * that are using the shared buffer pool.
+ */
+#define MLX5_SB_POOL_THRESHOLD 13
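
For alpha = 13 the formula above gives each buffer 32 / (1 + n * 32) of the shared pool when n buffers are using it. A standalone sanity check of the arithmetic (illustration only, not driver code):

    #include <stdio.h>

    /* Portion of the shared pool one buffer may consume when n buffers
     * share it, for HW_alpha = (1/128) * 2^(13 - 1) = 32. */
    static double sb_pool_portion(unsigned int n)
    {
        const double hw_alpha = 32.0;
        return hw_alpha / (1.0 + n * hw_alpha);
    }

    int main(void)
    {
        /* prints n=1: 0.970  n=4: 0.248 */
        printf("n=1: %.3f  n=4: %.3f\n", sb_pool_portion(1), sb_pool_portion(4));
        return 0;
    }
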
+
+/* Shared buffer class management parameters */
+struct mlx5_sbcm_params {
+ u8 pool_idx;
+ u8 max_buff;
+ u8 infi_size;
+};
+
+static const struct mlx5_sbcm_params sbcm_default = {
+ .pool_idx = MLX5_LOSSY_POOL,
+ .max_buff = MLX5_SB_POOL_NO_THRESHOLD,
+ .infi_size = 0,
+};
+
+static const struct mlx5_sbcm_params sbcm_lossy = {
+ .pool_idx = MLX5_LOSSY_POOL,
+ .max_buff = MLX5_SB_POOL_NO_THRESHOLD,
+ .infi_size = 1,
+};
+
+static const struct mlx5_sbcm_params sbcm_lossless = {
+ .pool_idx = MLX5_LOSSLESS_POOL,
+ .max_buff = MLX5_SB_POOL_THRESHOLD,
+ .infi_size = 0,
+};
+
+static const struct mlx5_sbcm_params sbcm_lossless_no_threshold = {
+ .pool_idx = MLX5_LOSSLESS_POOL,
+ .max_buff = MLX5_SB_POOL_NO_THRESHOLD,
+ .infi_size = 1,
+};
+
+/**
+ * select_sbcm_params() - selects the shared buffer pool configuration
+ *
+ * @buffer: <input> port buffer to retrieve params of
+ * @lossless_buff_count: <input> number of lossless buffers in total
+ *
+ * The selection is based on the following rules:
+ * 1. If buffer size is 0, no shared buffer pool is used.
+ * 2. If buffer is lossy, use lossy shared buffer pool.
+ * 3. If there is more than one lossless buffer, use lossless shared buffer pool
+ * with threshold.
+ * 4. If there is only one lossless buffer, use lossless shared buffer pool
+ * without threshold.
+ *
+ * Return: the selected struct mlx5_sbcm_params values
+ */
+static const struct mlx5_sbcm_params *
+select_sbcm_params(struct mlx5e_bufferx_reg *buffer, u8 lossless_buff_count)
+{
+ if (buffer->size == 0)
+ return &sbcm_default;
+
+ if (buffer->lossy)
+ return &sbcm_lossy;
+
+ if (lossless_buff_count > 1)
+ return &sbcm_lossless;
+
+ return &sbcm_lossless_no_threshold;
+}
+
+static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
+ struct mlx5e_port_buffer *port_buffer)
+{
+ const struct mlx5_sbcm_params *p;
+ u8 lossless_buff_count = 0;
+ int err;
+ int i;
+
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+ return 0;
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ lossless_buff_count += ((port_buffer->buffer[i].size) &&
+ (!(port_buffer->buffer[i].lossy)));
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
+ err = mlx5e_port_set_sbcm(mdev, 0, i,
+ MLX5_INGRESS_DIR,
+ p->infi_size,
+ p->max_buff,
+ p->pool_idx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
+ u32 current_headroom_size,
+ u32 new_headroom_size)
+{
+ struct mlx5e_buffer_pool lossless_ipool;
+ struct mlx5e_buffer_pool lossy_epool;
+ u32 lossless_ipool_size;
+ u32 shared_buffer_size;
+ u32 total_buffer_size;
+ u32 lossy_epool_size;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+ return 0;
+
+ err = mlx5e_port_query_pool(mdev, &lossy_epool, 0, MLX5_EGRESS_DIR,
+ MLX5_LOSSY_POOL);
+ if (err)
+ return err;
+
+ err = mlx5e_port_query_pool(mdev, &lossless_ipool, 0, MLX5_INGRESS_DIR,
+ MLX5_LOSSLESS_POOL);
+ if (err)
+ return err;
+
+ total_buffer_size = current_headroom_size + lossy_epool.size +
+ lossless_ipool.size;
+ shared_buffer_size = total_buffer_size - new_headroom_size;
+
+ if (shared_buffer_size < 4) {
+ pr_err("Requested port buffer is too large, not enough space left for shared buffer\n");
+ return -EINVAL;
+ }
+
+ /* Total shared buffer size is split in a ratio of 3:1 between
+ * lossy and lossless pools respectively.
+ */
+ lossy_epool_size = (shared_buffer_size / 4) * 3;
+ lossless_ipool_size = shared_buffer_size / 4;
+
+ mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
+ lossy_epool_size);
+ mlx5e_port_set_sbpr(mdev, 0, MLX5_INGRESS_DIR, MLX5_LOSSLESS_POOL, 0,
+ lossless_ipool_size);
+ return 0;
+}
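
Concretely: if headroom plus the two pools total 1000 cells and the new headroom needs 200, the remaining 800 shared cells split 600 to the lossy egress pool and 200 to the lossless ingress pool. A standalone sketch of the split, mirroring the driver's minimum-size check:

    #include <stdio.h>

    static int split_shared_buffer(unsigned int total, unsigned int headroom,
                                   unsigned int *lossy, unsigned int *lossless)
    {
        unsigned int shared;

        if (headroom > total)
            return -1;
        shared = total - headroom;
        if (shared < 4)              /* mirrors the "too large" check above */
            return -1;

        *lossy = (shared / 4) * 3;   /* 3/4 to the lossy egress pool */
        *lossless = shared / 4;      /* 1/4 to the lossless ingress pool */
        return 0;
    }

    int main(void)
    {
        unsigned int lossy, lossless;

        if (!split_shared_buffer(1000, 200, &lossy, &lossless))
            printf("lossy=%u lossless=%u\n", lossy, lossless); /* 600 200 */
        return 0;
    }
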
+
static int port_set_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer)
{
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+ u32 new_headroom_size = 0;
+ u32 current_headroom_size;
void *in;
int err;
int i;
+ current_headroom_size = port_buffer->headroom_size;
+
in = kzalloc(sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -110,6 +299,7 @@ static int port_set_buffer(struct mlx5e_priv *priv,
u64 xoff = port_buffer->buffer[i].xoff;
u64 xon = port_buffer->buffer[i].xon;
+ new_headroom_size += size;
do_div(size, port_buff_cell_sz);
do_div(xoff, port_buff_cell_sz);
do_div(xon, port_buff_cell_sz);
@@ -119,6 +309,17 @@ static int port_set_buffer(struct mlx5e_priv *priv,
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
+ new_headroom_size /= port_buff_cell_sz;
+ current_headroom_size /= port_buff_cell_sz;
+ err = port_update_shared_buffer(priv->mdev, current_headroom_size,
+ new_headroom_size);
+ if (err)
+ goto out;
+
+ err = port_update_pool_cfg(priv->mdev, port_buffer);
+ if (err)
+ goto out;
+
err = mlx5e_port_set_pbmc(mdev, in);
out:
kfree(in);
@@ -174,6 +375,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
/**
* update_buffer_lossy - Update buffer configuration based on pfc
+ * @mdev: port function core device
* @max_mtu: netdev's max_mtu
* @pfc_en: <input> current pfc configuration
* @buffer: <input> current prio to buffer mapping
@@ -192,7 +394,8 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* @return: 0 if no error,
* sets change to true if buffer configuration was modified.
*/
-static int update_buffer_lossy(unsigned int max_mtu,
+static int update_buffer_lossy(struct mlx5_core_dev *mdev,
+ unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
struct mlx5e_port_buffer *port_buffer,
bool *change)
@@ -229,6 +432,10 @@ static int update_buffer_lossy(unsigned int max_mtu,
}
if (changed) {
+ err = port_update_pool_cfg(mdev, port_buffer);
+ if (err)
+ return err;
+
err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
@@ -293,23 +500,30 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
}
if (change & MLX5E_PORT_BUFFER_PFC) {
+ mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n",
+ __func__, pfc->pfc_en);
err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
if (err)
return err;
- err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
- &port_buffer, &update_buffer);
+ err = update_buffer_lossy(priv->mdev, max_mtu, pfc->pfc_en, buffer, xoff,
+ port_buff_cell_sz, &port_buffer,
+ &update_buffer);
if (err)
return err;
}
if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
update_prio2buffer = true;
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
+ __func__, i, prio2buffer[i]);
+
err = fill_pfc_en(priv->mdev, &curr_pfc_en);
if (err)
return err;
- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
+ err = update_buffer_lossy(priv->mdev, max_mtu, curr_pfc_en, prio2buffer, xoff,
port_buff_cell_sz, &port_buffer, &update_buffer);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index 80af7a5ac604..a6ef118de758 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -60,6 +60,7 @@ struct mlx5e_bufferx_reg {
struct mlx5e_port_buffer {
u32 port_buffer_size;
u32 spare_buffer_size;
+ u32 headroom_size;
struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER];
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 8469e9c38670..9a1bc93b7dc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -771,8 +771,8 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
- mlx5e_trigger_napi_sched(&c->napi);
}
+ mlx5e_trigger_napi_sched(&c->napi);
}
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index b6f5c1bcdbcd..016a61c52c45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -120,8 +120,8 @@ int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
- err = mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport,
- mdata->metadata_reg_c_0);
+ err = mlx5_esw_acl_ingress_vport_metadata_update(esw, rpriv->rep->vport,
+ mdata->metadata_reg_c_0);
if (err)
goto ingress_err;
@@ -167,7 +167,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
/* Reset bond_metadata to zero first, then reset all ingress/egress
* acls and rx rules of the unslaved representor's vport
*/
- mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport, 0);
+ mlx5_esw_acl_ingress_vport_metadata_update(esw, rpriv->rep->vport, 0);
mlx5_esw_acl_egress_vport_unbond(esw, rpriv->rep->vport);
mlx5e_rep_bond_update(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index b08339d986d5..e24b46953542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */
-#include <net/dst_metadata.h>
#include <linux/netdevice.h>
#include <linux/if_macvlan.h>
#include <linux/list.h>
@@ -589,7 +588,7 @@ mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
act = mlx5e_tc_act_get(fl_act->id, ns_type);
if (!act || !act->stats_action)
- return -EOPNOTSUPP;
+ return mlx5e_tc_fill_action_stats(priv, fl_act);
return act->stats_action(priv, fl_act);
}
@@ -665,232 +664,54 @@ void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
mlx5e_rep_indr_block_unbind);
}
-static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv,
- u32 tunnel_id)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct tunnel_match_enc_opts enc_opts = {};
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct metadata_dst *tun_dst;
- struct tunnel_match_key key;
- u32 tun_id, enc_opts_id;
- struct net_device *dev;
- int err;
-
- enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
- tun_id = tunnel_id >> ENC_OPTS_BITS;
-
- if (!tun_id)
- return true;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
-
- err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel for tun_id: %d, err: %d\n",
- tun_id, err);
- return false;
- }
-
- if (enc_opts_id) {
- err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
- enc_opts_id, &enc_opts);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
- enc_opts_id, err);
- return false;
- }
- }
-
- if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, TUNNEL_KEY,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- enc_opts.key.len);
- } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, 0, TUNNEL_KEY,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- enc_opts.key.len);
- } else {
- netdev_dbg(priv->netdev,
- "Couldn't restore tunnel, unsupported addr_type: %d\n",
- key.enc_control.addr_type);
- return false;
- }
-
- if (!tun_dst) {
- netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
- return false;
- }
-
- tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
-
- if (enc_opts.key.len)
- ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
- enc_opts.key.data,
- enc_opts.key.len,
- enc_opts.key.dst_opt_type);
-
- skb_dst_set(skb, (struct dst_entry *)tun_dst);
- dev = dev_get_by_index(&init_net, key.filter_ifindex);
- if (!dev) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel device with ifindex: %d\n",
- key.filter_ifindex);
- return false;
- }
-
- /* Set fwd_dev so we do dev_put() after datapath */
- tc_priv->fwd_dev = dev;
-
- skb->dev = dev;
-
- return true;
-}
-
-static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
- struct mlx5e_tc_update_priv *tc_priv)
-{
- struct mlx5e_priv *priv = netdev_priv(skb->dev);
- u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
-
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- if (chain) {
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct tc_skb_ext *tc_skb_ext;
- struct mlx5_eswitch *esw;
- u32 zone_restore_id;
-
- tc_skb_ext = tc_skb_ext_alloc(skb);
- if (!tc_skb_ext) {
- WARN_ON(1);
- return false;
- }
- tc_skb_ext->chain = chain;
- zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
- esw = priv->mdev->priv.eswitch;
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
- zone_restore_id))
- return false;
- }
-#endif /* CONFIG_NET_TC_SKB_EXT */
-
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
-}
-
-static void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
-{
- if (tc_priv->fwd_dev)
- dev_put(tc_priv->fwd_dev);
-}
-
-static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5_mapped_obj *mapped_obj,
- struct mlx5e_tc_update_priv *tc_priv)
-{
- if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
- netdev_dbg(priv->netdev,
- "Failed to restore tunnel info for sampled packet\n");
- return;
- }
- mlx5e_tc_sample_skb(skb, mapped_obj);
- mlx5_rep_tc_post_napi_receive(tc_priv);
-}
-
-static bool mlx5e_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5_mapped_obj *mapped_obj,
- struct mlx5e_tc_update_priv *tc_priv,
- bool *forward_tx,
- u32 reg_c1)
-{
- u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
-
- /* Tunnel restore takes precedence over int port restore */
- if (tunnel_id)
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
-
- if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
- mapped_obj->int_port_metadata, forward_tx)) {
- /* Set fwd_dev for future dev_put */
- tc_priv->fwd_dev = skb->dev;
-
- return true;
- }
-
- return false;
-}
-
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
struct sk_buff *skb)
{
- u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
+ u32 reg_c0, reg_c1, zone_restore_id, tunnel_id;
struct mlx5e_tc_update_priv tc_priv = {};
- struct mlx5_mapped_obj mapped_obj;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mapping_ctx *mapping_ctx;
struct mlx5_eswitch *esw;
- bool forward_tx = false;
struct mlx5e_priv *priv;
- u32 reg_c0;
- int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
goto forward;
- /* If reg_c0 is not equal to the default flow tag then skb->mark
+ /* If mapped_obj_id is not equal to the default flow tag then skb->mark
* is not supported and must be reset back to 0.
*/
skb->mark = 0;
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
- err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find mapped object for reg_c0: %d, err: %d\n",
- reg_c0, err);
- goto free_skb;
- }
+ mapping_ctx = esw->offloads.reg_c0_obj_pool;
+ reg_c1 = be32_to_cpu(cqe->ft_metadata);
+ zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
+ tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
- if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
- if (!mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, &tc_priv) &&
- !mlx5_ipsec_is_rx_flow(cqe))
- goto free_skb;
- } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
- mlx5e_restore_skb_sample(priv, skb, &mapped_obj, &tc_priv);
- goto free_skb;
- } else if (mapped_obj.type == MLX5_MAPPED_OBJ_INT_PORT_METADATA) {
- if (!mlx5e_restore_skb_int_port(priv, skb, &mapped_obj, &tc_priv,
- &forward_tx, reg_c1))
- goto free_skb;
- } else {
- netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ ct_priv = uplink_priv->ct_priv;
+
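+ /* mlx5e_tc_update_skb() resolves the mapped object behind reg_c0
+ * (chain, sample or int-port metadata) and restores CT/tunnel state;
+ * IPsec RX flows are forwarded even when restore fails.
+ */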
+ if (!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id,
+ &tc_priv))
goto free_skb;
- }
forward:
- if (forward_tx)
+ if (tc_priv.skb_done)
+ goto free_skb;
+
+ if (tc_priv.forward_tx)
dev_queue_xmit(skb);
else
napi_gro_receive(rq->cq.napi, skb);
- mlx5_rep_tc_post_napi_receive(&tc_priv);
+ if (tc_priv.fwd_dev)
+ dev_put(tc_priv.fwd_dev);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 1ae15b8536a8..c462fe76495b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -736,10 +736,10 @@ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(dl_port, &mlx5_rx_reporter_ops,
+ reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ &mlx5_rx_reporter_ops,
MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
@@ -754,6 +754,6 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
if (!priv->rx_reporter)
return;
- devlink_port_health_reporter_destroy(priv->rx_reporter);
+ devlink_health_reporter_destroy(priv->rx_reporter);
priv->rx_reporter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 60bc5b577ab9..34666e2b3871 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -81,6 +81,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
mlx5e_activate_txqsq(sq);
+ if (sq->channel)
+ mlx5e_trigger_napi_icosq(sq->channel);
+ else
+ mlx5e_trigger_napi_sched(sq->cq.napi);
return 0;
out:
@@ -590,10 +594,10 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(dl_port, &mlx5_tx_reporter_ops,
+ reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
@@ -609,6 +613,6 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
if (!priv->tx_reporter)
return;
- devlink_port_health_reporter_destroy(priv->tx_reporter);
+ devlink_health_reporter_destroy(priv->tx_reporter);
priv->tx_reporter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 78c427b38048..07cc65596f89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -216,7 +216,6 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct net_device *uplink_dev;
struct mlx5e_priv *out_priv;
struct mlx5_eswitch *esw;
- bool is_uplink_rep;
int *ifindexes;
int if_count;
int err;
@@ -231,10 +230,9 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->ifindexes[if_count] = out_dev->ifindex;
parse_state->if_count++;
- is_uplink_rep = mlx5e_eswitch_uplink_rep(out_dev);
- err = mlx5_lag_do_mirred(priv->mdev, out_dev);
- if (err)
- return err;
+
+ if (mlx5_lag_mpesw_do_mirred(priv->mdev, out_dev, extack))
+ return -EOPNOTSUPP;
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
@@ -275,13 +273,6 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
- /* If output device is bond master then rules are not explicit
- * so we don't attempt to count them.
- */
- if (is_uplink_rep && MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
- MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
- attr->lag.count = true;
-
esw_attr->out_count++;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index b86ac604d0c2..2e0d88b513aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -44,19 +44,17 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, vlan_idx)) {
+ NL_SET_ERR_MSG_MOD(extack, "firmware vlan actions is not supported");
+ return -EOPNOTSUPP;
+ }
+
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
- return -EOPNOTSUPP;
- }
-
+ if (vlan_idx)
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
- } else {
+ else
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- }
break;
case FLOW_ACTION_VLAN_PUSH:
attr->vlan_vid[vlan_idx] = act->vlan.vid;
@@ -65,25 +63,10 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
if (!attr->vlan_proto[vlan_idx])
attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported for vlan depth > 1");
- return -EOPNOTSUPP;
- }
-
+ if (vlan_idx)
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- } else {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
- (act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio)) {
- NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
- return -EOPNOTSUPP;
- }
-
+ else
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- }
break;
case FLOW_ACTION_VLAN_POP_ETH:
parse_state->eth_pop = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
new file mode 100644
index 000000000000..f71766dca660
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/rhashtable.h>
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+#include "act_stats.h"
+#include "en/fs.h"
+
+struct mlx5e_tc_act_stats_handle {
+ struct rhashtable ht;
+ spinlock_t ht_lock; /* protects hashtable */
+};
+
+struct mlx5e_tc_act_stats {
+ unsigned long tc_act_cookie;
+
+ struct mlx5_fc *counter;
+ u64 lastpackets;
+ u64 lastbytes;
+
+ struct rhash_head hash;
+ struct rcu_head rcu_head;
+};
+
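+/* Entries are hashed purely by tc_act_cookie: the key starts at offset 0
+ * and covers all bytes before the counter pointer.
+ */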
+static const struct rhashtable_params act_counters_ht_params = {
+ .head_offset = offsetof(struct mlx5e_tc_act_stats, hash),
+ .key_offset = 0,
+ .key_len = offsetof(struct mlx5e_tc_act_stats, counter),
+ .automatic_shrinking = true,
+};
+
+struct mlx5e_tc_act_stats_handle *
+mlx5e_tc_act_stats_create(void)
+{
+ struct mlx5e_tc_act_stats_handle *handle;
+ int err;
+
+ handle = kvzalloc(sizeof(*handle), GFP_KERNEL);
+ /* kvzalloc() returns NULL on failure, not an ERR_PTR */
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&handle->ht, &act_counters_ht_params);
+ if (err)
+ goto err;
+
+ spin_lock_init(&handle->ht_lock);
+ return handle;
+err:
+ kvfree(handle);
+ return ERR_PTR(err);
+}
+
+void mlx5e_tc_act_stats_free(struct mlx5e_tc_act_stats_handle *handle)
+{
+ rhashtable_destroy(&handle->ht);
+ kvfree(handle);
+}
+
+static int
+mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
+ unsigned long act_cookie,
+ struct mlx5_fc *counter)
+{
+ struct mlx5e_tc_act_stats *act_stats, *old_act_stats;
+ struct rhashtable *ht = &handle->ht;
+ int err = 0;
+
+ act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL);
+ if (!act_stats)
+ return -ENOMEM;
+
+ act_stats->tc_act_cookie = act_cookie;
+ act_stats->counter = counter;
+
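+ /* Insert under RCU: rhashtable_lookup_get_insert_fast() returns an
+ * existing entry (or an ERR_PTR) if the cookie is already present, in
+ * which case the freshly allocated entry is backed out below.
+ */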
+ rcu_read_lock();
+ old_act_stats = rhashtable_lookup_get_insert_fast(ht,
+ &act_stats->hash,
+ act_counters_ht_params);
+ if (IS_ERR(old_act_stats)) {
+ err = PTR_ERR(old_act_stats);
+ goto err_hash_insert;
+ } else if (old_act_stats) {
+ err = -EEXIST;
+ goto err_hash_insert;
+ }
+ rcu_read_unlock();
+
+ return 0;
+
+err_hash_insert:
+ rcu_read_unlock();
+ kvfree(act_stats);
+ return err;
+}
+
+void
+mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_flow_attr *attr;
+ struct mlx5e_tc_act_stats *act_stats;
+ int i;
+
+ if (!flow_flag_test(flow, USE_ACT_STATS))
+ return;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ for (i = 0; i < attr->tc_act_cookies_count; i++) {
+ struct rhashtable *ht = &handle->ht;
+
+ spin_lock(&handle->ht_lock);
+ act_stats = rhashtable_lookup_fast(ht,
+ &attr->tc_act_cookies[i],
+ act_counters_ht_params);
+ if (act_stats &&
+ rhashtable_remove_fast(ht, &act_stats->hash,
+ act_counters_ht_params) == 0)
+ kvfree_rcu(act_stats, rcu_head);
+
+ spin_unlock(&handle->ht_lock);
+ }
+ }
+}
+
+int
+mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_fc *curr_counter = NULL;
+ unsigned long last_cookie = 0;
+ struct mlx5_flow_attr *attr;
+ int err;
+ int i;
+
+ if (!flow_flag_test(flow, USE_ACT_STATS))
+ return 0;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (attr->counter)
+ curr_counter = attr->counter;
+
+ for (i = 0; i < attr->tc_act_cookies_count; i++) {
+ /* jump over identical ids (e.g. pedit) */
+ if (last_cookie == attr->tc_act_cookies[i])
+ continue;
+
+ err = mlx5e_tc_act_stats_add(handle, attr->tc_act_cookies[i], curr_counter);
+ if (err)
+ goto out_err;
+ last_cookie = attr->tc_act_cookies[i];
+ }
+ }
+
+ return 0;
+out_err:
+ mlx5e_tc_act_stats_del_flow(handle, flow);
+ return err;
+}
+
+int
+mlx5e_tc_act_stats_fill_stats(struct mlx5e_tc_act_stats_handle *handle,
+ struct flow_offload_action *fl_act)
+{
+ struct rhashtable *ht = &handle->ht;
+ struct mlx5e_tc_act_stats *item;
+ struct mlx5e_tc_act_stats key;
+ u64 pkts, bytes, lastused;
+ int err = 0;
+
+ key.tc_act_cookie = fl_act->cookie;
+
+ rcu_read_lock();
+ item = rhashtable_lookup(ht, &key, act_counters_ht_params);
+ if (!item) {
+ rcu_read_unlock();
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ mlx5_fc_query_cached_raw(item->counter,
+ &bytes, &pkts, &lastused);
+
+ flow_stats_update(&fl_act->stats,
+ bytes - item->lastbytes,
+ pkts - item->lastpackets,
+ 0, lastused, FLOW_ACTION_HW_STATS_DELAYED);
+
+ item->lastpackets = pkts;
+ item->lastbytes = bytes;
+ rcu_read_unlock();
+
+ return 0;
+
+err_out:
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h
new file mode 100644
index 000000000000..002292c2567c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_ACT_STATS_H__
+#define __MLX5_EN_ACT_STATS_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+struct mlx5e_tc_act_stats_handle;
+
+struct mlx5e_tc_act_stats_handle *mlx5e_tc_act_stats_create(void);
+void mlx5e_tc_act_stats_free(struct mlx5e_tc_act_stats_handle *handle);
+
+int
+mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle,
+ struct mlx5e_tc_flow *flow);
+
+void
+mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle,
+ struct mlx5e_tc_flow *flow);
+
+int
+mlx5e_tc_act_stats_fill_stats(struct mlx5e_tc_act_stats_handle *handle,
+ struct flow_offload_action *fl_act);
+
+#endif /* __MLX5_EN_ACT_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 78af8a3175bf..8218c892b161 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -28,7 +28,7 @@ struct mlx5e_flow_meter_aso_obj {
int base_id;
int total_meters;
- unsigned long meters_map[0]; /* must be at the end of this struct */
+ unsigned long meters_map[]; /* must be at the end of this struct */
};
struct mlx5e_flow_meters {
@@ -204,13 +204,15 @@ mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, int *obj_
u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct mlx5_core_dev *mdev = flow_meters->mdev;
- void *obj;
+ void *obj, *param;
int err;
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
- MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_create_param, param, log_obj_range,
+ flow_meters->log_granularity);
obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index f2c2c752bd1c..558a776359af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -237,7 +237,7 @@ sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
int err;
err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
- CHAIN_TO_REG, obj_id);
+ MAPPED_OBJ_TO_REG, obj_id);
if (err)
goto err_set_regc0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 313df8232db7..314983bc6f08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -35,6 +35,7 @@
#define MLX5_CT_STATE_REPLY_BIT BIT(4)
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
+#define MLX5_CT_STATE_NEW_BIT BIT(7)
#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
@@ -59,6 +60,7 @@ struct mlx5_tc_ct_debugfs {
struct mlx5_tc_ct_priv {
struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
const struct net_device *netdev;
struct mod_hdr_tbl *mod_hdr_tbl;
struct xarray tuple_ids;
@@ -85,7 +87,6 @@ struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
struct mlx5_ct_ft *ft;
- u32 chain_mapping;
};
struct mlx5_ct_zone_rule {
@@ -721,12 +722,14 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
struct flow_action_entry *meta;
+ enum ip_conntrack_info ctinfo;
u16 ct_state = 0;
int err;
meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
if (!meta)
return -EOPNOTSUPP;
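+ /* The ct metadata cookie carries the conntrack info in its low bits
+ * (mirroring skb->_nfct); use it to tell new from established entries.
+ */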
+ ctinfo = meta->ct_metadata.cookie & NFCT_INFOMASK;
err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
&attr->ct_attr.ct_labels_id);
@@ -742,7 +745,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
ct_state |= MLX5_CT_STATE_NAT_BIT;
}
- ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;
+ ct_state |= MLX5_CT_STATE_TRK_BIT;
+ ct_state |= ctinfo == IP_CT_NEW ? MLX5_CT_STATE_NEW_BIT : MLX5_CT_STATE_ESTABLISHED_BIT;
ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT;
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
ct_state,
@@ -871,6 +875,68 @@ err_attr:
return err;
}
+static int
+mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat, u8 zone_restore_id)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
+ struct mlx5e_mod_hdr_handle *mh;
+ struct mlx5_ct_fs_rule *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ old_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!old_attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+ *old_attr = *attr;
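+ /* Snapshot the current attr so the old modify header can be released
+ * only after the replacement rule is in place.
+ */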
+
+ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
+ nat, mlx5_tc_ct_entry_has_nat(entry));
+ if (err) {
+ ct_dbg("Failed to create ct entry mod hdr");
+ goto err_mod_hdr;
+ }
+
+ mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
+
+ rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+ goto err_rule;
+ }
+
+ ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
+ zone_rule->rule = rule;
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
+ zone_rule->mh = mh;
+
+ kfree(old_attr);
+ kvfree(spec);
+ ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+
+ return 0;
+
+err_rule:
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+err_mod_hdr:
+ kfree(old_attr);
+err_attr:
+ kvfree(spec);
+ return err;
+}
+
static bool
mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
{
@@ -1066,6 +1132,52 @@ err_orig:
}
static int
+mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ u8 zone_restore_id)
+{
+ int err;
+
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ return err;
+
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err)
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry, unsigned long cookie)
+{
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ int err;
+
+ err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+ if (!err)
+ return 0;
+
+ /* If updating the entry failed, look it up again under ht_lock
+ * protection and properly delete it.
+ */
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (entry) {
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
+ spin_unlock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_put(entry);
+ } else {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ }
+ return err;
+}
+
+static int
mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
struct flow_cls_offload *flow)
{
@@ -1083,9 +1195,17 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
spin_lock_bh(&ct_priv->ht_lock);
entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
if (entry && refcount_inc_not_zero(&entry->refcnt)) {
+ if (entry->restore_cookie == meta_action->ct_metadata.cookie) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_put(entry);
+ return -EEXIST;
+ }
+ entry->restore_cookie = meta_action->ct_metadata.cookie;
spin_unlock_bh(&ct_priv->ht_lock);
+
+ err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
mlx5_tc_ct_entry_put(entry);
- return -EEXIST;
+ return err;
}
spin_unlock_bh(&ct_priv->ht_lock);
@@ -1323,7 +1443,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
- bool trk, est, untrk, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
+ bool trk, est, untrk, unnew, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector_key_ct *mask, *key;
u32 ctstate = 0, ctstate_mask = 0;
@@ -1369,15 +1489,18 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
+ unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW;
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate |= new ? MLX5_CT_STATE_NEW_BIT : 0;
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate_mask |= (unnew || new) ? MLX5_CT_STATE_NEW_BIT : 0;
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
@@ -1395,12 +1518,6 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
return -EOPNOTSUPP;
}
- if (new) {
- NL_SET_ERR_MSG_MOD(extack,
- "matching on ct_state +new isn't supported");
- return -EOPNOTSUPP;
- }
-
if (mask->ct_zone)
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
key->ct_zone, MLX5_CT_ZONE_MASK);
@@ -1441,6 +1558,7 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
attr->ct_attr.zone = act->ct.zone;
attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
+ attr->ct_attr.act_miss_cookie = act->miss_cookie;
return 0;
}
@@ -1778,7 +1896,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* + ft prio (tc chain) +
* + original match +
* +---------------------+
- * | set chain miss mapping
+ * | set act_miss_cookie mapping
* | set fte_id
* | set tunnel_id
* | do decap
@@ -1823,7 +1941,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
- int chain_mapping = 0, err;
+ int act_miss_mapping = 0, err;
struct mlx5_ct_ft *ft;
u16 zone;
@@ -1858,22 +1976,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- /* Write chain miss tag for miss in ct table as we
- * don't go though all prios of this chain as normal tc rules
- * miss.
- */
- err = mlx5_chains_get_chain_mapping(ct_priv->chains, attr->chain,
- &chain_mapping);
+ err = mlx5e_tc_action_miss_mapping_get(ct_priv->priv, attr, attr->ct_attr.act_miss_cookie,
+ &act_miss_mapping);
if (err) {
- ct_dbg("Failed to get chain register mapping for chain");
- goto err_get_chain;
+ ct_dbg("Failed to get register mapping for act miss");
+ goto err_get_act_miss;
}
- ct_flow->chain_mapping = chain_mapping;
+ attr->ct_attr.act_miss_mapping = act_miss_mapping;
err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
- CHAIN_TO_REG, chain_mapping);
+ MAPPED_OBJ_TO_REG, act_miss_mapping);
if (err) {
- ct_dbg("Failed to set chain register mapping");
+ ct_dbg("Failed to set act miss register mapping");
goto err_mapping;
}
@@ -1937,8 +2051,8 @@ err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
mlx5e_mod_hdr_dealloc(pre_mod_acts);
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
-err_get_chain:
+ mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, act_miss_mapping);
+err_get_act_miss:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
@@ -1977,7 +2091,7 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
+ mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, attr->ct_attr.act_miss_mapping);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
@@ -2074,13 +2188,6 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
const char *err_msg = NULL;
int err = 0;
-#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- /* cannot restore chain ID on HW miss */
-
- err_msg = "tc skb extension missing";
- err = -EOPNOTSUPP;
- goto out_err;
-#endif
if (IS_ERR_OR_NULL(post_act)) {
/* Ignore_flow_level isn't supported by default for VFs, so post_act
* won't be available. Skip showing an error msg.
@@ -2157,6 +2264,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
}
spin_lock_init(&ct_priv->ht_lock);
+ ct_priv->priv = priv;
ct_priv->ns_type = ns_type;
ct_priv->chains = chains;
ct_priv->netdev = priv->netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 5bbd6b92840f..5c5ddaa83055 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -28,6 +28,8 @@ struct mlx5_ct_attr {
struct mlx5_ct_flow *ct_flow;
struct nf_flowtable *nf_ft;
u32 ct_labels_id;
+ u32 act_miss_mapping;
+ u64 act_miss_cookie;
};
#define zone_to_reg_ct {\
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 2b7fd1c0e643..451fd4342a5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -30,6 +30,7 @@ enum {
MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11,
+ MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 12,
};
struct mlx5e_tc_flow_parse_attr {
@@ -95,8 +96,6 @@ struct mlx5e_tc_flow {
*/
struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5e_tc_flow *peer_flow;
- struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
- struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index e6f64d890fb3..00a04fdd756f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -93,11 +93,11 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
else
return -EOPNOTSUPP;
- if (!(mlx5e_eswitch_rep(*out_dev) &&
- mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+ if (!mlx5e_eswitch_uplink_rep(*out_dev))
return -EOPNOTSUPP;
- if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev)
+ if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev &&
+ !mlx5_lag_is_mpesw(priv->mdev))
return -EOPNOTSUPP;
return 0;
@@ -745,8 +745,6 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
if (err)
goto out;
- esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
- misc_parameters.vxlan_vni);
esw_attr->rx_tun_attr->decap_vport = vport_num;
} else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 2aaf8ab857b8..780224fd67a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1349,7 +1349,8 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
mlx5e_tc_unoffload_from_slow_path(esw, flow);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
- mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
+
+ mlx5e_tc_detach_mod_hdr(priv, flow, attr);
attr->modify_hdr = NULL;
esw_attr->dests[flow->tmp_entry_index].flags &=
@@ -1405,7 +1406,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
continue;
}
- err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
+ err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 853f312cd757..c067d2efab51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -73,6 +73,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
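+/* True when the RQ is configured to HW-timestamp all received packets */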
+static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
+{
+ return config->rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
/* TX */
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
@@ -315,7 +320,6 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
}
}
-void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
@@ -445,7 +449,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
- WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+ WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));
/* A WQE must not cross the page boundary, hence two conditions:
* 1. Its size must not exceed the page size.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 20507ef2f956..bcd6370de440 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -57,8 +57,9 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
- struct page *page, struct xdp_buff *xdp)
+ struct xdp_buff *xdp)
{
+ struct page *page = virt_to_page(xdp->data);
struct skb_shared_info *sinfo = NULL;
struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
@@ -156,10 +157,39 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
return true;
}
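+/* XDP RX metadata kfunc implementations: expose the CQE timestamp and RSS
+ * hash result to XDP programs via mlx5e_xdp_metadata_ops below.
+ */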
+static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
+ return -EOPNOTSUPP;
+
+ *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
+ _ctx->rq->clock, get_cqe_ts(_ctx->cqe));
+ return 0;
+}
+
+static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
+ return -EOPNOTSUPP;
+
+ *hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
+ return 0;
+}
+
+const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
+ .xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
+ .xmo_rx_hash = mlx5e_xdp_rx_hash,
+};
+
/* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
- struct bpf_prog *prog, struct xdp_buff *xdp)
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
{
+ struct xdp_buff *xdp = &mxbuf->xdp;
u32 act;
int err;
@@ -168,7 +198,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
case XDP_PASS:
return false;
case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
+ if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
@@ -180,7 +210,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
- mlx5e_page_dma_unmap(rq, page);
+ mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
rq->stats->xdp_redirect++;
return true;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index bc2d9034af5b..10bcfa6f88c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -44,10 +44,16 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
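+/* Driver-private XDP buffer: embeds the generic xdp_buff (which must stay
+ * the first field) plus the CQE and RQ pointers the metadata kfuncs need.
+ */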
+struct mlx5e_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+};
+
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
- struct bpf_prog *prog, struct xdp_buff *xdp);
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -56,6 +62,8 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
+
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
struct skb_shared_info *sinfo,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index c91b54d9ff27..fab787600459 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -8,6 +8,14 @@
/* RX data path */
+static struct mlx5e_xdp_buff *xsk_buff_to_mxbuf(struct xdp_buff *xdp)
+{
+ /* mlx5e_xdp_buff shares its layout with xdp_buff_xsk, so the private
+ * mlx5e_xdp_buff fields land in xdp_buff_xsk->cb
+ */
+ return (struct mlx5e_xdp_buff *)xdp;
+}
+
int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -22,6 +30,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err;
BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
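+ /* Compile-time check that the private mlx5e_xdp_buff fields fit into
+ * xdp_buff_xsk->cb, which xsk_buff_to_mxbuf() relies on.
+ */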
+ XSK_CHECK_PRIV_TYPE(struct mlx5e_xdp_buff);
batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
rq->mpwqe.pages_per_wqe);
@@ -43,25 +52,30 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
+ mxbuf->rq = rq;
}
} else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
.key = rq->mkey_be,
.va = cpu_to_be64(addr),
};
+ mxbuf->rq = rq;
}
} else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
@@ -80,6 +94,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
.key = rq->mkey_be,
.va = cpu_to_be64(rq->wqe_overflow.addr),
};
+ mxbuf->rq = rq;
}
} else {
__be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
@@ -87,6 +102,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
__be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
@@ -99,6 +115,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
.va = cpu_to_be64(rq->wqe_overflow.addr),
.bcount = pad_size,
};
+ mxbuf->rq = rq;
}
}
@@ -229,11 +246,12 @@ static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_b
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx)
{
- struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[page_idx].xsk);
struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -249,9 +267,11 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xsk_buff_set_size(xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
- net_prefetch(xdp->data);
+ /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+ mxbuf->cqe = cqe;
+ xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ net_prefetch(mxbuf->xdp.data);
/* Possible flows:
* - XDP_REDIRECT to XSKMAP:
@@ -269,7 +289,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
prog = rcu_dereference(rq->xdp_prog);
- if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
+ if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -278,14 +298,15 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp);
+ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
+ struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
- struct xdp_buff *xdp = wi->au->xsk;
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
@@ -295,17 +316,19 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(wi->offset);
- xsk_buff_set_size(xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
- net_prefetch(xdp->data);
+ /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+ mxbuf->cqe = cqe;
+ xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog);
- if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
+ if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
* will be handled by mlx5e_free_rx_wqe.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp);
+ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index 087c943bd8e9..cefc0ef6105d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -13,11 +13,13 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
+ struct mlx5_cqe64 *cqe,
u32 cqe_bcnt);
#endif /* __MLX5_EN_XSK_RX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index ff03c43833bb..81a567e17264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -7,6 +7,18 @@
#include "en/health.h"
#include <net/xdp_sock_drv.h>
+static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
+ mlx5_core_err(mdev, "Legacy RQ linear mode for XSK can't be activated with current params\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
* stride size of striding RQ.
*/
@@ -17,8 +29,11 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5_core_dev *mdev)
{
/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
+ if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
+ MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
return false;
+ }
/* frag_sz is different for regular and XSK RQs, so ensure that linear
* SKB mode is possible.
@@ -27,7 +42,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
default: /* MLX5_WQ_TYPE_CYCLIC */
- return mlx5e_rx_is_linear_skb(mdev, params, xsk);
+ return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 07187028f0d3..c964644ee866 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -124,7 +124,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
mlx5e_udp_gso_handle_tx_skb(skb);
#ifdef CONFIG_MLX5_EN_TLS
- /* May send SKBs and WQEs. */
+ /* May send WQEs. */
if (mlx5e_ktls_skb_offloaded(skb))
if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
&state->tls)))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index d7c020f72401..88a5aed9d678 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -365,7 +365,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(fs, i);
- kvfree(accel_tcp);
+ kfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
}
@@ -377,7 +377,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
+ accel_tcp = kzalloc(sizeof(*accel_tcp), GFP_KERNEL);
if (!accel_tcp)
return -ENOMEM;
mlx5e_fs_set_accel_tcp(fs, accel_tcp);
@@ -397,7 +397,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(fs, i);
- kvfree(accel_tcp);
+ kfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index bb9023957f74..7b0d3de0ec6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -158,95 +158,103 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->family = x->props.family;
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
+ attrs->upspec.dport = ntohs(x->sel.dport);
+ attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
+ attrs->upspec.sport = ntohs(x->sel.sport);
+ attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
+ attrs->upspec.proto = x->sel.proto;
mlx5e_ipsec_init_limits(sa_entry, attrs);
}
-static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
+static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.real_dev;
- struct mlx5e_priv *priv;
-
- priv = netdev_priv(netdev);
-
if (x->props.aalgo != SADB_AALG_NONE) {
- netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
return -EINVAL;
}
if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
- netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
- netdev_info(netdev, "Cannot offload compressed xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
- !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN)) {
- netdev_info(netdev, "Cannot offload ESN xfrm states\n");
+ !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
- netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
- netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
return -EINVAL;
}
if (x->encap) {
- netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state may not be offloaded");
return -EINVAL;
}
if (!x->aead) {
- netdev_info(netdev, "Cannot offload xfrm states without aead\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128) {
- netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
- netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
return -EINVAL;
}
if (x->tfcpad) {
- netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
return -EINVAL;
}
if (!x->geniv) {
- netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
- netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
return -EINVAL;
}
+
+ if (x->sel.proto != IPPROTO_IP &&
+ (x->sel.proto != IPPROTO_UDP || x->xso.dir != XFRM_DEV_OFFLOAD_OUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
+ return -EINVAL;
+ }
+
switch (x->xso.type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
- if (!(mlx5_ipsec_device_caps(priv->mdev) &
- MLX5_IPSEC_CAP_CRYPTO)) {
- netdev_info(netdev, "Crypto offload is not supported\n");
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
+ NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
- netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
return -EINVAL;
}
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (!(mlx5_ipsec_device_caps(priv->mdev) &
+ if (!(mlx5_ipsec_device_caps(mdev) &
MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
- netdev_info(netdev, "Packet offload is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT) {
- netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only transport xfrm states may be offloaded in packet mode");
return -EINVAL;
}
@@ -254,35 +262,30 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {
- netdev_info(netdev,
- "Unsupported replay window size %u\n",
- x->replay_esn->replay_window);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
return -EINVAL;
}
if (!x->props.reqid) {
- netdev_info(netdev, "Cannot offload without reqid\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
return -EINVAL;
}
if (x->lft.hard_byte_limit != XFRM_INF ||
x->lft.soft_byte_limit != XFRM_INF) {
- netdev_info(netdev,
- "Device doesn't support limits in bytes\n");
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
return -EINVAL;
}
if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
x->lft.hard_packet_limit != XFRM_INF) {
/* XFRM stack doesn't prevent such configuration :(. */
- netdev_info(netdev,
- "Hard packet limit must be greater than soft one\n");
+ NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
return -EINVAL;
}
break;
default:
- netdev_info(netdev, "Unsupported xfrm offload type %d\n",
- x->xso.type);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
return -EINVAL;
}
return 0;
@@ -298,7 +301,8 @@ static void _update_xfrm_state(struct work_struct *work)
mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}
-static int mlx5e_xfrm_add_state(struct xfrm_state *x)
+static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.real_dev;
@@ -311,15 +315,13 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
return -EOPNOTSUPP;
ipsec = priv->ipsec;
- err = mlx5e_xfrm_validate_state(x);
+ err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
if (err)
return err;
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
- if (!sa_entry) {
- err = -ENOMEM;
- goto out;
- }
+ if (!sa_entry)
+ return -ENOMEM;
sa_entry->x = x;
sa_entry->ipsec = ipsec;
@@ -360,7 +362,7 @@ err_hw_ctx:
mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
kfree(sa_entry);
-out:
+ NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
return err;
}
@@ -497,34 +499,39 @@ static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
}
-static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
+static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
+ struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xdo.real_dev;
-
if (x->type != XFRM_POLICY_TYPE_MAIN) {
- netdev_info(netdev, "Cannot offload non-main policy types\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
return -EINVAL;
}
/* Please pay attention that we support only one template */
if (x->xfrm_nr > 1) {
- netdev_info(netdev, "Cannot offload more than one template\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
return -EINVAL;
}
if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
- netdev_info(netdev, "Cannot offload forward policy\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
return -EINVAL;
}
if (!x->xfrm_vec[0].reqid) {
- netdev_info(netdev, "Cannot offload policy without reqid\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload policy without reqid");
return -EINVAL;
}
if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
- netdev_info(netdev, "Unsupported xfrm offload type\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
+ return -EINVAL;
+ }
+
+ if (x->selector.proto != IPPROTO_IP &&
+ (x->selector.proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
return -EINVAL;
}
@@ -548,9 +555,15 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
attrs->reqid = x->xfrm_vec[0].reqid;
+ attrs->upspec.dport = ntohs(sel->dport);
+ attrs->upspec.dport_mask = ntohs(sel->dport_mask);
+ attrs->upspec.sport = ntohs(sel->sport);
+ attrs->upspec.sport_mask = ntohs(sel->sport_mask);
+ attrs->upspec.proto = sel->proto;
}
-static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
+static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
+ struct netlink_ext_ack *extack)
{
struct net_device *netdev = x->xdo.real_dev;
struct mlx5e_ipsec_pol_entry *pol_entry;
@@ -558,10 +571,12 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
int err;
priv = netdev_priv(netdev);
- if (!priv->ipsec)
+ if (!priv->ipsec) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
return -EOPNOTSUPP;
+ }
- err = mlx5e_xfrm_validate_policy(x);
+ err = mlx5e_xfrm_validate_policy(x, extack);
if (err)
return err;
@@ -582,6 +597,7 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
err_fs:
kfree(pol_entry);
+ NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
return err;
}
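
The theme of the ipsec.c hunks above is moving validation failures off netdev_info() and onto the caller's netlink extended ACK, so the reason reaches userspace inline in the error reply to the xfrm request instead of being buried in the kernel log. A minimal sketch of the extack pattern, with hypothetical demo_* names:

#include <linux/netlink.h>

static int demo_validate(bool supported, struct netlink_ext_ack *extack)
{
	if (!supported) {
		/* NL_SET_ERR_MSG_MOD() prefixes the module name and
		 * attaches the string to the netlink ACK */
		NL_SET_ERR_MSG_MOD(extack, "Demo feature is not supported");
		return -EOPNOTSUPP;
	}
	return 0;
}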
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 8bed9c361075..12f044330639 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -52,6 +52,14 @@ struct aes_gcm_keymat {
u32 aes_key[256 / 32];
};
+struct upspec {
+ u16 dport;
+ u16 dport_mask;
+ u16 sport;
+ u16 sport_mask;
+ u8 proto;
+};
+
struct mlx5_accel_esp_xfrm_attrs {
u32 esn;
u32 spi;
@@ -68,6 +76,7 @@ struct mlx5_accel_esp_xfrm_attrs {
__be32 a6[4];
} daddr;
+ struct upspec upspec;
u8 dir : 2;
u8 esn_overlap : 1;
u8 esn_trigger : 1;
@@ -84,6 +93,7 @@ enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
MLX5_IPSEC_CAP_ESN = 1 << 1,
MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
+ MLX5_IPSEC_CAP_ROCE = 1 << 3,
};
struct mlx5e_priv;
@@ -119,7 +129,7 @@ struct mlx5e_ipsec_work {
};
struct mlx5e_ipsec_aso {
- u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
+ u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
dma_addr_t dma_addr;
struct mlx5_aso *aso;
/* Protect ASO WQ access, as it is global to whole IPsec */
@@ -138,6 +148,7 @@ struct mlx5e_ipsec {
struct mlx5e_ipsec_tx *tx;
struct mlx5e_ipsec_aso *aso;
struct notifier_block nb;
+ struct mlx5_ipsec_fs *roce;
};
struct mlx5e_ipsec_esn_state {
@@ -181,6 +192,7 @@ struct mlx5_accel_pol_xfrm_attrs {
__be32 a6[4];
} daddr;
+ struct upspec upspec;
u8 family;
u8 action;
u8 type : 2;
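
One detail of the new struct upspec: ports are stored in host byte order, while xfrm selectors carry them as __be16, hence the ntohs() calls at both fill sites above. A sketch of that conversion boundary, assuming a hypothetical demo_sel selector in place of the xfrm one:

#include <linux/types.h>

struct demo_sel {			/* hypothetical xfrm-style selector */
	__be16 dport, dport_mask;
	__be16 sport, sport_mask;
	u8 proto;
};

static void demo_fill_upspec(struct upspec *us, const struct demo_sel *sel)
{
	us->dport      = ntohs(sel->dport);	/* big-endian in the selector */
	us->dport_mask = ntohs(sel->dport_mask);
	us->sport      = ntohs(sel->sport);
	us->sport_mask = ntohs(sel->sport_mask);
	us->proto      = sel->proto;		/* single byte, no swap */
}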
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 9f19f4b59a70..9871ba1b25ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -6,6 +6,7 @@
#include "en/fs.h"
#include "ipsec.h"
#include "fs_core.h"
+#include "lib/ipsec_fs_roce.h"
#define NUM_IPSEC_FTE BIT(15)
@@ -166,7 +167,8 @@ out:
return err;
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
mlx5_del_flow_rules(rx->pol.rule);
mlx5_destroy_flow_group(rx->pol.group);
@@ -179,6 +181,8 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
mlx5_destroy_flow_table(rx->ft.status);
+
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
}
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
@@ -186,18 +190,35 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
{
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ struct mlx5_flow_destination default_dest;
struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
int err;
+ default_dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+ err = mlx5_ipsec_fs_roce_rx_create(mdev, ipsec->roce, ns, &default_dest,
+ family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
+ MLX5E_NIC_PRIO);
+ if (err)
+ return err;
+
ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
MLX5E_NIC_PRIO, 1);
- if (IS_ERR(ft))
- return PTR_ERR(ft);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_fs_ft_status;
+ }
rx->ft.status = ft;
- dest[0] = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+ ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
+ if (ft) {
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = ft;
+ } else {
+ dest[0] = default_dest;
+ }
+
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
err = ipsec_status_rule(mdev, rx, dest);
@@ -245,6 +266,8 @@ err_fs_ft:
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
mlx5_destroy_flow_table(rx->ft.status);
+err_fs_ft_status:
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
return err;
}
@@ -304,14 +327,15 @@ static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
/* remove FT */
- rx_destroy(mdev, rx);
+ rx_destroy(mdev, ipsec, rx, family);
out:
mutex_unlock(&rx->ft.mutex);
}
/* IPsec TX flow steering */
-static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
+static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+ struct mlx5_ipsec_fs *roce)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft;
@@ -334,8 +358,15 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
if (err)
goto err_pol_miss;
+
+ err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
+ if (err)
+ goto err_roce;
return 0;
+err_roce:
+ mlx5_del_flow_rules(tx->pol.rule);
+ mlx5_destroy_flow_group(tx->pol.group);
err_pol_miss:
mlx5_destroy_flow_table(tx->ft.pol);
err_pol_ft:
@@ -353,9 +384,10 @@ static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
if (tx->ft.refcnt)
goto skip;
- err = tx_create(mdev, tx);
+ err = tx_create(mdev, tx, ipsec->roce);
if (err)
goto out;
+
skip:
tx->ft.refcnt++;
out:
@@ -374,6 +406,7 @@ static void tx_ft_put(struct mlx5e_ipsec *ipsec)
if (tx->ft.refcnt)
goto out;
+ mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce);
mlx5_del_flow_rules(tx->pol.rule);
mlx5_destroy_flow_group(tx->pol.group);
mlx5_destroy_flow_table(tx->ft.pol);
@@ -467,6 +500,27 @@ static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
misc_parameters_2.metadata_reg_c_0, reqid);
}
+static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
+{
+ if (upspec->proto != IPPROTO_UDP)
+ return;
+
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
+ if (upspec->dport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
+ upspec->dport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->dport);
+ }
+
+ if (upspec->sport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_sport,
+ upspec->sport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_sport, upspec->sport);
+ }
+}
+
static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
struct mlx5_flow_act *flow_act)
{
@@ -654,6 +708,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
@@ -728,6 +783,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
&flow_act);
@@ -1008,6 +1064,9 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
if (!ipsec->tx)
return;
+ if (mlx5_ipsec_device_caps(ipsec->mdev) & MLX5_IPSEC_CAP_ROCE)
+ mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
+
ipsec_fs_destroy_counters(ipsec);
mutex_destroy(&ipsec->tx->ft.mutex);
WARN_ON(ipsec->tx->ft.refcnt);
@@ -1024,6 +1083,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_namespace *ns;
int err = -ENOMEM;
@@ -1053,6 +1113,9 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
mutex_init(&ipsec->rx_ipv6->ft.mutex);
ipsec->tx->ns = ns;
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE)
+ ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
+
return 0;
err_counters:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 2461462b7b99..5fa7a4c40429 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -4,7 +4,7 @@
#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -42,6 +42,11 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+ if (mlx5_get_roce_state(mdev) &&
+ MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
+ MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+ caps |= MLX5_IPSEC_CAP_ROCE;
+
if (!caps)
return 0;
@@ -92,7 +97,6 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
lower_32_bits(attrs->hard_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
- MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->soft_packet_limit != XFRM_INF) {
@@ -329,8 +333,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
if (attrs->soft_packet_limit != XFRM_INF)
if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
- !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
- !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
+ !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm))
xfrm_state_check_expire(sa_entry->x);
unlock:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index da2184c94203..cf704f106b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -1,18 +1,19 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
+#include <linux/debugfs.h>
#include "en.h"
#include "lib/mlx5.h"
+#include "lib/crypto.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id)
+struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct tls_crypto_info *crypto_info)
{
+ const void *key;
u32 sz_bytes;
- void *key;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
@@ -32,17 +33,16 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
break;
}
default:
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- return mlx5_create_encryption_key(mdev, key, sz_bytes,
- MLX5_ACCEL_OBJ_TLS_KEY,
- p_key_id);
+ return mlx5_crypto_dek_create(dek_pool, key, sz_bytes);
}
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
+void mlx5_ktls_destroy_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek)
{
- mlx5_destroy_encryption_key(mdev, key_id);
+ mlx5_crypto_dek_destroy(dek_pool, dek);
}
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
@@ -177,8 +177,18 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
destroy_workqueue(priv->tls->rx_wq);
}
+static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls,
+ struct dentry *dfs_root)
+{
+ if (IS_ERR_OR_NULL(dfs_root))
+ return;
+
+ tls->debugfs.dfs = debugfs_create_dir("tls", dfs_root);
+}
+
int mlx5e_ktls_init(struct mlx5e_priv *priv)
{
+ struct mlx5_crypto_dek_pool *dek_pool;
struct mlx5e_tls *tls;
if (!mlx5e_is_ktls_device(priv->mdev))
@@ -187,13 +197,32 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv)
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
if (!tls)
return -ENOMEM;
+ tls->mdev = priv->mdev;
+ dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
+ if (IS_ERR(dek_pool)) {
+ kfree(tls);
+ return PTR_ERR(dek_pool);
+ }
+ tls->dek_pool = dek_pool;
priv->tls = tls;
+
+ mlx5e_tls_debugfs_init(tls, priv->dfs_root);
+
return 0;
}
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
{
+ struct mlx5e_tls *tls = priv->tls;
+
+ if (!mlx5e_is_ktls_device(priv->mdev))
+ return;
+
+ debugfs_remove_recursive(tls->debugfs.dfs);
+ tls->debugfs.dfs = NULL;
+
+ mlx5_crypto_dek_pool_destroy(tls->dek_pool);
kfree(priv->tls);
priv->tls = NULL;
}
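
The key-management change in this file replaces a firmware command per TLS key with DEK objects drawn from a pool. Pieced together from the calls above, the lifecycle is roughly the following sketch (error handling trimmed; not driver code):

static u32 demo_dek_roundtrip(struct mlx5_core_dev *mdev,
			      const void *key, u32 sz_bytes)
{
	struct mlx5_crypto_dek_pool *pool;
	struct mlx5_crypto_dek *dek;
	u32 obj_id;

	pool = mlx5_crypto_dek_pool_create(mdev, MLX5_ACCEL_OBJ_TLS_KEY);
	dek = mlx5_crypto_dek_create(pool, key, sz_bytes);	/* per connection */
	obj_id = mlx5_crypto_dek_get_id(dek);	/* programmed into static params */

	mlx5_crypto_dek_destroy(pool, dek);	/* connection teardown */
	mlx5_crypto_dek_pool_destroy(pool);	/* driver cleanup */
	return obj_id;
}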
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 1c35045e41fb..f11075e67658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -4,15 +4,18 @@
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__
+#include <linux/debugfs.h>
#include <linux/tls.h>
#include <net/tls.h>
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id);
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
+#include "lib/crypto.h"
+
+struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct tls_crypto_info *crypto_info);
+void mlx5_ktls_destroy_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek);
static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
{
@@ -72,10 +75,18 @@ struct mlx5e_tls_sw_stats {
atomic64_t rx_tls_del;
};
+struct mlx5e_tls_debugfs {
+ struct dentry *dfs;
+ struct dentry *dfs_tx;
+};
+
struct mlx5e_tls {
+ struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
struct mlx5e_tls_tx_pool *tx_pool;
+ struct mlx5_crypto_dek_pool *dek_pool;
+ struct mlx5e_tls_debugfs debugfs;
};
int mlx5e_ktls_init(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 3e54834747ce..4be770443b0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -50,7 +50,7 @@ struct mlx5e_ktls_offload_context_rx {
struct mlx5e_tls_sw_stats *sw_stats;
struct completion add_ctx;
struct mlx5e_tir tir;
- u32 key_id;
+ struct mlx5_crypto_dek *dek;
u32 rxq;
DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
@@ -148,7 +148,8 @@ post_static_params(struct mlx5e_icosq *sq,
wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
mlx5e_tir_get_tirn(&priv_rx->tir),
- priv_rx->key_id, priv_rx->resync.seq, false,
+ mlx5_crypto_dek_get_id(priv_rx->dek),
+ priv_rx->resync.seq, false,
TLS_OFFLOAD_CTX_DIR_RX);
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
@@ -610,20 +611,22 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
struct tls_context *tls_ctx;
- struct mlx5_core_dev *mdev;
+ struct mlx5_crypto_dek *dek;
struct mlx5e_priv *priv;
int rxq, err;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
if (unlikely(!priv_rx))
return -ENOMEM;
- err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
- if (err)
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
goto err_create_key;
+ }
+ priv_rx->dek = dek;
INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock);
@@ -673,7 +676,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
err_post_wqes:
mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
- mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
err_create_key:
kfree(priv_rx);
return err;
@@ -683,11 +686,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
- struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
priv = netdev_priv(netdev);
- mdev = priv->mdev;
priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
@@ -707,7 +708,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
mlx5e_tir_destroy(&priv_rx->tir);
- mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
/* priv_rx should normally be freed here, but if there is an outstanding
* GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
* processed.
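
mlx5_ktls_create_key() now returns a pointer or ERR_PTR() instead of an int plus out-parameter, which is why both add paths above moved from `if (err)` to IS_ERR()/PTR_ERR(). A generic sketch of that convention (demo_* names are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct demo_obj { int placeholder; };

static struct demo_obj *demo_create(bool ok)
{
	struct demo_obj *obj;

	if (!ok)
		return ERR_PTR(-EINVAL);	/* errno encoded in the pointer */

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	return obj ?: ERR_PTR(-ENOMEM);
}

static int demo_caller(void)
{
	struct demo_obj *obj = demo_create(true);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* as in mlx5e_ktls_add_rx() above */
	kfree(obj);
	return 0;
}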
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 78072bf93f3f..60b3e08a1028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
+#include <linux/debugfs.h>
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -97,7 +98,7 @@ struct mlx5e_ktls_offload_context_tx {
struct tls_offload_context_tx *tx_ctx;
struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats *sw_stats;
- u32 key_id;
+ struct mlx5_crypto_dek *dek;
u8 create_err : 1;
};
@@ -456,6 +457,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_tls_tx_pool *pool;
struct tls_context *tls_ctx;
+ struct mlx5_crypto_dek *dek;
struct mlx5e_priv *priv;
int err;
@@ -467,9 +469,12 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
if (IS_ERR(priv_tx))
return PTR_ERR(priv_tx);
- err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
- if (err)
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
goto err_create_key;
+ }
+ priv_tx->dek = dek;
priv_tx->expected_seq = start_offload_tcp_sn;
switch (crypto_info->cipher_type) {
@@ -511,7 +516,7 @@ void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
pool = priv->tls->tx_pool;
atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
- mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_tx->dek);
pool_push(pool, priv_tx);
}
@@ -550,8 +555,9 @@ post_static_params(struct mlx5e_txqsq *sq,
pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
- priv_tx->tisn, priv_tx->key_id, 0, fence,
- TLS_OFFLOAD_CTX_DIR_TX);
+ priv_tx->tisn,
+ mlx5_crypto_dek_get_id(priv_tx->dek),
+ 0, fence, TLS_OFFLOAD_CTX_DIR_TX);
tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
sq->pc += num_wqebbs;
}
@@ -886,8 +892,22 @@ err_out:
return false;
}
+static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls,
+ struct dentry *dfs_root)
+{
+ if (IS_ERR_OR_NULL(dfs_root))
+ return;
+
+ tls->debugfs.dfs_tx = debugfs_create_dir("tx", dfs_root);
+
+ debugfs_create_size_t("pool_size", 0400, tls->debugfs.dfs_tx,
+ &tls->tx_pool->size);
+}
+
int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
+ struct mlx5e_tls *tls = priv->tls;
+
if (!mlx5e_is_ktls_tx(priv->mdev))
return 0;
@@ -895,6 +915,8 @@ int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
if (!priv->tls->tx_pool)
return -ENOMEM;
+ mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);
+
return 0;
}
@@ -903,6 +925,9 @@ void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
if (!mlx5e_is_ktls_tx(priv->mdev))
return;
+ debugfs_remove_recursive(priv->tls->debugfs.dfs_tx);
+ priv->tls->debugfs.dfs_tx = NULL;
+
mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
priv->tls->tx_pool = NULL;
}
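
The TLS debugfs additions follow the usual shape: directories hang off the per-device root, files are read-only views of live counters, and debugfs_remove_recursive() runs before the backing data is freed. With the directories created above, the TX pool size should surface at roughly /sys/kernel/debug/mlx5/<device>/nic/tls/tx/pool_size (the exact root depends on mlx5_debugfs_get_dev_root()). A sketch of the guard-and-create pattern:

#include <linux/debugfs.h>

static struct dentry *demo_debugfs_init(struct dentry *parent, size_t *counter)
{
	struct dentry *dir;

	/* parent may be an error pointer or NULL when debugfs is off */
	if (IS_ERR_OR_NULL(parent))
		return NULL;

	dir = debugfs_create_dir("demo", parent);
	debugfs_create_size_t("pool_size", 0400, dir, counter);
	return dir;	/* later: debugfs_remove_recursive(dir) */
}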
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 7f6b940830b3..08d0929e8260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -7,7 +7,7 @@
#include "en.h"
#include "lib/aso.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
#include "en_accel/macsec.h"
#include "en_accel/macsec_fs.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 68f19324db93..4c9a3210600c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -31,6 +31,7 @@
*/
#include "en.h"
+#include "lib/crypto.h"
/* mlx5e global resources should be placed in this file.
* Global resources are common to all the netdevices created on the same nic.
@@ -104,6 +105,13 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
+ mdev->mlx5e_res.dek_priv = mlx5_crypto_dek_init(mdev);
+ if (IS_ERR(mdev->mlx5e_res.dek_priv)) {
+ mlx5_core_err(mdev, "crypto dek init failed, %ld\n",
+ PTR_ERR(mdev->mlx5e_res.dek_priv));
+ mdev->mlx5e_res.dek_priv = NULL;
+ }
+
return 0;
err_destroy_mkey:
@@ -119,6 +127,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+ mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
+ mdev->mlx5e_res.dek_priv = NULL;
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
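
Note that a mlx5_crypto_dek_init() failure above is deliberately non-fatal: dek_priv is reset to NULL and resource creation continues, with mlx5_crypto_dek_cleanup() expected to tolerate NULL. A sketch of this tolerate-failure shape for an optional feature (demo_* names are hypothetical):

struct demo_feat;
struct demo_feat *demo_feature_init(struct mlx5_core_dev *mdev);
void demo_feature_cleanup(struct demo_feat *feat);	/* must accept NULL */

struct demo_res { struct demo_feat *feat; };

static void demo_resources_init(struct mlx5_core_dev *mdev,
				struct demo_res *res)
{
	res->feat = demo_feature_init(mdev);
	if (IS_ERR(res->feat)) {
		mlx5_core_err(mdev, "demo feature init failed, %ld\n",
			      PTR_ERR(res->feat));
		res->feat = NULL;	/* degrade: run without the feature */
	}
}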
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 7cd36f4ac3ef..05796f8b1d7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -67,6 +68,7 @@ struct mlx5e_flow_steering {
struct mlx5e_fs_udp *udp;
struct mlx5e_fs_any *any;
struct mlx5e_ptp_fs *ptp_fs;
+ struct dentry *dfs_root;
};
static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
@@ -104,6 +106,11 @@ static inline int mlx5e_hash_l2(const u8 *addr)
return addr[5];
}
+struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs)
+{
+ return fs->dfs_root;
+}
+
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
struct mlx5e_l2_hash_node *hn;
@@ -1429,9 +1436,19 @@ static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
#endif
+static void mlx5e_fs_debugfs_init(struct mlx5e_flow_steering *fs,
+ struct dentry *dfs_root)
+{
+ if (IS_ERR_OR_NULL(dfs_root))
+ return;
+
+ fs->dfs_root = debugfs_create_dir("fs", dfs_root);
+}
+
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
- bool state_destroy)
+ bool state_destroy,
+ struct dentry *dfs_root)
{
struct mlx5e_flow_steering *fs;
int err;
@@ -1458,6 +1475,8 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
if (err)
goto err_free_tc;
+ mlx5e_fs_debugfs_init(fs, dfs_root);
+
return fs;
err_free_tc:
mlx5e_fs_tc_free(fs);
@@ -1471,6 +1490,7 @@ err:
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
+ debugfs_remove_recursive(fs->dfs_root);
mlx5e_fs_ethtool_free(fs);
mlx5e_fs_tc_free(fs);
mlx5e_fs_vlan_free(fs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6c24f33a5ea5..53feb0529943 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -35,9 +35,11 @@
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
+#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
#include <net/page_pool.h>
+#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
@@ -179,17 +181,21 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
+ struct mlx5_devlink_trap_event_ctx *trap_event_ctx = data;
int err;
switch (event) {
case MLX5_DRIVER_EVENT_TYPE_TRAP:
- err = mlx5e_handle_trap_event(priv, data);
+ err = mlx5e_handle_trap_event(priv, trap_event_ctx->trap);
+ if (err) {
+ trap_event_ctx->err = err;
+ return NOTIFY_BAD;
+ }
break;
default:
- netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event);
- err = -EINVAL;
+ return NOTIFY_DONE;
}
- return err;
+ return NOTIFY_OK;
}
static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
@@ -1441,6 +1447,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
+ sq->channel = c;
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
@@ -2453,8 +2460,6 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
-
- mlx5e_trigger_napi_icosq(c);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2546,13 +2551,19 @@ err_free:
return err;
}
-static void mlx5e_activate_channels(struct mlx5e_channels *chs)
+static void mlx5e_activate_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
int i;
for (i = 0; i < chs->num; i++)
mlx5e_activate_channel(chs->c[i]);
+ if (priv->htb)
+ mlx5e_qos_activate_queues(priv);
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_trigger_napi_icosq(chs->c[i]);
+
if (chs->ptp)
mlx5e_ptp_activate_channel(chs->ptp);
}
@@ -2859,9 +2870,7 @@ out:
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_build_txq_maps(priv);
- mlx5e_activate_channels(&priv->channels);
- if (priv->htb)
- mlx5e_qos_activate_queues(priv);
+ mlx5e_activate_channels(priv, &priv->channels);
mlx5e_xdp_tx_enable(priv);
/* dev_watchdog() wants all TX queues to be started when the carrier is
@@ -2969,32 +2978,37 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset)
{
- struct mlx5e_channels new_chs = {};
+ struct mlx5e_channels *new_chs;
int err;
reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
if (!reset)
return mlx5e_switch_priv_params(priv, params, preactivate, context);
- new_chs.params = *params;
+ new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL);
+ if (!new_chs)
+ return -ENOMEM;
+ new_chs->params = *params;
- mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
+ mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);
- err = mlx5e_open_channels(priv, &new_chs);
+ err = mlx5e_open_channels(priv, new_chs);
if (err)
goto err_cancel_selq;
- err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
+ err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
if (err)
goto err_close;
+ kfree(new_chs);
return 0;
err_close:
- mlx5e_close_channels(&new_chs);
+ mlx5e_close_channels(new_chs);
err_cancel_selq:
mlx5e_selq_cancel(&priv->selq);
+ kfree(new_chs);
return err;
}
@@ -4727,6 +4741,13 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
if (old_prog)
bpf_prog_put(old_prog);
+ if (reset) {
+ if (prog)
+ xdp_features_set_redirect_target(netdev, true);
+ else
+ xdp_features_clear_redirect_target(netdev);
+ }
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
@@ -5004,6 +5025,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
mlx5e_dcbnl_build_netdev(netdev);
@@ -5121,6 +5143,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
@@ -5181,7 +5207,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
+ priv->dfs_root);
if (!fs) {
err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
@@ -5822,7 +5849,8 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
static int mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
- struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
+ struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
+ struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
int err;
@@ -5845,7 +5873,8 @@ static int mlx5e_resume(struct auxiliary_device *adev)
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
+ struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
+ struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -5863,35 +5892,46 @@ static int mlx5e_probe(struct auxiliary_device *adev,
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
const struct mlx5e_profile *profile = &mlx5e_nic_profile;
struct mlx5_core_dev *mdev = edev->mdev;
+ struct mlx5e_dev *mlx5e_dev;
struct net_device *netdev;
pm_message_t state = {};
struct mlx5e_priv *priv;
int err;
+ mlx5e_dev = mlx5e_create_devlink(&adev->dev, mdev);
+ if (IS_ERR(mlx5e_dev))
+ return PTR_ERR(mlx5e_dev);
+ auxiliary_set_drvdata(adev, mlx5e_dev);
+
+ err = mlx5e_devlink_port_register(mlx5e_dev, mdev);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
+ goto err_devlink_unregister;
+ }
+
netdev = mlx5e_create_netdev(mdev, profile);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_devlink_port_unregister;
}
+ SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
- auxiliary_set_drvdata(adev, priv);
+ mlx5e_dev->priv = priv;
priv->profile = profile;
priv->ppriv = NULL;
- err = mlx5e_devlink_port_register(priv);
- if (err) {
- mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
- goto err_destroy_netdev;
- }
+ priv->dfs_root = debugfs_create_dir("nic",
+ mlx5_debugfs_get_dev_root(priv->mdev));
err = profile->init(mdev, netdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
- goto err_devlink_cleanup;
+ goto err_destroy_netdev;
}
err = mlx5e_resume(adev);
@@ -5900,7 +5940,6 @@ static int mlx5e_probe(struct auxiliary_device *adev,
goto err_profile_cleanup;
}
- SET_NETDEV_DEVLINK_PORT(netdev, mlx5e_devlink_get_dl_port(priv));
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -5908,7 +5947,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
}
mlx5e_dcbnl_init_app(priv);
- mlx5_uplink_netdev_set(mdev, netdev);
+ mlx5_core_uplink_netdev_set(mdev, netdev);
mlx5e_params_print_info(mdev, &priv->channels.params);
return 0;
@@ -5916,24 +5955,31 @@ err_resume:
mlx5e_suspend(adev, state);
err_profile_cleanup:
profile->cleanup(priv);
-err_devlink_cleanup:
- mlx5e_devlink_port_unregister(priv);
err_destroy_netdev:
+ debugfs_remove_recursive(priv->dfs_root);
mlx5e_destroy_netdev(priv);
+err_devlink_port_unregister:
+ mlx5e_devlink_port_unregister(mlx5e_dev);
+err_devlink_unregister:
+ mlx5e_destroy_devlink(mlx5e_dev);
return err;
}
static void mlx5e_remove(struct auxiliary_device *adev)
{
- struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
+ struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
+ struct mlx5e_priv *priv = mlx5e_dev->priv;
pm_message_t state = {};
+ mlx5_core_uplink_netdev_set(priv->mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
priv->profile->cleanup(priv);
- mlx5e_devlink_port_unregister(priv);
+ debugfs_remove_recursive(priv->dfs_root);
mlx5e_destroy_netdev(priv);
+ mlx5e_devlink_port_unregister(mlx5e_dev);
+ mlx5e_destroy_devlink(mlx5e_dev);
}
static const struct auxiliary_device_id mlx5e_id_table[] = {
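
The blocking_event() rework near the top of en_main.c adopts the standard notifier-chain contract: NOTIFY_DONE for events this listener does not handle (no more warning plus -EINVAL), NOTIFY_BAD to veto, and the precise errno carried in the event payload (struct mlx5_devlink_trap_event_ctx). A sketch of the contract with hypothetical demo_* names:

#include <linux/notifier.h>

#define DEMO_EVENT_TRAP	1	/* hypothetical event id */

struct demo_event_ctx {
	int err;	/* detailed status travels in the payload */
};

static int demo_blocking_event(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct demo_event_ctx *ctx = data;

	if (event != DEMO_EVENT_TRAP)
		return NOTIFY_DONE;	/* not ours: stay silent */

	ctx->err = 0;		/* stand-in for the real handler result */
	return ctx->err ? NOTIFY_BAD : NOTIFY_OK;
}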
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7d90e5b72854..9b9203443085 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -789,8 +789,10 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ priv->fs =
+ mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
+ priv->dfs_root);
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
@@ -808,7 +810,8 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
+ priv->dfs_root);
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
@@ -1004,8 +1007,23 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
priv->rx_res = NULL;
}
+static void mlx5e_rep_mpesw_work(struct work_struct *work)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv =
+ container_of(work, struct mlx5_rep_uplink_priv,
+ mpesw_work);
+ struct mlx5e_rep_priv *rpriv =
+ container_of(uplink_priv, struct mlx5e_rep_priv,
+ uplink_priv);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+ rep_vport_rx_rule_destroy(priv);
+ mlx5e_create_rep_vport_rx_rule(priv);
+}
+
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
int err;
mlx5e_create_q_counters(priv);
@@ -1015,12 +1033,17 @@ static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
mlx5e_tc_int_port_init_rep_rx(priv);
+ INIT_WORK(&rpriv->uplink_priv.mpesw_work, mlx5e_rep_mpesw_work);
+
out:
return err;
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ cancel_work_sync(&rpriv->uplink_priv.mpesw_work);
mlx5e_tc_int_port_cleanup_rep_rx(priv);
mlx5e_cleanup_rep_rx(priv);
mlx5e_destroy_q_counters(priv);
@@ -1129,6 +1152,19 @@ static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
return 0;
}
+static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ return NOTIFY_DONE;
+
+ queue_work(priv->wq, &rpriv->uplink_priv.mpesw_work);
+
+ return NOTIFY_OK;
+}
+
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
@@ -1150,6 +1186,8 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
return mlx5e_rep_tc_event_port_affinity(priv);
+ else if (event == MLX5_DEV_EVENT_MULTIPORT_ESW)
+ return mlx5e_rep_event_mpesw(priv);
return NOTIFY_DONE;
}
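
The multiport-eswitch plumbing above is the usual deferred-work shape: the async notifier cannot block, so the event handler only queues work, and the cleanup path cancels it synchronously before the structures the work touches go away. Skeleton with hypothetical names:

#include <linux/workqueue.h>

struct demo_rep {
	struct work_struct mpesw_work;
};

static void demo_mpesw_work(struct work_struct *work)
{
	struct demo_rep *rep = container_of(work, struct demo_rep, mpesw_work);

	/* heavy lifting (rule destroy + recreate) happens here, in
	 * process context, never in the notifier itself */
	(void)rep;
}

/* init:    INIT_WORK(&rep->mpesw_work, demo_mpesw_work);
 * event:   queue_work(wq, &rep->mpesw_work); return NOTIFY_OK;
 * cleanup: cancel_work_sync(&rep->mpesw_work);  before freeing rep
 */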
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b4e691760da9..dcfad0bf0f45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -100,6 +100,11 @@ struct mlx5_rep_uplink_priv {
struct mlx5e_tc_int_port_priv *int_port_priv;
struct mlx5e_flow_meters *flow_meters;
+
+ /* tc action stats */
+ struct mlx5e_tc_act_stats_handle *action_stats_handle;
+
+ struct work_struct mpesw_work;
};
struct mlx5e_rep_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3df455f6b168..3f7b63d6616b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -62,10 +62,12 @@
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -76,11 +78,6 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};
-static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
-{
- return config->rx_filter == HWTSTAMP_FILTER_ALL;
-}
-
static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
u32 cqcc, void *data)
{
@@ -1559,7 +1556,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
u32 cqe_bcnt, u32 metasize)
{
- struct sk_buff *skb = build_skb(va, frag_size);
+ struct sk_buff *skb = napi_build_skb(va, frag_size);
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
@@ -1575,16 +1572,19 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
return skb;
}
-static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
- u32 len, struct xdp_buff *xdp)
+static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ void *va, u16 headroom, u32 len,
+ struct mlx5e_xdp_buff *mxbuf)
{
- xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, true);
+ xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
+ mxbuf->cqe = cqe;
+ mxbuf->rq = rq;
}
static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt)
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
@@ -1606,16 +1606,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, &mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1630,16 +1630,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt)
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct skb_shared_info *sinfo;
+ struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
- struct xdp_buff xdp;
struct sk_buff *skb;
dma_addr_t addr;
u32 truesize;
@@ -1654,8 +1654,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
- sinfo = xdp_get_shared_info_from_buff(&xdp);
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1673,13 +1673,13 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
frag_consumed_bytes, rq->buff.map_dir);
- if (!xdp_buff_has_frags(&xdp)) {
+ if (!xdp_buff_has_frags(&mxbuf.xdp)) {
/* Init on the first fragment to avoid cold cache access
* when possible.
*/
sinfo->nr_frags = 0;
sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(&xdp);
+ xdp_buff_set_frags_flag(&mxbuf.xdp);
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1688,7 +1688,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
skb_frag_size_set(frag, frag_consumed_bytes);
if (page_is_pfmemalloc(au->page))
- xdp_buff_set_frag_pfmemalloc(&xdp);
+ xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
truesize += frag_info->frag_stride;
@@ -1698,10 +1698,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
wi++;
}
- au = head_wi->au;
-
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
@@ -1711,22 +1709,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
- xdp.data - xdp.data_hard_start,
- xdp.data_end - xdp.data,
- xdp.data - xdp.data_meta);
+ skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
+ mxbuf.xdp.data_end - mxbuf.xdp.data,
+ mxbuf.xdp.data - mxbuf.xdp.data_meta);
if (unlikely(!skb))
return NULL;
- page_ref_inc(au->page);
+ page_ref_inc(head_wi->au->page);
- if (unlikely(xdp_buff_has_frags(&xdp))) {
+ if (xdp_buff_has_frags(&mxbuf.xdp)) {
int i;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&xdp));
+ xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
for (i = 0; i < sinfo->nr_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
@@ -1777,7 +1775,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
mlx5e_xsk_skb_from_cqe_linear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1792,7 +1790,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb)) {
+ if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
dev_kfree_skb_any(skb);
goto free_wqe;
}
@@ -1830,7 +1828,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1889,7 +1887,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
- rq, wi, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
@@ -1940,7 +1938,8 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
@@ -1979,7 +1978,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 rx_headroom = rq->buff.headroom;
@@ -2007,19 +2007,19 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -2174,8 +2174,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
if (likely(head_size))
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
else
- *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
- page_idx);
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
+ data_offset, page_idx);
if (unlikely(!*skb))
goto free_hd_entry;
@@ -2249,14 +2249,15 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
mlx5e_xsk_skb_from_cqe_mpwrq_linear,
- rq, wi, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset,
+ page_idx);
if (!skb)
goto mpwrq_cqe_out;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb)) {
+ if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
}
@@ -2494,7 +2495,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb)
goto wq_free_wqe;
@@ -2567,10 +2568,8 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
- struct mlx5e_priv *priv = netdev_priv(rq->netdev);
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
- struct devlink_port *dl_port;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 trap_id;
@@ -2586,15 +2585,15 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
goto free_wqe;
}
- skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
if (!skb)
goto free_wqe;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb_push(skb, ETH_HLEN);
- dl_port = mlx5e_devlink_get_dl_port(priv);
- mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
+ mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
+ rq->netdev->devlink_port);
dev_kfree_skb_any(skb);
free_wqe:
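
The mlx5e_xdp_buff conversion in this file threads the CQE and RQ alongside the xdp_buff so that XDP metadata kfuncs (the RX hash/timestamp hints from this cycle) can reach completion-queue state starting from nothing but the xdp_md the program holds. A sketch of the pattern, assuming the wrapper keeps xdp_buff first; demo_xmo_rx_hash() is illustrative, not the driver's actual callback:

#include <net/xdp.h>

struct demo_xdp_buff {
	struct xdp_buff xdp;	/* must stay the first member */
	struct mlx5_cqe64 *cqe;	/* completion entry for this packet */
	struct mlx5e_rq *rq;
};

static int demo_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
	/* the xdp_md seen by the program aliases the driver's wrapper */
	const struct demo_xdp_buff *mxbuf = (const void *)ctx;

	*hash = be32_to_cpu(mxbuf->cqe->rss_hash_result);
	return 0;
}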
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 243d5d7750be..e34d9b5fb504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -43,8 +43,10 @@
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
+#include <net/dst_metadata.h>
#include "en.h"
#include "en/tc/post_act.h"
+#include "en/tc/act_stats.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
@@ -71,6 +73,12 @@
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
+struct mlx5e_hairpin_params {
+ struct mlx5_core_dev *mdev;
+ u32 num_queues;
+ u32 queue_size;
+};
+
struct mlx5e_tc_table {
/* Protects the dynamic assignment of the t parameter
* which is the nic tc root table.
@@ -93,10 +101,15 @@ struct mlx5e_tc_table {
struct mlx5_tc_ct_priv *ct;
struct mapping_ctx *mapping;
+ struct mlx5e_hairpin_params hairpin_params;
+ struct dentry *dfs_root;
+
+ /* tc action stats */
+ struct mlx5e_tc_act_stats_handle *action_stats_handle;
};
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
- [CHAIN_TO_REG] = {
+ [MAPPED_OBJ_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 0,
.mlen = 16,
@@ -123,7 +136,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
* into reg_b that is passed to SW since we don't
* jump between steering domains.
*/
- [NIC_CHAIN_TO_REG] = {
+ [NIC_MAPPED_OBJ_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
.moffset = 0,
.mlen = 16,
@@ -278,6 +291,24 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
return err;
}
+static struct mlx5e_tc_act_stats_handle *
+get_act_stats_handle(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->action_stats_handle;
+ }
+
+ return tc->action_stats_handle;
+}
+
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
@@ -639,36 +670,36 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
&tc->mod_hdr;
}
-static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_modify_hdr *modify_hdr;
struct mlx5e_mod_hdr_handle *mh;
mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
mlx5e_get_flow_namespace(flow),
- &parse_attr->mod_hdr_acts);
+ &attr->parse_attr->mod_hdr_acts);
if (IS_ERR(mh))
return PTR_ERR(mh);
- modify_hdr = mlx5e_mod_hdr_get(mh);
- flow->attr->modify_hdr = modify_hdr;
- flow->mh = mh;
+ WARN_ON(attr->modify_hdr);
+ attr->modify_hdr = mlx5e_mod_hdr_get(mh);
+ attr->mh = mh;
return 0;
}
-static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
+void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
/* flow wasn't fully initialized */
- if (!flow->mh)
+ if (!attr->mh)
return;
mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
- flow->mh);
- flow->mh = NULL;
+ attr->mh);
+ attr->mh = NULL;
}
static
@@ -1017,6 +1048,136 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
return 0;
}
+static int debugfs_hairpin_queues_set(void *data, u64 val)
+{
+ struct mlx5e_hairpin_params *hp = data;
+
+ if (!val) {
+ mlx5_core_err(hp->mdev,
+ "Number of hairpin queues must be > 0\n");
+ return -EINVAL;
+ }
+
+ hp->num_queues = val;
+
+ return 0;
+}
+
+static int debugfs_hairpin_queues_get(void *data, u64 *val)
+{
+ struct mlx5e_hairpin_params *hp = data;
+
+ *val = hp->num_queues;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queues, debugfs_hairpin_queues_get,
+ debugfs_hairpin_queues_set, "%llu\n");
+
+static int debugfs_hairpin_queue_size_set(void *data, u64 val)
+{
+ struct mlx5e_hairpin_params *hp = data;
+
+ if (val > BIT(MLX5_CAP_GEN(hp->mdev, log_max_hairpin_num_packets))) {
+ mlx5_core_err(hp->mdev,
+ "Invalid hairpin queue size, must be <= %lu\n",
+ BIT(MLX5_CAP_GEN(hp->mdev,
+ log_max_hairpin_num_packets)));
+ return -EINVAL;
+ }
+
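+ /* the queue size is programmed as a log2 value later on, so keep it a
+ * power of two
+ */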
+ hp->queue_size = roundup_pow_of_two(val);
+
+ return 0;
+}
+
+static int debugfs_hairpin_queue_size_get(void *data, u64 *val)
+{
+ struct mlx5e_hairpin_params *hp = data;
+
+ *val = hp->queue_size;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queue_size,
+ debugfs_hairpin_queue_size_get,
+ debugfs_hairpin_queue_size_set, "%llu\n");
+
+static int debugfs_hairpin_num_active_get(void *data, u64 *val)
+{
+ struct mlx5e_tc_table *tc = data;
+ struct mlx5e_hairpin_entry *hpe;
+ u32 cnt = 0;
+ u32 bkt;
+
+ mutex_lock(&tc->hairpin_tbl_lock);
+ hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
+ cnt++;
+ mutex_unlock(&tc->hairpin_tbl_lock);
+
+ *val = cnt;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
+ debugfs_hairpin_num_active_get, NULL, "%llu\n");
+
+static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
+{
+ struct mlx5e_tc_table *tc = file->private;
+ struct mlx5e_hairpin_entry *hpe;
+ u32 bkt;
+
+ mutex_lock(&tc->hairpin_tbl_lock);
+ hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
+ seq_printf(file, "Hairpin peer_vhca_id %u prio %u refcnt %u\n",
+ hpe->peer_vhca_id, hpe->prio,
+ refcount_read(&hpe->refcnt));
+ mutex_unlock(&tc->hairpin_tbl_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);
+
+static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
+ struct dentry *dfs_root)
+{
+ if (IS_ERR_OR_NULL(dfs_root))
+ return;
+
+ tc->dfs_root = debugfs_create_dir("tc", dfs_root);
+
+ debugfs_create_file("hairpin_num_queues", 0644, tc->dfs_root,
+ &tc->hairpin_params, &fops_hairpin_queues);
+ debugfs_create_file("hairpin_queue_size", 0644, tc->dfs_root,
+ &tc->hairpin_params, &fops_hairpin_queue_size);
+ debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
+ &fops_hairpin_num_active);
+ debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
+ &debugfs_hairpin_table_dump_fops);
+}
+
+static void
+mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
+ struct mlx5_core_dev *mdev)
+{
+ u64 link_speed64;
+ u32 link_speed;
+
+ hairpin_params->mdev = mdev;
+ /* set one hairpin pair for each 50Gbps share of the link */
+ mlx5e_port_max_linkspeed(mdev, &link_speed);
+ link_speed = max_t(u32, link_speed, 50000);
+ link_speed64 = link_speed;
+ do_div(link_speed64, 50000);
+ hairpin_params->num_queues = link_speed64;
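+ /* e.g. a 200Gbps link gets 4 queues; links at or below 50Gbps get 1 */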
+
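+ /* default to as many minimum-size strides as fit in 64KB (BIT(16)),
+ * capped by the device's per-queue packet limit
+ */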
+ hairpin_params->queue_size =
+ BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev),
+ MLX5_CAP_GEN(mdev, log_max_hairpin_num_packets)));
+}
+
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -1028,8 +1189,6 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5_core_dev *peer_mdev;
struct mlx5e_hairpin_entry *hpe;
struct mlx5e_hairpin *hp;
- u64 link_speed64;
- u32 link_speed;
u8 match_prio;
u16 peer_id;
int err;
@@ -1082,21 +1241,16 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&tc->hairpin_tbl_lock);
- params.log_data_size = clamp_t(u8, 16,
- MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
- MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
- params.log_num_packets = params.log_data_size -
- MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
- params.log_num_packets = min_t(u8, params.log_num_packets,
- MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
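+ /* derive the data size from the (debugfs-tunable) packet count,
+ * clamped to the device's supported min/max hairpin buffer size
+ */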
+ params.log_num_packets = ilog2(tc->hairpin_params.queue_size);
+ params.log_data_size =
+ clamp_t(u32,
+ params.log_num_packets +
+ MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.q_counter = priv->q_counter;
- /* set hairpin pair per each 50Gbs share of the link */
- mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
- link_speed = max_t(u32, link_speed, 50000);
- link_speed64 = link_speed;
- do_div(link_speed64, 50000);
- params.num_channels = link_speed64;
+ params.num_channels = tc->hairpin_params.num_queues;
hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
hpe->hp = hp;
@@ -1301,7 +1455,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
+ err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
if (err)
return err;
}
@@ -1361,7 +1515,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- mlx5e_detach_mod_hdr(priv, flow);
+ mlx5e_tc_detach_mod_hdr(priv, flow, attr);
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
@@ -1451,7 +1605,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
goto err_get_chain;
err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
- CHAIN_TO_REG, chain_mapping);
+ MAPPED_OBJ_TO_REG, chain_mapping);
if (err)
goto err_reg_set;
@@ -1472,7 +1626,7 @@ skip_restore:
goto err_offload;
}
- flow->slow_mh = mh;
+ flow->attr->slow_mh = mh;
flow->chain_mapping = chain_mapping;
flow_flag_set(flow, SLOW);
@@ -1497,6 +1651,7 @@ err_get_chain:
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow)
{
+ struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
struct mlx5_flow_attr *slow_attr;
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
@@ -1509,16 +1664,16 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
- if (flow->slow_mh) {
+ if (slow_mh) {
slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
+ slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
}
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
- if (flow->slow_mh) {
- mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
+ if (slow_mh) {
+ mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
flow->chain_mapping = 0;
- flow->slow_mh = NULL;
+ flow->attr->slow_mh = NULL;
}
flow_flag_clear(flow, SLOW);
kfree(slow_attr);
@@ -1629,26 +1784,6 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
return err;
}
-int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr)
-{
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
- struct mlx5_modify_hdr *mod_hdr;
-
- mod_hdr = mlx5_modify_header_alloc(priv->mdev,
- mlx5e_get_flow_namespace(flow),
- mod_hdr_acts->num_actions,
- mod_hdr_acts->actions);
- if (IS_ERR(mod_hdr))
- return PTR_ERR(mod_hdr);
-
- WARN_ON(attr->modify_hdr);
- attr->modify_hdr = mod_hdr;
-
- return 0;
-}
-
static int
set_encap_dests(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
@@ -1768,10 +1903,8 @@ verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
static int
post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
- bool is_post_act_attr,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
bool vf_tun;
int err = 0;
@@ -1783,34 +1916,22 @@ post_process_attr(struct mlx5e_tc_flow *flow,
if (err)
goto err_out;
- if (mlx5e_is_eswitch_flow(flow)) {
- err = mlx5_eswitch_add_vlan_action(esw, attr);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
goto err_out;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- if (vf_tun || is_post_act_attr) {
- err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
- if (err)
- goto err_out;
- } else {
- err = mlx5e_attach_mod_hdr(flow->priv, flow, attr->parse_attr);
- if (err)
- goto err_out;
- }
- }
-
if (attr->branch_true &&
attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_true);
+ err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
if (err)
goto err_out;
}
if (attr->branch_false &&
attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_false);
+ err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
if (err)
goto err_out;
}
@@ -1924,7 +2045,11 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- err = post_process_attr(flow, attr, false, extack);
+ err = post_process_attr(flow, attr, extack);
+ if (err)
+ goto err_out;
+
+ err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
if (err)
goto err_out;
@@ -1998,8 +2123,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (mlx5_flow_has_geneve_opt(flow))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
- mlx5_eswitch_del_vlan_action(esw, attr);
-
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
@@ -2009,10 +2132,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- if (vf_tun && attr->modify_hdr)
- mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
- else
- mlx5e_detach_mod_hdr(priv, flow);
+ mlx5e_tc_detach_mod_hdr(priv, flow, attr);
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
@@ -2027,13 +2147,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+ mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
+
free_flow_post_acts(flow);
free_branch_attr(flow, attr->branch_true);
free_branch_attr(flow, attr->branch_false);
- if (flow->attr->lag.count)
- mlx5_lag_del_mpesw_rule(esw->dev);
-
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
@@ -2492,13 +2611,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
err = mlx5e_tc_set_attr_rx_tun(flow, spec);
if (err)
return err;
- } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ } else if (tunnel) {
struct mlx5_flow_spec *tmp_spec;
tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
if (!tmp_spec) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
- netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
+ netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
return -ENOMEM;
}
memcpy(tmp_spec, spec, sizeof(*tmp_spec));
@@ -3560,7 +3679,6 @@ out_ok:
static bool
actions_match_supported_fdb(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
@@ -3609,7 +3727,7 @@ actions_match_supported(struct mlx5e_priv *priv,
return false;
if (mlx5e_is_eswitch_flow(flow) &&
- !actions_match_supported_fdb(priv, parse_attr, flow, extack))
+ !actions_match_supported_fdb(priv, flow, extack))
return false;
return true;
@@ -3692,10 +3810,13 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
INIT_LIST_HEAD(&attr2->list);
parse_attr->filter_dev = attr->parse_attr->filter_dev;
attr2->action = 0;
+ attr2->counter = NULL;
+ attr->tc_act_cookies_count = 0;
attr2->flags = 0;
attr2->parse_attr = parse_attr;
attr2->dest_chain = 0;
attr2->dest_ft = NULL;
+ attr2->act_id_restore_rule = NULL;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr2->esw_attr->out_count = 0;
@@ -3831,7 +3952,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (err)
goto out_free;
- err = post_process_attr(flow, attr, true, extack);
+ err = post_process_attr(flow, attr, extack);
if (err)
goto out_free;
@@ -3991,6 +4112,11 @@ parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
jump_state->jumping_attr = attr->branch_false;
jump_state->jump_count = jump_count;
+
+ /* branching action requires its own counter */
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_flag_set(flow, USE_ACT_STATS);
+
return 0;
err_branch_false:
@@ -4051,6 +4177,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
goto out_free;
parse_state->actions |= attr->action;
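+ /* remember each action's cookie (unless it brings its own stats
+ * action) so stats can later be reported per TC action
+ */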
+ if (!tc_act->stats_action)
+ attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
/* Split attr for multi table act if not the last act. */
if (jump_state.jump_target ||
@@ -4184,12 +4312,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
{
- if (same_hw_reps(priv, out_dev) &&
- MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
- MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
- return true;
-
- return false;
+ return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev);
}
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
@@ -4360,6 +4483,9 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
(is_rep_ingress || act_is_encap))
return true;
+ if (mlx5_lag_is_mpesw(esw_attr->in_mdev))
+ return true;
+
return false;
}
@@ -4398,8 +4524,7 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- if (attr->modify_hdr)
- mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+ mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
}
}
@@ -4492,7 +4617,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_core_dev *in_mdev)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
@@ -4521,33 +4645,21 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- /* always set IP version for indirect table handling */
- flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
-
err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
- if (flow->attr->lag.count) {
- err = mlx5_lag_add_mpesw_rule(esw->dev);
- if (err)
- goto err_free;
- }
-
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
complete_all(&flow->init_done);
if (err) {
if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
- goto err_lag;
+ goto err_free;
add_unready_flow(flow);
}
return flow;
-err_lag:
- if (flow->attr->lag.count)
- mlx5_lag_del_mpesw_rule(esw->dev);
err_free:
mlx5e_flow_put(priv, flow);
out:
@@ -4579,8 +4691,10 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
* So packets redirected to uplink use the same mdev of the
* original flow and packets redirected from uplink use the
* peer mdev.
+ * The multiport eswitch is a special case where we must
+ * keep the original mdev.
*/
- if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev))
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
@@ -4842,6 +4956,12 @@ errout:
return err;
}
+int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ return mlx5e_tc_act_stats_fill_stats(get_act_stats_handle(priv), fl_act);
+}
+
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
@@ -4868,11 +4988,15 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
}
if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
- counter = mlx5e_tc_get_counter(flow);
- if (!counter)
- goto errout;
+ if (flow_flag_test(flow, USE_ACT_STATS)) {
+ f->use_act_stats = true;
+ } else {
+ counter = mlx5e_tc_get_counter(flow);
+ if (!counter)
+ goto errout;
- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ }
}
/* Under multipath it's possible for one rule to be currently
@@ -4888,14 +5012,18 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
u64 packets2;
u64 lastuse2;
- counter = mlx5e_tc_get_counter(flow->peer_flow);
- if (!counter)
- goto no_peer_counter;
- mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
-
- bytes += bytes2;
- packets += packets2;
- lastuse = max_t(u64, lastuse, lastuse2);
+ if (flow_flag_test(flow, USE_ACT_STATS)) {
+ f->use_act_stats = true;
+ } else {
+ counter = mlx5e_tc_get_counter(flow->peer_flow);
+ if (!counter)
+ goto no_peer_counter;
+ mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
+
+ bytes += bytes2;
+ packets += packets2;
+ lastuse = max_t(u64, lastuse, lastuse2);
+ }
}
no_peer_counter:
@@ -5220,6 +5348,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
+ mlx5e_hairpin_params_init(&tc->hairpin_params, dev);
+
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
@@ -5230,8 +5360,18 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
goto err_reg;
}
+ mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));
+
+ tc->action_stats_handle = mlx5e_tc_act_stats_create();
+ if (IS_ERR(tc->action_stats_handle)) {
+ err = PTR_ERR(tc->action_stats_handle);
+ goto err_act_stats;
+ }
+
return 0;
+err_act_stats:
+ unregister_netdevice_notifier_dev_net(priv->netdev,
+ &tc->netdevice_nb,
+ &tc->netdevice_nn);
err_reg:
mlx5_tc_ct_clean(tc->ct);
mlx5e_tc_post_act_destroy(tc->post_act);
@@ -5258,6 +5398,8 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ debugfs_remove_recursive(tc->dfs_root);
+
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
@@ -5279,6 +5421,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mapping_destroy(tc->mapping);
mlx5_chains_destroy(tc->chains);
mlx5e_tc_nic_destroy_miss_table(priv);
+ mlx5e_tc_act_stats_free(tc->action_stats_handle);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
@@ -5355,8 +5498,14 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_register_fib_notifier;
}
+ uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
+ if (IS_ERR(uplink_priv->action_stats_handle)) {
+ err = PTR_ERR(uplink_priv->action_stats_handle);
+ goto err_action_counter;
+ }
+
return 0;
+err_action_counter:
+ mlx5e_tc_tun_cleanup(uplink_priv->encap);
err_register_fib_notifier:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
@@ -5383,6 +5532,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
mlx5_tc_ct_clean(uplink_priv->ct_priv);
mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
+ mlx5e_tc_act_stats_free(uplink_priv->action_stats_handle);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
@@ -5456,48 +5606,268 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb)
+static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- u32 chain = 0, chain_tag, reg_b, zone_restore_id;
- struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5_mapped_obj mapped_obj;
- struct tc_skb_ext *tc_skb_ext;
- struct mlx5e_tc_table *tc;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct tunnel_match_enc_opts enc_opts = {};
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct metadata_dst *tun_dst;
+ struct tunnel_match_key key;
+ u32 tun_id, enc_opts_id;
+ struct net_device *dev;
int err;
- reg_b = be32_to_cpu(cqe->ft_metadata);
- tc = mlx5e_fs_get_tc(priv->fs);
- chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
+ tun_id = tunnel_id >> ENC_OPTS_BITS;
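+ /* tunnel_id packs two mapping ids: the low ENC_OPTS_BITS hold the
+ * encap-options id and the high bits the tunnel key id; a zero tun_id
+ * means there is no tunnel to restore
+ */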
- err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
+ if (!tun_id)
+ return true;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
if (err) {
netdev_dbg(priv->netdev,
- "Couldn't find chain for chain tag: %d, err: %d\n",
- chain_tag, err);
+ "Couldn't find tunnel for tun_id: %d, err: %d\n",
+ tun_id, err);
return false;
}
- if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
- chain = mapped_obj.chain;
- tc_skb_ext = tc_skb_ext_alloc(skb);
- if (WARN_ON(!tc_skb_ext))
+ if (enc_opts_id) {
+ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id, &enc_opts);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
+ enc_opts_id, err);
return false;
+ }
+ }
+
+ switch (key.enc_control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, 0, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ break;
+ default:
+ netdev_dbg(priv->netdev,
+ "Couldn't restore tunnel, unsupported addr_type: %d\n",
+ key.enc_control.addr_type);
+ return false;
+ }
+
+ if (!tun_dst) {
+ netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
+ return false;
+ }
+
+ tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
+
+ if (enc_opts.key.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+ enc_opts.key.data,
+ enc_opts.key.len,
+ enc_opts.key.dst_opt_type);
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ dev = dev_get_by_index(&init_net, key.filter_ifindex);
+ if (!dev) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel device with ifindex: %d\n",
+ key.filter_ifindex);
+ return false;
+ }
- tc_skb_ext->chain = chain;
+ /* Set fwd_dev so we do dev_put() after datapath */
+ tc_priv->fwd_dev = dev;
- zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
- ESW_ZONE_ID_MASK;
+ skb->dev = dev;
- if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
- zone_restore_id))
+ return true;
+}
+
+static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
+ u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct tc_skb_ext *tc_skb_ext;
+ u64 act_miss_cookie;
+ u32 chain;
+
+ chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
+ act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
+ mapped_obj->act_miss_cookie : 0;
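+ /* at most one of chain / act_miss_cookie is set, depending on the
+ * mapped object type
+ */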
+ if (chain || act_miss_cookie) {
+ if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
return false;
- } else {
+
+ tc_skb_ext = tc_skb_ext_alloc(skb);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+
+ if (act_miss_cookie) {
+ tc_skb_ext->act_miss_cookie = act_miss_cookie;
+ tc_skb_ext->act_miss = 1;
+ } else {
+ tc_skb_ext->chain = chain;
+ }
+ }
+
+ if (tc_priv)
+ return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+
+ return true;
+}
+
+static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
+ netdev_dbg(priv->netdev,
+ "Failed to restore tunnel info for sampled packet\n");
+ return;
+ }
+ mlx5e_tc_sample_skb(skb, mapped_obj);
+}
+
+static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ bool forward_tx = false;
+
+ /* Tunnel restore takes precedence over int port restore */
+ if (tunnel_id)
+ return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
+ mapped_obj->int_port_metadata, &forward_tx)) {
+ /* Set fwd_dev for future dev_put */
+ tc_priv->fwd_dev = skb->dev;
+ tc_priv->forward_tx = forward_tx;
+
+ return true;
+ }
+
+ return false;
+}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
+ struct mlx5_tc_ct_priv *ct_priv,
+ u32 zone_restore_id, u32 tunnel_id,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct mlx5_mapped_obj mapped_obj;
+ int err;
+
+ err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj);
+ if (err) {
+ netdev_dbg(skb->dev,
+ "Couldn't find mapped object for mapped_obj_id: %d, err: %d\n",
+ mapped_obj_id, err);
+ return false;
+ }
+
+ switch (mapped_obj.type) {
+ case MLX5_MAPPED_OBJ_CHAIN:
+ case MLX5_MAPPED_OBJ_ACT_MISS:
+ return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
+ tunnel_id, tc_priv);
+ case MLX5_MAPPED_OBJ_SAMPLE:
+ mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
+ tc_priv->skb_done = true;
+ return true;
+ case MLX5_MAPPED_OBJ_INT_PORT_METADATA:
+ return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id);
+ default:
netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
return false;
}
-#endif /* CONFIG_NET_TC_SKB_EXT */
- return true;
+ return false;
+}
+
+bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ u32 mapped_obj_id, reg_b, zone_restore_id;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mapping_ctx *mapping_ctx;
+ struct mlx5e_tc_table *tc;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+ tc = mlx5e_fs_get_tc(priv->fs);
+ mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
+ ESW_ZONE_ID_MASK;
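+ /* reg_b from the CQE carries the mapped object id in its low
+ * chain-tag bits and the CT zone restore id in the bits above
+ */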
+ ct_priv = tc->ct;
+ mapping_ctx = tc->mapping;
+
+ return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id,
+ 0, NULL);
+}
+
+int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u64 act_miss_cookie, u32 *act_miss_mapping)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_mapped_obj mapped_obj = {};
+ struct mapping_ctx *ctx;
+ int err;
+
+ ctx = esw->offloads.reg_c0_obj_pool;
+
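+ /* map the 64-bit act-miss cookie to a 32-bit id from the shared
+ * reg_c0 object pool, then install a restore rule so misses surface
+ * in SW carrying that id
+ */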
+ mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
+ mapped_obj.act_miss_cookie = act_miss_cookie;
+ err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
+ if (err)
+ return err;
+
+ attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
+ if (IS_ERR(attr->act_id_restore_rule))
+ goto err_rule;
+
+ return 0;
+
+err_rule:
+ mapping_remove(ctx, *act_miss_mapping);
+ return err;
+}
+
+void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u32 act_miss_mapping)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mapping_ctx *ctx;
+
+ ctx = esw->offloads.reg_c0_obj_pool;
+ mlx5_del_flow_rules(attr->act_id_restore_rule);
+ mapping_remove(ctx, act_miss_mapping);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 50af70ef22f3..adb39e30f90f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -59,6 +59,8 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
struct net_device *fwd_dev;
+ bool skb_done;
+ bool forward_tx;
};
struct mlx5_nic_flow_attr {
@@ -69,35 +71,33 @@ struct mlx5_nic_flow_attr {
struct mlx5_flow_attr {
u32 action;
+ unsigned long tc_act_cookies[TCA_ACT_MAX_PRIO];
struct mlx5_fc *counter;
struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+ struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
struct mlx5_ct_attr ct_attr;
struct mlx5e_sample_attr sample_attr;
struct mlx5e_meter_attr meter_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
+ u16 tc_act_cookies_count;
u32 dest_chain;
struct mlx5_flow_table *ft;
struct mlx5_flow_table *dest_ft;
u8 inner_match_level;
u8 outer_match_level;
- u8 ip_version;
u8 tun_ip_version;
int tunnel_id; /* mapped tunnel id */
u32 flags;
u32 exe_aso_type;
struct list_head list;
struct mlx5e_post_act_handle *post_act_handle;
- struct {
- /* Indicate whether the parsed flow should be counted for lag mode decision
- * making
- */
- bool count;
- } lag;
struct mlx5_flow_attr *branch_true;
struct mlx5_flow_attr *branch_false;
struct mlx5_flow_attr *jumping_attr;
+ struct mlx5_flow_handle *act_id_restore_rule;
/* keep this union last */
union {
DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
@@ -134,7 +134,6 @@ struct mlx5_rx_tun_attr {
__be32 v4;
struct in6_addr v6;
} dst_ip; /* Valid if decap_vport is not zero */
- u32 vni;
};
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
@@ -197,6 +196,8 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
+int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f);
@@ -227,7 +228,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
enum mlx5e_tc_attr_to_reg {
- CHAIN_TO_REG,
+ MAPPED_OBJ_TO_REG,
VPORT_TO_REG,
TUNNEL_TO_REG,
CTSTATE_TO_REG,
@@ -236,7 +237,7 @@ enum mlx5e_tc_attr_to_reg {
MARK_TO_REG,
LABELS_TO_REG,
FTEID_TO_REG,
- NIC_CHAIN_TO_REG,
+ NIC_MAPPED_OBJ_TO_REG,
NIC_ZONE_RESTORE_TO_REG,
PACKET_COLOR_TO_REG,
};
@@ -285,9 +286,13 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
enum mlx5e_tc_attr_to_reg type,
u32 data);
-int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr);
+int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
+
+void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
@@ -366,7 +371,6 @@ struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain, reg_b;
reg_b = be32_to_cpu(cqe->ft_metadata);
@@ -377,20 +381,29 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
if (chain)
return true;
-#endif
return false;
}
-bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
+ struct mlx5_tc_ct_priv *ct_priv,
+ u32 zone_restore_id, u32 tunnel_id,
+ struct mlx5e_tc_update_priv *tc_priv);
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
-mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{ return true; }
#endif
+int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u64 act_miss_cookie, u32 *act_miss_mapping);
+void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u32 act_miss_mapping);
+
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f7897ddb29c5..df5e780e8e6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -720,21 +720,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
-{
- struct mlx5e_tx_wqe_attr wqe_attr;
- struct mlx5e_tx_attr attr;
- struct mlx5e_tx_wqe *wqe;
- u16 pi;
-
- mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
- mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
- pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
- wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
- mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
-}
-
static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 8f7580fec193..38b32e98f3bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -629,9 +629,9 @@ static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ &val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
@@ -817,9 +817,12 @@ static void comp_irqs_release(struct mlx5_core_dev *dev)
static int comp_irqs_request(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ const struct cpumask *prev = cpu_none_mask;
+ const struct cpumask *mask;
int ncomp_eqs = table->num_comp_eqs;
u16 *cpus;
int ret;
+ int cpu;
int i;
ncomp_eqs = table->num_comp_eqs;
@@ -838,8 +841,19 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
ret = -ENOMEM;
goto free_irqs;
}
- for (i = 0; i < ncomp_eqs; i++)
- cpus[i] = cpumask_local_spread(i, dev->priv.numa_node);
+
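+ /* walk CPUs hop by hop: each for_each_numa_hop_mask() iteration
+ * widens the mask, and for_each_cpu_andnot() visits only the CPUs new
+ * to that hop, so the closest CPUs are picked first
+ */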
+ i = 0;
+ rcu_read_lock();
+ for_each_numa_hop_mask(mask, dev->priv.numa_node) {
+ for_each_cpu_andnot(cpu, mask, prev) {
+ cpus[i] = cpu;
+ if (++i == ncomp_eqs)
+ goto spread_done;
+ }
+ prev = mask;
+ }
+spread_done:
+ rcu_read_unlock();
ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
kfree(cpus);
if (ret < 0)
@@ -874,9 +888,9 @@ static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ &val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
@@ -946,11 +960,11 @@ static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_eq_comp *eq, *n;
+ struct mlx5_eq_comp *eq;
int err = -ENOENT;
int i = 0;
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ list_for_each_entry(eq, &table->comp_eqs_list, list) {
if (i++ == vector) {
if (irqn)
*irqn = eq->core.irqn;
@@ -985,10 +999,10 @@ struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_eq_comp *eq, *n;
+ struct mlx5_eq_comp *eq;
int i = 0;
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ list_for_each_entry(eq, &table->comp_eqs_list, list) {
if (i++ == vector)
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index a994e71e05c1..d55775627a47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -356,8 +356,8 @@ void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
}
/* Caller must hold rtnl_lock */
-int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
- u32 metadata)
+int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 metadata)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
index 11d3d3978848..c9f8469e9a47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
@@ -24,8 +24,8 @@ static inline bool mlx5_esw_acl_egress_fwd2vport_supported(struct mlx5_eswitch *
/* Eswitch acl ingress external APIs */
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
- u32 metadata);
+int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 metadata);
void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index c9a91158e99c..9959e9fd15a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -16,18 +16,12 @@
#include "lib/fs_chains.h"
#include "en/mod_hdr.h"
-#define MLX5_ESW_INDIR_TABLE_SIZE 128
-#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
+#define MLX5_ESW_INDIR_TABLE_SIZE 2
+#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
#define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)
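+/* with a table size of 2, the single recirc rule sits at index 0 and the
+ * fwd rule at index 1
+ */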
struct mlx5_esw_indir_table_rule {
- struct list_head list;
struct mlx5_flow_handle *handle;
- union {
- __be32 v4;
- struct in6_addr v6;
- } dst_ip;
- u32 vni;
struct mlx5_modify_hdr *mh;
refcount_t refcnt;
};
@@ -38,12 +32,10 @@ struct mlx5_esw_indir_table_entry {
struct mlx5_flow_group *recirc_grp;
struct mlx5_flow_group *fwd_grp;
struct mlx5_flow_handle *fwd_rule;
- struct list_head recirc_rules;
- int recirc_cnt;
+ struct mlx5_esw_indir_table_rule *recirc_rule;
int fwd_ref;
u16 vport;
- u8 ip_version;
};
struct mlx5_esw_indir_table {
@@ -89,7 +81,6 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
vf_sf_vport &&
esw->dev == dest_mdev &&
- attr->ip_version &&
attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
}
@@ -101,27 +92,8 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
}
-static struct mlx5_esw_indir_table_rule *
-mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
- struct mlx5_esw_flow_attr *attr)
-{
- struct mlx5_esw_indir_table_rule *rule;
-
- list_for_each_entry(rule, &e->recirc_rules, list)
- if (rule->vni == attr->rx_tun_attr->vni &&
- !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip,
- sizeof(attr->rx_tun_attr->dst_ip)))
- goto found;
- return NULL;
-
-found:
- refcount_inc(&rule->refcnt);
- return rule;
-}
-
static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
struct mlx5_esw_indir_table_entry *e)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
@@ -130,73 +102,18 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
struct mlx5_flow_destination dest = {};
struct mlx5_esw_indir_table_rule *rule;
struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_spec *rule_spec;
struct mlx5_flow_handle *handle;
int err = 0;
u32 data;
- rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
- if (rule)
+ if (e->recirc_rule) {
+ refcount_inc(&e->recirc_rule->refcnt);
return 0;
-
- if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
- return -EINVAL;
-
- rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
- if (!rule_spec)
- return -ENOMEM;
-
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule) {
- err = -ENOMEM;
- goto out;
- }
-
- rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS |
- MLX5_MATCH_MISC_PARAMETERS_2;
- if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) {
- MLX5_SET(fte_match_param, rule_spec->match_criteria,
- outer_headers.ip_version, 0xf);
- MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version,
- attr->ip_version);
- } else if (attr->ip_version) {
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.ethertype);
- MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype,
- (attr->ip_version == 4 ? ETH_P_IP : ETH_P_IPV6));
- } else {
- err = -EOPNOTSUPP;
- goto err_ethertype;
}
- if (attr->ip_version == 4) {
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
- MLX5_SET(fte_match_param, rule_spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
- } else if (attr->ip_version == 6) {
- int len = sizeof(struct in6_addr);
-
- memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- 0xff, len);
- memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &esw_attr->rx_tun_attr->dst_ip.v6, len);
- }
-
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- misc_parameters.vxlan_vni);
- MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
- MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
-
- MLX5_SET(fte_match_param, rule_spec->match_criteria,
- misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
- MLX5_VPORT_UPLINK));
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
/* Modify flow source to recirculate packet */
data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
@@ -219,13 +136,14 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ flow_act.fg = e->recirc_grp;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
if (IS_ERR(dest.ft)) {
err = PTR_ERR(dest.ft);
goto err_table;
}
- handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
+ handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto err_handle;
@@ -233,14 +151,10 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
mlx5e_mod_hdr_dealloc(&mod_acts);
rule->handle = handle;
- rule->vni = esw_attr->rx_tun_attr->vni;
rule->mh = flow_act.modify_hdr;
- memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
- sizeof(esw_attr->rx_tun_attr->dst_ip));
refcount_set(&rule->refcnt, 1);
- list_add(&rule->list, &e->recirc_rules);
- e->recirc_cnt++;
- goto out;
+ e->recirc_rule = rule;
+ return 0;
err_handle:
mlx5_chains_put_table(chains, 0, 1, 0);
@@ -250,89 +164,44 @@ err_mod_hdr_alloc:
err_mod_hdr_regc1:
mlx5e_mod_hdr_dealloc(&mod_acts);
err_mod_hdr_regc0:
-err_ethertype:
kfree(rule);
-out:
- kvfree(rule_spec);
return err;
}
static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
struct mlx5_esw_indir_table_entry *e)
{
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_esw_indir_table_rule *rule = e->recirc_rule;
struct mlx5_fs_chains *chains = esw_chains(esw);
- struct mlx5_esw_indir_table_rule *rule;
- list_for_each_entry(rule, &e->recirc_rules, list)
- if (rule->vni == esw_attr->rx_tun_attr->vni &&
- !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
- sizeof(esw_attr->rx_tun_attr->dst_ip)))
- goto found;
-
- return;
+ if (!rule)
+ return;
-found:
if (!refcount_dec_and_test(&rule->refcnt))
return;
mlx5_del_flow_rules(rule->handle);
mlx5_chains_put_table(chains, 0, 1, 0);
mlx5_modify_header_dealloc(esw->dev, rule->mh);
- list_del(&rule->list);
kfree(rule);
- e->recirc_cnt--;
+ e->recirc_rule = NULL;
}
-static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_indir_table_entry *e)
+static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e)
{
int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- u32 *in, *match;
+ u32 *in;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
- MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
- match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
- if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
- MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
- else
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
-
- if (attr->ip_version == 4) {
- MLX5_SET_TO_ONES(fte_match_param, match,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
- } else if (attr->ip_version == 6) {
- memset(MLX5_ADDR_OF(fte_match_param, match,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- 0xff, sizeof(struct in6_addr));
- } else {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
- MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_mask());
MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX);
e->recirc_grp = mlx5_create_flow_group(e->ft, in);
- if (IS_ERR(e->recirc_grp)) {
+ if (IS_ERR(e->recirc_grp))
err = PTR_ERR(e->recirc_grp);
- goto out;
- }
- INIT_LIST_HEAD(&e->recirc_rules);
- e->recirc_cnt = 0;
-
-out:
kvfree(in);
return err;
}
@@ -343,19 +212,12 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_spec *spec;
u32 *in;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- kvfree(in);
- return -ENOMEM;
- }
-
/* Hold one entry */
MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
@@ -366,25 +228,25 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.fg = e->fwd_grp;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = e->vport;
dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
- e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
+ e->fwd_rule = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
if (IS_ERR(e->fwd_rule)) {
mlx5_destroy_flow_group(e->fwd_grp);
err = PTR_ERR(e->fwd_rule);
}
err_out:
- kvfree(spec);
kvfree(in);
return err;
}
static struct mlx5_esw_indir_table_entry *
mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec, u16 vport, bool decap)
+ u16 vport, bool decap)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
@@ -412,15 +274,14 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
}
e->ft = ft;
e->vport = vport;
- e->ip_version = attr->ip_version;
e->fwd_ref = !decap;
- err = mlx5_create_indir_recirc_group(esw, attr, spec, e);
+ err = mlx5_create_indir_recirc_group(e);
if (err)
goto recirc_grp_err;
if (decap) {
- err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+ err = mlx5_esw_indir_table_rule_get(esw, attr, e);
if (err)
goto recirc_rule_err;
}
@@ -430,13 +291,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
goto fwd_grp_err;
hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
- vport << 16 | attr->ip_version);
+ vport << 16);
return e;
fwd_grp_err:
if (decap)
- mlx5_esw_indir_table_rule_put(esw, attr, e);
+ mlx5_esw_indir_table_rule_put(esw, e);
recirc_rule_err:
mlx5_destroy_flow_group(e->recirc_grp);
recirc_grp_err:
@@ -447,13 +308,13 @@ tbl_err:
}
static struct mlx5_esw_indir_table_entry *
-mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version)
+mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport)
{
struct mlx5_esw_indir_table_entry *e;
- u32 key = vport << 16 | ip_version;
+ u32 key = vport << 16;
hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
- if (e->vport == vport && e->ip_version == ip_version)
+ if (e->vport == vport)
return e;
return NULL;
@@ -461,24 +322,23 @@ mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_ver
struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
u16 vport, bool decap)
{
struct mlx5_esw_indir_table_entry *e;
int err;
mutex_lock(&esw->fdb_table.offloads.indir->lock);
- e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+ e = mlx5_esw_indir_table_entry_lookup(esw, vport);
if (e) {
if (!decap) {
e->fwd_ref++;
} else {
- err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+ err = mlx5_esw_indir_table_rule_get(esw, attr, e);
if (err)
goto out_err;
}
} else {
- e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap);
+ e = mlx5_esw_indir_table_entry_create(esw, attr, vport, decap);
if (IS_ERR(e)) {
err = PTR_ERR(e);
esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);
@@ -494,22 +354,21 @@ out_err:
}
void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
u16 vport, bool decap)
{
struct mlx5_esw_indir_table_entry *e;
mutex_lock(&esw->fdb_table.offloads.indir->lock);
- e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+ e = mlx5_esw_indir_table_entry_lookup(esw, vport);
if (!e)
goto out;
if (!decap)
e->fwd_ref--;
else
- mlx5_esw_indir_table_rule_put(esw, attr, e);
+ mlx5_esw_indir_table_rule_put(esw, e);
- if (e->fwd_ref || e->recirc_cnt)
+ if (e->fwd_ref || e->recirc_rule)
goto out;
hash_del(&e->hlist);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
index 21d56b49d14b..036f5b3a341b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
@@ -13,10 +13,8 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);
struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
u16 vport, bool decap);
void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
u16 vport, bool decap);
bool
@@ -44,7 +42,6 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
static inline struct mlx5_flow_table *
mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
u16 vport, bool decap)
{
return ERR_PTR(-EOPNOTSUPP);
@@ -52,7 +49,6 @@ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
static inline void
mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
u16 vport, bool decap)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 9daf55e90367..0f052513fefa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1190,9 +1190,9 @@ static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ &val);
if (!err) {
esw->params.large_group_num = val.vu32;
} else {
@@ -1250,7 +1250,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
if (err)
return err;
} else {
- esw_warn(dev, "engress ACL is not supported by FW\n");
+ esw_warn(dev, "egress ACL is not supported by FW\n");
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
@@ -1406,9 +1406,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
- /* If disabling sriov in switchdev mode, free meta rules here
- * because it depends on num_vfs.
- */
+
if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
struct devlink *devlink = priv_to_devlink(esw->dev);
@@ -1489,7 +1487,7 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
void *hca_caps;
int err;
- if (!mlx5_core_is_ecpf(dev)) {
+ if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) {
*max_sfs = 0;
return 0;
}
@@ -1642,7 +1640,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
- err = esw_offloads_init_reps(esw);
+ err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -1708,7 +1706,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
- esw_offloads_cleanup_reps(esw);
+ esw_offloads_cleanup(esw);
mlx5_esw_vports_cleanup(esw);
kfree(esw);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 92644fbb5081..19e9a77c4633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -52,12 +52,14 @@ enum mlx5_mapped_obj_type {
MLX5_MAPPED_OBJ_CHAIN,
MLX5_MAPPED_OBJ_SAMPLE,
MLX5_MAPPED_OBJ_INT_PORT_METADATA,
+ MLX5_MAPPED_OBJ_ACT_MISS,
};
struct mlx5_mapped_obj {
enum mlx5_mapped_obj_type type;
union {
u32 chain;
+ u64 act_miss_cookie;
struct {
u32 group_id;
u32 rate;
@@ -222,7 +224,6 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle **send_to_vport_meta_rules;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
- int vlan_push_pop_refcount;
struct mlx5_fs_chains *esw_chains_priv;
struct {
@@ -346,8 +347,8 @@ struct mlx5_eswitch {
void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
-void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
-int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+void esw_offloads_cleanup(struct mlx5_eswitch *esw);
+int esw_offloads_init(struct mlx5_eswitch *esw);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
@@ -520,10 +521,6 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr);
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c981fa77f439..2a98375a0abf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -179,15 +179,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_table *ft;
if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
- ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ ft = mlx5_esw_indir_table_get(esw, attr,
mlx5_esw_indir_table_decap_vport(attr), true);
return PTR_ERR_OR_ZERO(ft);
}
@@ -197,7 +196,7 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr)
{
if (mlx5_esw_indir_table_decap_vport(attr))
- mlx5_esw_indir_table_put(esw, attr,
+ mlx5_esw_indir_table_put(esw,
mlx5_esw_indir_table_decap_vport(attr),
true);
}
@@ -235,7 +234,6 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
int i)
{
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -243,7 +241,7 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
dest[i].ft = attr->dest_ft;
if (mlx5_esw_indir_table_decap_vport(attr))
- return esw_setup_decap_indir(esw, attr, spec);
+ return esw_setup_decap_indir(esw, attr);
return 0;
}
@@ -298,7 +296,7 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
mlx5_chains_put_table(chains, 0, 1, 0);
else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
esw_attr->dests[i].mdev))
- mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
+ mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
false);
}
@@ -384,7 +382,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
bool ignore_flow_lvl,
int *i)
{
@@ -399,7 +396,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
esw_attr->dests[j].rep->vport, false);
if (IS_ERR(dest[*i].ft)) {
err = PTR_ERR(dest[*i].ft);
@@ -408,7 +405,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
}
if (mlx5_esw_indir_table_decap_vport(attr)) {
- err = esw_setup_decap_indir(esw, attr, spec);
+ err = esw_setup_decap_indir(esw, attr);
if (err)
goto err_indir_tbl_get;
}
@@ -446,7 +443,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
- mlx5_lag_mpesw_is_activated(esw->dev))
+ mlx5_lag_is_mpesw(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
@@ -511,14 +508,14 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
- err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
if (attr->dest_ft) {
- err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
(*i)++;
} else if (attr->dest_chain) {
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
@@ -582,16 +579,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
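+ /* Emulated VLAN push/pop was removed by this patch; native FW support is now required */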
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
+ return ERR_PTR(-EOPNOTSUPP);
+
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
flow_act.action = attr->action;
- /* if per flow vlan pop/push is emulated, don't set that into the firmware */
- if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
- MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
@@ -727,7 +724,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
if (esw_is_indir_table(esw, attr))
- err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+ err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
&i);
@@ -832,204 +829,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
-static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
-{
- struct mlx5_eswitch_rep *rep;
- unsigned long i;
- int err = 0;
-
- esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
- mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
- if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
- continue;
-
- err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
- if (err)
- goto out;
- }
-
-out:
- return err;
-}
-
-static struct mlx5_eswitch_rep *
-esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
-{
- struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
-
- in_rep = attr->in_rep;
- out_rep = attr->dests[0].rep;
-
- if (push)
- vport = in_rep;
- else if (pop)
- vport = out_rep;
- else
- vport = in_rep;
-
- return vport;
-}
-
-static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
- bool push, bool pop, bool fwd)
-{
- struct mlx5_eswitch_rep *in_rep, *out_rep;
-
- if ((push || pop) && !fwd)
- goto out_notsupp;
-
- in_rep = attr->in_rep;
- out_rep = attr->dests[0].rep;
-
- if (push && in_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
- if (!push && !pop && fwd)
- if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- /* protects against (1) setting rules with different vlans to push and
- * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
- */
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
- goto out_notsupp;
-
- return 0;
-
-out_notsupp:
- return -EOPNOTSUPP;
-}
-
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr)
-{
- struct offloads_fdb *offloads = &esw->fdb_table.offloads;
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- struct mlx5_eswitch_rep *vport = NULL;
- bool push, pop, fwd;
- int err = 0;
-
- /* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- return 0;
-
- push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
- pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
- !attr->dest_chain);
-
- mutex_lock(&esw->state_lock);
-
- err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
- if (err)
- goto unlock;
-
- attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;
-
- vport = esw_vlan_action_get_vport(esw_attr, push, pop);
-
- if (!push && !pop && fwd) {
- /* tracks VF --> wire rules without vlan push action */
- if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
- vport->vlan_refcount++;
- attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
- }
-
- goto unlock;
- }
-
- if (!push && !pop)
- goto unlock;
-
- if (!(offloads->vlan_push_pop_refcount)) {
- /* it's the 1st vlan rule, apply global vlan pop policy */
- err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
- if (err)
- goto out;
- }
- offloads->vlan_push_pop_refcount++;
-
- if (push) {
- if (vport->vlan_refcount)
- goto skip_set_push;
-
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
- 0, SET_VLAN_INSERT | SET_VLAN_STRIP);
- if (err)
- goto out;
- vport->vlan = esw_attr->vlan_vid[0];
-skip_set_push:
- vport->vlan_refcount++;
- }
-out:
- if (!err)
- attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
-unlock:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr)
-{
- struct offloads_fdb *offloads = &esw->fdb_table.offloads;
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- struct mlx5_eswitch_rep *vport = NULL;
- bool push, pop, fwd;
- int err = 0;
-
- /* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- return 0;
-
- if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
- return 0;
-
- push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
- pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-
- mutex_lock(&esw->state_lock);
-
- vport = esw_vlan_action_get_vport(esw_attr, push, pop);
-
- if (!push && !pop && fwd) {
- /* tracks VF --> wire rules without vlan push action */
- if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
- vport->vlan_refcount--;
-
- goto out;
- }
-
- if (push) {
- vport->vlan_refcount--;
- if (vport->vlan_refcount)
- goto skip_unset_push;
-
- vport->vlan = 0;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
- 0, 0, SET_VLAN_STRIP);
- if (err)
- goto out;
- }
-
-skip_unset_push:
- offloads->vlan_push_pop_refcount--;
- if (offloads->vlan_push_pop_refcount)
- goto out;
-
- /* no more vlan rules, stop global vlan pop policy */
- err = esw_set_global_vlan_pop(esw, 0);
-
-out:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_eswitch *from_esw,
@@ -2406,7 +2205,7 @@ static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
kfree(rep);
}
-void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
@@ -2416,7 +2215,7 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
xa_destroy(&esw->offloads.vport_reps);
}
-int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
@@ -2436,6 +2235,94 @@ err:
return err;
}
+static int esw_port_metadata_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err = 0;
+
+ down_write(&esw->mode_lock);
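+ /* Metadata mode can only be changed before the offloads FDB is created */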
+ if (mlx5_esw_is_fdb_created(esw)) {
+ err = -EBUSY;
+ goto done;
+ }
+ if (!mlx5_esw_vport_match_metadata_supported(esw)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ if (ctx->val.vbool)
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ else
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
+done:
+ up_write(&esw->mode_lock);
+ return err;
+}
+
+static int esw_port_metadata_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+ return 0;
+}
+
+static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 esw_mode;
+
+ esw_mode = mlx5_eswitch_mode(dev);
+ if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch must either disabled or non switchdev mode");
+ return -EBUSY;
+ }
+ return 0;
+}
+
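+/* esw_port_metadata is exposed as a driver-specific runtime devlink parameter */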
+static const struct devlink_param esw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ esw_port_metadata_get,
+ esw_port_metadata_set,
+ esw_port_metadata_validate),
+};
+
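+/* Init representors first, then register the devlink params; unwind reps on failure */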
+int esw_offloads_init(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ err = esw_offloads_init_reps(esw);
+ if (err)
+ return err;
+
+ err = devl_params_register(priv_to_devlink(esw->dev),
+ esw_devlink_params,
+ ARRAY_SIZE(esw_devlink_params));
+ if (err)
+ goto err_params;
+
+ return 0;
+
+err_params:
+ esw_offloads_cleanup_reps(esw);
+ return err;
+}
+
+void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+{
+ devl_params_unregister(priv_to_devlink(esw->dev),
+ esw_devlink_params,
+ ARRAY_SIZE(esw_devlink_params));
+ esw_offloads_cleanup_reps(esw);
+}
+
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
@@ -3575,9 +3462,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return err;
}
@@ -3675,9 +3562,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return err;
}
@@ -3749,9 +3636,9 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 9459e56ee90a..718cf09c28ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -424,6 +424,7 @@ int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_b
return blocking_notifier_chain_register(&events->sw_nh, nb);
}
+EXPORT_SYMBOL(mlx5_blocking_notifier_register);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
@@ -431,6 +432,7 @@ int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier
return blocking_notifier_chain_unregister(&events->sw_nh, nb);
}
+EXPORT_SYMBOL(mlx5_blocking_notifier_unregister);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
void *data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 32d4c967469c..144e59480686 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -272,8 +272,6 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
unsigned int size;
int err;
- if (ft_attr->max_fte != POOL_NEXT_SIZE)
- size = roundup_pow_of_two(ft_attr->max_fte);
size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
if (!size)
return -ENOSPC;
@@ -412,11 +410,6 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
- if (ft->vport) {
- MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
- MLX5_SET(create_flow_group_in, in, other_vport, 1);
- }
-
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
@@ -653,6 +646,12 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
id = dst->dest_attr.sampler_id;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_table_type, dst->dest_attr.ft->type);
+ id = dst->dest_attr.ft->id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ break;
default:
id = dst->dest_attr.tir_num;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5a85d8c1e797..731acbe22dc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -34,12 +34,14 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
+#include <net/devlink.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
+#include "devlink.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -111,8 +113,10 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 8
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
+ * IPsec RoCE policy
+ */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 9
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -219,19 +223,30 @@ static struct init_tree_node egress_root_fs = {
};
enum {
+ RDMA_RX_IPSEC_PRIO,
RDMA_RX_COUNTERS_PRIO,
RDMA_RX_BYPASS_PRIO,
RDMA_RX_KERNEL_PRIO,
};
+#define RDMA_RX_IPSEC_NUM_PRIOS 1
+#define RDMA_RX_IPSEC_NUM_LEVELS 2
+#define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS)
+
#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
static struct init_tree_node rdma_rx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 3,
+ .ar_size = 4,
.children = (struct init_tree_node[]) {
+ [RDMA_RX_IPSEC_PRIO] =
+ ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS,
+ RDMA_RX_IPSEC_NUM_LEVELS))),
[RDMA_RX_COUNTERS_PRIO] =
ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
@@ -254,15 +269,20 @@ static struct init_tree_node rdma_rx_root_fs = {
enum {
RDMA_TX_COUNTERS_PRIO,
+ RDMA_TX_IPSEC_PRIO,
RDMA_TX_BYPASS_PRIO,
};
#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
+#define RDMA_TX_IPSEC_NUM_PRIOS 1
+#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
+#define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
+
static struct init_tree_node rdma_tx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 2,
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
[RDMA_TX_COUNTERS_PRIO] =
ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
@@ -270,6 +290,13 @@ static struct init_tree_node rdma_tx_root_fs = {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_TX_IPSEC_PRIO] =
+ ADD_PRIO(0, RDMA_TX_IPSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS,
+ RDMA_TX_IPSEC_PRIO_NUM_LEVELS))),
+
[RDMA_TX_BYPASS_PRIO] =
ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS_RDMA_TX,
@@ -449,7 +476,8 @@ static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
- type == MLX5_FLOW_DESTINATION_TYPE_RANGE;
+ type == MLX5_FLOW_DESTINATION_TYPE_RANGE ||
+ type == MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
}
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
@@ -1774,7 +1802,6 @@ static int build_match_list(struct match_list *match_head,
{
struct rhlist_head *tmp, *list;
struct mlx5_flow_group *g;
- int err = 0;
rcu_read_lock();
INIT_LIST_HEAD(&match_head->list);
@@ -1800,7 +1827,7 @@ static int build_match_list(struct match_list *match_head,
list_add_tail(&curr_match->list, &match_head->list);
}
rcu_read_unlock();
- return err;
+ return 0;
}
static u64 matched_fgs_get_version(struct list_head *match_head)
@@ -2367,6 +2394,14 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
root_ns = steering->rdma_tx_root_ns;
prio = RDMA_TX_COUNTERS_PRIO;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_IPSEC_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC:
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_IPSEC_PRIO;
+ break;
default: /* Must be NIC RX */
WARN_ON(!is_nic_rx_ns(type));
root_ns = steering->root_ns;
@@ -3143,6 +3178,78 @@ cleanup:
return err;
}
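+/* "smfs" requires software-managed steering (DR) support and cannot coexist with eswitch offloads */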
+static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ char *value = val.vstr;
+ int err = 0;
+
+ if (!strcmp(value, "dmfs")) {
+ return 0;
+ } else if (!strcmp(value, "smfs")) {
+ u8 eswitch_mode;
+ bool smfs_cap;
+
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ smfs_cap = mlx5_fs_dr_is_supported(dev);
+
+ if (!smfs_cap) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Software managed steering is not supported by current device");
+ } else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Software managed steering is not supported when eswitch offloads enabled.");
+ err = -EOPNOTSUPP;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ enum mlx5_flow_steering_mode mode;
+
+ if (!strcmp(ctx->val.vstr, "smfs"))
+ mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ mode = MLX5_FLOW_STEERING_MODE_DMFS;
+ dev->priv.steering->mode = mode;
+
+ return 0;
+}
+
+static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ strcpy(ctx->val.vstr, "smfs");
+ else
+ strcpy(ctx->val.vstr, "dmfs");
+ return 0;
+}
+
+static const struct devlink_param mlx5_fs_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
+ "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_fs_mode_get, mlx5_fs_mode_set,
+ mlx5_fs_mode_validate),
+};
+
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
@@ -3155,12 +3262,20 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
+
+ devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
+ ARRAY_SIZE(mlx5_fs_params));
}
int mlx5_fs_core_init(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- int err = 0;
+ int err;
+
+ err = devl_params_register(priv_to_devlink(dev), mlx5_fs_params,
+ ARRAY_SIZE(mlx5_fs_params));
+ if (err)
+ return err;
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index b406e0367af6..17fe30a4c06c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -504,6 +504,16 @@ void mlx5_fc_query_cached(struct mlx5_fc *counter,
counter->lastpackets = c.packets;
}
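+/* Like mlx5_fc_query_cached(), but returns the raw cached snapshot without
+ * updating the last-read bookkeeping.
+ */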
+void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse)
+{
+ struct mlx5_fc_cache c = counter->cache;
+
+ *bytes = c.bytes;
+ *packets = c.packets;
+ *lastuse = c.lastuse;
+}
+
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
struct delayed_work *dwork,
unsigned long delay)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index f34e758a2f1f..7bb7be01225a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -267,6 +267,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, crypto)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_CRYPTO);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, shampo)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 1e46f9afa40e..4c2dad9d7cfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+#include <devlink.h>
+
#include "fw_reset.h"
#include "diag/fw_tracer.h"
#include "lib/tout.h"
@@ -28,21 +30,32 @@ struct mlx5_fw_reset {
int ret;
};
-void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable)
+static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_fw_reset *fw_reset;
- if (enable)
+ fw_reset = dev->priv.fw_reset;
+
+ if (ctx->val.vbool)
clear_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
else
set_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+ return 0;
}
-bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev)
+static int mlx5_fw_reset_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_fw_reset *fw_reset;
+
+ fw_reset = dev->priv.fw_reset;
- return !test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+ ctx->val.vbool = !test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
+ &fw_reset->reset_flags);
+ return 0;
}
static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
@@ -150,11 +163,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
- mlx5_load_one(dev, false);
+ mlx5_load_one(dev);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
@@ -358,6 +371,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n",
reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
err = -ETIMEDOUT;
+ goto restore;
}
do {
@@ -484,7 +498,7 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
}
err = fw_reset->ret;
if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, false);
mlx5_load_one_devl_locked(dev, false);
}
out:
@@ -517,9 +531,16 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_abort_work);
}
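+/* Expose the generic enable_remote_dev_reset toggle as a runtime devlink parameter */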
+static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_fw_reset_enable_remote_dev_reset_get,
+ mlx5_fw_reset_enable_remote_dev_reset_set, NULL),
+};
+
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ int err;
if (!fw_reset)
return -ENOMEM;
@@ -532,6 +553,15 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
fw_reset->dev = dev;
dev->priv.fw_reset = fw_reset;
+ err = devl_params_register(priv_to_devlink(dev),
+ mlx5_fw_reset_devlink_params,
+ ARRAY_SIZE(mlx5_fw_reset_devlink_params));
+ if (err) {
+ destroy_workqueue(fw_reset->wq);
+ kfree(fw_reset);
+ return err;
+ }
+
INIT_WORK(&fw_reset->fw_live_patch_work, mlx5_fw_live_patch_event);
INIT_WORK(&fw_reset->reset_request_work, mlx5_sync_reset_request_event);
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
@@ -546,6 +576,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ devl_params_unregister(priv_to_devlink(dev),
+ mlx5_fw_reset_devlink_params,
+ ARRAY_SIZE(mlx5_fw_reset_devlink_params));
destroy_workqueue(fw_reset->wq);
kfree(dev->priv.fw_reset);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index dc141c7e641a..c57465595f7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -6,8 +6,6 @@
#include "mlx5_core.h"
-void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
-bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 879555ba847d..f9438d4e43ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -62,7 +62,7 @@ enum {
};
enum {
- MLX5_DROP_NEW_HEALTH_WORK,
+ MLX5_DROP_HEALTH_WORK,
};
enum {
@@ -675,7 +675,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
devlink = priv_to_devlink(dev);
mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
+ if (test_bit(MLX5_DROP_HEALTH_WORK, &health->flags)) {
mlx5_core_err(dev, "health works are not permitted at this stage\n");
mutex_unlock(&dev->intf_state_mutex);
return;
@@ -699,7 +699,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
* requests from the kernel.
*/
mlx5_core_err(dev, "Driver is in error state. Unloading\n");
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
}
}
@@ -771,14 +771,8 @@ static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- spin_lock_irqsave(&health->wq_lock, flags);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
- queue_work(health->wq, &health->fatal_report_work);
- else
- mlx5_core_err(dev, "new health works are not permitted at this stage\n");
- spin_unlock_irqrestore(&health->wq_lock, flags);
+ queue_work(health->wq, &health->fatal_report_work);
}
#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
@@ -858,7 +852,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
timer_setup(&health->timer, poll_health, 0);
health->fatal_error = MLX5_SENSOR_NO_ERR;
- clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ clear_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -869,13 +863,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- if (disable_health) {
- spin_lock_irqsave(&health->wq_lock, flags);
- set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock_irqrestore(&health->wq_lock, flags);
- }
+ if (disable_health)
+ set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
del_timer_sync(&health->timer);
}
@@ -891,11 +881,8 @@ void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- spin_lock_irqsave(&health->wq_lock, flags);
- set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock_irqrestore(&health->wq_lock, flags);
+ set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
cancel_delayed_work_sync(&health->update_fw_log_ts_work);
cancel_work_sync(&health->report_work);
cancel_work_sync(&health->fatal_report_work);
@@ -928,7 +915,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
kfree(name);
if (!health->wq)
goto out_err;
- spin_lock_init(&health->wq_lock);
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index e09518f887a0..779d92b762d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -172,6 +172,7 @@ enum mlx5_ptys_rate {
MLX5_PTYS_RATE_EDR = 1 << 5,
MLX5_PTYS_RATE_HDR = 1 << 6,
MLX5_PTYS_RATE_NDR = 1 << 7,
+ MLX5_PTYS_RATE_XDR = 1 << 8,
};
static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
@@ -185,6 +186,7 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
case MLX5_PTYS_RATE_EDR: return 25000;
case MLX5_PTYS_RATE_HDR: return 50000;
case MLX5_PTYS_RATE_NDR: return 100000;
+ case MLX5_PTYS_RATE_XDR: return 200000;
default: return -1;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 911cf4d23964..c2a4f86bc890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -412,7 +412,8 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
int err;
priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
+ priv->dfs_root);
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index b8feaf0f5c4c..f4b777d4e108 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -22,7 +22,7 @@ static int type_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
char *mode = NULL;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
mode = get_str_mode_type(ldev);
@@ -41,7 +41,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv)
int ret = 0;
char *mode;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);
@@ -61,7 +61,7 @@ static int state_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool active;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
active = __mlx5_lag_is_active(ldev);
mutex_unlock(&ldev->lock);
@@ -77,7 +77,7 @@ static int flags_show(struct seq_file *file, void *priv)
bool shared_fdb;
bool lag_active;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (!lag_active)
@@ -108,7 +108,7 @@ static int mapping_show(struct seq_file *file, void *priv)
int num_ports;
int i;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
@@ -142,7 +142,7 @@ static int members_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
int i;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ad32b80e8501..5d331b940f4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -230,7 +230,6 @@ static void mlx5_ldev_free(struct kref *ref)
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
destroy_workqueue(ldev->wq);
- mlx5_lag_mpesw_cleanup(ldev);
mutex_destroy(&ldev->lock);
kfree(ldev);
}
@@ -276,7 +275,6 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
err);
- mlx5_lag_mpesw_init(ldev);
ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports);
ldev->buckets = 1;
@@ -646,7 +644,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
return 0;
}
-static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
+int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
@@ -688,7 +686,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
}
#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 2
-static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
@@ -723,7 +721,7 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return true;
}
-static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
+void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
@@ -740,7 +738,7 @@ static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
}
}
-static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
+void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
@@ -1187,7 +1185,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
tmp_dev = mlx5_get_next_phys_dev_lag(dev);
if (tmp_dev)
- ldev = tmp_dev->priv.lag;
+ ldev = mlx5_lag_dev(tmp_dev);
if (!ldev) {
ldev = mlx5_lag_dev_alloc(dev);
@@ -1386,8 +1384,7 @@ bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_sriov(ldev) &&
- test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ res = ldev && test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
spin_unlock_irqrestore(&lag_lock, flags);
return res;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index f30ac2de639f..bc1f1dd3e283 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -50,19 +50,6 @@ struct lag_tracker {
enum netdev_lag_hash hash_type;
};
-enum mpesw_op {
- MLX5_MPESW_OP_ENABLE,
- MLX5_MPESW_OP_DISABLE,
-};
-
-struct mlx5_mpesw_work_st {
- struct work_struct work;
- struct mlx5_lag *lag;
- enum mpesw_op op;
- struct completion comp;
- int result;
-};
-
/* LAG data of a ConnectX card.
* It serves both its phys functions.
*/
@@ -115,6 +102,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
@@ -124,8 +112,6 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
@@ -134,5 +120,8 @@ void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
void mlx5_ldev_remove_debugfs(struct dentry *dbg);
void mlx5_disable_lag(struct mlx5_lag *ldev);
+void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
+int mlx5_deactivate_lag(struct mlx5_lag *ldev);
+void mlx5_lag_add_devices(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index d9fcb9ed726f..d85a8dfc153d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -28,13 +28,9 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
- struct mlx5_lag *ldev;
- bool res;
-
- ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_multipath(ldev);
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
- return res;
+ return ldev && __mlx5_lag_is_multipath(ldev);
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index c17e8f1ec914..0c0ef600f643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -5,39 +5,121 @@
#include <net/nexthop.h>
#include "lag/lag.h"
#include "eswitch.h"
+#include "esw/acl/ofld.h"
#include "lib/mlx5.h"
-static int add_mpesw_rule(struct mlx5_lag *ldev)
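+/* Undo per-PF uplink metadata: reset the ingress ACL match value, notify
+ * listeners and free the metadata.
+ */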
+static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int err;
+ struct mlx5_core_dev *dev;
+ struct mlx5_eswitch *esw;
+ u32 pf_metadata;
+ int i;
+
+ for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ esw = dev->priv.eswitch;
+ pf_metadata = ldev->lag_mpesw.pf_metadata[i];
+ if (!pf_metadata)
+ continue;
+ mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK, 0);
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
+ (void *)0);
+ mlx5_esw_match_metadata_free(esw, pf_metadata);
+ ldev->lag_mpesw.pf_metadata[i] = 0;
+ }
+}
- if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
- return 0;
+static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev;
+ struct mlx5_eswitch *esw;
+ u32 pf_metadata;
+ int i, err;
+
+ for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ esw = dev->priv.eswitch;
+ pf_metadata = mlx5_esw_match_metadata_alloc(esw);
+ if (!pf_metadata) {
+ err = -ENOSPC;
+ goto err_metadata;
+ }
+
+ ldev->lag_mpesw.pf_metadata[i] = pf_metadata;
+ err = mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK,
+ pf_metadata);
+ if (err)
+ goto err_metadata;
+ }
- if (ldev->mode != MLX5_LAG_MODE_NONE) {
- err = -EINVAL;
- goto out_err;
+ for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
+ (void *)0);
}
- err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
+ return 0;
+
+err_metadata:
+ mlx5_mpesw_metadata_cleanup(ldev);
+ return err;
+}
+
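+/* Enter multiport eswitch mode: assign per-PF metadata, activate the LAG and
+ * reload the eswitch representors.
+ */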
+static int enable_mpesw(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ int err;
+
+ if (ldev->mode != MLX5_LAG_MODE_NONE)
+ return -EINVAL;
+
+ if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
+ !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
+ !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
+ !mlx5_lag_check_prereq(ldev))
+ return -EOPNOTSUPP;
+
+ err = mlx5_mpesw_metadata_set(ldev);
+ if (err)
+ return err;
+
+ mlx5_lag_remove_devices(ldev);
+
+ err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, true);
if (err) {
- mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
- goto out_err;
+ mlx5_core_warn(dev0, "Failed to create LAG in MPESW mode (%d)\n", err);
+ goto err_add_devices;
}
+ dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+ err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+ if (!err)
+ err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ if (err)
+ goto err_rescan_drivers;
+
return 0;
-out_err:
- atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
+err_rescan_drivers:
+ dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+ mlx5_deactivate_lag(ldev);
+err_add_devices:
+ mlx5_lag_add_devices(ldev);
+ mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+ mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
-static void del_mpesw_rule(struct mlx5_lag *ldev)
+static void disable_mpesw(struct mlx5_lag *ldev)
{
- if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
- ldev->mode == MLX5_LAG_MODE_MPESW)
+ if (ldev->mode == MLX5_LAG_MODE_MPESW) {
+ mlx5_mpesw_metadata_cleanup(ldev);
mlx5_disable_lag(ldev);
+ }
}
static void mlx5_mpesw_work(struct work_struct *work)
@@ -45,20 +127,27 @@ static void mlx5_mpesw_work(struct work_struct *work)
struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
struct mlx5_lag *ldev = mpesww->lag;
+ mlx5_dev_list_lock();
mutex_lock(&ldev->lock);
+ if (ldev->mode_changes_in_progress) {
+ mpesww->result = -EAGAIN;
+ goto unlock;
+ }
+
if (mpesww->op == MLX5_MPESW_OP_ENABLE)
- mpesww->result = add_mpesw_rule(ldev);
+ mpesww->result = enable_mpesw(ldev);
else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
- del_mpesw_rule(ldev);
+ disable_mpesw(ldev);
+unlock:
mutex_unlock(&ldev->lock);
-
+ mlx5_dev_list_unlock();
complete(&mpesww->comp);
}
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
enum mpesw_op op)
{
- struct mlx5_lag *ldev = dev->priv.lag;
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
struct mlx5_mpesw_work_st *work;
int err = 0;
@@ -86,43 +175,36 @@ out:
return err;
}
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev)
{
mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev)
{
return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}
-int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
+int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_lag *ldev = mdev->priv.lag;
+ struct mlx5_lag *ldev = mlx5_lag_dev(mdev);
if (!netif_is_bond_master(out_dev) || !ldev)
return 0;
- if (ldev->mode == MLX5_LAG_MODE_MPESW)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
-{
- bool ret;
+ if (ldev->mode != MLX5_LAG_MODE_MPESW)
+ return 0;
- ret = dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW;
- return ret;
+ NL_SET_ERR_MSG_MOD(extack, "can't forward to bond in mpesw mode");
+ return -EOPNOTSUPP;
}
-void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
{
- atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
-}
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
-void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
-{
- WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
+ return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}
+EXPORT_SYMBOL(mlx5_lag_is_mpesw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
index 88e8daffcf92..02520f27a033 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -9,17 +9,27 @@
struct lag_mpesw {
struct work_struct mpesw_work;
- atomic_t mpesw_rule_count;
+ u32 pf_metadata[MLX5_MAX_PORTS];
};
-int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
-bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
-#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-void mlx5_lag_mpesw_init(struct mlx5_lag *ldev);
-void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev);
-#else
-static inline void mlx5_lag_mpesw_init(struct mlx5_lag *ldev) {}
-static inline void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev) {}
-#endif
+enum mpesw_op {
+ MLX5_MPESW_OP_ENABLE,
+ MLX5_MPESW_OP_DISABLE,
+};
+
+struct mlx5_mpesw_work_st {
+ struct work_struct work;
+ struct mlx5_lag *lag;
+ enum mpesw_op op;
+ struct completion comp;
+ int result;
+};
+
+int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
+void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev);
+int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev);
#endif /* __MLX5_LAG_MPESW_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 69318b143268..4c9a40211059 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -69,6 +69,13 @@ enum {
MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
};
+enum {
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN = S16_MIN,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX = S16_MAX,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
+};
+
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@@ -86,6 +93,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
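+/* MTUTC ADJUST_TIME accepts a signed 16-bit delta unless the extended-range
+ * capability widens the limits.
+ */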
+static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
+{
+ s64 min = MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN;
+ s64 max = MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+
+ if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range)) {
+ min = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN;
+ max = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX;
+ }
+
+ if (delta < min || delta > max)
+ return false;
+
+ return true;
+}
+
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -288,8 +311,8 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
if (!mlx5_modify_mtutc_allowed(mdev))
return 0;
- /* HW time adjustment range is s16. If out of range, settime instead */
- if (delta < S16_MIN || delta > S16_MAX) {
+ /* HW time adjustment range is checked. If out of range, settime instead */
+ if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
struct timespec64 ts;
s64 ns;
@@ -326,7 +349,20 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
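+/* adjphase is implemented as a bounded adjtime; deltas outside the HW range are rejected */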
+static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ if (!mlx5_is_mtutc_time_adj_cap(mdev, delta))
+ return -ERANGE;
+
+ return mlx5_ptp_adjtime(ptp, delta);
+}
+
+static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -334,7 +370,15 @@ static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
return 0;
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
- MLX5_SET(mtutc_reg, in, freq_adjustment, freq);
+
+ if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
+ MLX5_SET(mtutc_reg, in, freq_adj_units,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
+ } else {
+ MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
+ }
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
@@ -349,7 +393,8 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_adjfreq_real_time(mdev, scaled_ppm_to_ppb(scaled_ppm));
+
+ err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
return err;
@@ -688,6 +733,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.n_pins = 0,
.pps = 0,
.adjfine = mlx5_ptp_adjfine,
+ .adjphase = mlx5_ptp_adjphase,
.adjtime = mlx5_ptp_adjtime,
.gettimex64 = mlx5_ptp_gettimex,
.settime64 = mlx5_ptp_settime,
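With the hunk above, adjfine hands scaled_ppm straight to firmware when the mtutc_freq_adj_units capability is present, and otherwise falls back to converting into ppb. For reference, scaled_ppm is parts per million with a 16-bit binary fractional field, so ppb = scaled_ppm * 1000 / 2^16. A standalone sketch of that conversion — a simplified reimplementation for illustration; the in-kernel scaled_ppm_to_ppb() helper also applies a small rounding bias:

#include <stdio.h>

static long demo_scaled_ppm_to_ppb(long scaled_ppm)
{
        /* 1000 / 65536 == 125 / 8192, so scale by 125 and shift by 13 */
        long long ppb = (long long)scaled_ppm * 125;

        return (long)(ppb >> 13);
}

int main(void)
{
        /* 1 ppm == 65536 scaled_ppm == 1000 ppb */
        printf("%ld\n", demo_scaled_ppm_to_ppb(65536)); /* prints 1000 */
        return 0;
}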
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index e995f8378df7..3a94b8f8031e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -2,53 +2,253 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "mlx5_core.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
-int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
- void *key, u32 sz_bytes,
- u32 key_type, u32 *p_key_id)
-{
- u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
- u32 sz_bits = sz_bytes * BITS_PER_BYTE;
- u8 general_obj_key_size;
- u64 general_obj_types;
- void *obj, *key_p;
- int err;
+#define MLX5_CRYPTO_DEK_POOLS_NUM (MLX5_ACCEL_OBJ_TYPE_KEY_NUM - 1)
+#define type2idx(type) ((type) - 1)
- obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
- key_p = MLX5_ADDR_OF(encryption_key_obj, obj, key);
+#define MLX5_CRYPTO_DEK_POOL_SYNC_THRESH 128
- general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
- if (!(general_obj_types &
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
- return -EINVAL;
+/* Calculate the number of DEKs that were freed by any user
+ * (for example, TLS) since the last revalidation of a pool or a bulk.
+ */
+#define MLX5_CRYPTO_DEK_CALC_FREED(a) \
+ ({ typeof(a) _a = (a); \
+ _a->num_deks - _a->avail_deks - _a->in_use_deks; })
+
+#define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)
+#define MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk) MLX5_CRYPTO_DEK_CALC_FREED(bulk)
+
+#define MLX5_CRYPTO_DEK_BULK_IDLE(bulk) \
+ ({ typeof(bulk) _bulk = (bulk); \
+ _bulk->avail_deks == _bulk->num_deks; })
+
+enum {
+ MLX5_CRYPTO_DEK_ALL_TYPE = BIT(0),
+};
+
+struct mlx5_crypto_dek_pool {
+ struct mlx5_core_dev *mdev;
+ u32 key_purpose;
+ int num_deks; /* the total number of keys in this pool */
+ int avail_deks; /* the number of available keys in this pool */
+ int in_use_deks; /* the number of keys currently in use in this pool */
+ struct mutex lock; /* protect the following lists, and the bulks */
+ struct list_head partial_list; /* some keys are available */
+ struct list_head full_list; /* no available keys */
+ struct list_head avail_list; /* all keys are available to use */
+
+ /* No in-use keys; all need to be synced.
+ * These bulks will be put to avail list after sync.
+ */
+ struct list_head sync_list;
+
+ bool syncing;
+ struct list_head wait_for_free;
+ struct work_struct sync_work;
+
+ spinlock_t destroy_lock; /* protect destroy_list */
+ struct list_head destroy_list;
+ struct work_struct destroy_work;
+};
+
+struct mlx5_crypto_dek_bulk {
+ struct mlx5_core_dev *mdev;
+ int base_obj_id;
+ int avail_start; /* the bit at which to start searching */
+ int num_deks; /* the total number of keys in a bulk */
+ int avail_deks; /* the number of keys available, with need_sync bit 0 */
+ int in_use_deks; /* the number of keys being used, with in_use bit 1 */
+ struct list_head entry;
+
+ /* 0: not being used by any user, 1: otherwise */
+ unsigned long *in_use;
+
+ /* The bits are set when the keys are used, and cleared after crypto_sync
+ * is executed. So a value of 0 means the key is newly created, or unused
+ * since the last sync, and 1 means it is in use, or freed but not yet
+ * synced.
+ */
+ unsigned long *need_sync;
+};
+
+struct mlx5_crypto_dek_priv {
+ struct mlx5_core_dev *mdev;
+ int log_dek_obj_range;
+};
+
+struct mlx5_crypto_dek {
+ struct mlx5_crypto_dek_bulk *bulk;
+ struct list_head entry;
+ u32 obj_id;
+};
+
+u32 mlx5_crypto_dek_get_id(struct mlx5_crypto_dek *dek)
+{
+ return dek->obj_id;
+}
+
+static int mlx5_crypto_dek_get_key_sz(struct mlx5_core_dev *mdev,
+ u32 sz_bytes, u8 *key_sz_p)
+{
+ u32 sz_bits = sz_bytes * BITS_PER_BYTE;
switch (sz_bits) {
case 128:
- general_obj_key_size =
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
- key_p += sz_bytes;
+ *key_sz_p = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
break;
case 256:
- general_obj_key_size =
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
+ *key_sz_p = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
break;
default:
+ mlx5_core_err(mdev, "Crypto offload error, invalid key size (%u bits)\n",
+ sz_bits);
return -EINVAL;
}
- memcpy(key_p, key, sz_bytes);
+ return 0;
+}
+
+static int mlx5_crypto_dek_fill_key(struct mlx5_core_dev *mdev, u8 *key_obj,
+ const void *key, u32 sz_bytes)
+{
+ void *dst;
+ u8 key_sz;
+ int err;
+
+ err = mlx5_crypto_dek_get_key_sz(mdev, sz_bytes, &key_sz);
+ if (err)
+ return err;
+
+ MLX5_SET(encryption_key_obj, key_obj, key_size, key_sz);
+
+ if (sz_bytes == 16)
+ /* For key size of 128b the MSBs are reserved. */
+ dst = MLX5_ADDR_OF(encryption_key_obj, key_obj, key[1]);
+ else
+ dst = MLX5_ADDR_OF(encryption_key_obj, key_obj, key);
+
+ memcpy(dst, key, sz_bytes);
+
+ return 0;
+}
+
+static int mlx5_crypto_cmd_sync_crypto(struct mlx5_core_dev *mdev,
+ int crypto_type)
+{
+ u32 in[MLX5_ST_SZ_DW(sync_crypto_in)] = {};
+ int err;
+
+ mlx5_core_dbg(mdev,
+ "Execute SYNC_CRYPTO command with crypto_type(0x%x)\n",
+ crypto_type);
+
+ MLX5_SET(sync_crypto_in, in, opcode, MLX5_CMD_OP_SYNC_CRYPTO);
+ MLX5_SET(sync_crypto_in, in, crypto_type, crypto_type);
+
+ err = mlx5_cmd_exec_in(mdev, sync_crypto, in);
+ if (err)
+ mlx5_core_err(mdev,
+ "Failed to exec sync crypto, type=%d, err=%d\n",
+ crypto_type, err);
+
+ return err;
+}
+
+static int mlx5_crypto_create_dek_bulk(struct mlx5_core_dev *mdev,
+ u32 key_purpose, int log_obj_range,
+ u32 *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *obj, *param;
+ int err;
- MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
- MLX5_SET(encryption_key_obj, obj, key_type, key_type);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_create_param, param, log_obj_range, log_obj_range);
+
+ obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_core_dbg(mdev, "DEK objects created, bulk=%d, obj_id=%d\n",
+ 1 << log_obj_range, *obj_id);
+
+ return 0;
+}
+
+static int mlx5_crypto_modify_dek_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes, u32 key_purpose,
+ u32 obj_id, u32 obj_offset)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *obj, *param;
+ int err;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_query_param, param, obj_offset, obj_offset);
+
+ obj = MLX5_ADDR_OF(modify_encryption_key_in, in, encryption_key_object);
+ MLX5_SET64(encryption_key_obj, obj, modify_field_select, 1);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
+
+ err = mlx5_crypto_dek_fill_key(mdev, obj, key, sz_bytes);
+ if (err)
+ return err;
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+
+ /* avoid leaking key on the stack */
+ memzero_explicit(in, sizeof(in));
+
+ return err;
+}
+
+static int mlx5_crypto_create_dek_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_purpose, u32 *p_key_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
+ return -EINVAL;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+
+ obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
+
+ err = mlx5_crypto_dek_fill_key(mdev, obj, key, sz_bytes);
+ if (err)
+ return err;
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*p_key_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
@@ -58,7 +258,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
return err;
}
-void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
+static void mlx5_crypto_destroy_dek_key(struct mlx5_core_dev *mdev, u32 key_id)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
@@ -71,3 +271,504 @@ void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
+
+int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_type, u32 *p_key_id)
+{
+ return mlx5_crypto_create_dek_key(mdev, key, sz_bytes, key_type, p_key_id);
+}
+
+void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ mlx5_crypto_destroy_dek_key(mdev, key_id);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = pool->mdev;
+ struct mlx5_crypto_dek_bulk *bulk;
+ int num_deks, base_obj_id;
+ int err;
+
+ bulk = kzalloc(sizeof(*bulk), GFP_KERNEL);
+ if (!bulk)
+ return ERR_PTR(-ENOMEM);
+
+ num_deks = 1 << dek_priv->log_dek_obj_range;
+ bulk->need_sync = bitmap_zalloc(num_deks, GFP_KERNEL);
+ if (!bulk->need_sync) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ bulk->in_use = bitmap_zalloc(num_deks, GFP_KERNEL);
+ if (!bulk->in_use) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose,
+ dek_priv->log_dek_obj_range,
+ &base_obj_id);
+ if (err)
+ goto err_out;
+
+ bulk->base_obj_id = base_obj_id;
+ bulk->num_deks = num_deks;
+ bulk->avail_deks = num_deks;
+ bulk->mdev = mdev;
+
+ return bulk;
+
+err_out:
+ bitmap_free(bulk->in_use);
+ bitmap_free(bulk->need_sync);
+ kfree(bulk);
+ return ERR_PTR(err);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk;
+
+ bulk = mlx5_crypto_dek_bulk_create(pool);
+ if (IS_ERR(bulk))
+ return bulk;
+
+ pool->avail_deks += bulk->num_deks;
+ pool->num_deks += bulk->num_deks;
+ list_add(&bulk->entry, &pool->partial_list);
+
+ return bulk;
+}
+
+static void mlx5_crypto_dek_bulk_free(struct mlx5_crypto_dek_bulk *bulk)
+{
+ mlx5_crypto_destroy_dek_key(bulk->mdev, bulk->base_obj_id);
+ bitmap_free(bulk->need_sync);
+ bitmap_free(bulk->in_use);
+ kfree(bulk);
+}
+
+static void mlx5_crypto_dek_pool_remove_bulk(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk,
+ bool delay)
+{
+ pool->num_deks -= bulk->num_deks;
+ pool->avail_deks -= bulk->avail_deks;
+ pool->in_use_deks -= bulk->in_use_deks;
+ list_del(&bulk->entry);
+ if (!delay)
+ mlx5_crypto_dek_bulk_free(bulk);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_pool_pop(struct mlx5_crypto_dek_pool *pool, u32 *obj_offset)
+{
+ struct mlx5_crypto_dek_bulk *bulk;
+ int pos;
+
+ mutex_lock(&pool->lock);
+ bulk = list_first_entry_or_null(&pool->partial_list,
+ struct mlx5_crypto_dek_bulk, entry);
+
+ if (bulk) {
+ pos = find_next_zero_bit(bulk->need_sync, bulk->num_deks,
+ bulk->avail_start);
+ if (pos == bulk->num_deks) {
+ mlx5_core_err(pool->mdev, "Wrong DEK bulk avail_start.\n");
+ pos = find_first_zero_bit(bulk->need_sync, bulk->num_deks);
+ }
+ WARN_ON(pos == bulk->num_deks);
+ } else {
+ bulk = list_first_entry_or_null(&pool->avail_list,
+ struct mlx5_crypto_dek_bulk,
+ entry);
+ if (bulk) {
+ list_move(&bulk->entry, &pool->partial_list);
+ } else {
+ bulk = mlx5_crypto_dek_pool_add_bulk(pool);
+ if (IS_ERR(bulk))
+ goto out;
+ }
+ pos = 0;
+ }
+
+ *obj_offset = pos;
+ bitmap_set(bulk->need_sync, pos, 1);
+ bitmap_set(bulk->in_use, pos, 1);
+ bulk->in_use_deks++;
+ bulk->avail_deks--;
+ if (!bulk->avail_deks) {
+ list_move(&bulk->entry, &pool->full_list);
+ bulk->avail_start = bulk->num_deks;
+ } else {
+ bulk->avail_start = pos + 1;
+ }
+ pool->avail_deks--;
+ pool->in_use_deks++;
+
+out:
+ mutex_unlock(&pool->lock);
+ return bulk;
+}
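/* Illustrative sketch, not part of the patch: the pop path above is a
 * bitmap allocator. Pulling a free slot out of a bulk reduces to the
 * kernel-context helper below; demo_grab_slot() is a hypothetical name.
 */
static int demo_grab_slot(unsigned long *need_sync, unsigned long *in_use,
                          int num_deks, int start)
{
        int pos;

        pos = find_next_zero_bit(need_sync, num_deks, start);
        if (pos == num_deks)    /* avail_start hint was stale, rescan */
                pos = find_first_zero_bit(need_sync, num_deks);
        if (pos == num_deks)
                return -ENOSPC; /* bulk is genuinely full */
        bitmap_set(need_sync, pos, 1);  /* will need a future SYNC_CRYPTO */
        bitmap_set(in_use, pos, 1);     /* handed out to a user */
        return pos;
}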
+
+static bool mlx5_crypto_dek_need_sync(struct mlx5_crypto_dek_pool *pool)
+{
+ return !pool->syncing &&
+ MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) > MLX5_CRYPTO_DEK_POOL_SYNC_THRESH;
+}
+
+static int mlx5_crypto_dek_free_locked(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek *dek)
+{
+ struct mlx5_crypto_dek_bulk *bulk = dek->bulk;
+ int obj_offset;
+ bool old_val;
+ int err = 0;
+
+ obj_offset = dek->obj_id - bulk->base_obj_id;
+ old_val = test_and_clear_bit(obj_offset, bulk->in_use);
+ WARN_ON_ONCE(!old_val);
+ if (!old_val) {
+ err = -ENOENT;
+ goto out_free;
+ }
+ pool->in_use_deks--;
+ bulk->in_use_deks--;
+ if (!bulk->avail_deks && !bulk->in_use_deks)
+ list_move(&bulk->entry, &pool->sync_list);
+
+ if (mlx5_crypto_dek_need_sync(pool) && schedule_work(&pool->sync_work))
+ pool->syncing = true;
+
+out_free:
+ kfree(dek);
+ return err;
+}
+
+static int mlx5_crypto_dek_pool_push(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek *dek)
+{
+ int err = 0;
+
+ mutex_lock(&pool->lock);
+ if (pool->syncing)
+ list_add(&dek->entry, &pool->wait_for_free);
+ else
+ err = mlx5_crypto_dek_free_locked(pool, dek);
+ mutex_unlock(&pool->lock);
+
+ return err;
+}
+
+/* Update the bits for a bulk during sync, and avail_start for the next search.
+ * The possible (need_sync, in_use) combinations for one DEK are:
+ * - (0,0) means the key is ready for use,
+ * - (1,1) means the key is currently being used by a user,
+ * - (1,0) means the key is freed, and waiting to be synced,
+ * - (0,1) is an invalid state.
+ * So the number of revalidated DEKs can be calculated by
+ * hweight_long(need_sync XOR in_use), and the need_sync bits can be
+ * reset by simply copying from the in_use bits.
+ */
+static void mlx5_crypto_dek_bulk_reset_synced(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk)
+{
+ unsigned long *need_sync = bulk->need_sync;
+ unsigned long *in_use = bulk->in_use;
+ int i, freed, reused, avail_next;
+ bool first = true;
+
+ freed = MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk);
+
+ for (i = 0; freed && i < BITS_TO_LONGS(bulk->num_deks);
+ i++, need_sync++, in_use++) {
+ reused = hweight_long((*need_sync) ^ (*in_use));
+ if (!reused)
+ continue;
+
+ bulk->avail_deks += reused;
+ pool->avail_deks += reused;
+ *need_sync = *in_use;
+ if (first) {
+ avail_next = i * BITS_PER_TYPE(long);
+ if (bulk->avail_start > avail_next)
+ bulk->avail_start = avail_next;
+ first = false;
+ }
+
+ freed -= reused;
+ }
+}
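/* Illustrative sketch, not part of the patch: after SYNC_CRYPTO, the
 * revalidated keys in one bitmap word are exactly the bits where
 * need_sync and in_use disagree, i.e. the freed-but-unsynced (1,0)
 * slots. Counting and clearing them per word:
 */
static int demo_revalidate_word(unsigned long *need_sync,
                                const unsigned long *in_use)
{
        int reused = hweight_long(*need_sync ^ *in_use);

        *need_sync = *in_use;   /* (1,0) slots become (0,0): ready again */
        return reused;          /* keys this word got back */
}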
+
+/* Return true if the bulk is reused, false if destroyed with delay */
+static bool mlx5_crypto_dek_bulk_handle_avail(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk,
+ struct list_head *destroy_list)
+{
+ if (list_empty(&pool->avail_list)) {
+ list_move(&bulk->entry, &pool->avail_list);
+ return true;
+ }
+
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, true);
+ list_add(&bulk->entry, destroy_list);
+ return false;
+}
+
+static void mlx5_crypto_dek_pool_splice_destroy_list(struct mlx5_crypto_dek_pool *pool,
+ struct list_head *list,
+ struct list_head *head)
+{
+ spin_lock(&pool->destroy_lock);
+ list_splice_init(list, head);
+ spin_unlock(&pool->destroy_lock);
+}
+
+static void mlx5_crypto_dek_pool_free_wait_keys(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek *dek, *next;
+
+ list_for_each_entry_safe(dek, next, &pool->wait_for_free, entry) {
+ list_del(&dek->entry);
+ mlx5_crypto_dek_free_locked(pool, dek);
+ }
+}
+
+/* For all the bulks in each list, reset the bits during sync.
+ * Move them to different lists according to the number of available DEKs.
+ * Destroy all the idle bulks, except one kept for quick service.
+ * Finally, free the DEKs in the waiting list.
+ */
+static void mlx5_crypto_dek_pool_reset_synced(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+ LIST_HEAD(destroy_list);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry) {
+ mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
+ if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
+ mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
+ }
+
+ list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry) {
+ mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
+
+ if (!bulk->avail_deks)
+ continue;
+
+ if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
+ mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
+ else
+ list_move(&bulk->entry, &pool->partial_list);
+ }
+
+ list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry) {
+ bulk->avail_deks = bulk->num_deks;
+ pool->avail_deks += bulk->num_deks;
+ if (mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list)) {
+ memset(bulk->need_sync, 0, BITS_TO_BYTES(bulk->num_deks));
+ bulk->avail_start = 0;
+ }
+ }
+
+ mlx5_crypto_dek_pool_free_wait_keys(pool);
+
+ if (!list_empty(&destroy_list)) {
+ mlx5_crypto_dek_pool_splice_destroy_list(pool, &destroy_list,
+ &pool->destroy_list);
+ schedule_work(&pool->destroy_work);
+ }
+}
+
+static void mlx5_crypto_dek_sync_work_fn(struct work_struct *work)
+{
+ struct mlx5_crypto_dek_pool *pool =
+ container_of(work, struct mlx5_crypto_dek_pool, sync_work);
+ int err;
+
+ err = mlx5_crypto_cmd_sync_crypto(pool->mdev, BIT(pool->key_purpose));
+ mutex_lock(&pool->lock);
+ if (!err)
+ mlx5_crypto_dek_pool_reset_synced(pool);
+ pool->syncing = false;
+ mutex_unlock(&pool->lock);
+}
+
+struct mlx5_crypto_dek *mlx5_crypto_dek_create(struct mlx5_crypto_dek_pool *dek_pool,
+ const void *key, u32 sz_bytes)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = dek_pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = dek_pool->mdev;
+ u32 key_purpose = dek_pool->key_purpose;
+ struct mlx5_crypto_dek_bulk *bulk;
+ struct mlx5_crypto_dek *dek;
+ int obj_offset;
+ int err;
+
+ dek = kzalloc(sizeof(*dek), GFP_KERNEL);
+ if (!dek)
+ return ERR_PTR(-ENOMEM);
+
+ if (!dek_priv) {
+ err = mlx5_crypto_create_dek_key(mdev, key, sz_bytes,
+ key_purpose, &dek->obj_id);
+ goto out;
+ }
+
+ bulk = mlx5_crypto_dek_pool_pop(dek_pool, &obj_offset);
+ if (IS_ERR(bulk)) {
+ err = PTR_ERR(bulk);
+ goto out;
+ }
+
+ dek->bulk = bulk;
+ dek->obj_id = bulk->base_obj_id + obj_offset;
+ err = mlx5_crypto_modify_dek_key(mdev, key, sz_bytes, key_purpose,
+ bulk->base_obj_id, obj_offset);
+ if (err) {
+ mlx5_crypto_dek_pool_push(dek_pool, dek);
+ return ERR_PTR(err);
+ }
+
+out:
+ if (err) {
+ kfree(dek);
+ return ERR_PTR(err);
+ }
+
+ return dek;
+}
+
+void mlx5_crypto_dek_destroy(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = dek_pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = dek_pool->mdev;
+
+ if (!dek_priv) {
+ mlx5_crypto_destroy_dek_key(mdev, dek->obj_id);
+ kfree(dek);
+ } else {
+ mlx5_crypto_dek_pool_push(dek_pool, dek);
+ }
+}
+
+static void mlx5_crypto_dek_free_destroy_list(struct list_head *destroy_list)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, destroy_list, entry)
+ mlx5_crypto_dek_bulk_free(bulk);
+}
+
+static void mlx5_crypto_dek_destroy_work_fn(struct work_struct *work)
+{
+ struct mlx5_crypto_dek_pool *pool =
+ container_of(work, struct mlx5_crypto_dek_pool, destroy_work);
+ LIST_HEAD(destroy_list);
+
+ mlx5_crypto_dek_pool_splice_destroy_list(pool, &pool->destroy_list,
+ &destroy_list);
+ mlx5_crypto_dek_free_destroy_list(&destroy_list);
+}
+
+struct mlx5_crypto_dek_pool *
+mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev, int key_purpose)
+{
+ struct mlx5_crypto_dek_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->mdev = mdev;
+ pool->key_purpose = key_purpose;
+
+ mutex_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->avail_list);
+ INIT_LIST_HEAD(&pool->partial_list);
+ INIT_LIST_HEAD(&pool->full_list);
+ INIT_LIST_HEAD(&pool->sync_list);
+ INIT_LIST_HEAD(&pool->wait_for_free);
+ INIT_WORK(&pool->sync_work, mlx5_crypto_dek_sync_work_fn);
+ spin_lock_init(&pool->destroy_lock);
+ INIT_LIST_HEAD(&pool->destroy_list);
+ INIT_WORK(&pool->destroy_work, mlx5_crypto_dek_destroy_work_fn);
+
+ return pool;
+}
+
+void mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+
+ cancel_work_sync(&pool->sync_work);
+ cancel_work_sync(&pool->destroy_work);
+
+ mlx5_crypto_dek_pool_free_wait_keys(pool);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->avail_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ mlx5_crypto_dek_free_destroy_list(&pool->destroy_list);
+
+ mutex_destroy(&pool->lock);
+
+ kfree(pool);
+}
+
+void mlx5_crypto_dek_cleanup(struct mlx5_crypto_dek_priv *dek_priv)
+{
+ if (!dek_priv)
+ return;
+
+ kfree(dek_priv);
+}
+
+struct mlx5_crypto_dek_priv *mlx5_crypto_dek_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_crypto_dek_priv *dek_priv;
+ int err;
+
+ if (!MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc))
+ return NULL;
+
+ dek_priv = kzalloc(sizeof(*dek_priv), GFP_KERNEL);
+ if (!dek_priv)
+ return ERR_PTR(-ENOMEM);
+
+ dek_priv->mdev = mdev;
+ dek_priv->log_dek_obj_range = min_t(int, 12,
+ MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc));
+
+ /* sync all types of objects */
+ err = mlx5_crypto_cmd_sync_crypto(mdev, MLX5_CRYPTO_DEK_ALL_TYPE);
+ if (err)
+ goto err_sync_crypto;
+
+ mlx5_core_dbg(mdev, "Crypto DEK enabled, %d deks per alloc (max %d), total %d\n",
+ 1 << dek_priv->log_dek_obj_range,
+ 1 << MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc),
+ 1 << MLX5_CAP_CRYPTO(mdev, log_max_num_deks));
+
+ return dek_priv;
+
+err_sync_crypto:
+ kfree(dek_priv);
+ return ERR_PTR(err);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
new file mode 100644
index 000000000000..c819c047bb9c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_CRYPTO_H__
+#define __MLX5_LIB_CRYPTO_H__
+
+enum {
+ MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS,
+ MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC,
+ MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC,
+ MLX5_ACCEL_OBJ_TYPE_KEY_NUM,
+};
+
+int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_type, u32 *p_key_id);
+
+void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+
+struct mlx5_crypto_dek_pool;
+struct mlx5_crypto_dek;
+
+struct mlx5_crypto_dek_pool *mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev,
+ int key_purpose);
+void mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool);
+struct mlx5_crypto_dek *mlx5_crypto_dek_create(struct mlx5_crypto_dek_pool *dek_pool,
+ const void *key, u32 sz_bytes);
+void mlx5_crypto_dek_destroy(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek);
+u32 mlx5_crypto_dek_get_id(struct mlx5_crypto_dek *dek);
+
+struct mlx5_crypto_dek_priv *mlx5_crypto_dek_init(struct mlx5_core_dev *mdev);
+void mlx5_crypto_dek_cleanup(struct mlx5_crypto_dek_priv *dek_priv);
+#endif /* __MLX5_LIB_CRYPTO_H__ */
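Taken together, the header gives key users (TLS, IPsec, MACsec) a pooled alternative to the one-object-per-key helpers. A hedged usage sketch against the declarations above — demo_use_dek_pool() is hypothetical, and error handling is trimmed to the essentials:

#include "lib/crypto.h"

static int demo_use_dek_pool(struct mlx5_core_dev *mdev,
                             const void *key, u32 key_len)
{
        struct mlx5_crypto_dek_pool *pool;
        struct mlx5_crypto_dek *dek;
        u32 obj_id;

        pool = mlx5_crypto_dek_pool_create(mdev, MLX5_ACCEL_OBJ_TLS_KEY);
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        dek = mlx5_crypto_dek_create(pool, key, key_len);
        if (IS_ERR(dek)) {
                mlx5_crypto_dek_pool_destroy(pool);
                return PTR_ERR(dek);
        }

        obj_id = mlx5_crypto_dek_get_id(dek); /* program obj_id into WQEs/steering */

        mlx5_crypto_dek_destroy(pool, dek);
        mlx5_crypto_dek_pool_destroy(pool);
        return 0;
}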
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index df58cba37930..81ed91fee59b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -214,7 +214,7 @@ create_chain_restore(struct fs_chain *chain)
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
- enum mlx5e_tc_attr_to_reg chain_to_reg;
+ enum mlx5e_tc_attr_to_reg mapped_obj_to_reg;
struct mlx5_modify_hdr *mod_hdr;
u32 index;
int err;
@@ -242,7 +242,7 @@ create_chain_restore(struct fs_chain *chain)
chain->id = index;
if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
- chain_to_reg = CHAIN_TO_REG;
+ mapped_obj_to_reg = MAPPED_OBJ_TO_REG;
chain->restore_rule = esw_add_restore_rule(esw, chain->id);
if (IS_ERR(chain->restore_rule)) {
err = PTR_ERR(chain->restore_rule);
@@ -253,7 +253,7 @@ create_chain_restore(struct fs_chain *chain)
* since we write the metadata to reg_b
* that is passed to SW directly.
*/
- chain_to_reg = NIC_CHAIN_TO_REG;
+ mapped_obj_to_reg = NIC_MAPPED_OBJ_TO_REG;
} else {
err = -EINVAL;
goto err_rule;
@@ -261,12 +261,12 @@ create_chain_restore(struct fs_chain *chain)
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mfield);
MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].moffset);
MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen == 32 ?
- 0 : mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen == 32 ?
+ 0 : mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen);
MLX5_SET(set_action_in, modact, data, chain->id);
mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
1, modact);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
new file mode 100644
index 000000000000..2c53589b765d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "fs_core.h"
+#include "lib/ipsec_fs_roce.h"
+#include "mlx5_core.h"
+
+struct mlx5_ipsec_miss {
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5_ipsec_rx_roce {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_ipsec_miss roce_miss;
+
+ struct mlx5_flow_table *ft_rdma;
+ struct mlx5_flow_namespace *ns_rdma;
+};
+
+struct mlx5_ipsec_tx_roce {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_namespace *ns;
+};
+
+struct mlx5_ipsec_fs {
+ struct mlx5_ipsec_rx_roce ipv4_rx;
+ struct mlx5_ipsec_rx_roce ipv6_rx;
+ struct mlx5_ipsec_tx_roce tx;
+};
+
+static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
+ u16 dport)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
+}
+
+static int
+ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_destination *default_dst,
+ struct mlx5_ipsec_rx_roce *roce)
+{
+ struct mlx5_flow_destination dst = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_rdma;
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n",
+ err);
+ goto fail_add_rule;
+ }
+
+ roce->rule = rule;
+
+ memset(spec, 0, sizeof(*spec));
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX RoCE IPsec miss rule err=%d\n",
+ err);
+ goto fail_add_default_rule;
+ }
+
+ roce->roce_miss.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+fail_add_default_rule:
+ mlx5_del_flow_rules(roce->rule);
+fail_add_rule:
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft)
+{
+ struct mlx5_flow_destination dst = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ int err = 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = pol_ft;
+ rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, &dst,
+ 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
+ err);
+ goto out;
+ }
+ roce->rule = rule;
+
+out:
+ return err;
+}
+
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce)
+{
+ struct mlx5_ipsec_tx_roce *tx_roce;
+
+ if (!ipsec_roce)
+ return;
+
+ tx_roce = &ipsec_roce->tx;
+
+ mlx5_del_flow_rules(tx_roce->rule);
+ mlx5_destroy_flow_group(tx_roce->g);
+ mlx5_destroy_flow_table(tx_roce->ft);
+}
+
+#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
+
+int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_table *pol_ft)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_ipsec_tx_roce *roce;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ int ix = 0;
+ int err;
+ u32 *in;
+
+ if (!ipsec_roce)
+ return 0;
+
+ roce = &ipsec_roce->tx;
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(roce->ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
+ return err;
+ }
+
+ roce->ft = ft;
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_TX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
+ goto fail;
+ }
+ roce->g = g;
+
+ err = ipsec_fs_roce_tx_rule_setup(mdev, roce, pol_ft);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
+ goto rule_fail;
+ }
+
+ return 0;
+
+rule_fail:
+ mlx5_destroy_flow_group(roce->g);
+fail:
+ mlx5_destroy_flow_table(ft);
+ return err;
+}
+
+struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
+{
+ struct mlx5_ipsec_rx_roce *rx_roce;
+
+ if (!ipsec_roce)
+ return NULL;
+
+ rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
+ &ipsec_roce->ipv6_rx;
+
+ return rx_roce->ft;
+}
+
+void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
+{
+ struct mlx5_ipsec_rx_roce *rx_roce;
+
+ if (!ipsec_roce)
+ return;
+
+ rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
+ &ipsec_roce->ipv6_rx;
+
+ mlx5_del_flow_rules(rx_roce->roce_miss.rule);
+ mlx5_del_flow_rules(rx_roce->rule);
+ mlx5_destroy_flow_table(rx_roce->ft_rdma);
+ mlx5_destroy_flow_group(rx_roce->roce_miss.group);
+ mlx5_destroy_flow_group(rx_roce->g);
+ mlx5_destroy_flow_table(rx_roce->ft);
+}
+
+#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
+
+int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_destination *default_dst,
+ u32 family, u32 level, u32 prio)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_ipsec_rx_roce *roce;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ if (!ipsec_roce)
+ return 0;
+
+ roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
+ &ipsec_roce->ipv6_rx;
+
+ ft_attr.max_fte = 2;
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at nic err=%d\n", err);
+ return err;
+ }
+
+ roce->ft = ft;
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_RX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group at nic err=%d\n", err);
+ goto fail_group;
+ }
+ roce->g = g;
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_RX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx miss group at nic err=%d\n", err);
+ goto fail_mgroup;
+ }
+ roce->roce_miss.group = g;
+
+ memset(&ft_attr, 0, sizeof(ft_attr));
+ if (family == AF_INET)
+ ft_attr.level = 1;
+ ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
+ goto fail_rdma_table;
+ }
+
+ roce->ft_rdma = ft;
+
+ err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err);
+ goto fail_setup_rule;
+ }
+
+ kvfree(in);
+ return 0;
+
+fail_setup_rule:
+ mlx5_destroy_flow_table(roce->ft_rdma);
+fail_rdma_table:
+ mlx5_destroy_flow_group(roce->roce_miss.group);
+fail_mgroup:
+ mlx5_destroy_flow_group(roce->g);
+fail_group:
+ kvfree(in);
+fail_nomem:
+ mlx5_destroy_flow_table(roce->ft);
+ return err;
+}
+
+void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce)
+{
+ kfree(ipsec_roce);
+}
+
+struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ipsec_fs *roce_ipsec;
+ struct mlx5_flow_namespace *ns;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
+ if (!ns) {
+ mlx5_core_err(mdev, "Failed to get RoCE rx ns\n");
+ return NULL;
+ }
+
+ roce_ipsec = kzalloc(sizeof(*roce_ipsec), GFP_KERNEL);
+ if (!roce_ipsec)
+ return NULL;
+
+ roce_ipsec->ipv4_rx.ns_rdma = ns;
+ roce_ipsec->ipv6_rx.ns_rdma = ns;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
+ if (!ns) {
+ mlx5_core_err(mdev, "Failed to get RoCE tx ns\n");
+ goto err_tx;
+ }
+
+ roce_ipsec->tx.ns = ns;
+
+ return roce_ipsec;
+
+err_tx:
+ kfree(roce_ipsec);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
new file mode 100644
index 000000000000..9712d705fe48
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_IPSEC_H__
+#define __MLX5_LIB_IPSEC_H__
+
+struct mlx5_ipsec_fs;
+
+struct mlx5_flow_table *
+mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family);
+void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
+ u32 family);
+int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_destination *default_dst,
+ u32 family, u32 level, u32 prio);
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce);
+int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_table *pol_ft);
+void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce);
+struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev);
+
+#endif /* __MLX5_LIB_IPSEC_H__ */
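A hedged sketch of how a caller might wire these entry points together; demo_setup_roce_ipsec() is hypothetical, and the single-family (AF_INET) flow and the zero level/prio values are illustrative only:

static int demo_setup_roce_ipsec(struct mlx5_core_dev *mdev,
                                 struct mlx5_flow_namespace *rx_ns,
                                 struct mlx5_flow_destination *miss_dst,
                                 struct mlx5_flow_table *tx_pol_ft,
                                 struct mlx5_ipsec_fs **out)
{
        struct mlx5_ipsec_fs *roce;
        int err;

        *out = NULL;
        roce = mlx5_ipsec_fs_roce_init(mdev); /* NULL if RDMA namespaces are absent */
        if (!roce)
                return 0; /* RoCE IPsec simply stays disabled */

        err = mlx5_ipsec_fs_roce_rx_create(mdev, roce, rx_ns, miss_dst,
                                           AF_INET, 0, 0);
        if (err)
                goto err_rx;

        err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx_pol_ft);
        if (err)
                goto err_tx;

        *out = roce;
        return 0;

err_tx:
        mlx5_ipsec_fs_roce_rx_destroy(roce, AF_INET);
err_rx:
        mlx5_ipsec_fs_roce_cleanup(roce);
        return err;
}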
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 032adb21ad4b..ccf12f7db6f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -79,28 +79,11 @@ struct mlx5_pme_stats {
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
-/* Crypto */
-enum {
- MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
- MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
- MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC,
-};
-
-int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
- void *key, u32 sz_bytes,
- u32 key_type, u32 *p_key_id);
-void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
-
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
return devlink_net(priv_to_devlink(dev));
}
-static inline void mlx5_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev)
-{
- mdev->mlx5e_res.uplink_netdev = netdev;
-}
-
static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
return mdev->mlx5e_res.uplink_netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4e1b5757528a..540840e80493 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -336,6 +336,24 @@ static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
}
}
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *dev, struct net_device *netdev)
+{
+ mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+ dev->mlx5e_res.uplink_netdev = netdev;
+ mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ netdev);
+ mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+}
+
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
+{
+ mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+ mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ dev->mlx5e_res.uplink_netdev);
+ mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+}
+EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);
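/* Illustrative sketch, not part of the patch: an auxiliary consumer can
 * subscribe to uplink netdev changes, then ask for a replay so it also
 * learns the current netdev rather than only future updates. demo_*
 * names are hypothetical; this assumes the existing
 * mlx5_blocking_notifier_register() helper.
 */
static int demo_uplink_event(struct notifier_block *nb,
                             unsigned long event, void *data)
{
        struct net_device *netdev = data;

        if (event == MLX5_DRIVER_EVENT_UPLINK_NETDEV)
                pr_info("uplink netdev is now %s\n",
                        netdev ? netdev_name(netdev) : "(none)");
        return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_uplink_event };

static void demo_watch_uplink(struct mlx5_core_dev *mdev)
{
        mlx5_blocking_notifier_register(mdev, &demo_nb);
        mlx5_core_uplink_netdev_event_replay(mdev); /* deliver current state */
}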
+
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode)
@@ -484,9 +502,9 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
@@ -499,9 +517,9 @@ bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
if (!err)
return val.vbool;
@@ -1390,9 +1408,9 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- err = mlx5_devlink_register(priv_to_devlink(dev));
+ err = mlx5_devlink_params_register(priv_to_devlink(dev));
if (err)
- goto err_devlink_reg;
+ goto err_devlink_params_reg;
err = mlx5_register_device(dev);
if (err)
@@ -1403,8 +1421,8 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
return 0;
err_register:
- mlx5_devlink_unregister(priv_to_devlink(dev));
-err_devlink_reg:
+ mlx5_devlink_params_unregister(priv_to_devlink(dev));
+err_devlink_params_reg:
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
mlx5_unload(dev);
err_load:
@@ -1426,7 +1444,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mutex_lock(&dev->intf_state_mutex);
mlx5_unregister_device(dev);
- mlx5_devlink_unregister(priv_to_devlink(dev));
+ mlx5_devlink_params_unregister(priv_to_devlink(dev));
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "%s: interface is down, NOP\n",
@@ -1491,23 +1509,23 @@ out:
return err;
}
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+int mlx5_load_one(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
int ret;
devl_lock(devlink);
- ret = mlx5_load_one_devl_locked(dev, recovery);
+ ret = mlx5_load_one_devl_locked(dev, false);
devl_unlock(devlink);
return ret;
}
-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend)
{
devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
- mlx5_detach_device(dev);
+ mlx5_detach_device(dev, suspend);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "%s: interface is down, NOP\n",
@@ -1522,12 +1540,12 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
-void mlx5_unload_one(struct mlx5_core_dev *dev)
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend)
{
struct devlink *devlink = priv_to_devlink(dev);
devl_lock(devlink);
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, suspend);
devl_unlock(devlink);
}
@@ -1555,6 +1573,7 @@ static const int types[] = {
MLX5_CAP_DEV_SHAMPO,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
+ MLX5_CAP_CRYPTO,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1608,6 +1627,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
+ mutex_init(&dev->mlx5e_res.uplink_netdev_lock);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1696,6 +1716,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock);
mutex_destroy(&dev->intf_state_mutex);
lockdep_unregister_key(&dev->lock_key);
}
@@ -1809,7 +1830,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, true);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
@@ -1891,8 +1912,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
mlx5_pci_trace(dev, "Enter, loading driver..\n");
- err = mlx5_load_one(dev, false);
-
+ err = mlx5_load_one(dev);
if (!err)
devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter,
DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
@@ -1966,7 +1986,7 @@ static void shutdown(struct pci_dev *pdev)
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
mlx5_pci_disable_device(dev);
}
@@ -1974,7 +1994,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, true);
return 0;
}
@@ -1983,7 +2003,7 @@ static int mlx5_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- return mlx5_load_one(dev, false);
+ return mlx5_load_one(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
@@ -2017,7 +2037,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, false);
}
int mlx5_recover_device(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 029305a8b80a..be0785f83083 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -236,7 +236,7 @@ void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
int mlx5_adev_init(struct mlx5_core_dev *dev);
int mlx5_attach_device(struct mlx5_core_dev *dev);
-void mlx5_detach_device(struct mlx5_core_dev *dev);
+void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
@@ -319,9 +319,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
-void mlx5_unload_one(struct mlx5_core_dev *dev);
-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
+int mlx5_load_one(struct mlx5_core_dev *dev);
int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 function_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 0eb50be175cc..64d4e7125e9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -219,7 +219,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
if (n >= MLX5_NUM_4K_IN_PAGE) {
- mlx5_core_warn(dev, "alloc 4k bug\n");
+ mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
+ fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
return -ENOENT;
}
clear_bit(n, &fp->bitmask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 7b4783ce213e..a7377619ba6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -74,7 +74,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- mlx5_unload_one(sf_dev->mdev);
+ mlx5_unload_one(sf_dev->mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index a4476cb4c3b3..fd2d31cdbcf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -724,7 +724,6 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action)
{
struct postsend_info send_info = {};
- int ret;
send_info.write.addr = (uintptr_t)action->rewrite->data;
send_info.write.length = action->rewrite->num_of_actions *
@@ -734,9 +733,7 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
- ret = dr_postsend_icm_data(dmn, &send_info);
-
- return ret;
+ return dr_postsend_icm_data(dmn, &send_info);
}
static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index 5a1027b07215..a453b9cd9033 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -14,6 +14,7 @@
#include <linux/irqreturn.h>
#include <linux/netdevice.h>
#include <linux/irq.h>
+#include <linux/phy.h>
/* The silicon design supports a maximum RX ring size of
* 32K entries. Based on current testing this maximum size
@@ -67,6 +68,29 @@ struct mlxbf_gige_stats {
u64 rx_filter_discard_pkts;
};
+struct mlxbf_gige_reg_param {
+ u32 mask;
+ u32 shift;
+};
+
+struct mlxbf_gige_mdio_gw {
+ u32 gw_address;
+ u32 read_data_address;
+ struct mlxbf_gige_reg_param busy;
+ struct mlxbf_gige_reg_param write_data;
+ struct mlxbf_gige_reg_param read_data;
+ struct mlxbf_gige_reg_param devad;
+ struct mlxbf_gige_reg_param partad;
+ struct mlxbf_gige_reg_param opcode;
+ struct mlxbf_gige_reg_param st1;
+};
+
+struct mlxbf_gige_link_cfg {
+ void (*set_phy_link_mode)(struct phy_device *phydev);
+ void (*adjust_link)(struct net_device *netdev);
+ phy_interface_t phy_mode;
+};
+
struct mlxbf_gige {
void __iomem *base;
void __iomem *llu_base;
@@ -102,6 +126,9 @@ struct mlxbf_gige {
u8 valid_polarity;
struct napi_struct napi;
struct mlxbf_gige_stats stats;
+ u8 hw_version;
+ struct mlxbf_gige_mdio_gw *mdio_gw;
+ int prev_speed;
};
/* Rx Work Queue Element definitions */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 41ebef25a930..253d7ad9b809 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -135,4 +135,5 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = mlxbf_gige_get_pauseparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 2292d63a279c..694de9513b9f 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -205,7 +205,7 @@ static int mlxbf_gige_stop(struct net_device *netdev)
}
static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+ struct ifreq *ifr, int cmd)
{
if (!(netif_running(netdev)))
return -EINVAL;
@@ -263,13 +263,99 @@ static const struct net_device_ops mlxbf_gige_netdev_ops = {
.ndo_get_stats64 = mlxbf_gige_get_stats64,
};
-static void mlxbf_gige_adjust_link(struct net_device *netdev)
+static void mlxbf_gige_bf2_adjust_link(struct net_device *netdev)
{
struct phy_device *phydev = netdev->phydev;
phy_print_status(phydev);
}
+static void mlxbf_gige_bf3_adjust_link(struct net_device *netdev)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u8 sgmii_mode;
+ u16 ipg_size;
+ u32 val;
+
+ if (phydev->link && phydev->speed != priv->prev_speed) {
+ switch (phydev->speed) {
+ case 1000:
+ ipg_size = MLXBF_GIGE_1G_IPG_SIZE;
+ sgmii_mode = MLXBF_GIGE_1G_SGMII_MODE;
+ break;
+ case 100:
+ ipg_size = MLXBF_GIGE_100M_IPG_SIZE;
+ sgmii_mode = MLXBF_GIGE_100M_SGMII_MODE;
+ break;
+ case 10:
+ ipg_size = MLXBF_GIGE_10M_IPG_SIZE;
+ sgmii_mode = MLXBF_GIGE_10M_SGMII_MODE;
+ break;
+ default:
+ return;
+ }
+
+ val = readl(priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
+ val &= ~(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK | MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK);
+ val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK, ipg_size);
+ val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK, sgmii_mode);
+ writel(val, priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
+
+ val = readl(priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
+ val &= ~MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK;
+ val |= FIELD_PREP(MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK, sgmii_mode);
+ writel(val, priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
+
+ priv->prev_speed = phydev->speed;
+ }
+
+ phy_print_status(phydev);
+}
+
+static void mlxbf_gige_bf2_set_phy_link_mode(struct phy_device *phydev)
+{
+ /* MAC only supports 1000T full duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+
+ /* Only symmetric pause with flow control enabled is supported so no
+ * need to negotiate pause.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+}
+
+static void mlxbf_gige_bf3_set_phy_link_mode(struct phy_device *phydev)
+{
+ /* MAC only supports full duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+
+ /* Only symmetric pause with flow control enabled is supported so no
+ * need to negotiate pause.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+}
+
+static struct mlxbf_gige_link_cfg mlxbf_gige_link_cfgs[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .set_phy_link_mode = mlxbf_gige_bf2_set_phy_link_mode,
+ .adjust_link = mlxbf_gige_bf2_adjust_link,
+ .phy_mode = PHY_INTERFACE_MODE_GMII
+ },
+ [MLXBF_GIGE_VERSION_BF3] = {
+ .set_phy_link_mode = mlxbf_gige_bf3_set_phy_link_mode,
+ .adjust_link = mlxbf_gige_bf3_adjust_link,
+ .phy_mode = PHY_INTERFACE_MODE_SGMII
+ }
+};
+
static int mlxbf_gige_probe(struct platform_device *pdev)
{
struct phy_device *phydev;
@@ -315,6 +401,8 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
+ priv->hw_version = readq(base + MLXBF_GIGE_VERSION);
+
/* Attach MDIO device */
err = mlxbf_gige_mdio_probe(pdev, priv);
if (err)
@@ -357,25 +445,14 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
phydev->irq = phy_irq;
err = phy_connect_direct(netdev, phydev,
- mlxbf_gige_adjust_link,
- PHY_INTERFACE_MODE_GMII);
+ mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
+ mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
if (err) {
dev_err(&pdev->dev, "Could not attach to PHY\n");
goto out;
}
- /* MAC only supports 1000T full duplex mode */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
-
- /* Only symmetric pause with flow control enabled is supported so no
- * need to negotiate pause.
- */
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+ mlxbf_gige_link_cfgs[priv->hw_version].set_phy_link_mode(phydev);
/* Display information about attached PHY device */
phy_attached_info(phydev);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index aa780b1614a3..654190263535 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -23,9 +23,75 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
+#include "mlxbf_gige_mdio_bf2.h"
+#include "mlxbf_gige_mdio_bf3.h"
-#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
-#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
+static struct mlxbf_gige_mdio_gw mlxbf_gige_mdio_gw_t[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .gw_address = MLXBF2_GIGE_MDIO_GW_OFFSET,
+ .read_data_address = MLXBF2_GIGE_MDIO_GW_OFFSET,
+ .busy = {
+ .mask = MLXBF2_GIGE_MDIO_GW_BUSY_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT,
+ },
+ .read_data = {
+ .mask = MLXBF2_GIGE_MDIO_GW_AD_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT,
+ },
+ .write_data = {
+ .mask = MLXBF2_GIGE_MDIO_GW_AD_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT,
+ },
+ .devad = {
+ .mask = MLXBF2_GIGE_MDIO_GW_DEVAD_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT,
+ },
+ .partad = {
+ .mask = MLXBF2_GIGE_MDIO_GW_PARTAD_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT,
+ },
+ .opcode = {
+ .mask = MLXBF2_GIGE_MDIO_GW_OPCODE_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT,
+ },
+ .st1 = {
+ .mask = MLXBF2_GIGE_MDIO_GW_ST1_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_ST1_SHIFT,
+ },
+ },
+ [MLXBF_GIGE_VERSION_BF3] = {
+ .gw_address = MLXBF3_GIGE_MDIO_GW_OFFSET,
+ .read_data_address = MLXBF3_GIGE_MDIO_DATA_READ,
+ .busy = {
+ .mask = MLXBF3_GIGE_MDIO_GW_BUSY_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT,
+ },
+ .read_data = {
+ .mask = MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT,
+ },
+ .write_data = {
+ .mask = MLXBF3_GIGE_MDIO_GW_DATA_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_DATA_SHIFT,
+ },
+ .devad = {
+ .mask = MLXBF3_GIGE_MDIO_GW_DEVAD_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT,
+ },
+ .partad = {
+ .mask = MLXBF3_GIGE_MDIO_GW_PARTAD_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT,
+ },
+ .opcode = {
+ .mask = MLXBF3_GIGE_MDIO_GW_OPCODE_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT,
+ },
+ .st1 = {
+ .mask = MLXBF3_GIGE_MDIO_GW_ST1_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_ST1_SHIFT,
+ },
+ },
+};
#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
@@ -47,30 +113,10 @@
/* Busy bit is set by software and cleared by hardware */
#define MLXBF_GIGE_MDIO_SET_BUSY 0x1
-/* MDIO GW register bits */
-#define MLXBF_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0)
-#define MLXBF_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16)
-#define MLXBF_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
-#define MLXBF_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
-#define MLXBF_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28)
-#define MLXBF_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
-
-/* MDIO config register bits */
-#define MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
-#define MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2)
-#define MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4)
-#define MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8)
-#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
-#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
-
-#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
-
#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
+#define MLXBF_GIGE_BF3_COREPLL_ADDR 0x13409824
+#define MLXBF_GIGE_BF3_COREPLL_SIZE 0x00000010
static struct resource corepll_params[] = {
[MLXBF_GIGE_VERSION_BF2] = {
@@ -78,6 +124,11 @@ static struct resource corepll_params[] = {
.end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
.name = "COREPLL_RES"
},
+ [MLXBF_GIGE_VERSION_BF3] = {
+ .start = MLXBF_GIGE_BF3_COREPLL_ADDR,
+ .end = MLXBF_GIGE_BF3_COREPLL_ADDR + MLXBF_GIGE_BF3_COREPLL_SIZE - 1,
+ .name = "COREPLL_RES"
+ }
};
/* Returns core clock i1clk in Hz */
@@ -134,19 +185,23 @@ static u8 mdio_period_map(struct mlxbf_gige *priv)
return mdio_period;
}
-static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
+static u32 mlxbf_gige_mdio_create_cmd(struct mlxbf_gige_mdio_gw *mdio_gw, u16 data, int phy_add,
int phy_reg, u32 opcode)
{
u32 gw_reg = 0;
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_AD_MASK, data);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_DEVAD_MASK, phy_reg);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_PARTAD_MASK, phy_add);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_OPCODE_MASK, opcode);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_ST1_MASK,
- MLXBF_GIGE_MDIO_CL22_ST1);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_BUSY_MASK,
- MLXBF_GIGE_MDIO_SET_BUSY);
+ gw_reg |= ((data << mdio_gw->write_data.shift) &
+ mdio_gw->write_data.mask);
+ gw_reg |= ((phy_reg << mdio_gw->devad.shift) &
+ mdio_gw->devad.mask);
+ gw_reg |= ((phy_add << mdio_gw->partad.shift) &
+ mdio_gw->partad.mask);
+ gw_reg |= ((opcode << mdio_gw->opcode.shift) &
+ mdio_gw->opcode.mask);
+ gw_reg |= ((MLXBF_GIGE_MDIO_CL22_ST1 << mdio_gw->st1.shift) &
+ mdio_gw->st1.mask);
+ gw_reg |= ((MLXBF_GIGE_MDIO_SET_BUSY << mdio_gw->busy.shift) &
+ mdio_gw->busy.mask);
return gw_reg;
}
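
FIELD_PREP() requires a compile-time constant mask, so once the masks move into the per-chip mlxbf_gige_mdio_gw descriptor the command word is packed with explicit shift-and-mask, as above. A small sketch of that run-time variant, assuming one mask/shift pair per field:

	#include <linux/types.h>

	struct field_desc {
		u32 mask;
		u32 shift;
	};

	/* Run-time equivalent of FIELD_PREP(mask, val) for a non-constant mask. */
	static u32 pack_field(const struct field_desc *f, u32 val)
	{
		return (val << f->shift) & f->mask;
	}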
@@ -158,29 +213,27 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
int ret;
u32 val;
- if (phy_reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
/* Send mdio read request */
- cmd = mlxbf_gige_mdio_create_cmd(0, phy_add, phy_reg, MLXBF_GIGE_MDIO_CL22_READ);
+ cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, 0, phy_add, phy_reg,
+ MLXBF_GIGE_MDIO_CL22_READ);
- writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address);
- ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
- val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
+ ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address,
+ val, !(val & priv->mdio_gw->busy.mask),
5, 1000000);
if (ret) {
- writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
- ret = readl(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ ret = readl(priv->mdio_io + priv->mdio_gw->read_data_address);
/* Only return ad bits of the gw register */
- ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+ ret &= priv->mdio_gw->read_data.mask;
/* The MDIO lock is set on read. To release it, clear gw register */
- writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
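
The transaction sequence above is: write the command word, poll the busy bit until hardware clears it, read the data, then clear the gateway register to release the MDIO lock. A sketch of the polling step alone, using the same 5 us poll interval and 1 s timeout as the driver:

	#include <linux/iopoll.h>

	/* Wait for hardware to clear the busy bit; -ETIMEDOUT on timeout. */
	static int mdio_wait_not_busy(void __iomem *gw, u32 busy_mask)
	{
		u32 val;

		return readl_poll_timeout_atomic(gw, val, !(val & busy_mask),
						 5, 1000000);
	}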
@@ -193,21 +246,18 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
u32 cmd;
int ret;
- if (phy_reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
/* Send mdio write request */
- cmd = mlxbf_gige_mdio_create_cmd(val, phy_add, phy_reg,
+ cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, val, phy_add, phy_reg,
MLXBF_GIGE_MDIO_CL22_WRITE);
- writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address);
/* If the poll timed out, drop the request */
- ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
- temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
+ ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address,
+ temp, !(temp & priv->mdio_gw->busy.mask),
5, 1000000);
/* The MDIO lock is set on read. To release it, clear gw register */
- writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
@@ -219,9 +269,20 @@ static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
mdio_period = mdio_period_map(priv);
- val = MLXBF_GIGE_MDIO_CFG_VAL;
- val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
- writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+ if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
+ val = MLXBF2_GIGE_MDIO_CFG_VAL;
+ val |= FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF2_GIGE_MDIO_CFG_OFFSET);
+ } else {
+ val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) |
+ FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1);
+ writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG0);
+ val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG1);
+ val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) |
+ FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13);
+ writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG2);
+ }
}
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
@@ -230,6 +291,9 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
struct resource *res;
int ret;
+ if (priv->hw_version > MLXBF_GIGE_VERSION_BF3)
+ return -ENODEV;
+
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
@@ -242,13 +306,15 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
/* For backward compatibility with older ACPI tables, also keep
* CLK resource internal to the driver.
*/
- res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
+ res = &corepll_params[priv->hw_version];
}
priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->clk_io)
return -ENOMEM;
+ priv->mdio_gw = &mlxbf_gige_mdio_gw_t[priv->hw_version];
+
mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h
new file mode 100644
index 000000000000..7f1ff0ac7699
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* MDIO support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, ALL RIGHTS RESERVED.
+ *
+ * This software product is a proprietary product of NVIDIA CORPORATION &
+ * AFFILIATES (the "Company") and all right, title, and interest in and to the
+ * software product, including all associated intellectual property rights, are
+ * and shall remain exclusively with the Company.
+ *
+ * This software product is governed by the End User License Agreement
+ * provided with the software product.
+ */
+
+#ifndef __MLXBF_GIGE_MDIO_BF2_H__
+#define __MLXBF_GIGE_MDIO_BF2_H__
+
+#include <linux/bitfield.h>
+
+#define MLXBF2_GIGE_MDIO_GW_OFFSET 0x0
+#define MLXBF2_GIGE_MDIO_CFG_OFFSET 0x4
+
+/* MDIO GW register bits */
+#define MLXBF2_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0)
+#define MLXBF2_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16)
+#define MLXBF2_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
+#define MLXBF2_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
+#define MLXBF2_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28)
+#define MLXBF2_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
+
+#define MLXBF2_GIGE_MDIO_GW_AD_SHIFT 0
+#define MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT 16
+#define MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT 21
+#define MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT 26
+#define MLXBF2_GIGE_MDIO_GW_ST1_SHIFT 28
+#define MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT 30
+
+/* MDIO config register bits */
+#define MLXBF2_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
+#define MLXBF2_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2)
+#define MLXBF2_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4)
+#define MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8)
+#define MLXBF2_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
+#define MLXBF2_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
+
+#define MLXBF2_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+ FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+ FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+ FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+ FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+#endif /* __MLXBF_GIGE_MDIO_BF2_H__ */
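
For reference, the MLXBF2_GIGE_MDIO_CFG_VAL constant above evaluates as follows (a worked expansion, not text from the patch):

	FIELD_PREP(GENMASK(1, 0), 1)    = 0x00000001	/* MDIO mode */
	FIELD_PREP(GENMASK(2, 2), 1)    = 0x00000004	/* 3.3V */
	FIELD_PREP(GENMASK(4, 4), 1)    = 0x00000010	/* full drive */
	FIELD_PREP(GENMASK(23, 16), 6)  = 0x00060000	/* in sample */
	FIELD_PREP(GENMASK(31, 24), 13) = 0x0d000000	/* out sample */

	MLXBF2_GIGE_MDIO_CFG_VAL        = 0x0d060015

The MDC period field (bits 15:8) is OR-ed in separately at run time from mdio_period_map().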
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h
new file mode 100644
index 000000000000..9dd9144b9173
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* MDIO support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, ALL RIGHTS RESERVED.
+ *
+ * This software product is a proprietary product of NVIDIA CORPORATION &
+ * AFFILIATES (the "Company") and all right, title, and interest in and to the
+ * software product, including all associated intellectual property rights, are
+ * and shall remain exclusively with the Company.
+ *
+ * This software product is governed by the End User License Agreement
+ * provided with the software product.
+ */
+
+#ifndef __MLXBF_GIGE_MDIO_BF3_H__
+#define __MLXBF_GIGE_MDIO_BF3_H__
+
+#include <linux/bitfield.h>
+
+#define MLXBF3_GIGE_MDIO_GW_OFFSET 0x80
+#define MLXBF3_GIGE_MDIO_DATA_READ 0x8c
+#define MLXBF3_GIGE_MDIO_CFG_REG0 0x100
+#define MLXBF3_GIGE_MDIO_CFG_REG1 0x104
+#define MLXBF3_GIGE_MDIO_CFG_REG2 0x108
+
+/* MDIO GW register bits */
+#define MLXBF3_GIGE_MDIO_GW_ST1_MASK GENMASK(1, 1)
+#define MLXBF3_GIGE_MDIO_GW_OPCODE_MASK GENMASK(3, 2)
+#define MLXBF3_GIGE_MDIO_GW_PARTAD_MASK GENMASK(8, 4)
+#define MLXBF3_GIGE_MDIO_GW_DEVAD_MASK GENMASK(13, 9)
+/* For BlueField-3, this field is only used for mdio write */
+#define MLXBF3_GIGE_MDIO_GW_DATA_MASK GENMASK(29, 14)
+#define MLXBF3_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
+
+#define MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK GENMASK(15, 0)
+
+#define MLXBF3_GIGE_MDIO_GW_ST1_SHIFT 1
+#define MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT 2
+#define MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT 4
+#define MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT 9
+#define MLXBF3_GIGE_MDIO_GW_DATA_SHIFT 14
+#define MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT 30
+
+#define MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT 0
+
+/* MDIO config register bits */
+#define MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
+#define MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(2, 2)
+#define MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(7, 0)
+#define MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(7, 0)
+#define MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(15, 8)
+
+#endif /* __MLXBF_GIGE_MDIO_BF3_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 7be3a793984d..cd0973229c9b 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -8,8 +8,11 @@
#ifndef __MLXBF_GIGE_REGS_H__
#define __MLXBF_GIGE_REGS_H__
+#include <linux/bitfield.h>
+
#define MLXBF_GIGE_VERSION 0x0000
#define MLXBF_GIGE_VERSION_BF2 0x0
+#define MLXBF_GIGE_VERSION_BF3 0x1
#define MLXBF_GIGE_STATUS 0x0010
#define MLXBF_GIGE_STATUS_READY BIT(0)
#define MLXBF_GIGE_INT_STATUS 0x0028
@@ -77,4 +80,23 @@
*/
#define MLXBF_GIGE_MMIO_REG_SZ (MLXBF_GIGE_MAC_CFG + 8)
+#define MLXBF_GIGE_PLU_TX_REG0 0x80
+#define MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK GENMASK(11, 0)
+#define MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK GENMASK(15, 14)
+
+#define MLXBF_GIGE_PLU_RX_REG0 0x10
+#define MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK GENMASK(25, 24)
+
+#define MLXBF_GIGE_1G_SGMII_MODE 0x0
+#define MLXBF_GIGE_10M_SGMII_MODE 0x1
+#define MLXBF_GIGE_100M_SGMII_MODE 0x2
+
+/* The default ipg_size for 1G is fixed by HW to 11, plus the End byte,
+ * for a total gap of 12 byte times. Scaling to the slower rates gives
+ * 12 * 10 - 1 = 119 for 100M and 12 * 100 - 1 = 1199 for 10M.
+ */
+#define MLXBF_GIGE_1G_IPG_SIZE 11
+#define MLXBF_GIGE_100M_IPG_SIZE 119
+#define MLXBF_GIGE_10M_IPG_SIZE 1199
+
#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index a0a06e2eff82..22db0bb15c45 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -78,6 +78,7 @@ struct mlxsw_core {
spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
bool enable_string_tlv;
+ bool enable_latency_tlv;
} emad;
struct {
u16 *mapping; /* lag_id+port_index to local_port mapping */
@@ -378,6 +379,22 @@ MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
MLXSW_EMAD_STRING_TLV_STRING_LEN);
+/* emad_latency_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x4 (latency TLV).
+ */
+MLXSW_ITEM32(emad, latency_tlv, type, 0x00, 27, 5);
+
+/* emad_latency_tlv_len
+ * Length of the latency TLV in u32.
+ */
+MLXSW_ITEM32(emad, latency_tlv, len, 0x00, 16, 11);
+
+/* emad_latency_tlv_latency_time
+ * EMAD latency time, in microseconds.
+ */
+MLXSW_ITEM32(emad, latency_tlv, latency_time, 0x04, 0, 32);
+
/* emad_reg_tlv_type
* Type of the TLV.
* Must be set to 0x3 (register TLV).
@@ -461,6 +478,12 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
+static void mlxsw_emad_pack_latency_tlv(char *latency_tlv)
+{
+ mlxsw_emad_latency_tlv_type_set(latency_tlv, MLXSW_EMAD_TLV_TYPE_LATENCY);
+ mlxsw_emad_latency_tlv_len_set(latency_tlv, MLXSW_EMAD_LATENCY_TLV_LEN);
+}
+
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
@@ -476,11 +499,11 @@ static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
return 0;
}
-static void mlxsw_emad_construct(struct sk_buff *skb,
+static void mlxsw_emad_construct(const struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
const struct mlxsw_reg_info *reg,
char *payload,
- enum mlxsw_core_reg_access_type type,
- u64 tid, bool enable_string_tlv)
+ enum mlxsw_core_reg_access_type type, u64 tid)
{
char *buf;
@@ -490,7 +513,12 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
buf = skb_push(skb, reg->len + sizeof(u32));
mlxsw_emad_pack_reg_tlv(buf, reg, payload);
- if (enable_string_tlv) {
+ if (mlxsw_core->emad.enable_latency_tlv) {
+ buf = skb_push(skb, MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_latency_tlv(buf);
+ }
+
+ if (mlxsw_core->emad.enable_string_tlv) {
buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
mlxsw_emad_pack_string_tlv(buf);
}
@@ -504,6 +532,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
struct mlxsw_emad_tlv_offsets {
u16 op_tlv;
u16 string_tlv;
+ u16 latency_tlv;
u16 reg_tlv;
};
@@ -514,6 +543,13 @@ static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
}
+static bool mlxsw_emad_tlv_is_latency_tlv(const char *tlv)
+{
+ u8 tlv_type = mlxsw_emad_latency_tlv_type_get(tlv);
+
+ return tlv_type == MLXSW_EMAD_TLV_TYPE_LATENCY;
+}
+
static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
{
struct mlxsw_emad_tlv_offsets *offsets =
@@ -521,6 +557,8 @@ static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
offsets->string_tlv = 0;
+ offsets->latency_tlv = 0;
+
offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
@@ -529,6 +567,11 @@ static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
offsets->string_tlv = offsets->reg_tlv;
offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
}
+
+ if (mlxsw_emad_tlv_is_latency_tlv(skb->data + offsets->reg_tlv)) {
+ offsets->latency_tlv = offsets->reg_tlv;
+ offsets->reg_tlv += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32);
+ }
}
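
After this change a well-formed EMAD frame is: Ethernet header, op TLV, optional string TLV, optional latency TLV, then the register TLV; the parser walks that fixed order, bumping the register-TLV offset past each optional TLV it finds. A hedged sketch of the walk, using the type values from emad.h (the 5-bit type sits in the top bits of the first big-endian word):

	#include <linux/types.h>

	enum { TLV_TYPE_STRING = 2, TLV_TYPE_LATENCY = 4 };
	#define STRING_TLV_WORDS	33
	#define LATENCY_TLV_WORDS	7

	static u8 tlv_type(const u8 *tlv)
	{
		return tlv[0] >> 3;	/* 5-bit type field */
	}

	static u16 reg_tlv_offset(const u8 *buf, u16 off)
	{
		if (tlv_type(buf + off) == TLV_TYPE_STRING)
			off += STRING_TLV_WORDS * sizeof(u32);
		if (tlv_type(buf + off) == TLV_TYPE_LATENCY)
			off += LATENCY_TLV_WORDS * sizeof(u32);
		return off;
	}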
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
@@ -794,6 +837,32 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
EMAD, DISCARD);
+static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
+{
+ char mgir_pl[MLXSW_REG_MGIR_LEN];
+ bool string_tlv, latency_tlv;
+ int err;
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
+ if (err)
+ return err;
+
+ string_tlv = mlxsw_reg_mgir_fw_info_string_tlv_get(mgir_pl);
+ mlxsw_core->emad.enable_string_tlv = string_tlv;
+
+ latency_tlv = mlxsw_reg_mgir_fw_info_latency_tlv_get(mgir_pl);
+ mlxsw_core->emad.enable_latency_tlv = latency_tlv;
+
+ return 0;
+}
+
+static void mlxsw_emad_tlv_disable(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->emad.enable_latency_tlv = false;
+ mlxsw_core->emad.enable_string_tlv = false;
+}
+
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
struct workqueue_struct *emad_wq;
@@ -824,10 +893,17 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (err)
goto err_trap_register;
+ err = mlxsw_emad_tlv_enable(mlxsw_core);
+ if (err)
+ goto err_emad_tlv_enable;
+
mlxsw_core->emad.use_emad = true;
return 0;
+err_emad_tlv_enable:
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
+ mlxsw_core);
err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
@@ -840,13 +916,14 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
return;
mlxsw_core->emad.use_emad = false;
+ mlxsw_emad_tlv_disable(mlxsw_core);
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
destroy_workqueue(mlxsw_core->emad_wq);
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
- u16 reg_len, bool enable_string_tlv)
+ u16 reg_len)
{
struct sk_buff *skb;
u16 emad_len;
@@ -854,8 +931,10 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
sizeof(u32) + mlxsw_core->driver->txhdr_len);
- if (enable_string_tlv)
+ if (mlxsw_core->emad.enable_string_tlv)
emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
+ if (mlxsw_core->emad.enable_latency_tlv)
+ emad_len += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32);
if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
return NULL;
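
A worked view of the size budget above, using the TLV lengths defined in emad.h:

	emad_len = reg->len + 4			/* reg TLV header */
		 + MLXSW_EMAD_ETH_HDR_LEN
		 + (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * 4
		 + driver->txhdr_len
		 + 33 * 4	/* string TLV, if enabled: 132 bytes */
		 + 7 * 4;	/* latency TLV, if enabled: 28 bytes */

Anything above MLXSW_EMAD_MAX_FRAME_LEN makes the allocation fail, so enabling both optional TLVs costs 160 bytes of the per-transaction frame budget.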
@@ -877,7 +956,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
mlxsw_reg_trans_cb_t *cb,
unsigned long cb_priv, u64 tid)
{
- bool enable_string_tlv;
struct sk_buff *skb;
int err;
@@ -885,12 +963,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
tid, reg->id, mlxsw_reg_id_str(reg->id),
mlxsw_core_reg_access_type_str(type));
- /* Since this can be changed during emad_reg_access, read it once and
- * use the value all the way.
- */
- enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
-
- skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
if (!skb)
return -ENOMEM;
@@ -907,8 +980,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->reg = reg;
trans->type = type;
- mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
- enable_string_tlv);
+ mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
@@ -1171,9 +1243,9 @@ static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
return 0;
/* Don't check if devlink 'fw_load_policy' param is 'flash' */
- err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
- DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- &value);
+ err = devl_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ &value);
if (err)
return err;
if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
@@ -1244,20 +1316,22 @@ static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
union devlink_param_value value;
int err;
- err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
- ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+ err = devl_params_register(devlink, mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
if (err)
return err;
value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
- devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ value);
return 0;
}
static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
{
- devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
- ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+ devl_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}
static void *__dl_port(struct devlink_port *devlink_port)
@@ -1676,29 +1750,12 @@ static const struct devlink_ops mlxsw_devlink_ops = {
static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
- int err;
-
- err = mlxsw_core_fw_params_register(mlxsw_core);
- if (err)
- return err;
-
- if (mlxsw_core->driver->params_register) {
- err = mlxsw_core->driver->params_register(mlxsw_core);
- if (err)
- goto err_params_register;
- }
- return 0;
-
-err_params_register:
- mlxsw_core_fw_params_unregister(mlxsw_core);
- return err;
+ return mlxsw_core_fw_params_register(mlxsw_core);
}
static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
{
mlxsw_core_fw_params_unregister(mlxsw_core);
- if (mlxsw_core->driver->params_register)
- mlxsw_core->driver->params_unregister(mlxsw_core);
}
struct mlxsw_core_health_event {
@@ -2051,8 +2108,8 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
- 0, mlxsw_core);
+ fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
+ 0, mlxsw_core);
if (IS_ERR(fw_fatal)) {
dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
return PTR_ERR(fw_fatal);
@@ -2072,7 +2129,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
err_fw_fatal_config:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
err_trap_register:
- devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+ devl_health_reporter_destroy(mlxsw_core->health.fw_fatal);
return err;
}
@@ -2085,7 +2142,7 @@ static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
/* Make sure there is no more event work scheduled */
mlxsw_core_flush_owq();
- devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+ devl_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}
static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core)
@@ -2127,6 +2184,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_devlink_alloc;
}
devl_lock(devlink);
+ devl_register(devlink);
}
mlxsw_core = devlink_priv(devlink);
@@ -2210,11 +2268,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_driver_init;
}
- if (!reload) {
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ if (!reload)
devl_unlock(devlink);
- devlink_register(devlink);
- }
return 0;
err_driver_init:
@@ -2246,6 +2301,7 @@ err_register_resources:
err_bus_init:
mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
@@ -2284,10 +2340,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (!reload) {
- devlink_unregister(devlink);
+ if (!reload)
devl_lock(devlink);
- }
if (devlink_is_reload_failed(devlink)) {
if (!reload)
@@ -2316,6 +2370,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
@@ -2325,6 +2380,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
devl_resources_unregister(devlink);
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
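
These hunks complete mlxsw's move to the locked devlink API: the instance is registered with devl_register() right after devl_lock() in probe, unregistered under the same lock on every teardown path, and the old late devlink_register() / devlink_set_features(DEVLINK_F_RELOAD) pair goes away. The resulting shape, sketched with error handling elided:

	/* probe */
	devl_lock(devlink);
	devl_register(devlink);
	/* ... devl_* setup that requires the instance lock ... */
	devl_unlock(devlink);

	/* remove */
	devl_lock(devlink);
	/* ... devl_* teardown ... */
	devl_unregister(devlink);
	devl_unlock(devlink);
	devlink_free(devlink);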
@@ -3377,12 +3433,6 @@ bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
-void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
-{
- mlxsw_core->emad.enable_string_tlv = true;
-}
-EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
-
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index e0a6fcbbcb19..e5474d3e34db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -421,8 +421,6 @@ struct mlxsw_driver {
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
- int (*params_register)(struct mlxsw_core *mlxsw_core);
- void (*params_unregister)(struct mlxsw_core *mlxsw_core);
/* Notify a driver that a timestamped packet was transmitted. Driver
* is responsible for freeing the passed-in SKB.
@@ -448,8 +446,6 @@ u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
-
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index 83d2dc91ba2c..025e0db983fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -1259,9 +1259,9 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
linecard->linecards = linecards;
mutex_init(&linecard->lock);
- devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
- slot_index, &mlxsw_linecard_ops,
- linecard);
+ devlink_linecard = devl_linecard_create(priv_to_devlink(mlxsw_core),
+ slot_index, &mlxsw_linecard_ops,
+ linecard);
if (IS_ERR(devlink_linecard))
return PTR_ERR(devlink_linecard);
@@ -1285,7 +1285,7 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
if (linecard->active)
mlxsw_linecard_active_clear(linecard);
mlxsw_linecard_bdev_del(linecard);
- devlink_linecard_destroy(linecard->devlink_linecard);
+ devl_linecard_destroy(linecard->devlink_linecard);
mutex_destroy(&linecard->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
index acfbbec52424..c51a61aa19b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/emad.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -21,6 +21,7 @@ enum {
MLXSW_EMAD_TLV_TYPE_OP,
MLXSW_EMAD_TLV_TYPE_STRING,
MLXSW_EMAD_TLV_TYPE_REG,
+ MLXSW_EMAD_TLV_TYPE_LATENCY,
};
/* OP TLV */
@@ -90,6 +91,9 @@ enum {
/* STRING TLV */
#define MLXSW_EMAD_STRING_TLV_LEN 33 /* Length in u32 */
+/* LATENCY TLV */
+#define MLXSW_EMAD_LATENCY_TLV_LEN 7 /* Length in u32 */
+
/* END TLV */
#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index f2d6f8654e04..8165bf31a99a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -10009,6 +10009,18 @@ MLXSW_REG_DEFINE(mgir, MLXSW_REG_MGIR_ID, MLXSW_REG_MGIR_LEN);
*/
MLXSW_ITEM32(reg, mgir, hw_info_device_hw_revision, 0x0, 16, 16);
+/* reg_mgir_fw_info_latency_tlv
+ * When set, latency-TLV is supported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_latency_tlv, 0x20, 29, 1);
+
+/* reg_mgir_fw_info_string_tlv
+ * When set, string-TLV is supported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_string_tlv, 0x20, 28, 1);
+
#define MLXSW_REG_MGIR_FW_INFO_PSID_SIZE 16
/* reg_mgir_fw_info_psid
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f5b2d965d476..a8f94b7544ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3092,7 +3092,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->bus_info = mlxsw_bus_info;
mlxsw_sp_parsing_init(mlxsw_sp);
- mlxsw_core_emad_string_tlv_enable(mlxsw_core);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -3862,62 +3861,6 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
-static int
-mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
- return 0;
-}
-
-static int
-mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
-}
-
-static const struct devlink_param mlxsw_sp2_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
- "acl_region_rehash_interval",
- DEVLINK_PARAM_TYPE_U32,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlxsw_sp_params_acl_region_rehash_intrvl_get,
- mlxsw_sp_params_acl_region_rehash_intrvl_set,
- NULL),
-};
-
-static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
-{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- union devlink_param_value value;
- int err;
-
- err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
- ARRAY_SIZE(mlxsw_sp2_devlink_params));
- if (err)
- return err;
-
- value.vu32 = 0;
- devlink_param_driverinit_value_set(devlink,
- MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
- value);
- return 0;
-}
-
-static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
-{
- devlink_params_unregister(priv_to_devlink(mlxsw_core),
- mlxsw_sp2_devlink_params,
- ARRAY_SIZE(mlxsw_sp2_devlink_params));
-}
-
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port)
{
@@ -3995,8 +3938,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
@@ -4034,8 +3975,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
@@ -4071,8 +4010,6 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp4_config_profile,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bbc73324451d..4c22f8004514 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -973,6 +973,7 @@ enum mlxsw_sp_acl_profile {
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl);
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
@@ -1096,8 +1097,6 @@ mlxsw_sp_acl_act_cookie_lookup(struct mlxsw_sp *mlxsw_sp, u32 cookie_index)
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
-u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val);
struct mlxsw_sp_acl_mangle_action;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 6c5af018546f..0423ac262d89 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -40,6 +40,11 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
+struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl)
+{
+ return &acl->tcam;
+}
+
struct mlxsw_sp_acl_ruleset_ht_key {
struct mlxsw_sp_flow_block *block;
u32 chain_index;
@@ -1099,22 +1104,6 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
kfree(acl);
}
-u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-
- return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
- &acl->tcam);
-}
-
-int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
-{
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-
- return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
- &acl->tcam, val);
-}
-
struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 3b9ba8fa247a..d50786b0a6ce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <net/devlink.h>
#include <trace/events/mlxsw.h>
#include "reg.h"
@@ -29,67 +30,6 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
-int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- u64 max_tcam_regions;
- u64 max_regions;
- u64 max_groups;
- int err;
-
- mutex_init(&tcam->lock);
- tcam->vregion_rehash_intrvl =
- MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
- INIT_LIST_HEAD(&tcam->vregion_list);
-
- max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- ACL_MAX_TCAM_REGIONS);
- max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
-
- /* Use 1:1 mapping between ACL region and TCAM region */
- if (max_tcam_regions < max_regions)
- max_regions = max_tcam_regions;
-
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions)
- return -ENOMEM;
- tcam->max_regions = max_regions;
-
- max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
- tcam->max_groups = max_groups;
- tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- ACL_MAX_GROUP_SIZE);
-
- err = ops->init(mlxsw_sp, tcam->priv, tcam);
- if (err)
- goto err_tcam_init;
-
- return 0;
-
-err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
- return err;
-}
-
-void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-
- mutex_destroy(&tcam->lock);
- ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
-}
-
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority)
@@ -893,41 +833,6 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
kfree(vregion);
}
-u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- u32 vregion_rehash_intrvl;
-
- if (WARN_ON(!ops->region_rehash_hints_get))
- return 0;
- vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
- return vregion_rehash_intrvl;
-}
-
-int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam,
- u32 val)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_sp_acl_tcam_vregion *vregion;
-
- if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
- return -EINVAL;
- if (WARN_ON(!ops->region_rehash_hints_get))
- return -EOPNOTSUPP;
- tcam->vregion_rehash_intrvl = val;
- mutex_lock(&tcam->lock);
- list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
- if (val)
- mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
- else
- cancel_delayed_work_sync(&vregion->rehash.dw);
- }
- mutex_unlock(&tcam->lock);
- return 0;
-}
-
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
@@ -1542,6 +1447,153 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}
+static int
+mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp_acl_tcam *tcam;
+ struct mlxsw_sp *mlxsw_sp;
+
+ mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
+ ctx->val.vu32 = tcam->vregion_rehash_intrvl;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ struct mlxsw_sp_acl_tcam *tcam;
+ struct mlxsw_sp *mlxsw_sp;
+ u32 val = ctx->val.vu32;
+
+ if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
+ return -EINVAL;
+
+ mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
+ tcam->vregion_rehash_intrvl = val;
+ mutex_lock(&tcam->lock);
+ list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
+ if (val)
+ mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+ else
+ cancel_delayed_work_sync(&vregion->rehash.dw);
+ }
+ mutex_unlock(&tcam->lock);
+ return 0;
+}
+
+static const struct devlink_param mlxsw_sp_acl_tcam_rehash_params[] = {
+ DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+ "acl_region_rehash_interval",
+ DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlxsw_sp_acl_tcam_region_rehash_intrvl_get,
+ mlxsw_sp_acl_tcam_region_rehash_intrvl_set,
+ NULL),
+};
+
+static int mlxsw_sp_acl_tcam_rehash_params_register(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
+ return 0;
+
+ return devl_params_register(devlink, mlxsw_sp_acl_tcam_rehash_params,
+ ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_params_unregister(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
+ return;
+
+ devl_params_unregister(devlink, mlxsw_sp_acl_tcam_rehash_params,
+ ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
+}
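
The rehash interval is now exposed as a driver-specific devlink runtime parameter owned by the TCAM code, registered only when the ops provide region_rehash_hints_get. A generic sketch of wiring such a parameter, where my_get/my_set and the id 42 are placeholders:

	static const struct devlink_param my_params[] = {
		DEVLINK_PARAM_DRIVER(42, "my_interval",
				     DEVLINK_PARAM_TYPE_U32,
				     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
				     my_get, my_set, NULL),
	};

	/* in the driver's init path, under the devlink instance lock: */
	err = devl_params_register(devlink, my_params, ARRAY_SIZE(my_params));

From userspace it would then be driven with, e.g.:

	devlink dev param set pci/0000:03:00.0 name my_interval value 3000 cmode runtime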
+
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ u64 max_tcam_regions;
+ u64 max_regions;
+ u64 max_groups;
+ int err;
+
+ mutex_init(&tcam->lock);
+ tcam->vregion_rehash_intrvl =
+ MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
+ INIT_LIST_HEAD(&tcam->vregion_list);
+
+ err = mlxsw_sp_acl_tcam_rehash_params_register(mlxsw_sp);
+ if (err)
+ goto err_rehash_params_register;
+
+ max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_TCAM_REGIONS);
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+
+ /* Use 1:1 mapping between ACL region and TCAM region */
+ if (max_tcam_regions < max_regions)
+ max_regions = max_tcam_regions;
+
+ tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
+ if (!tcam->used_regions) {
+ err = -ENOMEM;
+ goto err_alloc_used_regions;
+ }
+ tcam->max_regions = max_regions;
+
+ max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+ tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
+ if (!tcam->used_groups) {
+ err = -ENOMEM;
+ goto err_alloc_used_groups;
+ }
+ tcam->max_groups = max_groups;
+ tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_GROUP_SIZE);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+ goto err_tcam_init;
+
+ return 0;
+
+err_tcam_init:
+ bitmap_free(tcam->used_groups);
+err_alloc_used_groups:
+ bitmap_free(tcam->used_regions);
+err_alloc_used_regions:
+ mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
+err_rehash_params_register:
+ mutex_destroy(&tcam->lock);
+ return err;
+}
+
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->fini(mlxsw_sp, tcam->priv);
+ bitmap_free(tcam->used_groups);
+ bitmap_free(tcam->used_regions);
+ mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
+ mutex_destroy(&tcam->lock);
+}
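
Note that the teardown above releases resources in exactly the reverse order of the rewritten init (ops->fini, bitmaps, devlink params, mutex), mirroring the goto unwind ladder. The idiom in miniature, with stub steps:

	static int step_a(void) { return 0; }
	static int step_b(void) { return 0; }
	static void undo_b(void) { }
	static void undo_a(void) { }

	static int setup(void)
	{
		int err;

		err = step_a();
		if (err)
			return err;
		err = step_b();
		if (err)
			goto err_b;
		return 0;

	err_b:
		undo_a();
		return err;
	}

	static void teardown(void)
	{
		undo_b();	/* reverse order of setup */
		undo_a();
	}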
+
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
MLXSW_AFK_ELEMENT_DMAC_32_47,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index edbbc89e7a71..462bf448497d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -29,11 +29,6 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
-u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam);
-int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam,
- u32 val);
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index e91fb205e0b4..594cdcb90b3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -103,7 +103,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
}
ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
- act->cookie, extack);
+ act->user_cookie, extack);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 534840f9a7ca..7e0871b631e4 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -792,7 +792,7 @@ static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
!(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}
-static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
+static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
{
struct lan743x_adapter *adapter = bus->priv;
u32 val, mii_access;
@@ -814,8 +814,8 @@ static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
return (int)(val & 0xFFFF);
}
-static int lan743x_mdiobus_write(struct mii_bus *bus,
- int phy_id, int index, u16 regval)
+static int lan743x_mdiobus_write_c22(struct mii_bus *bus,
+ int phy_id, int index, u16 regval)
{
struct lan743x_adapter *adapter = bus->priv;
u32 val, mii_access;
@@ -835,12 +835,10 @@ static int lan743x_mdiobus_write(struct mii_bus *bus,
return ret;
}
-static u32 lan743x_mac_mmd_access(int id, int index, int op)
+static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op)
{
- u16 dev_addr;
u32 ret;
- dev_addr = (index >> 16) & 0x1f;
ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
MAC_MII_ACC_PHY_ADDR_MASK_;
ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
@@ -858,7 +856,8 @@ static u32 lan743x_mac_mmd_access(int id, int index, int op)
return ret;
}
-static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
+static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
+ int dev_addr, int index)
{
struct lan743x_adapter *adapter = bus->priv;
u32 mmd_access;
@@ -868,32 +867,30 @@ static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
- if (index & MII_ADDR_C45) {
- /* Load Register Address */
- lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
- mmd_access = lan743x_mac_mmd_access(phy_id, index,
- MMD_ACCESS_ADDRESS);
- lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
- ret = lan743x_mac_mii_wait_till_not_busy(adapter);
- if (ret < 0)
- return ret;
- /* Read Data */
- mmd_access = lan743x_mac_mmd_access(phy_id, index,
- MMD_ACCESS_READ);
- lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
- ret = lan743x_mac_mii_wait_till_not_busy(adapter);
- if (ret < 0)
- return ret;
- ret = lan743x_csr_read(adapter, MAC_MII_DATA);
- return (int)(ret & 0xFFFF);
- }
- ret = lan743x_mdiobus_read(bus, phy_id, index);
- return ret;
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, index);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* Read Data */
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_READ);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_csr_read(adapter, MAC_MII_DATA);
+ return (int)(ret & 0xFFFF);
}
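
With the C22/C45 split, dev_addr now arrives as its own argument, so the old trick of packing it into the register number goes away (the deleted lan743x_mac_mmd_access() hunk above shows it being extracted from bits 20:16). A sketch of the legacy encoding that MII_ADDR_C45 implied:

	#include <linux/types.h>

	/* Legacy C45-over-C22 encoding, now retired from bus callbacks. */
	static inline int c45_devad(u32 regnum)
	{
		return (regnum >> 16) & 0x1f;	/* 5-bit MMD device address */
	}

	static inline int c45_regnum(u32 regnum)
	{
		return regnum & 0xffff;		/* 16-bit register address */
	}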
-static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
- int phy_id, int index, u16 regval)
+static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id,
+ int dev_addr, int index, u16 regval)
{
struct lan743x_adapter *adapter = bus->priv;
u32 mmd_access;
@@ -903,26 +900,23 @@ static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
- if (index & MII_ADDR_C45) {
- /* Load Register Address */
- lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
- mmd_access = lan743x_mac_mmd_access(phy_id, index,
- MMD_ACCESS_ADDRESS);
- lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
- ret = lan743x_mac_mii_wait_till_not_busy(adapter);
- if (ret < 0)
- return ret;
- /* Write Data */
- lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
- mmd_access = lan743x_mac_mmd_access(phy_id, index,
- MMD_ACCESS_WRITE);
- lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
- ret = lan743x_mac_mii_wait_till_not_busy(adapter);
- } else {
- ret = lan743x_mdiobus_write(bus, phy_id, index, regval);
- }
- return ret;
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* Write Data */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_WRITE);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+
+ return lan743x_mac_mii_wait_till_not_busy(adapter);
}
static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
@@ -1424,14 +1418,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
data = lan743x_csr_read(adapter, MAC_CR);
- /* set interface mode */
- if (phy_interface_is_rgmii(phydev))
- /* RGMII */
- data &= ~MAC_CR_MII_EN_;
- else
- /* GMII */
- data |= MAC_CR_MII_EN_;
-
/* set duplex mode */
if (phydev->duplex)
data |= MAC_CR_DPX_;
@@ -1483,10 +1469,33 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
netdev->phydev = NULL;
}
+static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
+{
+ u32 id_rev;
+ u32 data;
+
+ data = lan743x_csr_read(adapter, MAC_CR);
+ id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (adapter->is_pci11x1x && adapter->is_sgmii_en)
+ adapter->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ else if (id_rev == ID_REV_ID_LAN7430_)
+ adapter->phy_interface = PHY_INTERFACE_MODE_GMII;
+ else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_))
+ adapter->phy_interface = PHY_INTERFACE_MODE_MII;
+ else
+ adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
+}
+
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct lan743x_phy *phy = &adapter->phy;
+ struct fixed_phy_status fphy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ };
struct phy_device *phydev;
int ret = -EIO;
@@ -1497,17 +1506,25 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
if (!phydev) {
/* try internal phy */
phydev = phy_find_first(adapter->mdiobus);
- if (!phydev)
- goto return_error;
+ if (!phydev) {
+ if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
+ ID_REV_ID_LAN7431_) {
+ phydev = fixed_phy_register(PHY_POLL,
+ &fphy_status, NULL);
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "No PHY/fixed_PHY found\n");
+ return -EIO;
+ }
+ } else {
+ goto return_error;
+ }
+ }
- if (adapter->is_pci11x1x)
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- PHY_INTERFACE_MODE_RGMII);
- else
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- PHY_INTERFACE_MODE_GMII);
+ lan743x_phy_interface_select(adapter);
+
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ adapter->phy_interface);
if (ret)
goto return_error;
}
@@ -3285,9 +3302,10 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
- adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
- adapter->mdiobus->read = lan743x_mdiobus_c45_read;
- adapter->mdiobus->write = lan743x_mdiobus_c45_write;
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
+ adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45;
+ adapter->mdiobus->write_c45 = lan743x_mdiobus_write_c45;
adapter->mdiobus->name = "lan743x-mdiobus-c45";
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus-c45\n");
@@ -3299,16 +3317,15 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
netif_dbg(adapter, drv, adapter->netdev,
"RGMII operation\n");
// Only C22 support when RGMII I/F
- adapter->mdiobus->probe_capabilities = MDIOBUS_C22;
- adapter->mdiobus->read = lan743x_mdiobus_read;
- adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
adapter->mdiobus->name = "lan743x-mdiobus";
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus\n");
}
} else {
- adapter->mdiobus->read = lan743x_mdiobus_read;
- adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
adapter->mdiobus->name = "lan743x-mdiobus";
netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
}
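The mdiobus hunks above track the C22/C45 split done across this release: instead of a probe_capabilities flag and a single read/write pair that had to demultiplex clause 22 and clause 45 accesses internally, struct mii_bus now carries dedicated read_c45/write_c45 methods. A minimal sketch of a bus wiring up both (all function names here are placeholders):

	#include <linux/mdio.h>
	#include <linux/phy.h>

	static int ex_read_c22(struct mii_bus *bus, int addr, int regnum)
	{
		return 0;	/* issue a clause 22 read frame */
	}

	static int ex_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val)
	{
		return 0;	/* issue a clause 22 write frame */
	}

	static int ex_read_c45(struct mii_bus *bus, int addr, int devad, int regnum)
	{
		return 0;	/* clause 45 frame; the device address is explicit */
	}

	static int ex_write_c45(struct mii_bus *bus, int addr, int devad,
				int regnum, u16 val)
	{
		return 0;
	}

	static void ex_mdiobus_setup(struct mii_bus *bus)
	{
		bus->read = ex_read_c22;
		bus->write = ex_write_c22;
		/* Only set these when the hardware can issue real C45
		 * transactions; leaving them NULL tells the core the bus
		 * cannot do clause 45.
		 */
		bus->read_c45 = ex_read_c45;
		bus->write_c45 = ex_write_c45;
	}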
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 8438c3dbcf36..52609fc13ad9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -1042,6 +1042,7 @@ struct lan743x_adapter {
#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
u32 flags;
u32 hw_cfg;
+ phy_interface_t phy_interface;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 56afd694f3c7..7b0cda4ffa6b 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -15,5 +15,7 @@ lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_xdp.o lan966x_vcap_impl.o lan966x_vcap_ag_api.o \
lan966x_tc_flower.o lan966x_goto.o
+lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
+
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
index bf0cfe24a8fc..9b18156eea1a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
@@ -4,7 +4,7 @@
#include "vcap_api_client.h"
int lan966x_goto_port_add(struct lan966x_port *port,
- struct flow_action_entry *act,
+ int from_cid, int to_cid,
unsigned long goto_id,
struct netlink_ext_ack *extack)
{
@@ -12,7 +12,7 @@ int lan966x_goto_port_add(struct lan966x_port *port,
int err;
err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev,
- act->chain_index, goto_id,
+ from_cid, to_cid, goto_id,
true);
if (err == -EFAULT) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported goto chain");
@@ -29,8 +29,6 @@ int lan966x_goto_port_add(struct lan966x_port *port,
return err;
}
- port->tc.goto_id = goto_id;
-
return 0;
}
@@ -41,14 +39,12 @@ int lan966x_goto_port_del(struct lan966x_port *port,
struct lan966x *lan966x = port->lan966x;
int err;
- err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0,
+ err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0, 0,
goto_id, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Could not disable VCAP lookups");
return err;
}
- port->tc.goto_id = 0;
-
return 0;
}
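After this change vcap_enable_lookups() takes the source and destination chain ids explicitly instead of digging them out of a flow_action_entry, so the caller decides which lookup range a goto binds; disabling passes 0 for both, since the binding is identified by its cookie. A sketch of the two call shapes, inferred from the call sites above (wrapper names are illustrative, and the vcap_api_client.h declarations are assumed in scope):

	/* Enable: bind lookups from the offloaded chain to the goto
	 * target chain on this port.
	 */
	static int ex_goto_bind(struct vcap_control *vctrl, struct net_device *ndev,
				int from_cid, int to_cid, unsigned long cookie)
	{
		return vcap_enable_lookups(vctrl, ndev, from_cid, to_cid,
					   cookie, true);
	}

	/* Disable: the chain ids are irrelevant, the cookie is enough. */
	static int ex_goto_unbind(struct vcap_control *vctrl,
				  struct net_device *ndev, unsigned long cookie)
	{
		return vcap_enable_lookups(vctrl, ndev, 0, 0, cookie, false);
	}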
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 580c91d24a52..8b89de0541ff 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -823,6 +823,11 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink = phylink;
+ if (lan966x->fdma)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
err = register_netdev(dev);
if (err) {
dev_err(lan966x->dev, "register_netdev failed\n");
@@ -1035,6 +1040,8 @@ static int lan966x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
@@ -1223,6 +1230,8 @@ static int lan966x_remove(struct platform_device *pdev)
lan966x_fdb_deinit(lan966x);
lan966x_ptp_deinit(lan966x);
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
return 0;
}
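The probe/remove hunks above follow the usual driver debugfs lifecycle: create one root directory early, hand it to subsystems that add files beneath it, and tear the whole tree down on remove. A minimal sketch of that pattern (driver and struct names are illustrative):

	#include <linux/debugfs.h>

	struct ex_priv {
		struct dentry *debugfs_root;
	};

	static void ex_debugfs_init(struct ex_priv *priv)
	{
		/* debugfs_create_dir() never returns NULL; failures are
		 * encoded in the pointer and later debugfs calls on it
		 * become no-ops, so no error checking is needed here.
		 */
		priv->debugfs_root = debugfs_create_dir("ex-driver", NULL);
	}

	static void ex_debugfs_exit(struct ex_priv *priv)
	{
		/* Removes the directory and everything registered under it */
		debugfs_remove_recursive(priv->debugfs_root);
	}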
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 3491f1961835..49f5159afbf3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -3,6 +3,7 @@
#ifndef __LAN966X_MAIN_H__
#define __LAN966X_MAIN_H__
+#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
@@ -14,6 +15,9 @@
#include <net/pkt_sched.h>
#include <net/switchdev.h>
+#include <vcap_api.h>
+#include <vcap_api_client.h>
+
#include "lan966x_regs.h"
#include "lan966x_ifh.h"
@@ -128,6 +132,13 @@ enum LAN966X_PORT_MASK_MODE {
LAN966X_PMM_REDIRECT,
};
+enum vcap_is2_port_sel_ipv6 {
+ VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
+ VCAP_IS2_PS_IPV6_STD,
+ VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
+ VCAP_IS2_PS_IPV6_MAC_ETYPE,
+};
+
struct lan966x_port;
struct lan966x_db {
@@ -315,6 +326,9 @@ struct lan966x {
/* vcap */
struct vcap_control *vcap_ctrl;
+
+ /* debugfs */
+ struct dentry *debugfs_root;
};
struct lan966x_port_config {
@@ -332,7 +346,6 @@ struct lan966x_port_tc {
unsigned long police_id;
unsigned long ingress_mirror_id;
unsigned long egress_mirror_id;
- unsigned long goto_id;
struct flow_stats police_stat;
struct flow_stats mirror_stat;
};
@@ -602,12 +615,25 @@ static inline bool lan966x_xdp_port_present(struct lan966x_port *port)
int lan966x_vcap_init(struct lan966x *lan966x);
void lan966x_vcap_deinit(struct lan966x *lan966x);
+#if defined(CONFIG_DEBUG_FS)
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+#else
+static inline int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+#endif
int lan966x_tc_flower(struct lan966x_port *port,
- struct flow_cls_offload *f);
+ struct flow_cls_offload *f,
+ bool ingress);
int lan966x_goto_port_add(struct lan966x_port *port,
- struct flow_action_entry *act,
+ int from_cid, int to_cid,
unsigned long goto_id,
struct netlink_ext_ack *extack);
int lan966x_goto_port_del(struct lan966x_port *port,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index a8348437dd87..931e37b9a0ad 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -83,8 +83,7 @@ static int lan966x_ptp_add_trap(struct lan966x_port *port,
if (err)
goto free_rule;
- err = vcap_set_rule_set_actionset(vrule, VCAP_AFS_BASE_TYPE);
- err |= vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
+ err = vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, LAN966X_PMM_REPLACE);
err |= vcap_val_rule(vrule, proto);
if (err)
@@ -524,9 +523,9 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
if (WARN_ON(!skb_match))
continue;
- spin_lock(&lan966x->ptp_ts_id_lock);
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
lan966x->ptp_skbs--;
- spin_unlock(&lan966x->ptp_ts_id_lock);
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
/* Get the h/w timestamp */
lan966x_get_hwtimestamp(lan966x, &ts, delay);
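The locking fix above applies the standard rule that a spinlock taken both with and without interrupts disabled must use the irqsave variants, so that unlock restores whatever interrupt state the caller had instead of unconditionally re-enabling interrupts. The general pattern, sketched outside this driver:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(ex_lock);
	static int ex_counter;

	static void ex_update(void)
	{
		unsigned long flags;

		/* Save the current interrupt state, disable interrupts and
		 * take the lock; on unlock the saved state is restored
		 * rather than interrupts being blindly re-enabled.
		 */
		spin_lock_irqsave(&ex_lock, flags);
		ex_counter--;
		spin_unlock_irqrestore(&ex_lock, flags);
	}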
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
index 01072121c999..cf0cc7562d04 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "lan966x_main.h"
@@ -70,7 +71,7 @@ static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSMATCHALL:
return lan966x_tc_matchall(port, type_data, ingress);
case TC_SETUP_CLSFLOWER:
- return lan966x_tc_flower(port, type_data);
+ return lan966x_tc_flower(port, type_data, ingress);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index ba3fa917d6b7..f960727ecaee 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -3,53 +3,135 @@
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_tc.h"
-struct lan966x_tc_flower_parse_usage {
- struct flow_cls_offload *f;
- struct flow_rule *frule;
- struct vcap_rule *vrule;
- unsigned int used_keys;
- u16 l3_proto;
-};
+static bool lan966x_tc_is_known_etype(u16 etype)
+{
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ return true;
+ }
+
+ return false;
+}
-static int lan966x_tc_flower_handler_ethaddr_usage(struct lan966x_tc_flower_parse_usage *st)
+static int
+lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
- enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
- enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
- struct flow_match_eth_addrs match;
- struct vcap_u48_key smac, dmac;
+ struct flow_match_control match;
int err = 0;
- flow_rule_match_eth_addrs(st->frule, &match);
-
- if (!is_zero_ether_addr(match.mask->src)) {
- vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
- vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ flow_rule_match_control(st->frule, &match);
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_1);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_0);
if (err)
goto out;
}
- if (!is_zero_ether_addr(match.mask->dst)) {
- vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
- vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (match.key->flags & FLOW_DIS_FIRST_FRAG)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_0);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_1);
if (err)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
return err;
out:
- NL_SET_ERR_MSG_MOD(st->f->common.extack, "eth_addr parse error");
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
static int
-(*lan966x_tc_flower_handlers_usage[])(struct lan966x_tc_flower_parse_usage *st) = {
- [FLOW_DISSECTOR_KEY_ETH_ADDRS] = lan966x_tc_flower_handler_ethaddr_usage,
+lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_basic match;
+ int err = 0;
+
+ flow_rule_match_basic(st->frule, &match);
+ if (match.mask->n_proto) {
+ st->l3_proto = be16_to_cpu(match.key->n_proto);
+ if (!lan966x_tc_is_known_etype(st->l3_proto)) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IP) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+ }
+ if (match.mask->ip_proto) {
+ st->l4_proto = match.key->ip_proto;
+
+ if (st->l4_proto == IPPROTO_TCP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l4_proto == IPPROTO_UDP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ } else {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP_PROTO,
+ st->l4_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
+ return err;
+}
+
+static int
+lan966x_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ return vcap_tc_flower_handler_vlan_usage(st,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_8021Q_PCP_CLS);
+}
+
+static int
+(*lan966x_tc_flower_handlers_usage[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
+ [FLOW_DISSECTOR_KEY_CONTROL] = lan966x_tc_flower_handler_control_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_BASIC] = lan966x_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_VLAN] = lan966x_tc_flower_handler_vlan_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};
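The table above is indexed directly by FLOW_DISSECTOR_KEY_* values, so dispatch is an array lookup rather than a switch; shared handlers now come from the common vcap_tc.c, and only keys with device-specific behaviour keep a local wrapper. The dispatch loop itself is not shown in this hunk; a sketch of the assumed idiom (the function name is illustrative):

	static int ex_dispatch(struct vcap_tc_flower_parse_usage *st)
	{
		int err;

		for (int i = 0; i < ARRAY_SIZE(lan966x_tc_flower_handlers_usage); i++) {
			/* Skip keys the rule does not use and keys we have
			 * no handler for.
			 */
			if (!flow_rule_match_key(st->frule, i) ||
			    !lan966x_tc_flower_handlers_usage[i])
				continue;
			err = lan966x_tc_flower_handlers_usage[i](st);
			if (err)
				return err;
		}
		return 0;
	}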
static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
@@ -57,8 +139,8 @@ static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
struct vcap_rule *vrule,
u16 *l3_proto)
{
- struct lan966x_tc_flower_parse_usage state = {
- .f = f,
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = f,
.vrule = vrule,
.l3_proto = ETH_P_ALL,
};
@@ -82,8 +164,9 @@ static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
}
static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
+ struct net_device *dev,
struct flow_cls_offload *fco,
- struct vcap_admin *admin)
+ bool ingress)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
struct flow_action_entry *actent, *last_actent = NULL;
@@ -109,21 +192,24 @@ static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
last_actent = actent; /* Save last action for later check */
}
- /* Check that last action is a goto */
- if (last_actent->id != FLOW_ACTION_GOTO) {
+ /* Check that the last action is a goto.
+ * The last chain/lookup does not need to have a goto action.
+ */
+ if (last_actent->id == FLOW_ACTION_GOTO) {
+ /* Check if the destination chain is in one of the VCAPs */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Last action must be 'goto'");
return -EINVAL;
}
- /* Check if the goto chain is in the next lookup */
- if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
- last_actent->chain_index)) {
- NL_SET_ERR_MSG_MOD(fco->common.extack,
- "Invalid goto chain");
- return -EINVAL;
- }
-
/* Catch unsupported combinations of actions */
if (action_mask & BIT(FLOW_ACTION_TRAP) &&
action_mask & BIT(FLOW_ACTION_ACCEPT)) {
@@ -137,7 +223,8 @@ static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
static int lan966x_tc_flower_add(struct lan966x_port *port,
struct flow_cls_offload *f,
- struct vcap_admin *admin)
+ struct vcap_admin *admin,
+ bool ingress)
{
struct flow_action_entry *act;
u16 l3_proto = ETH_P_ALL;
@@ -145,8 +232,8 @@ static int lan966x_tc_flower_add(struct lan966x_port *port,
struct vcap_rule *vrule;
int err, idx;
- err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl, f,
- admin);
+ err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl,
+ port->dev, f, ingress);
if (err)
return err;
@@ -174,8 +261,6 @@ static int lan966x_tc_flower_add(struct lan966x_port *port,
0);
err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
LAN966X_PMM_REPLACE);
- err |= vcap_set_rule_set_actionset(vrule,
- VCAP_AFS_BASE_TYPE);
if (err)
goto out;
@@ -229,8 +314,27 @@ static int lan966x_tc_flower_del(struct lan966x_port *port,
return err;
}
+static int lan966x_tc_flower_stats(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct vcap_counter count = {};
+ int err;
+
+ err = vcap_get_rule_count_by_cookie(port->lan966x->vcap_ctrl,
+ &count, f->cookie);
+ if (err)
+ return err;
+
+ flow_stats_update(&f->stats, 0x0, count.value, 0, 0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ return err;
+}
+
int lan966x_tc_flower(struct lan966x_port *port,
- struct flow_cls_offload *f)
+ struct flow_cls_offload *f,
+ bool ingress)
{
struct vcap_admin *admin;
@@ -243,9 +347,11 @@ int lan966x_tc_flower(struct lan966x_port *port,
switch (f->command) {
case FLOW_CLS_REPLACE:
- return lan966x_tc_flower_add(port, f, admin);
+ return lan966x_tc_flower_add(port, f, admin, ingress);
case FLOW_CLS_DESTROY:
return lan966x_tc_flower_del(port, f, admin);
+ case FLOW_CLS_STATS:
+ return lan966x_tc_flower_stats(port, f, admin);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
index a539abaad9b6..20627323d656 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -24,7 +24,8 @@ static int lan966x_tc_matchall_add(struct lan966x_port *port,
return lan966x_mirror_port_add(port, act, f->cookie,
ingress, f->common.extack);
case FLOW_ACTION_GOTO:
- return lan966x_goto_port_add(port, act, f->cookie,
+ return lan966x_goto_port_add(port, f->common.chain_index,
+ act->chain_index, f->cookie,
f->common.extack);
default:
NL_SET_ERR_MSG_MOD(f->common.extack,
@@ -46,13 +47,8 @@ static int lan966x_tc_matchall_del(struct lan966x_port *port,
f->cookie == port->tc.egress_mirror_id) {
return lan966x_mirror_port_del(port, ingress,
f->common.extack);
- } else if (f->cookie == port->tc.goto_id) {
- return lan966x_goto_port_del(port, f->cookie,
- f->common.extack);
} else {
- NL_SET_ERR_MSG_MOD(f->common.extack,
- "Unsupported action");
- return -EOPNOTSUPP;
+ return lan966x_goto_port_del(port, f->cookie, f->common.extack);
}
return 0;
@@ -80,12 +76,6 @@ int lan966x_tc_matchall(struct lan966x_port *port,
struct tc_cls_matchall_offload *f,
bool ingress)
{
- if (!tc_cls_can_offload_and_chain0(port->dev, &f->common)) {
- NL_SET_ERR_MSG_MOD(f->common.extack,
- "Only chain zero is supported");
- return -EOPNOTSUPP;
- }
-
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return lan966x_tc_matchall_add(port, f, ingress);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
new file mode 100644
index 000000000000..7a0db58f5513
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "lan966x_vcap_ag_api.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+static void lan966x_vcap_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_VCAP_S2_CFG_ENA_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ for (int l = 0; l < admin->lookups; ++l) {
+ out->prf(out->dst, "\n Lookup %d: ", l);
+
+ out->prf(out->dst, "\n snap: ");
+ if (ANA_VCAP_S2_CFG_SNAP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_llc");
+ else
+ out->prf(out->dst, "mac_snap");
+
+ out->prf(out->dst, "\n oam: ");
+ if (ANA_VCAP_S2_CFG_OAM_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_oam");
+
+ out->prf(out->dst, "\n arp: ");
+ if (ANA_VCAP_S2_CFG_ARP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_arp");
+
+ out->prf(out->dst, "\n ipv4_other: ");
+ if (ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ip4_other");
+
+ out->prf(out->dst, "\n ipv4_tcp_udp: ");
+ if (ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ipv4_tcp_udp");
+
+ out->prf(out->dst, "\n ipv6: ");
+ switch (ANA_VCAP_S2_CFG_IP6_CFG_GET(val) & (0x3 << l)) {
+ case VCAP_IS2_PS_IPV6_TCPUDP_OTHER:
+ out->prf(out->dst, "ipv6_tcp_udp ipv6_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_STD:
+ out->prf(out->dst, "ipv6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER:
+ out->prf(out->dst, "ipv4_tcp_udp ipv4_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ }
+ }
+
+ out->prf(out->dst, "\n");
+}
+
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct vcap_info *vcap;
+ struct vcap_control *vctrl;
+
+ vctrl = lan966x->vcap_ctrl;
+ vcap = &vctrl->vcaps[admin->vtype];
+
+ out->prf(out->dst, "%s:\n", vcap->name);
+ lan966x_vcap_port_keys(port, admin, out);
+
+ return 0;
+}
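lan966x_vcap_port_info() is called from the shared VCAP debugfs code; everything it emits goes through the vcap_output_print callback, which decouples formatting from the sink. A sketch of a plausible sink writing into a seq_file is shown below; the real printers live in vcap_api_debugfs.c, and this only illustrates the shape assumed by the out->prf(out->dst, ...) calls above:

	#include <linux/seq_file.h>

	/* Sketch: 'out' is the seq_file stored in vcap_output_print::dst */
	static __printf(2, 3) void ex_vcap_seq_prf(void *out, const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		seq_vprintf(out, fmt, args);
		va_end(args);
	}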
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index a54c0426a35f..68f9d69fd37b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -4,18 +4,12 @@
#include "lan966x_vcap_ag_api.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_api_debugfs.h"
#define STREAMSIZE (64 * 4)
#define LAN966X_IS2_LOOKUPS 2
-enum vcap_is2_port_sel_ipv6 {
- VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
- VCAP_IS2_PS_IPV6_STD,
- VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
- VCAP_IS2_PS_IPV6_MAC_ETYPE,
-};
-
static struct lan966x_vcap_inst {
enum vcap_type vtype; /* type of vcap */
int tgt_inst; /* hardware instance number */
@@ -23,6 +17,7 @@ static struct lan966x_vcap_inst {
int first_cid; /* first chain id in this vcap */
int last_cid; /* last chain id in this vcap */
int count; /* number of available addresses */
+ bool ingress; /* is vcap in the ingress path */
} lan966x_vcap_inst_cfg[] = {
{
.vtype = VCAP_TYPE_IS2, /* IS2-0 */
@@ -31,6 +26,7 @@ static struct lan966x_vcap_inst {
.first_cid = LAN966X_VCAP_CID_IS2_L0,
.last_cid = LAN966X_VCAP_CID_IS2_MAX,
.count = 256,
+ .ingress = true,
},
};
@@ -383,27 +379,6 @@ static void lan966x_vcap_move(struct net_device *dev,
lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
}
-static int lan966x_vcap_port_info(struct net_device *dev,
- struct vcap_admin *admin,
- struct vcap_output_print *out)
-{
- return 0;
-}
-
-static int lan966x_vcap_enable(struct net_device *dev,
- struct vcap_admin *admin,
- bool enable)
-{
- struct lan966x_port *port = netdev_priv(dev);
- struct lan966x *lan966x = port->lan966x;
-
- lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(enable),
- ANA_VCAP_S2_CFG_ENA,
- lan966x, ANA_VCAP_S2_CFG(port->chip_port));
-
- return 0;
-}
-
static struct vcap_operations lan966x_vcap_ops = {
.validate_keyset = lan966x_vcap_validate_keyset,
.add_default_fields = lan966x_vcap_add_default_fields,
@@ -414,7 +389,6 @@ static struct vcap_operations lan966x_vcap_ops = {
.update = lan966x_vcap_update,
.move = lan966x_vcap_move,
.port_info = lan966x_vcap_port_info,
- .enable = lan966x_vcap_enable,
};
static void lan966x_vcap_admin_free(struct vcap_admin *admin)
@@ -446,6 +420,7 @@ lan966x_vcap_admin_alloc(struct lan966x *lan966x, struct vcap_control *ctrl,
admin->vtype = cfg->vtype;
admin->vinst = 0;
+ admin->ingress = cfg->ingress;
admin->w32be = true;
admin->tgt_inst = cfg->tgt_inst;
@@ -498,6 +473,7 @@ int lan966x_vcap_init(struct lan966x *lan966x)
struct lan966x_vcap_inst *cfg;
struct vcap_control *ctrl;
struct vcap_admin *admin;
+ struct dentry *dir;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -521,6 +497,18 @@ int lan966x_vcap_init(struct lan966x *lan966x)
list_add_tail(&admin->list, &ctrl->list);
}
+ dir = vcap_debugfs(lan966x->dev, lan966x->debugfs_root, ctrl);
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (lan966x->ports[p]) {
+ vcap_port_debugfs(lan966x->dev, dir, ctrl,
+ lan966x->ports[p]->dev);
+
+ lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(true),
+ ANA_VCAP_S2_CFG_ENA, lan966x,
+ ANA_VCAP_S2_CFG(lan966x->ports[p]->chip_port));
+ }
+ }
+
lan966x->vcap_ctrl = ctrl;
return 0;
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index d0ed7090aa54..1cb1cc3f1a85 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -9,7 +9,8 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
- sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o sparx5_tc_matchall.o
+ sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \
+ sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o sparx5_psfp.o
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
index 74abb946b2a3..871a3e62f852 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
@@ -133,12 +133,17 @@ static bool sparx5_dcb_apptrust_contains(int portno, u8 selector)
static int sparx5_dcb_app_update(struct net_device *dev)
{
+ struct dcb_ieee_app_prio_map dscp_rewr_map = {0};
+ struct dcb_rewr_prio_pcp_map pcp_rewr_map = {0};
struct sparx5_port *port = netdev_priv(dev);
struct sparx5_port_qos_dscp_map *dscp_map;
struct sparx5_port_qos_pcp_map *pcp_map;
struct sparx5_port_qos qos = {0};
struct dcb_app app_itr = {0};
int portno = port->portno;
+ bool dscp_rewr = false;
+ bool pcp_rewr = false;
+ u16 dscp;
int i;
dscp_map = &qos.dscp.map;
@@ -163,31 +168,72 @@ static int sparx5_dcb_app_update(struct net_device *dev)
pcp_map->map[i] = dcb_getapp(dev, &app_itr);
}
+ /* Get pcp rewrite mapping */
+ dcb_getrewr_prio_pcp_mask_map(dev, &pcp_rewr_map);
+ for (i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) {
+ if (!pcp_rewr_map.map[i])
+ continue;
+ pcp_rewr = true;
+ qos.pcp_rewr.map.map[i] = fls(pcp_rewr_map.map[i]) - 1;
+ }
+
+ /* Get dscp rewrite mapping */
+ dcb_getrewr_prio_dscp_mask_map(dev, &dscp_rewr_map);
+ for (i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) {
+ if (!dscp_rewr_map.map[i])
+ continue;
+
+ /* The rewrite table of the switch has 32 entries: one for each
+ * priority at each DP level. Currently, the rewrite map does
+ * not indicate the DP level, so we map the classified QoS class
+ * to the classified DSCP once per classified DP level. Rewrite
+ * of DSCP is only enabled if we have active mappings.
+ */
+ dscp_rewr = true;
+ dscp = fls64(dscp_rewr_map.map[i]) - 1;
+ qos.dscp_rewr.map.map[i] = dscp; /* DP 0 */
+ qos.dscp_rewr.map.map[i + 8] = dscp; /* DP 1 */
+ qos.dscp_rewr.map.map[i + 16] = dscp; /* DP 2 */
+ qos.dscp_rewr.map.map[i + 24] = dscp; /* DP 3 */
+ }
+
/* Enable use of PCP for queue classification? */
if (sparx5_dcb_apptrust_contains(portno, DCB_APP_SEL_PCP)) {
qos.pcp.qos_enable = true;
qos.pcp.dp_enable = qos.pcp.qos_enable;
+ /* Enable rewrite of PCP and DEI if PCP is trusted *and* the
+ * rewrite table is not empty.
+ */
+ if (pcp_rewr)
+ qos.pcp_rewr.enable = true;
}
/* Enable use of DSCP for queue classification? */
if (sparx5_dcb_apptrust_contains(portno, IEEE_8021QAZ_APP_SEL_DSCP)) {
qos.dscp.qos_enable = true;
qos.dscp.dp_enable = qos.dscp.qos_enable;
+ if (dscp_rewr)
+ /* Do not enable rewrite if no mappings are active, as
+ * classified DSCP will then be zero for all classified
+ * QoS class and DP combinations.
+ */
+ qos.dscp_rewr.enable = true;
}
return sparx5_port_qos_set(port, &qos);
}
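To make the index arithmetic in the rewrite loop above concrete: with 8 priorities and 4 DP levels the table has 32 slots laid out as prio + 8 * dp. A worked example with an arbitrary value:

	/* Suppose only priority 5 has a rewrite entry, to DSCP 40, so
	 * bit 40 is set in its mask.
	 */
	u64 mask = BIT_ULL(40);
	u16 dscp = fls64(mask) - 1;	/* fls64() is 1-based: dscp == 40 */

	/* The loop then writes map[5], map[13], map[21] and map[29] = 40,
	 * i.e. the same DSCP at DP levels 0..3, because the rewrite map
	 * carries no DP information.
	 */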
-/* Set or delete dscp app entry.
+/* Set or delete DSCP app entry.
*
- * Dscp mapping is global for all ports, so set and delete app entries are
+ * DSCP mapping is global for all ports, so set and delete app entries are
* replicated for each port.
*/
-static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev,
- struct dcb_app *app, bool del)
+static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev,
+ struct dcb_app *app,
+ int (*setdel)(struct net_device *,
+ struct dcb_app *))
{
struct sparx5_port *port = netdev_priv(dev);
- struct dcb_app apps[SPX5_PORTS];
struct sparx5_port *port_itr;
int err, i;
@@ -195,11 +241,7 @@ static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev,
port_itr = port->sparx5->ports[i];
if (!port_itr)
continue;
- memcpy(&apps[i], app, sizeof(struct dcb_app));
- if (del)
- err = dcb_ieee_delapp(port_itr->ndev, &apps[i]);
- else
- err = dcb_ieee_setapp(port_itr->ndev, &apps[i]);
+ err = setdel(port_itr->ndev, app);
if (err)
return err;
}
@@ -226,7 +268,7 @@ static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
}
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
- err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, false);
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_setapp);
else
err = dcb_ieee_setapp(dev, app);
@@ -244,7 +286,7 @@ static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
int err;
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
- err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, true);
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
else
err = dcb_ieee_delapp(dev, app);
@@ -283,11 +325,60 @@ static int sparx5_dcb_getapptrust(struct net_device *dev, u8 *selectors,
return 0;
}
+static int sparx5_dcb_delrewr(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_delrewr);
+ else
+ err = dcb_delrewr(dev, app);
+
+ if (err < 0)
+ return err;
+
+ return sparx5_dcb_app_update(dev);
+}
+
+static int sparx5_dcb_setrewr(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ int err = 0;
+ u16 proto;
+
+ err = sparx5_dcb_app_validate(dev, app);
+ if (err)
+ goto out;
+
+ /* Delete current mapping, if it exists. */
+ proto = dcb_getrewr(dev, app);
+ if (proto) {
+ app_itr = *app;
+ app_itr.protocol = proto;
+ sparx5_dcb_delrewr(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_setrewr);
+ else
+ err = dcb_setrewr(dev, app);
+
+ if (err)
+ goto out;
+
+ sparx5_dcb_app_update(dev);
+
+out:
+ return err;
+}
+
const struct dcbnl_rtnl_ops sparx5_dcbnl_ops = {
.ieee_setapp = sparx5_dcb_ieee_setapp,
.ieee_delapp = sparx5_dcb_ieee_delapp,
.dcbnl_setapptrust = sparx5_dcb_setapptrust,
.dcbnl_getapptrust = sparx5_dcb_getapptrust,
+ .dcbnl_setrewr = sparx5_dcb_setrewr,
+ .dcbnl_delrewr = sparx5_dcb_delrewr,
};
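With the two new dcbnl hooks wired up, user space installs egress rewrite entries through the same dcb_app representation used for ingress APP entries; in the rewrite table the priority field is the classified QoS class and the protocol field the value to write on egress. A hedged kernel-side sketch of such an entry (the values are arbitrary):

	#include <net/dcbnl.h>

	/* Sketch: "frames classified to priority 3 leave with DSCP 24" */
	static const struct dcb_app ex_rewr = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.priority = 3,
		.protocol = 24,
	};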
int sparx5_dcb_init(struct sparx5 *sparx5)
@@ -304,6 +395,12 @@ int sparx5_dcb_init(struct sparx5 *sparx5)
sparx5_port_apptrust[port->portno] =
&sparx5_dcb_apptrust_policies
[SPARX5_DCB_APPTRUST_DSCP_PCP];
+
+ /* Enable DSCP classification based on classified QoS class and
+ * DP, for all DSCP values, for all ports.
+ */
+ sparx5_port_qos_dscp_rewr_mode_set(port,
+ SPARX5_PORT_REW_DSCP_ALL);
}
return 0;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 3c5d4fe99373..42b77ba9b572 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -198,12 +198,15 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_QSYS, 0x110a0000, 2 }, /* 0x6110a0000 */
{ TARGET_QFWD, 0x110b0000, 2 }, /* 0x6110b0000 */
{ TARGET_XQS, 0x110c0000, 2 }, /* 0x6110c0000 */
+ { TARGET_VCAP_ES2, 0x110d0000, 2 }, /* 0x6110d0000 */
+ { TARGET_VCAP_ES0, 0x110e0000, 2 }, /* 0x6110e0000 */
{ TARGET_CLKGEN, 0x11100000, 2 }, /* 0x611100000 */
{ TARGET_ANA_AC_POL, 0x11200000, 2 }, /* 0x611200000 */
{ TARGET_QRES, 0x11280000, 2 }, /* 0x611280000 */
{ TARGET_EACL, 0x112c0000, 2 }, /* 0x6112c0000 */
{ TARGET_ANA_CL, 0x11400000, 2 }, /* 0x611400000 */
{ TARGET_ANA_L3, 0x11480000, 2 }, /* 0x611480000 */
+ { TARGET_ANA_AC_SDLB, 0x11500000, 2 }, /* 0x611500000 */
{ TARGET_HSCH, 0x11580000, 2 }, /* 0x611580000 */
{ TARGET_REW, 0x11600000, 2 }, /* 0x611600000 */
{ TARGET_ANA_L2, 0x11800000, 2 }, /* 0x611800000 */
@@ -500,8 +503,8 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
clk_period = sparx5_clk_period(freq);
- spx5_rmw(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(clk_period / 100),
- HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS,
+ spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
+ HSCH_SYS_CLK_PER_100PS,
sparx5,
HSCH_SYS_CLK_PER);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 4a574cdcb584..72e7928912eb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -396,6 +396,7 @@ int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
struct sk_buff *skb);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
+int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
/* sparx5_vcap_impl.c */
int sparx5_vcap_init(struct sparx5 *sparx5);
@@ -413,6 +414,129 @@ int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+/* sparx5_pool.c */
+struct sparx5_pool_entry {
+ u16 ref_cnt;
+ u32 idx; /* tc index */
+};
+
+u32 sparx5_pool_idx_to_id(u32 idx);
+int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id);
+int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id);
+int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
+ u32 *id);
+
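+/* The helpers above implement a small ref-counted index pool, used by
+ * the SDLB/PSFP code later in this series. A usage sketch follows; the
+ * get/put semantics (allocate-or-reference, release) are assumptions
+ * inferred from the names and the ref_cnt field, and the wrapper names
+ * are illustrative:
+ *
+ *	static struct sparx5_pool_entry ex_pool[16];
+ *
+ *	static int ex_acquire(u32 tc_idx, u32 *id)
+ *	{
+ *		// Reuse an entry already bound to this tc index, else
+ *		// take a free one; both paths bump ref_cnt.
+ *		return sparx5_pool_get_with_idx(ex_pool, ARRAY_SIZE(ex_pool),
+ *						tc_idx, id);
+ *	}
+ *
+ *	static int ex_release(u32 id)
+ *	{
+ *		return sparx5_pool_put(ex_pool, ARRAY_SIZE(ex_pool), id);
+ *	}
+ */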
+/* sparx5_sdlb.c */
+#define SPX5_SDLB_PUP_TOKEN_DISABLE 0x1FFF
+#define SPX5_SDLB_PUP_TOKEN_MAX (SPX5_SDLB_PUP_TOKEN_DISABLE - 1)
+#define SPX5_SDLB_GROUP_RATE_MAX 25000000000ULL
+#define SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET 13
+#define SPX5_SDLB_CNT 4096
+#define SPX5_SDLB_GROUP_CNT 10
+#define SPX5_CLK_PER_100PS_DEFAULT 16
+
+struct sparx5_sdlb_group {
+ u64 max_rate;
+ u32 min_burst;
+ u32 frame_size;
+ u32 pup_interval;
+ u32 nsets;
+};
+
+extern struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT];
+int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval,
+ u64 rate);
+
+int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
+int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst);
+int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group);
+
+int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx);
+int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx);
+
+void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
+ u32 frame_size, u32 idx);
+
+/* sparx5_police.c */
+enum {
+ /* More policer types will be added later */
+ SPX5_POL_SERVICE
+};
+
+struct sparx5_policer {
+ u32 type;
+ u32 idx;
+ u64 rate;
+ u32 burst;
+ u32 group;
+ u8 event_mask;
+};
+
+int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol);
+
+/* sparx5_psfp.c */
+#define SPX5_PSFP_GCE_CNT 4
+#define SPX5_PSFP_SG_CNT 1024
+#define SPX5_PSFP_SG_MIN_CYCLE_TIME_NS (1 * NSEC_PER_USEC)
+#define SPX5_PSFP_SG_MAX_CYCLE_TIME_NS ((1 * NSEC_PER_SEC) - 1)
+#define SPX5_PSFP_SG_MAX_IPV (SPX5_PRIOS - 1)
+#define SPX5_PSFP_SG_OPEN (SPX5_PSFP_SG_CNT - 1)
+#define SPX5_PSFP_SG_CYCLE_TIME_DEFAULT 1000000
+#define SPX5_PSFP_SF_MAX_SDU 16383
+
+struct sparx5_psfp_fm {
+ struct sparx5_policer pol;
+};
+
+struct sparx5_psfp_gce {
+ bool gate_state; /* StreamGateState */
+ u32 interval; /* TimeInterval */
+ u32 ipv; /* InternalPriorityValue */
+ u32 maxoctets; /* IntervalOctetMax */
+};
+
+struct sparx5_psfp_sg {
+ bool gate_state; /* PSFPAdminGateStates */
+ bool gate_enabled; /* PSFPGateEnabled */
+ u32 ipv; /* PSFPAdminIPV */
+ struct timespec64 basetime; /* PSFPAdminBaseTime */
+ u32 cycletime; /* PSFPAdminCycleTime */
+ u32 cycletimeext; /* PSFPAdminCycleTimeExtension */
+ u32 num_entries; /* PSFPAdminControlListLength */
+ struct sparx5_psfp_gce gce[SPX5_PSFP_GCE_CNT];
+};
+
+struct sparx5_psfp_sf {
+ bool sblock_osize_ena;
+ bool sblock_osize;
+ u32 max_sdu;
+ u32 sgid; /* Gate id */
+ u32 fmid; /* Flow meter id */
+};
+
+int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_fm *fm, u32 *id);
+int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id);
+
+int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_sg *sg, u32 *id);
+int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id);
+
+int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
+ u32 *id);
+int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id);
+
+u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx);
+u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx);
+u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid);
+void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid);
+
+void sparx5_psfp_init(struct sparx5 *sparx5);
+
+/* sparx5_qos.c */
+void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
+ const ktime_t org_base_time, ktime_t *new_base_time);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 6c93dd6b01b0..bd03a0a3c1da 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -4,8 +4,8 @@
* Copyright (c) 2021 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2022-09-28 11:17:02 +0200.
- * Commit ID: 385c8a11d71a9f6a60368d3a3cb648fa257b479a
+/* This file is autogenerated by cml-utils 2023-02-10 11:18:53 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -19,6 +19,7 @@ enum sparx5_target {
TARGET_ANA_AC = 1,
TARGET_ANA_ACL = 2,
TARGET_ANA_AC_POL = 4,
+ TARGET_ANA_AC_SDLB = 5,
TARGET_ANA_CL = 6,
TARGET_ANA_L2 = 7,
TARGET_ANA_L3 = 8,
@@ -46,6 +47,8 @@ enum sparx5_target {
TARGET_QS = 177,
TARGET_QSYS = 178,
TARGET_REW = 179,
+ TARGET_VCAP_ES0 = 323,
+ TARGET_VCAP_ES2 = 324,
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
@@ -55,7 +58,8 @@ enum sparx5_target {
#define __REG(...) __VA_ARGS__
/* ANA_AC:RAM_CTRL:RAM_INIT */
-#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC, 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
+#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC,\
+ 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
#define ANA_AC_RAM_INIT_RAM_INIT BIT(1)
#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
@@ -70,7 +74,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
/* ANA_AC:PS_COMMON:OWN_UPSID */
-#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
#define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -79,13 +84,16 @@ enum sparx5_target {
FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
/* ANA_AC:SRC:SRC_CFG */
-#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
+#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
/* ANA_AC:SRC:SRC_CFG1 */
-#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
+#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
/* ANA_AC:SRC:SRC_CFG2 */
-#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
#define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0)
#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
@@ -94,13 +102,16 @@ enum sparx5_target {
FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
/* ANA_AC:PGID:PGID_CFG */
-#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
/* ANA_AC:PGID:PGID_CFG1 */
-#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
/* ANA_AC:PGID:PGID_CFG2 */
-#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
#define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0)
#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
@@ -109,7 +120,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
/* ANA_AC:PGID:PGID_MISC_CFG */
-#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4)
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
@@ -129,8 +141,257 @@ enum sparx5_target {
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+/* ANA_AC:TSN_SF:TSN_SF */
+#define ANA_AC_TSN_SF __REG(TARGET_ANA_AC,\
+ 0, 1, 839136, 0, 1, 4, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY BIT(9)
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
+
+#define ANA_AC_TSN_SF_PORT_NUM GENMASK(8, 0)
+#define ANA_AC_TSN_SF_PORT_NUM_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_PORT_NUM, x)
+#define ANA_AC_TSN_SF_PORT_NUM_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_PORT_NUM, x)
+
+/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
+#define ANA_AC_TSN_SF_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 839680, g, 1024, 4, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_CFG_TSN_SGID GENMASK(25, 16)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU GENMASK(15, 2)
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_MAX_SDU, x)
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_MAX_SDU, x)
+
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA BIT(1)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA, x)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA, x)
+
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE BIT(0)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
+
+/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
+#define ANA_AC_TSN_SF_STATUS __REG(TARGET_ANA_AC,\
+ 0, 1, 839072, 0, 1, 16, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN GENMASK(25, 12)
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_FRM_LEN, x)
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_FRM_LEN, x)
+
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP BIT(11)
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
+
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID GENMASK(10, 1)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD BIT(0)
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
+
+/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
+#define ANA_AC_SG_ACCESS_CTRL __REG(TARGET_ANA_AC,\
+ 0, 1, 839140, 0, 1, 12, 0, 0, 1, 4)
+
+#define ANA_AC_SG_ACCESS_CTRL_SGID GENMASK(9, 0)
+#define ANA_AC_SG_ACCESS_CTRL_SGID_SET(x)\
+ FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+#define ANA_AC_SG_ACCESS_CTRL_SGID_GET(x)\
+ FIELD_GET(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(x)\
+ FIELD_GET(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
+
+/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD __REG(TARGET_ANA_AC,\
+ 0, 1, 839140, 0, 1, 12, 8, 0, 1, 4)
+
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS GENMASK(15, 0)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS, x)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_GET(x)\
+ FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS, x)
+
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA BIT(31)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
+#define ANA_AC_SG_CONFIG_REG_1 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 48, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
+#define ANA_AC_SG_CONFIG_REG_2 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 52, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
+#define ANA_AC_SG_CONFIG_REG_3 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB GENMASK(15, 0)
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB, x)
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH GENMASK(18, 16)
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH, x)
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE, x)
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS GENMASK(24, 21)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INIT_IPS, x)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INIT_IPS, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(25)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE, x)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA BIT(26)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA, x)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX BIT(27)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INVALID_RX, x)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INVALID_RX, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA BIT(28)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA, x)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED BIT(29)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
+#define ANA_AC_SG_CONFIG_REG_4 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 60, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
+#define ANA_AC_SG_CONFIG_REG_5 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 64, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
+#define ANA_AC_SG_GCL_GS_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 0, r, 4, 4)
+
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS GENMASK(3, 0)
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_GCL_GS_CONFIG_IPS, x)
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_IPS, x)
+
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
+#define ANA_AC_SG_GCL_TI_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 16, r, 4, 4)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
+#define ANA_AC_SG_GCL_OCT_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 32, r, 4, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
+#define ANA_AC_SG_STATUS_REG_1 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 0, 0, 1, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
+#define ANA_AC_SG_STATUS_REG_2 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 4, 0, 1, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
+#define ANA_AC_SG_STATUS_REG_3 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 8, 0, 1, 4)
+
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB GENMASK(15, 0)
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB, x)
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB, x)
+
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE BIT(16)
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_GATE_STATE, x)
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_GATE_STATE, x)
+
+#define ANA_AC_SG_STATUS_REG_3_IPS GENMASK(23, 20)
+#define ANA_AC_SG_STATUS_REG_3_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_IPS, x)
+#define ANA_AC_SG_STATUS_REG_3_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_IPS, x)
+
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING, x)
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING, x)
+
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX GENMASK(27, 25)
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
+#define ANA_AC_SG_STATUS_REG_4 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 12, 0, 1, 4)
+
/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
-#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0)
#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
@@ -139,7 +400,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
-#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC,\
+ 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
#define ANA_AC_STAT_RESET_RESET BIT(0)
#define ANA_AC_STAT_RESET_RESET_SET(x)\
@@ -148,7 +410,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
-#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC,\
+ 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4)
#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
@@ -169,10 +432,42 @@ enum sparx5_target {
FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
-#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC,\
+ 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 0, r, 2, 4)
+
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE GENMASK(2, 0)
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_GET(x)\
+ FIELD_GET(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
+#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 8, r, 2, 4)
+
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE BIT(0)
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_GET(x)\
+ FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 16, r, 2, 4)
+
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK GENMASK(3, 0)
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_GET(x)\
+ FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
/* ANA_ACL:COMMON:VCAP_S2_CFG */
-#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
+#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA BIT(28)
#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_SET(x)\
@@ -259,7 +554,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
/* ANA_ACL:COMMON:SWAP_IP_CTRL */
-#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
+#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL GENMASK(23, 18)
#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_SET(x)\
@@ -292,7 +588,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
-#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
+#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK GENMASK(12, 6)
#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_SET(x)\
@@ -307,7 +604,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
-#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN GENMASK(9, 5)
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_SET(x)\
@@ -328,7 +626,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
/* ANA_ACL:COMMON:OWN_UPSID */
-#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
#define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -337,7 +636,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
-#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL, 0, 1, 34200, g, 134, 16, 0, r, 4, 4)
+#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 34200, g, 134, 16, 0, r, 4, 4)
#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA BIT(13)
#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(x)\
@@ -388,13 +688,16 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
/* ANA_ACL:CNT_A:CNT_A */
-#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL, 0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
+#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL,\
+ 0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
/* ANA_ACL:CNT_B:CNT_B */
-#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL, 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
+#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL,\
+ 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
-#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL, 0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
+#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY BIT(17)
#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_SET(x)\
@@ -505,7 +808,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
-#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0)
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
@@ -514,7 +818,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
-#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -541,7 +846,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
-#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -567,8 +873,235 @@ enum sparx5_target {
#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
+#define ANA_AC_SDLB_XLB_START(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 0, 0, 1, 4)
+
+#define ANA_AC_SDLB_XLB_START_LBSET_START GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_START_LBSET_START_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+#define ANA_AC_SDLB_XLB_START_LBSET_START_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
+#define ANA_AC_SDLB_PUP_INTERVAL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 4, 0, 1, 4)
+
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL GENMASK(19, 0)
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
+#define ANA_AC_SDLB_PUP_CTRL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 8, 0, 1, 4)
+
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT GENMASK(18, 0)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT, x)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT, x)
+
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA BIT(24)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
+#define ANA_AC_SDLB_LBGRP_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 12, 0, 1, 4)
+
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT GENMASK(12, 8)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
+#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 16, 0, 1, 4)
+
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS GENMASK(12, 0)
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
+#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 20, 0, 1, 4)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING BIT(0)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING, x)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK BIT(1)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT GENMASK(28, 16)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
+#define ANA_AC_SDLB_PUP_TOKENS(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 0, r, 2, 4)
+
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS GENMASK(12, 0)
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:THRES */
+#define ANA_AC_SDLB_THRES(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 8, r, 2, 4)
+
+#define ANA_AC_SDLB_THRES_THRES GENMASK(9, 0)
+#define ANA_AC_SDLB_THRES_THRES_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_THRES_THRES, x)
+#define ANA_AC_SDLB_THRES_THRES_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_THRES_THRES, x)
+
+#define ANA_AC_SDLB_THRES_THRES_HYS GENMASK(25, 16)
+#define ANA_AC_SDLB_THRES_THRES_HYS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_THRES_THRES_HYS, x)
+#define ANA_AC_SDLB_THRES_THRES_HYS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_THRES_THRES_HYS, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
+#define ANA_AC_SDLB_XLB_NEXT(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 16, 0, 1, 4)
+
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP GENMASK(27, 24)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
+#define ANA_AC_SDLB_INH_CTRL(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 20, r, 2, 4)
+
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, x)
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, x)
+
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE GENMASK(21, 20)
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_INH_MODE, x)
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_MODE, x)
+
+#define ANA_AC_SDLB_INH_CTRL_INH_LB BIT(24)
+#define ANA_AC_SDLB_INH_CTRL_INH_LB_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
+#define ANA_AC_SDLB_INH_CTRL_INH_LB_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
+#define ANA_AC_SDLB_INH_LBSET_ADDR(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 28, 0, 1, 4)
+
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
+#define ANA_AC_SDLB_DLB_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 32, 0, 1, 4)
+
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA BIT(0)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA, x)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA, x)
+
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA BIT(6)
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ GENMASK(14, 8)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
+#define ANA_AC_SDLB_DLB_CFG(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 36, 0, 1, 4)
+
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA, x)
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL GENMASK(10, 9)
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL, x)
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS BIT(8)
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS, x)
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS, x)
+
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS BIT(7)
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS, x)
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS, x)
+
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL GENMASK(6, 5)
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL, x)
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL GENMASK(4, 3)
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL, x)
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE BIT(2)
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DLB_MODE, x)
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DLB_MODE, x)
+
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK GENMASK(1, 0)
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
+
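/* Usage sketch for the new SDLB leaky-bucket registers: enable frame-rate
 * policing on one LB set and chain it into a leak group via XLB_NEXT. A
 * minimal sketch, assuming the spx5_rmw() accessor from sparx5_main.h;
 * 'lbset' and 'group' are illustrative parameters.
 */
static void example_sdlb_setup(struct sparx5 *sparx5, u32 lbset, u32 group)
{
	spx5_rmw(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_SET(1),
		 ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA,
		 sparx5, ANA_AC_SDLB_DLB_MISC(lbset));
	spx5_rmw(ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(group),
		 ANA_AC_SDLB_XLB_NEXT_LBGRP,
		 sparx5, ANA_AC_SDLB_XLB_NEXT(lbset));
}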
/* ANA_CL:PORT:FILTER_CTRL */
-#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2)
#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
@@ -589,7 +1122,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
/* ANA_CL:PORT:VLAN_FILTER_CTRL */
-#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
@@ -658,7 +1192,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
/* ANA_CL:PORT:ETAG_FILTER_CTRL */
-#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
@@ -673,7 +1208,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
/* ANA_CL:PORT:VLAN_CTRL */
-#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
@@ -742,7 +1278,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
/* ANA_CL:PORT:VLAN_CTRL_2 */
-#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0)
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
@@ -751,7 +1288,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
-#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 108, r, 16, 4)
+#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 108, r, 16, 4)
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL GENMASK(4, 3)
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(x)\
@@ -766,7 +1304,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
/* ANA_CL:PORT:QOS_CFG */
-#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
+#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA BIT(17)
#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_SET(x)\
@@ -841,10 +1380,74 @@ enum sparx5_target {
FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
-#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+
+/* ANA_CL:PORT:ADV_CL_CFG_2 */
+#define ANA_CL_ADV_CL_CFG_2(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 200, r, 6, 4)
+
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA BIT(1)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA, x)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA, x)
+
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA BIT(0)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
+
+/* ANA_CL:PORT:ADV_CL_CFG */
+#define ANA_CL_ADV_CL_CFG(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 224, r, 6, 4)
+
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL GENMASK(30, 26)
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL GENMASK(25, 21)
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL GENMASK(20, 16)
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL GENMASK(15, 11)
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL GENMASK(10, 6)
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL GENMASK(5, 1)
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA BIT(0)
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
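/* Usage sketch for the new per-port classifier key selection: pick the CLM
 * key for IPv4 frames in one lookup and enable that lookup, leaving the
 * remaining key selectors untouched thanks to the read-modify-write. A
 * sketch assuming spx5_rmw() from sparx5_main.h; 'port', 'lookup' and
 * 'keysel' are illustrative.
 */
static void example_clm_key_sel(struct sparx5 *sparx5, u32 port,
				u32 lookup, u32 keysel)
{
	spx5_rmw(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(keysel) |
		 ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(1),
		 ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL |
		 ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
		 sparx5, ANA_CL_ADV_CL_CFG(port, lookup));
}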
/* ANA_CL:COMMON:OWN_UPSID */
-#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
#define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -853,7 +1456,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
/* ANA_CL:COMMON:DSCP_CFG */
-#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
+#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL GENMASK(12, 7)
#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_SET(x)\
@@ -885,14 +1489,103 @@ enum sparx5_target {
#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
+/* ANA_CL:COMMON:QOS_MAP_CFG */
+#define ANA_CL_QOS_MAP_CFG(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 512, r, 32, 4)
+
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL GENMASK(9, 4)
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
+
+/* ANA_L2:COMMON:FWD_CFG */
+#define ANA_L2_FWD_CFG __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 0, 0, 1, 4)
+
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL GENMASK(21, 20)
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL, x)
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL, x)
+
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA BIT(18)
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA, x)
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA, x)
+
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA BIT(17)
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA, x)
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA, x)
+
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA BIT(16)
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, x)
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, x)
+
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU GENMASK(10, 8)
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_CPU_DMAC_QU, x)
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_CPU_DMAC_QU, x)
+
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA BIT(7)
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_LOOPBACK_ENA, x)
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_LOOPBACK_ENA, x)
+
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA BIT(6)
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA, x)
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA, x)
+
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL BIT(4)
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FILTER_MODE_SEL, x)
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FILTER_MODE_SEL, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA BIT(3)
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA BIT(2)
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA BIT(1)
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA, x)
+
+#define ANA_L2_FWD_CFG_FWD_ENA BIT(0)
+#define ANA_L2_FWD_CFG_FWD_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FWD_ENA, x)
+#define ANA_L2_FWD_CFG_FWD_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FWD_ENA, x)
+
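/* Usage sketch: because every _SET() helper is a FIELD_PREP() over a
 * disjoint mask, several FWD_CFG fields can be carried in one value/mask
 * pair. Assumes spx5_rmw() from sparx5_main.h.
 */
static void example_fwd_cfg(struct sparx5 *sparx5)
{
	spx5_rmw(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(1) |
		 ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_SET(1),
		 ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA |
		 ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA,
		 sparx5, ANA_L2_FWD_CFG);
}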
/* ANA_L2:COMMON:AUTO_LRN_CFG */
-#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
-#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
-#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0)
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
@@ -901,7 +1594,8 @@ enum sparx5_target {
FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
/* ANA_L2:COMMON:OWN_UPSID */
-#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
#define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -909,8 +1603,29 @@ enum sparx5_target {
#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+/* ANA_L2:ISDX:DLB_CFG */
+#define ANA_L2_DLB_CFG(g) __REG(TARGET_ANA_L2,\
+ 0, 1, 0, g, 4096, 128, 56, 0, 1, 4)
+
+#define ANA_L2_DLB_CFG_DLB_IDX GENMASK(12, 0)
+#define ANA_L2_DLB_CFG_DLB_IDX_SET(x)\
+ FIELD_PREP(ANA_L2_DLB_CFG_DLB_IDX, x)
+#define ANA_L2_DLB_CFG_DLB_IDX_GET(x)\
+ FIELD_GET(ANA_L2_DLB_CFG_DLB_IDX, x)
+
+/* ANA_L2:ISDX:TSN_CFG */
+#define ANA_L2_TSN_CFG(g) __REG(TARGET_ANA_L2,\
+ 0, 1, 0, g, 4096, 128, 100, 0, 1, 4)
+
+#define ANA_L2_TSN_CFG_TSN_SFID GENMASK(9, 0)
+#define ANA_L2_TSN_CFG_TSN_SFID_SET(x)\
+ FIELD_PREP(ANA_L2_TSN_CFG_TSN_SFID, x)
+#define ANA_L2_TSN_CFG_TSN_SFID_GET(x)\
+ FIELD_GET(ANA_L2_TSN_CFG_TSN_SFID, x)
+
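/* Usage sketch: the ISDX-replicated registers take the service index as
 * the group argument, so binding a dual leaky bucket and a TSN stream
 * filter to a service are single writes. Assumes spx5_wr() from
 * sparx5_main.h; 'isdx', 'dlb_idx' and 'sfid' are illustrative.
 */
static void example_isdx_bind(struct sparx5 *sparx5, u32 isdx,
			      u32 dlb_idx, u32 sfid)
{
	spx5_wr(ANA_L2_DLB_CFG_DLB_IDX_SET(dlb_idx),
		sparx5, ANA_L2_DLB_CFG(isdx));
	spx5_wr(ANA_L2_TSN_CFG_TSN_SFID_SET(sfid),
		sparx5, ANA_L2_TSN_CFG(isdx));
}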
/* ANA_L3:COMMON:VLAN_CTRL */
-#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3, 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
+#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3,\
+ 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
#define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0)
#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\
@@ -919,7 +1634,8 @@ enum sparx5_target {
FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
/* ANA_L3:VLAN:VLAN_CFG */
-#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
+#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24)
#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\
@@ -976,13 +1692,16 @@ enum sparx5_target {
FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
/* ANA_L3:VLAN:VLAN_MASK_CFG */
-#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
+#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
/* ANA_L3:VLAN:VLAN_MASK_CFG1 */
-#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
+#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
/* ANA_L3:VLAN:VLAN_MASK_CFG2 */
-#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
+#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0)
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\
@@ -991,274 +1710,364 @@ enum sparx5_target {
FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
-#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
+#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
-#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
+#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */
-#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
+#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
-#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
+#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
-#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
+#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
-#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
+#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_UC_CNT */
-#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
+#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_MC_CNT */
-#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
+#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_BC_CNT */
-#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
+#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
-#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
+#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
-#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
+#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
-#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
+#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
-#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
+#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
+#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
-#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
+#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */
-#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
+#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */
-#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
+#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
-#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
+#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
-#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
+#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
-#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
+#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
-#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
+#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
-#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
+#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
-#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
+#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
-#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
+#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
-#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
+#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */
-#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
+#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
-#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
+#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_UC_CNT */
-#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
+#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_MC_CNT */
-#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
+#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_BC_CNT */
-#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
+#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */
-#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
+#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
-#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
+#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
-#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
+#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
-#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
+#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
-#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
+#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
-#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
+#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
-#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
+#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
-#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
+#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
-#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
+#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
-#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
+#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
-#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
+#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
-#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
+#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
-#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
+#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
-#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
+#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
-#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
+#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
-#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
+#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
-#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
-#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
+#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
-#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
+#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
-#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
+#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
-#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
+#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
-#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
+#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
-#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
+#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
-#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
+#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
+#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
-#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
+#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
-#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
+#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
-#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
-#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
-#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
-#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
-#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
-#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
-#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
-#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
+#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
-#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
+#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
-#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
+#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
-#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
+#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
-#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
+#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
-#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
-#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
-#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
-#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
-#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
-#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
-#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
+#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
-#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
+#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
-#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
+#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
-#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
+#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
-#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
+#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
-#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
+#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
-#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
+#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
-#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
+#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */
-#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
+#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_DEFER_CNT */
-#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
+#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */
-#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
+#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
-#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
+#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */
-#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
+#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
-#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
+#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -1267,7 +2076,8 @@ enum sparx5_target {
FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
-#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
+#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -1276,7 +2086,8 @@ enum sparx5_target {
FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
-#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -1285,7 +2096,8 @@ enum sparx5_target {
FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
-#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
+#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -1294,7 +2106,8 @@ enum sparx5_target {
FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -1303,7 +2116,8 @@ enum sparx5_target {
FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
-#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
+#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -1312,7 +2126,8 @@ enum sparx5_target {
FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
-#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
+#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -1321,7 +2136,8 @@ enum sparx5_target {
FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
-#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -1330,10 +2146,12 @@ enum sparx5_target {
FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
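/* Usage sketch: the 4-bit *_MSB_CNT registers appear to extend the
 * matching 32-bit byte counters to 36 bits (inferred from the
 * GENMASK(3, 0) field width; the datasheet is authoritative). Splice the
 * MSB part above the 32-bit part when accumulating. Assumes spx5_rd()
 * from sparx5_main.h.
 */
static u64 example_tx_ok_bytes(struct sparx5 *sparx5, u32 portno)
{
	u64 lsb = spx5_rd(sparx5, ASM_TX_OK_BYTES_CNT(portno));
	u64 msb = ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(
			spx5_rd(sparx5, ASM_TX_OK_BYTES_MSB_CNT(portno)));

	return (msb << 32) | lsb;
}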
/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
-#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
+#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
/* ASM:CFG:STAT_CFG */
-#define ASM_STAT_CFG __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
+#define ASM_STAT_CFG __REG(TARGET_ASM,\
+ 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0)
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\
@@ -1342,7 +2160,8 @@ enum sparx5_target {
FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
/* ASM:CFG:PORT_CFG */
-#define ASM_PORT_CFG(r) __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
+#define ASM_PORT_CFG(r) __REG(TARGET_ASM,\
+ 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
#define ASM_PORT_CFG_CSC_STAT_DIS BIT(12)
#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\
@@ -1411,7 +2230,8 @@ enum sparx5_target {
FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x)
/* ASM:RAM_CTRL:RAM_INIT */
-#define ASM_RAM_INIT __REG(TARGET_ASM, 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
+#define ASM_RAM_INIT __REG(TARGET_ASM,\
+ 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
#define ASM_RAM_INIT_RAM_INIT BIT(1)
#define ASM_RAM_INIT_RAM_INIT_SET(x)\
@@ -1426,7 +2246,8 @@ enum sparx5_target {
FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x)
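/* Usage sketch: RAM initialization is typically kicked by setting RAM_INIT
 * and polling until the bit self-clears. A minimal sketch assuming
 * spx5_wr()/spx5_rd() from sparx5_main.h plus linux/delay.h and
 * linux/errno.h; the retry budget is arbitrary.
 */
static int example_asm_ram_init(struct sparx5 *sparx5)
{
	int retries = 100;

	spx5_wr(ASM_RAM_INIT_RAM_INIT_SET(1), sparx5, ASM_RAM_INIT);
	while (ASM_RAM_INIT_RAM_INIT_GET(spx5_rd(sparx5, ASM_RAM_INIT))) {
		if (--retries == 0)
			return -ETIMEDOUT;
		usleep_range(1000, 2000);
	}
	return 0;
}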
/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
-#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN,\
+ 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0)
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\
@@ -1465,7 +2286,8 @@ enum sparx5_target {
FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
/* CPU:CPU_REGS:PROC_CTRL */
-#define CPU_PROC_CTRL __REG(TARGET_CPU, 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
+#define CPU_PROC_CTRL __REG(TARGET_CPU,\
+ 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12)
#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\
@@ -1546,7 +2368,8 @@ enum sparx5_target {
FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
+#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -1561,7 +2384,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
+#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -1576,7 +2400,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
-#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
+#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0)
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\
@@ -1585,7 +2410,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
-#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 16, r, 3, 4)
+#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 16, r, 3, 4)
#define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\
@@ -1600,7 +2426,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
+#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -1645,7 +2472,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
-#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
+#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4)
#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\
@@ -1678,7 +2506,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G, t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
+#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G,\
+ t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -1735,7 +2564,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
-#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G, t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
+#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
#define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0)
#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\
@@ -1744,7 +2574,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
+#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -1759,7 +2590,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
+#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -1774,7 +2606,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
+#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -1819,7 +2652,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
+#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G,\
+ t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -1876,7 +2710,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
-#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
+#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
#define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0)
#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\
@@ -1885,7 +2720,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
-#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
+#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
#define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8)
#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\
@@ -1906,7 +2742,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5, t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
+#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5,\
+ t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23)
#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
@@ -1957,7 +2794,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
+#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
#define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -1972,7 +2810,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
-#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
+#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\
@@ -1993,7 +2832,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
+#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
@@ -2002,7 +2842,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
-#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
+#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
#define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\
@@ -2029,7 +2870,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
-#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
+#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16)
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\
@@ -2044,7 +2886,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
+#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\
@@ -2053,7 +2896,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
-#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
+#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\
@@ -2080,7 +2924,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
-#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
+#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\
@@ -2113,7 +2958,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
-#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
+#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\
@@ -2134,7 +2980,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
-#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
+#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\
@@ -2155,7 +3002,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
-#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
+#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
#define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8)
#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\
@@ -2176,7 +3024,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
-#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
@@ -2203,7 +3052,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
-#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
@@ -2224,7 +3074,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
-#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
@@ -2251,7 +3102,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
-#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
@@ -2278,7 +3130,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
-#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
@@ -2293,7 +3146,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
-#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5, t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26)
#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
@@ -2374,7 +3228,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
-#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
@@ -2425,7 +3280,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -2440,7 +3296,8 @@ enum sparx5_target {
FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -2455,7 +3312,8 @@ enum sparx5_target {
FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -2500,142 +3358,188 @@ enum sparx5_target {
FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
-#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
-#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
+#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
-#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
-#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
+#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
-#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
+#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
-#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
+#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
-#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
+#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
-#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
+#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
-#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
+#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
-#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
+#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
-#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
+#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
-#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
+#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
-#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
+#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
-#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
+#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
-#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
+#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
-#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
+#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
-#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
-#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
-#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
+#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
-#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
+#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
-#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
+#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
-#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
+#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
-#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
+#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
-#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
+#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
-#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
+#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
-#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
+#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
-#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
+#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
-#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
+#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
-#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
-#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
-#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
-#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
-#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
-#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
-#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
-#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
-#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
-#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
+#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
-#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
+#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
-#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
+#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
-#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
-#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
-#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
@@ -2646,100 +3550,132 @@ enum sparx5_target {
t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
-#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
-#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
-#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
-#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
-#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
+#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
-#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
+#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
-#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
+#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
-#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
-#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
-#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
-#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
-#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
+#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
-#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -2748,10 +3684,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
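The DEV_STATISTICS_40BIT counters in this section are each split across
a 32-bit LSB register and an 8-bit MSB register (the GENMASK(7, 0)
field above). A minimal sketch of assembling one 40-bit value, assuming
spx5_rd() as the read accessor and ignoring any read-ordering or
snapshot requirements the hardware may have:

	u32 lsb = spx5_rd(sparx5, DEV5G_RX_IN_BYTES_CNT(tinst));
	u32 msb = spx5_rd(sparx5, DEV5G_RX_IN_BYTES_MSB_CNT(tinst));
	u64 rx_bytes;

	/* The MSB register supplies bits [39:32] of the running counter */
	rx_bytes = lsb |
		   (u64)DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(msb) << 32;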
/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
-#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
-#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2760,10 +3698,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
-#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -2772,10 +3712,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
-#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
-#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -2784,10 +3726,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
-#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
-#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2796,10 +3740,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2808,10 +3754,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -2820,10 +3768,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2832,7 +3782,8 @@ enum sparx5_target {
FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G, t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G,\
+ t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -2889,7 +3840,8 @@ enum sparx5_target {
FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
/* DSM:RAM_CTRL:RAM_INIT */
-#define DSM_RAM_INIT __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+#define DSM_RAM_INIT __REG(TARGET_DSM,\
+ 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
#define DSM_RAM_INIT_RAM_INIT BIT(1)
#define DSM_RAM_INIT_RAM_INIT_SET(x)\
@@ -2904,7 +3856,8 @@ enum sparx5_target {
FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
/* DSM:CFG:BUF_CFG */
-#define DSM_BUF_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+#define DSM_BUF_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13)
#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
@@ -2931,7 +3884,8 @@ enum sparx5_target {
FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
/* DSM:CFG:DEV_TX_STOP_WM_CFG */
-#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
@@ -2958,7 +3912,8 @@ enum sparx5_target {
FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
/* DSM:CFG:RX_PAUSE_CFG */
-#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
@@ -2973,7 +3928,8 @@ enum sparx5_target {
FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
/* DSM:CFG:MAC_CFG */
-#define DSM_MAC_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+#define DSM_MAC_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16)
#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
@@ -3000,7 +3956,8 @@ enum sparx5_target {
FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
-#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
@@ -3009,7 +3966,8 @@ enum sparx5_target {
FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
-#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
@@ -3018,7 +3976,8 @@ enum sparx5_target {
FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
/* DSM:CFG:TAXI_CAL_CFG */
-#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15)
#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
@@ -3050,8 +4009,41 @@ enum sparx5_target {
#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
+#define EACL_VCAP_ES2_KEY_SEL(g, r) __REG(TARGET_EACL,\
+ 0, 1, 149504, g, 138, 8, 0, r, 2, 4)
+
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL GENMASK(7, 5)
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL GENMASK(4, 2)
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL BIT(1)
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA BIT(0)
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
+
+/* EACL:CNT_TBL:ES2_CNT */
+#define EACL_ES2_CNT(g) __REG(TARGET_EACL,\
+ 0, 1, 122880, g, 2048, 4, 0, 0, 1, 4)
+
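The new ES2 key-select register is replicated per port group (g) and
per lookup (r). A hypothetical configuration sketch, enabling the
lookup and choosing an IPv4 key for one port (spx5_rmw() is the
driver's read-modify-write accessor; the indices and the key-select
encoding value 1 are placeholders, not documented encodings):

	spx5_rmw(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(1) |
		 EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(1),
		 EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL |
		 EACL_VCAP_ES2_KEY_SEL_KEY_ENA,
		 sparx5, EACL_VCAP_ES2_KEY_SEL(0, 0));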
/* EACL:POL_CFG:POL_EACL_CFG */
-#define EACL_POL_EACL_CFG __REG(TARGET_EACL, 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+#define EACL_POL_EACL_CFG __REG(TARGET_EACL,\
+ 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
@@ -3089,8 +4081,61 @@ enum sparx5_target {
#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
+#define EACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_EACL,\
+ 0, 1, 118696, 0, 1, 8, 0, r, 2, 4)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY BIT(6)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY BIT(5)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY BIT(4)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY BIT(3)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY BIT(2)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY BIT(1)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY BIT(0)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+
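The sticky bits above latch which ES2 key types have been looked up
since the register was last cleared. A poll-and-clear sketch, under the
assumption (common for sticky registers in this family, but not
verified here) that writing a bit back to the register clears it:

	u32 sticky = spx5_rd(sparx5, EACL_SEC_LOOKUP_STICKY(0));

	if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(sticky))
		; /* an IPv4 TCP/UDP key was used since the last clear */

	spx5_wr(sticky, sparx5, EACL_SEC_LOOKUP_STICKY(0));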
/* EACL:RAM_CTRL:RAM_INIT */
-#define EACL_RAM_INIT __REG(TARGET_EACL, 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+#define EACL_RAM_INIT __REG(TARGET_EACL,\
+ 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
#define EACL_RAM_INIT_RAM_INIT BIT(1)
#define EACL_RAM_INIT_RAM_INIT_SET(x)\
@@ -3105,7 +4150,8 @@ enum sparx5_target {
FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
/* FDMA:FDMA:FDMA_CH_ACTIVATE */
-#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
@@ -3114,7 +4160,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
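Each FDMA channel control register packs one bit per channel (the
GENMASK(7, 0) field covers eight channels). A sketch of kicking DCB
processing on channel ch (the bit-per-channel reading follows from the
field width; sparx5 and ch are assumed context):

	spx5_wr(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(ch)),
		sparx5, FDMA_CH_ACTIVATE);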
/* FDMA:FDMA:FDMA_CH_RELOAD */
-#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
@@ -3123,7 +4170,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
/* FDMA:FDMA:FDMA_CH_DISABLE */
-#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
@@ -3132,19 +4180,24 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
/* FDMA:FDMA:FDMA_DCB_LLP */
-#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
/* FDMA:FDMA:FDMA_DCB_LLP1 */
-#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
-#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
+#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
-#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
+#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
/* FDMA:FDMA:FDMA_CH_CFG */
-#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7)
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
@@ -3177,7 +4230,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
/* FDMA:FDMA:FDMA_CH_TRANSLATE */
-#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0)
#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
@@ -3186,7 +4240,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
/* FDMA:FDMA:FDMA_XTR_CFG */
-#define FDMA_XTR_CFG __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+#define FDMA_XTR_CFG __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11)
#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
@@ -3201,7 +4256,8 @@ enum sparx5_target {
FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
/* FDMA:FDMA:FDMA_PORT_CTRL */
-#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
@@ -3234,7 +4290,8 @@ enum sparx5_target {
FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
/* FDMA:FDMA:FDMA_INTR_DCB */
-#define FDMA_INTR_DCB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+#define FDMA_INTR_DCB __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0)
#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
@@ -3243,7 +4300,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
-#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
@@ -3252,7 +4310,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
/* FDMA:FDMA:FDMA_INTR_DB */
-#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+#define FDMA_INTR_DB __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0)
#define FDMA_INTR_DB_INTR_DB_SET(x)\
@@ -3261,7 +4320,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
/* FDMA:FDMA:FDMA_INTR_DB_ENA */
-#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
@@ -3270,7 +4330,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
/* FDMA:FDMA:FDMA_INTR_ERR */
-#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+#define FDMA_INTR_ERR __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8)
#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
@@ -3285,7 +4346,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
/* FDMA:FDMA:FDMA_ERRORS */
-#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+#define FDMA_ERRORS __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30)
#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
@@ -3336,7 +4398,8 @@ enum sparx5_target {
FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
/* FDMA:FDMA:FDMA_ERRORS_2 */
-#define FDMA_ERRORS_2 __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+#define FDMA_ERRORS_2 __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0)
#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
@@ -3345,7 +4408,8 @@ enum sparx5_target {
FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
/* FDMA:FDMA:FDMA_CTRL */
-#define FDMA_CTRL __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+#define FDMA_CTRL __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
#define FDMA_CTRL_NRESET BIT(0)
#define FDMA_CTRL_NRESET_SET(x)\
@@ -3354,7 +4418,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CTRL_NRESET, x)
/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
-#define GCB_CHIP_ID __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+#define GCB_CHIP_ID __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
#define GCB_CHIP_ID_REV_ID GENMASK(31, 28)
#define GCB_CHIP_ID_REV_ID_SET(x)\
@@ -3381,7 +4446,8 @@ enum sparx5_target {
FIELD_GET(GCB_CHIP_ID_ONE, x)
/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
-#define GCB_SOFT_RST __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+#define GCB_SOFT_RST __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2)
#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
@@ -3402,7 +4468,8 @@ enum sparx5_target {
FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
-#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
@@ -3417,7 +4484,8 @@ enum sparx5_target {
FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
-#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
@@ -3426,7 +4494,8 @@ enum sparx5_target {
FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
-#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB, 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB,\
+ 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
@@ -3441,7 +4510,8 @@ enum sparx5_target {
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
/* HSCH:HSCH_CFG:CIR_CFG */
-#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
@@ -3456,7 +4526,8 @@ enum sparx5_target {
FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
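The CIR (committed information rate) shaper on a scheduler element is
programmed by writing rate and burst together. A hedged sketch, where
se and sparx5 are assumed context and the raw register units (rate and
burst granularity) are an assumption to be checked against the
datasheet:

	spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(rate) |
		HSCH_CIR_CFG_CIR_BURST_SET(burst),
		sparx5, HSCH_CIR_CFG(se));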
/* HSCH:HSCH_CFG:EIR_CFG */
-#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
@@ -3471,7 +4542,8 @@ enum sparx5_target {
FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
/* HSCH:HSCH_CFG:SE_CFG */
-#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+#define HSCH_SE_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
@@ -3504,7 +4576,8 @@ enum sparx5_target {
FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
/* HSCH:HSCH_CFG:SE_CONNECT */
-#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
@@ -3513,7 +4586,8 @@ enum sparx5_target {
FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
/* HSCH:HSCH_CFG:SE_DLB_SENSE */
-#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
@@ -3546,7 +4620,8 @@ enum sparx5_target {
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
/* HSCH:HSCH_DWRR:DWRR_ENTRY */
-#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH,\
+ 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
@@ -3561,7 +4636,8 @@ enum sparx5_target {
FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
-#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH,\
+ 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
@@ -3582,16 +4658,18 @@ enum sparx5_target {
FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
/* HSCH:HSCH_MISC:SYS_CLK_PER */
-#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH,\
+ 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS GENMASK(7, 0)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(x)\
- FIELD_PREP(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
- FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_100PS GENMASK(7, 0)
+#define HSCH_SYS_CLK_PER_100PS_SET(x)\
+ FIELD_PREP(HSCH_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_100PS_GET(x)\
+ FIELD_GET(HSCH_SYS_CLK_PER_100PS, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
-#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH,\
+ 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
@@ -3600,7 +4678,8 @@ enum sparx5_target {
FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
-#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH,\
+ 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
@@ -3615,7 +4694,8 @@ enum sparx5_target {
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
/* HSCH:SYSTEM:FLUSH_CTRL */
-#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27)
#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
@@ -3660,7 +4740,8 @@ enum sparx5_target {
FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
/* HSCH:SYSTEM:PORT_MODE */
-#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4)
#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
@@ -3693,7 +4774,8 @@ enum sparx5_target {
FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
/* HSCH:SYSTEM:OUTB_SHARE_ENA */
-#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
@@ -3702,7 +4784,8 @@ enum sparx5_target {
FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
/* HSCH:MMGT:RESET_CFG */
-#define HSCH_RESET_CFG __REG(TARGET_HSCH, 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+#define HSCH_RESET_CFG __REG(TARGET_HSCH,\
+ 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
#define HSCH_RESET_CFG_CORE_ENA BIT(0)
#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
@@ -3711,7 +4794,8 @@ enum sparx5_target {
FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
-#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH, 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH,\
+ 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
@@ -3720,7 +4804,8 @@ enum sparx5_target {
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
/* LRN:COMMON:COMMON_ACCESS_CTRL */
-#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
@@ -3753,7 +4838,8 @@ enum sparx5_target {
FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
/* LRN:COMMON:MAC_ACCESS_CFG_0 */
-#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16)
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\
@@ -3768,10 +4854,12 @@ enum sparx5_target {
FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
/* LRN:COMMON:MAC_ACCESS_CFG_1 */
-#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
+#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
/* LRN:COMMON:MAC_ACCESS_CFG_2 */
-#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
+#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28)
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\
@@ -3846,7 +4934,8 @@ enum sparx5_target {
FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
/* LRN:COMMON:MAC_ACCESS_CFG_3 */
-#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
+#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0)
#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\
@@ -3855,7 +4944,8 @@ enum sparx5_target {
FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
/* LRN:COMMON:SCAN_NEXT_CFG */
-#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
+#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19)
#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\
@@ -3948,7 +5038,8 @@ enum sparx5_target {
FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
/* LRN:COMMON:SCAN_NEXT_CFG_1 */
-#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
+#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16)
#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\
@@ -3963,7 +5054,8 @@ enum sparx5_target {
FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
/* LRN:COMMON:AUTOAGE_CFG */
-#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
+#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
#define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28)
#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\
@@ -3978,7 +5070,8 @@ enum sparx5_target {
FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
/* LRN:COMMON:AUTOAGE_CFG_1 */
-#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
+#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25)
#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\
@@ -4023,7 +5116,8 @@ enum sparx5_target {
FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
/* LRN:COMMON:AUTOAGE_CFG_2 */
-#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
+#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4)
#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\
@@ -4038,7 +5132,8 @@ enum sparx5_target {
FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
-#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
+#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
#define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0)
#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\
@@ -4101,7 +5196,8 @@ enum sparx5_target {
FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
+#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0)
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\
@@ -4116,10 +5212,12 @@ enum sparx5_target {
FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
+#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
+#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0)
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\
@@ -4134,13 +5232,16 @@ enum sparx5_target {
FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
+#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
+#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
+#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0)
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\
@@ -4155,7 +5256,8 @@ enum sparx5_target {
FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
+#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR,\
+ t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
#define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -4230,7 +5332,8 @@ enum sparx5_target {
FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
+#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR,\
+ t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
#define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -4251,7 +5354,8 @@ enum sparx5_target {
FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
+#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR,\
+ t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
#define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -4326,7 +5430,8 @@ enum sparx5_target {
FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
+#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR,\
+ t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
#define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -4347,7 +5452,8 @@ enum sparx5_target {
FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
+#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR,\
+ t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
#define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -4422,7 +5528,8 @@ enum sparx5_target {
FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
+#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR,\
+ t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
#define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -4443,7 +5550,8 @@ enum sparx5_target {
FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
/* PORT_CONF:HW_CFG:DEV5G_MODES */
-#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0)
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\
@@ -4524,7 +5632,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
/* PORT_CONF:HW_CFG:DEV10G_MODES */
-#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
+#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0)
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\
@@ -4599,7 +5708,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
/* PORT_CONF:HW_CFG:DEV25G_MODES */
-#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
+#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0)
#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\
@@ -4650,7 +5760,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
/* PORT_CONF:HW_CFG:QSGMII_ENA */
-#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
+#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\
@@ -4725,7 +5836,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
-#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
+#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF,\
+ 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9)
#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\
@@ -4770,7 +5882,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
-#define PTP_PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
+#define PTP_PTP_PIN_INTR __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0)
#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\
@@ -4779,7 +5892,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x)
/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
-#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
+#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0)
#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\
@@ -4788,7 +5902,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
-#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
+#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0)
#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\
@@ -4797,7 +5912,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
-#define PTP_PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
+#define PTP_PTP_DOM_CFG __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9)
#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\
@@ -4824,10 +5940,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
-#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 0, r, 2, 4)
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 0, r, 2, 4)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
-#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
+#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0)
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\
@@ -4836,7 +5954,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
-#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
+#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0)
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\
@@ -4845,10 +5964,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
-#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
+#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
-#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
+#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0)
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\
@@ -4857,10 +5978,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
-#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
+#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
-#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
+#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26)
#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\
@@ -4917,7 +6040,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
-#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
+#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0)
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\
@@ -4926,10 +6050,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
-#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
+#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
-#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
+#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0)
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\
@@ -4938,7 +6064,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
-#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
+#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0)
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\
@@ -4947,10 +6074,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
-#define PTP_NTP_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
+#define PTP_NTP_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
-#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
+#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
@@ -4959,7 +6088,8 @@ enum sparx5_target {
FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
-#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
+#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\
@@ -4968,7 +6098,8 @@ enum sparx5_target {
FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
-#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
+#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3)
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\
@@ -4983,7 +6114,8 @@ enum sparx5_target {
FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
-#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
+#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP,\
+ 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
#define PTP_PHAD_CTRL_PHAD_ENA BIT(7)
#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\
@@ -5010,10 +6142,12 @@ enum sparx5_target {
FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x)
/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
-#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
+#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP,\
+ 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
/* QFWD:SYSTEM:SWITCH_PORT_MODE */
-#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
+#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD,\
+ 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
#define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19)
#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\
@@ -5070,7 +6204,8 @@ enum sparx5_target {
FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
/* QRES:RES_CTRL:RES_CFG */
-#define QRES_RES_CFG(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+#define QRES_RES_CFG(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0)
#define QRES_RES_CFG_WM_HIGH_SET(x)\
@@ -5079,7 +6214,8 @@ enum sparx5_target {
FIELD_GET(QRES_RES_CFG_WM_HIGH, x)
/* QRES:RES_CTRL:RES_STAT */
-#define QRES_RES_STAT(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
+#define QRES_RES_STAT(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
#define QRES_RES_STAT_MAXUSE GENMASK(20, 0)
#define QRES_RES_STAT_MAXUSE_SET(x)\
@@ -5088,7 +6224,8 @@ enum sparx5_target {
FIELD_GET(QRES_RES_STAT_MAXUSE, x)
/* QRES:RES_CTRL:RES_STAT_CUR */
-#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
+#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0)
#define QRES_RES_STAT_CUR_INUSE_SET(x)\
@@ -5097,7 +6234,8 @@ enum sparx5_target {
FIELD_GET(QRES_RES_STAT_CUR_INUSE, x)
/* DEVCPU_QS:XTR:XTR_GRP_CFG */
-#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
#define QS_XTR_GRP_CFG_MODE_SET(x)\
@@ -5118,10 +6256,12 @@ enum sparx5_target {
FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
/* DEVCPU_QS:XTR:XTR_RD */
-#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+#define QS_XTR_RD(r) __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
/* DEVCPU_QS:XTR:XTR_FLUSH */
-#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+#define QS_XTR_FLUSH __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0)
#define QS_XTR_FLUSH_FLUSH_SET(x)\
@@ -5130,7 +6270,8 @@ enum sparx5_target {
FIELD_GET(QS_XTR_FLUSH_FLUSH, x)
/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
-#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
#define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0)
#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\
@@ -5139,7 +6280,8 @@ enum sparx5_target {
FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
/* DEVCPU_QS:INJ:INJ_GRP_CFG */
-#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
#define QS_INJ_GRP_CFG_MODE_SET(x)\
@@ -5154,10 +6296,12 @@ enum sparx5_target {
FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
/* DEVCPU_QS:INJ:INJ_WR */
-#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+#define QS_INJ_WR(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
/* DEVCPU_QS:INJ:INJ_CTRL */
-#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+#define QS_INJ_CTRL(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
@@ -5190,7 +6334,8 @@ enum sparx5_target {
FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
/* DEVCPU_QS:INJ:INJ_STATUS */
-#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+#define QS_INJ_STATUS __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
@@ -5211,7 +6356,8 @@ enum sparx5_target {
FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
/* QSYS:PAUSE_CFG:PAUSE_CFG */
-#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
+#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14)
#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\
@@ -5238,7 +6384,8 @@ enum sparx5_target {
FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
/* QSYS:PAUSE_CFG:ATOP */
-#define QSYS_ATOP(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
+#define QSYS_ATOP(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
#define QSYS_ATOP_ATOP GENMASK(11, 0)
#define QSYS_ATOP_ATOP_SET(x)\
@@ -5247,7 +6394,8 @@ enum sparx5_target {
FIELD_GET(QSYS_ATOP_ATOP, x)
/* QSYS:PAUSE_CFG:FWD_PRESSURE */
-#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
+#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
#define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1)
#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\
@@ -5262,7 +6410,8 @@ enum sparx5_target {
FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */
-#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
+#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0)
#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\
@@ -5271,7 +6420,8 @@ enum sparx5_target {
FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
/* QSYS:CALCFG:CAL_AUTO */
-#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
+#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS,\
+ 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
#define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0)
#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\
@@ -5280,7 +6430,8 @@ enum sparx5_target {
FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x)
/* QSYS:CALCFG:CAL_CTRL */
-#define QSYS_CAL_CTRL __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
+#define QSYS_CAL_CTRL __REG(TARGET_QSYS,\
+ 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
#define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11)
#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\
@@ -5301,7 +6452,8 @@ enum sparx5_target {
FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
/* QSYS:RAM_CTRL:RAM_INIT */
-#define QSYS_RAM_INIT __REG(TARGET_QSYS, 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
+#define QSYS_RAM_INIT __REG(TARGET_QSYS,\
+ 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
#define QSYS_RAM_INIT_RAM_INIT BIT(1)
#define QSYS_RAM_INIT_RAM_INIT_SET(x)\
@@ -5316,7 +6468,8 @@ enum sparx5_target {
FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
/* REW:COMMON:OWN_UPSID */
-#define REW_OWN_UPSID(r) __REG(TARGET_REW, 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
+#define REW_OWN_UPSID(r) __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
#define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define REW_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -5324,8 +6477,71 @@ enum sparx5_target {
#define REW_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x)
+/* REW:COMMON:RTAG_ETAG_CTRL */
+#define REW_RTAG_ETAG_CTRL(r) __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 560, r, 70, 4)
+
+#define REW_RTAG_ETAG_CTRL_IPE_TBL GENMASK(9, 3)
+#define REW_RTAG_ETAG_CTRL_IPE_TBL_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+#define REW_RTAG_ETAG_CTRL_IPE_TBL_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA GENMASK(2, 1)
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA, x)
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA, x)
+
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG BIT(0)
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x)
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x)
+
+/* REW:COMMON:ES0_CTRL */
+#define REW_ES0_CTRL __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 852, 0, 1, 4)
+
+#define REW_ES0_CTRL_ES0_BY_RT_FWD BIT(5)
+#define REW_ES0_CTRL_ES0_BY_RT_FWD_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_BY_RT_FWD, x)
+#define REW_ES0_CTRL_ES0_BY_RT_FWD_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_BY_RT_FWD, x)
+
+#define REW_ES0_CTRL_ES0_BY_RLEG BIT(4)
+#define REW_ES0_CTRL_ES0_BY_RLEG_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_BY_RLEG, x)
+#define REW_ES0_CTRL_ES0_BY_RLEG_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_BY_RLEG, x)
+
+#define REW_ES0_CTRL_ES0_DPORT_ENA BIT(3)
+#define REW_ES0_CTRL_ES0_DPORT_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_DPORT_ENA, x)
+#define REW_ES0_CTRL_ES0_DPORT_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_DPORT_ENA, x)
+
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG BIT(2)
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_FRM_LBK_CFG, x)
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_FRM_LBK_CFG, x)
+
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA BIT(1)
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA, x)
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA, x)
+
+#define REW_ES0_CTRL_ES0_LU_ENA BIT(0)
+#define REW_ES0_CTRL_ES0_LU_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_LU_ENA, x)
+#define REW_ES0_CTRL_ES0_LU_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_LU_ENA, x)
+
/* REW:PORT:PORT_VLAN_CFG */
-#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13)
#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\
@@ -5345,8 +6561,49 @@ enum sparx5_target {
#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+/* REW:PORT:PCP_MAP_DE0 */
+#define REW_PCP_MAP_DE0(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 4, r, 8, 4)
+
+#define REW_PCP_MAP_DE0_PCP_DE0 GENMASK(2, 0)
+#define REW_PCP_MAP_DE0_PCP_DE0_SET(x)\
+ FIELD_PREP(REW_PCP_MAP_DE0_PCP_DE0, x)
+#define REW_PCP_MAP_DE0_PCP_DE0_GET(x)\
+ FIELD_GET(REW_PCP_MAP_DE0_PCP_DE0, x)
+
+/* REW:PORT:PCP_MAP_DE1 */
+#define REW_PCP_MAP_DE1(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 36, r, 8, 4)
+
+#define REW_PCP_MAP_DE1_PCP_DE1 GENMASK(2, 0)
+#define REW_PCP_MAP_DE1_PCP_DE1_SET(x)\
+ FIELD_PREP(REW_PCP_MAP_DE1_PCP_DE1, x)
+#define REW_PCP_MAP_DE1_PCP_DE1_GET(x)\
+ FIELD_GET(REW_PCP_MAP_DE1_PCP_DE1, x)
+
+/* REW:PORT:DEI_MAP_DE0 */
+#define REW_DEI_MAP_DE0(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 68, r, 8, 4)
+
+#define REW_DEI_MAP_DE0_DEI_DE0 BIT(0)
+#define REW_DEI_MAP_DE0_DEI_DE0_SET(x)\
+ FIELD_PREP(REW_DEI_MAP_DE0_DEI_DE0, x)
+#define REW_DEI_MAP_DE0_DEI_DE0_GET(x)\
+ FIELD_GET(REW_DEI_MAP_DE0_DEI_DE0, x)
+
+/* REW:PORT:DEI_MAP_DE1 */
+#define REW_DEI_MAP_DE1(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 100, r, 8, 4)
+
+#define REW_DEI_MAP_DE1_DEI_DE1 BIT(0)
+#define REW_DEI_MAP_DE1_DEI_DE1_SET(x)\
+ FIELD_PREP(REW_DEI_MAP_DE1_DEI_DE1, x)
+#define REW_DEI_MAP_DE1_DEI_DE1_GET(x)\
+ FIELD_GET(REW_DEI_MAP_DE1_DEI_DE1, x)
+
/* REW:PORT:TAG_CTRL */
-#define REW_TAG_CTRL(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
+#define REW_TAG_CTRL(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13)
#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\
@@ -5384,8 +6641,25 @@ enum sparx5_target {
#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+/* REW:PORT:DSCP_MAP */
+#define REW_DSCP_MAP(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 136, 0, 1, 4)
+
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA BIT(1)
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(x)\
+ FIELD_PREP(REW_DSCP_MAP_DSCP_UPDATE_ENA, x)
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA_GET(x)\
+ FIELD_GET(REW_DSCP_MAP_DSCP_UPDATE_ENA, x)
+
+#define REW_DSCP_MAP_DSCP_REMAP_ENA BIT(0)
+#define REW_DSCP_MAP_DSCP_REMAP_ENA_SET(x)\
+ FIELD_PREP(REW_DSCP_MAP_DSCP_REMAP_ENA, x)
+#define REW_DSCP_MAP_DSCP_REMAP_ENA_GET(x)\
+ FIELD_GET(REW_DSCP_MAP_DSCP_REMAP_ENA, x)
+
/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
-#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
+#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
@@ -5424,7 +6698,8 @@ enum sparx5_target {
FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
-#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
+#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0)
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
@@ -5433,7 +6708,8 @@ enum sparx5_target {
FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
-#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0)
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\
@@ -5442,13 +6718,16 @@ enum sparx5_target {
FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
-#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
+#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
-#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
+#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
-#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
+#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0)
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\
@@ -5457,7 +6736,8 @@ enum sparx5_target {
FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
-#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
+#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2)
#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\
@@ -5472,7 +6752,8 @@ enum sparx5_target {
FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
/* REW:RAM_CTRL:RAM_INIT */
-#define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
+#define REW_RAM_INIT __REG(TARGET_REW,\
+ 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
#define REW_RAM_INIT_RAM_INIT BIT(1)
#define REW_RAM_INIT_RAM_INIT_SET(x)\
@@ -5486,8 +6767,333 @@ enum sparx5_target {
#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES0_CTRL __REG(TARGET_VCAP_ES0,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES0_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_ES0_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_CMD, x)
+#define VCAP_ES0_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_CMD, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_ES0_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ADDR, x)
+#define VCAP_ES0_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_ES0_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_ES0_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_SHOT, x)
+#define VCAP_ES0_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_ES0_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_ES0_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_CLEAR_CACHE, x)
+#define VCAP_ES0_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES0_CFG __REG(TARGET_VCAP_ES0,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES0_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_ES0_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CFG_MV_NUM_POS, x)
+#define VCAP_ES0_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_ES0_CFG_MV_NUM_POS, x)
+
+#define VCAP_ES0_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_ES0_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_ES0_CFG_MV_SIZE, x)
+#define VCAP_ES0_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_ES0_CFG_MV_SIZE, x)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES0_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES0_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES0_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES0_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES0_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES0_VCAP_TG_DAT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES0_IDX __REG(TARGET_VCAP_ES0,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES0_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_ES0_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_ES0_IDX_CORE_IDX, x)
+#define VCAP_ES0_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_ES0_IDX_CORE_IDX, x)
+
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES0_MAP __REG(TARGET_VCAP_ES0,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES0_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_ES0_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_ES0_MAP_CORE_MAP, x)
+#define VCAP_ES0_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_ES0_MAP_CORE_MAP, x)
+
+/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES0_VCAP_STICKY __REG(TARGET_VCAP_ES0,\
+ 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
+ FIELD_PREP(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
+ FIELD_GET(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+
+/* VCAP_ES0:VCAP_CONST:VCAP_VER */
+#define VCAP_ES0_VCAP_VER __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES0_ENTRY_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES0_ENTRY_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES0_ENTRY_SWCNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES0_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES0_ACTION_DEF_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES0_ACTION_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES0_CNT_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:CORE_CNT */
+#define VCAP_ES0_CORE_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:IF_CNT */
+#define VCAP_ES0_IF_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
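
The VCAP_UPDATE_CTRL fields above imply a simple handshake for moving the
cache to and from a TCAM row: program a command and address, pulse
UPDATE_SHOT, and wait for hardware to clear it. A hedged sketch, assuming
command value 0 means "write" (the encoding is not defined in this header)
and reusing the driver's spx5_wr()/spx5_rd() accessors:

static void example_es0_cache_write(struct sparx5 *sparx5, u32 addr)
{
        spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(0) | /* assumed: write */
                VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
                VCAP_ES0_CTRL_UPDATE_SHOT_SET(1),
                sparx5, VCAP_ES0_CTRL);

        /* UPDATE_SHOT is self-clearing once the command completes */
        while (VCAP_ES0_CTRL_UPDATE_SHOT_GET(spx5_rd(sparx5, VCAP_ES0_CTRL)))
                cpu_relax();
}
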
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES2_CTRL __REG(TARGET_VCAP_ES2,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES2_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_ES2_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_CMD, x)
+#define VCAP_ES2_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_CMD, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_ES2_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ADDR, x)
+#define VCAP_ES2_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_ES2_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_ES2_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_SHOT, x)
+#define VCAP_ES2_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_ES2_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_ES2_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_CLEAR_CACHE, x)
+#define VCAP_ES2_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES2_CFG __REG(TARGET_VCAP_ES2,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES2_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_ES2_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CFG_MV_NUM_POS, x)
+#define VCAP_ES2_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_ES2_CFG_MV_NUM_POS, x)
+
+#define VCAP_ES2_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_ES2_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_ES2_CFG_MV_SIZE, x)
+#define VCAP_ES2_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_ES2_CFG_MV_SIZE, x)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES2_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES2_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES2_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES2_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES2_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES2_VCAP_TG_DAT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES2_IDX __REG(TARGET_VCAP_ES2,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES2_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_ES2_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_ES2_IDX_CORE_IDX, x)
+#define VCAP_ES2_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_ES2_IDX_CORE_IDX, x)
+
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES2_MAP __REG(TARGET_VCAP_ES2,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES2_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_ES2_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_ES2_MAP_CORE_MAP, x)
+#define VCAP_ES2_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_ES2_MAP_CORE_MAP, x)
+
+/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES2_VCAP_STICKY __REG(TARGET_VCAP_ES2,\
+ 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
+ FIELD_PREP(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
+ FIELD_GET(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+
+/* VCAP_ES2:VCAP_CONST:VCAP_VER */
+#define VCAP_ES2_VCAP_VER __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES2_ENTRY_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES2_ENTRY_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES2_ENTRY_SWCNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES2_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES2_ACTION_DEF_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES2_ACTION_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES2_CNT_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:CORE_CNT */
+#define VCAP_ES2_CORE_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:IF_CNT */
+#define VCAP_ES2_IF_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
-#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_SUPER_CTRL_UPDATE_CMD GENMASK(24, 22)
#define VCAP_SUPER_CTRL_UPDATE_CMD_SET(x)\
@@ -5538,7 +7144,8 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x)
/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */
-#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_SUPER_CFG_MV_NUM_POS GENMASK(31, 16)
#define VCAP_SUPER_CFG_MV_NUM_POS_SET(x)\
@@ -5553,25 +7160,32 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_CFG_MV_SIZE, x)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
-#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */
-#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
-#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */
-#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
-#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */
-#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */
-#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_SUPER_IDX_CORE_IDX GENMASK(3, 0)
#define VCAP_SUPER_IDX_CORE_IDX_SET(x)\
@@ -5580,7 +7194,8 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_IDX_CORE_IDX, x)
/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */
-#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_SUPER_MAP_CORE_MAP GENMASK(2, 0)
#define VCAP_SUPER_MAP_CORE_MAP_SET(x)\
@@ -5589,37 +7204,48 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_MAP_CORE_MAP, x)
/* VCAP_SUPER:VCAP_CONST:VCAP_VER */
-#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */
-#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */
-#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */
-#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */
-#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */
-#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */
-#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */
-#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:CORE_CNT */
-#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:IF_CNT */
-#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
-#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
+#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1)
#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\
@@ -5634,7 +7260,8 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
/* VOP:RAM_CTRL:RAM_INIT */
-#define VOP_RAM_INIT __REG(TARGET_VOP, 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
+#define VOP_RAM_INIT __REG(TARGET_VOP,\
+ 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
#define VOP_RAM_INIT_RAM_INIT BIT(1)
#define VOP_RAM_INIT_RAM_INIT_SET(x)\
@@ -5649,7 +7276,8 @@ enum sparx5_target {
FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x)
/* XQS:SYSTEM:STAT_CFG */
-#define XQS_STAT_CFG __REG(TARGET_XQS, 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
+#define XQS_STAT_CFG __REG(TARGET_XQS,\
+ 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
#define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18)
#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\
@@ -5676,7 +7304,8 @@ enum sparx5_target {
FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x)
/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
-#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
+#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0)
#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\
@@ -5685,7 +7314,8 @@ enum sparx5_target {
FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
-#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
+#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0)
#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\
@@ -5694,7 +7324,8 @@ enum sparx5_target {
FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
-#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
+#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0)
#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\
@@ -5703,7 +7334,8 @@ enum sparx5_target {
FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
-#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
+#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0)
#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\
@@ -5712,6 +7344,7 @@ enum sparx5_target {
FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
/* XQS:STAT:CNT */
-#define XQS_CNT(g) __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+#define XQS_CNT(g) __REG(TARGET_XQS,\
+ 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
#endif /* _SPARX5_MAIN_REGS_H_ */
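
For readers decoding the macros above: every __REG() invocation carries
eleven arguments in the order target id, target instance, target count,
group base, group instance, group count, group width, register address,
register instance, register count, register width. A minimal sketch of
the byte-offset arithmetic, assuming it mirrors the driver's spx5_addr()
helper:

static inline u32 sparx5_reg_off(u32 gbase, u32 ginst, u32 gwidth,
                                 u32 raddr, u32 rinst, u32 rwidth)
{
        /* offset inside the target = group base + replicated group,
         * plus register address + replicated register
         */
        return gbase + ginst * gwidth + raddr + rinst * rwidth;
}

/* e.g. REW_PCP_MAP_DE1(g, r) resolves to 360448 + g * 256 + 36 + r * 4 */
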
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
new file mode 100644
index 000000000000..8ada5cee1342
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static int sparx5_policer_service_conf_set(struct sparx5 *sparx5,
+ struct sparx5_policer *pol)
+{
+ u32 idx, pup_tokens, max_pup_tokens, burst, thres;
+ struct sparx5_sdlb_group *g;
+ u64 rate;
+
+ g = &sdlb_groups[pol->group];
+ idx = pol->idx;
+
+ rate = pol->rate * 1000;
+ burst = pol->burst;
+
+ pup_tokens = sparx5_sdlb_pup_token_get(sparx5, g->pup_interval, rate);
+ max_pup_tokens =
+ sparx5_sdlb_pup_token_get(sparx5, g->pup_interval, g->max_rate);
+
+ thres = DIV_ROUND_UP(burst, g->min_burst);
+
+ spx5_wr(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(pup_tokens), sparx5,
+ ANA_AC_SDLB_PUP_TOKENS(idx, 0));
+
+ spx5_rmw(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(max_pup_tokens),
+ ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, sparx5,
+ ANA_AC_SDLB_INH_CTRL(idx, 0));
+
+ spx5_rmw(ANA_AC_SDLB_THRES_THRES_SET(thres), ANA_AC_SDLB_THRES_THRES,
+ sparx5, ANA_AC_SDLB_THRES(idx, 0));
+
+ return 0;
+}
+
+int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol)
+{
+ /* More policer types will be added later */
+ switch (pol->type) {
+ case SPX5_POL_SERVICE:
+ return sparx5_policer_service_conf_set(sparx5, pol);
+ default:
+ break;
+ }
+
+ return 0;
+}
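
The service policer above programs a token bucket: the PUP (periodic
update) token count follows from the group's update interval and the
requested rate (pol->rate appears to arrive in kbit/s, hence the
multiplication by 1000), and the threshold is the burst size expressed in
units of the group's minimum burst. A sketch of the threshold arithmetic,
with an illustrative min_burst value:

/* Illustrative only: with a hypothetical min_burst of 2048 bytes, a
 * 5000-byte burst programs a threshold of 3 units, matching the
 * DIV_ROUND_UP(burst, g->min_burst) call above.
 */
static u32 example_sdlb_thres(u32 burst_bytes, u32 min_burst)
{
        return (burst_bytes + min_burst - 1) / min_burst;
}
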
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c
new file mode 100644
index 000000000000..b4b280c6138b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static u32 sparx5_pool_id_to_idx(u32 id)
+{
+ return --id;
+}
+
+u32 sparx5_pool_idx_to_id(u32 idx)
+{
+ return ++idx;
+}
+
+/* Release resource from pool.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id)
+{
+ struct sparx5_pool_entry *e_itr;
+
+ e_itr = (pool + sparx5_pool_id_to_idx(id));
+ if (e_itr->ref_cnt == 0)
+ return -EINVAL;
+
+ return --e_itr->ref_cnt;
+}
+
+/* Get resource from pool.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id)
+{
+ struct sparx5_pool_entry *e_itr;
+ int i;
+
+ for (i = 0, e_itr = pool; i < size; i++, e_itr++) {
+ if (e_itr->ref_cnt == 0) {
+ *id = sparx5_pool_idx_to_id(i);
+ return ++e_itr->ref_cnt;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/* Get resource from pool that matches index.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
+ u32 *id)
+{
+ struct sparx5_pool_entry *e_itr;
+ int i, ret = -ENOSPC;
+
+ for (i = 0, e_itr = pool; i < size; i++, e_itr++) {
+ /* Pool index of first free entry */
+ if (e_itr->ref_cnt == 0 && ret == -ENOSPC)
+ ret = i;
+ /* TC index already in use? */
+ if (e_itr->idx == idx && e_itr->ref_cnt > 0) {
+ ret = i;
+ break;
+ }
+ }
+
+ /* Did we find a free entry? */
+ if (ret >= 0) {
+ *id = sparx5_pool_idx_to_id(ret);
+ e_itr = (pool + ret);
+ e_itr->idx = idx;
+ return ++e_itr->ref_cnt;
+ }
+
+ return ret;
+}
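
Taken together, these helpers form a small 1-based, reference-counted
allocator. A hedged usage sketch; the entry array and its size are
illustrative, not taken from the driver:

static struct sparx5_pool_entry example_pool[16];

static void example_pool_usage(void)
{
        u32 id;

        if (sparx5_pool_get(example_pool, ARRAY_SIZE(example_pool), &id) < 0)
                return; /* -ENOSPC: every entry is already referenced */

        /* ... program the hardware resource numbered 'id' (1-based) ... */

        sparx5_pool_put(example_pool, ARRAY_SIZE(example_pool), id);
}
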
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 107b9cd931c0..3a1b1a1f5a19 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -1071,6 +1071,11 @@ int sparx5_port_init(struct sparx5 *sparx5,
/* Discard pause frame 01-80-C2-00-00-01 */
spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
+ /* Discard SMAC multicast */
+ spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
+ ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
+ sparx5, ANA_CL_FILTER_CTRL(port->portno));
+
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
conf->portmode == PHY_INTERFACE_MODE_SGMII) {
err = sparx5_serdes_set(sparx5, port, conf);
@@ -1151,11 +1156,69 @@ int sparx5_port_qos_set(struct sparx5_port *port,
{
sparx5_port_qos_dscp_set(port, &qos->dscp);
sparx5_port_qos_pcp_set(port, &qos->pcp);
+ sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
+ sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
sparx5_port_qos_default_set(port, qos);
return 0;
}
+int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp_rewr *qos)
+{
+ int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 pcp, dei;
+
+ /* If PCP is trusted, use the mapping table, indexed by classified QoS
+ * class, to map QoS and DP level to the tagged PCP and DEI. Otherwise
+ * use the classified PCP, which equals the frame PCP.
+ */
+ if (qos->enable)
+ mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;
+
+ spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
+ REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
+ REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
+ port->sparx5, REW_TAG_CTRL(port->portno));
+
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ /* Extract PCP and DEI */
+ pcp = qos->map.map[i];
+ if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
+ dei = 1;
+ else
+ dei = 0;
+
+ /* Rewrite PCP and DEI, for each classified QoS class and DP
+ * level. This table is only used if tag ctrl mode is set to
+ * 'mapped'.
+ *
+ * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0
+ * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
+ */
+ if (dei) {
+ spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
+ REW_PCP_MAP_DE1_PCP_DE1, sparx5,
+ REW_PCP_MAP_DE1(port->portno, i));
+
+ spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
+ REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
+ REW_DEI_MAP_DE1(port->portno, i));
+ } else {
+ spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
+ REW_PCP_MAP_DE0_PCP_DE0, sparx5,
+ REW_PCP_MAP_DE0(port->portno, i));
+
+ spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
+ REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
+ REW_DEI_MAP_DE0(port->portno, i));
+ }
+ }
+
+ return 0;
+}
+
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
struct sparx5_port_qos_pcp *qos)
{
@@ -1184,6 +1247,45 @@ int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
return 0;
}
+void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
+ int mode)
+{
+ spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
+ ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
+ ANA_CL_QOS_CFG(port->portno));
+}
+
+int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp_rewr *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ bool rewr = false;
+ u16 dscp;
+ int i;
+
+ /* On egress, rewrite DSCP value to either classified DSCP or frame
+ * DSCP: if enabled, the classified DSCP; if disabled, the frame DSCP.
+ */
+ if (qos->enable)
+ rewr = true;
+
+ spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
+ REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
+ REW_DSCP_MAP(port->portno));
+
+ /* On ingress, map each classified QoS class and DP to classified DSCP
+ * value. This mapping table is global for all ports.
+ */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ dscp = qos->map.map[i];
+ spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
+ ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
+ ANA_CL_QOS_MAP_CFG(i));
+ }
+
+ return 0;
+}
+
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
struct sparx5_port_qos_dscp *qos)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index fbafe22e25cc..607c4ff1df6b 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -9,6 +9,17 @@
#include "sparx5_main.h"
+/* Port PCP rewrite mode */
+#define SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED 0
+#define SPARX5_PORT_REW_TAG_CTRL_DEFAULT 1
+#define SPARX5_PORT_REW_TAG_CTRL_MAPPED 2
+
+/* Port DSCP rewrite mode */
+#define SPARX5_PORT_REW_DSCP_NONE 0
+#define SPARX5_PORT_REW_DSCP_IF_ZERO 1
+#define SPARX5_PORT_REW_DSCP_SELECTED 2
+#define SPARX5_PORT_REW_DSCP_ALL 3
+
static inline bool sparx5_port_is_2g5(int portno)
{
return portno >= 16 && portno <= 47;
@@ -99,6 +110,15 @@ struct sparx5_port_qos_pcp_map {
u8 map[SPARX5_PORT_QOS_PCP_DEI_COUNT];
};
+struct sparx5_port_qos_pcp_rewr_map {
+ u16 map[SPX5_PRIOS];
+};
+
+#define SPARX5_PORT_QOS_DP_NUM 4
+struct sparx5_port_qos_dscp_rewr_map {
+ u16 map[SPX5_PRIOS * SPARX5_PORT_QOS_DP_NUM];
+};
+
#define SPARX5_PORT_QOS_DSCP_COUNT 64
struct sparx5_port_qos_dscp_map {
u8 map[SPARX5_PORT_QOS_DSCP_COUNT];
@@ -110,15 +130,27 @@ struct sparx5_port_qos_pcp {
bool dp_enable;
};
+struct sparx5_port_qos_pcp_rewr {
+ struct sparx5_port_qos_pcp_rewr_map map;
+ bool enable;
+};
+
struct sparx5_port_qos_dscp {
struct sparx5_port_qos_dscp_map map;
bool qos_enable;
bool dp_enable;
};
+struct sparx5_port_qos_dscp_rewr {
+ struct sparx5_port_qos_dscp_rewr_map map;
+ bool enable;
+};
+
struct sparx5_port_qos {
struct sparx5_port_qos_pcp pcp;
+ struct sparx5_port_qos_pcp_rewr pcp_rewr;
struct sparx5_port_qos_dscp dscp;
+ struct sparx5_port_qos_dscp_rewr dscp_rewr;
u8 default_prio;
};
@@ -127,9 +159,18 @@ int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos);
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
struct sparx5_port_qos_pcp *qos);
+int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp_rewr *qos);
+
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
struct sparx5_port_qos_dscp *qos);
+void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
+ int mode);
+
+int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp_rewr *qos);
+
int sparx5_port_qos_default_set(const struct sparx5_port *port,
const struct sparx5_port_qos *qos);
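
To illustrate the new rewrite structures (a hypothetical configuration, not taken from the patch): pcp_rewr.map is indexed by classified QoS class, while dscp_rewr.map carries one entry per (QoS class, DP level) combination:

        static int example_rewr_setup(struct sparx5_port *port)
        {
                struct sparx5_port_qos qos = { 0 };
                int i;

                /* Egress PCP rewrite: identity-map QoS class i to PCP i (DEI 0) */
                qos.pcp_rewr.enable = true;
                for (i = 0; i < SPX5_PRIOS; i++)
                        qos.pcp_rewr.map.map[i] = i;

                /* Egress DSCP rewrite: a distinct DSCP per (QoS class, DP) entry */
                qos.dscp_rewr.enable = true;
                for (i = 0; i < SPX5_PRIOS * SPARX5_PORT_QOS_DP_NUM; i++)
                        qos.dscp_rewr.map.map[i] = 2 * i;

                return sparx5_port_qos_set(port, &qos);
        }
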
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
new file mode 100644
index 000000000000..8dee1ab1fa75
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define SPX5_PSFP_SF_CNT 1024
+#define SPX5_PSFP_SG_CONFIG_CHANGE_SLEEP 1000
+#define SPX5_PSFP_SG_CONFIG_CHANGE_TIMEO 100000
+
+/* Pool of available service policers */
+static struct sparx5_pool_entry sparx5_psfp_fm_pool[SPX5_SDLB_CNT];
+
+/* Pool of available stream gates */
+static struct sparx5_pool_entry sparx5_psfp_sg_pool[SPX5_PSFP_SG_CNT];
+
+/* Pool of available stream filters */
+static struct sparx5_pool_entry sparx5_psfp_sf_pool[SPX5_PSFP_SF_CNT];
+
+static int sparx5_psfp_sf_get(u32 *id)
+{
+ return sparx5_pool_get(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+}
+
+static int sparx5_psfp_sf_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+}
+
+static int sparx5_psfp_sg_get(u32 idx, u32 *id)
+{
+ return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT,
+ idx, id);
+}
+
+static int sparx5_psfp_sg_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT, id);
+}
+
+static int sparx5_psfp_fm_get(u32 idx, u32 *id)
+{
+ return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, idx,
+ id);
+}
+
+static int sparx5_psfp_fm_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, id);
+}
+
+u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx)
+{
+ return ANA_L2_TSN_CFG_TSN_SFID_GET(spx5_rd(sparx5,
+ ANA_L2_TSN_CFG(isdx)));
+}
+
+u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx)
+{
+ return ANA_L2_DLB_CFG_DLB_IDX_GET(spx5_rd(sparx5,
+ ANA_L2_DLB_CFG(isdx)));
+}
+
+u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid)
+{
+ return ANA_AC_TSN_SF_CFG_TSN_SGID_GET(spx5_rd(sparx5,
+ ANA_AC_TSN_SF_CFG(sfid)));
+}
+
+void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid)
+{
+ spx5_rmw(ANA_L2_TSN_CFG_TSN_SFID_SET(sfid), ANA_L2_TSN_CFG_TSN_SFID,
+ sparx5, ANA_L2_TSN_CFG(isdx));
+
+ spx5_rmw(ANA_L2_DLB_CFG_DLB_IDX_SET(fmid), ANA_L2_DLB_CFG_DLB_IDX,
+ sparx5, ANA_L2_DLB_CFG(isdx));
+}
+
+/* Internal priority value to internal priority selector */
+static u32 sparx5_psfp_ipv_to_ips(s32 ipv)
+{
+ return ipv > 0 ? (ipv | BIT(3)) : 0;
+}
+
+static int sparx5_psfp_sgid_get_status(struct sparx5 *sparx5)
+{
+ return spx5_rd(sparx5, ANA_AC_SG_ACCESS_CTRL);
+}
+
+static int sparx5_psfp_sgid_wait_for_completion(struct sparx5 *sparx5)
+{
+ u32 val;
+
+ return readx_poll_timeout(sparx5_psfp_sgid_get_status, sparx5, val,
+ !ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(val),
+ SPX5_PSFP_SG_CONFIG_CHANGE_SLEEP,
+ SPX5_PSFP_SG_CONFIG_CHANGE_TIMEO);
+}
+
+static void sparx5_psfp_sg_config_change(struct sparx5 *sparx5, u32 id)
+{
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_SGID_SET(id), sparx5,
+ ANA_AC_SG_ACCESS_CTRL);
+
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(1) |
+ ANA_AC_SG_ACCESS_CTRL_SGID_SET(id),
+ sparx5, ANA_AC_SG_ACCESS_CTRL);
+
+ if (sparx5_psfp_sgid_wait_for_completion(sparx5) < 0)
+ pr_debug("%s:%d timed out waiting for sgid completion",
+ __func__, __LINE__);
+}
+
+static void sparx5_psfp_sf_set(struct sparx5 *sparx5, u32 id,
+ const struct sparx5_psfp_sf *sf)
+{
+ /* Configure stream filter */
+ spx5_rmw(ANA_AC_TSN_SF_CFG_TSN_SGID_SET(sf->sgid) |
+ ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(sf->max_sdu) |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_SET(sf->sblock_osize) |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_SET(sf->sblock_osize_ena),
+ ANA_AC_TSN_SF_CFG_TSN_SGID | ANA_AC_TSN_SF_CFG_TSN_MAX_SDU |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA,
+ sparx5, ANA_AC_TSN_SF_CFG(id));
+}
+
+static int sparx5_psfp_sg_set(struct sparx5 *sparx5, u32 id,
+ const struct sparx5_psfp_sg *sg)
+{
+ u32 ips, base_lsb, base_msb, accum_time_interval = 0;
+ const struct sparx5_psfp_gce *gce;
+ int i;
+
+ ips = sparx5_psfp_ipv_to_ips(sg->ipv);
+ base_lsb = sg->basetime.tv_sec & 0xffffffff;
+ base_msb = sg->basetime.tv_sec >> 32;
+
+ /* Set stream gate id */
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_SGID_SET(id), sparx5,
+ ANA_AC_SG_ACCESS_CTRL);
+
+ /* Write AdminPSFP values */
+ spx5_wr(sg->basetime.tv_nsec, sparx5, ANA_AC_SG_CONFIG_REG_1);
+ spx5_wr(base_lsb, sparx5, ANA_AC_SG_CONFIG_REG_2);
+
+ spx5_rmw(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(base_msb) |
+ ANA_AC_SG_CONFIG_REG_3_INIT_IPS_SET(ips) |
+ ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_SET(sg->num_entries) |
+ ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_SET(sg->gate_state) |
+ ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_SET(1),
+ ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB |
+ ANA_AC_SG_CONFIG_REG_3_INIT_IPS |
+ ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH |
+ ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE,
+ sparx5, ANA_AC_SG_CONFIG_REG_3);
+
+ spx5_wr(sg->cycletime, sparx5, ANA_AC_SG_CONFIG_REG_4);
+ spx5_wr(sg->cycletimeext, sparx5, ANA_AC_SG_CONFIG_REG_5);
+
+ /* For each scheduling entry */
+ for (i = 0; i < sg->num_entries; i++) {
+ gce = &sg->gce[i];
+ ips = sparx5_psfp_ipv_to_ips(gce->ipv);
+ /* Hardware needs the TimeInterval values to be cumulative */
+ accum_time_interval += gce->interval;
+ /* Set gate state */
+ spx5_wr(ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(ips) |
+ ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_SET(gce->gate_state),
+ sparx5, ANA_AC_SG_GCL_GS_CONFIG(i));
+
+ /* Set time interval */
+ spx5_wr(accum_time_interval, sparx5,
+ ANA_AC_SG_GCL_TI_CONFIG(i));
+
+ /* Set maximum octets */
+ spx5_wr(gce->maxoctets, sparx5, ANA_AC_SG_GCL_OCT_CONFIG(i));
+ }
+
+ return 0;
+}
+
+static int sparx5_sdlb_conf_set(struct sparx5 *sparx5,
+ struct sparx5_psfp_fm *fm)
+{
+ int (*sparx5_sdlb_group_action)(struct sparx5 *sparx5, u32 group,
+ u32 idx);
+
+ if (!fm->pol.rate && !fm->pol.burst)
+ sparx5_sdlb_group_action = &sparx5_sdlb_group_del;
+ else
+ sparx5_sdlb_group_action = &sparx5_sdlb_group_add;
+
+ sparx5_policer_conf_set(sparx5, &fm->pol);
+
+ return sparx5_sdlb_group_action(sparx5, fm->pol.group, fm->pol.idx);
+}
+
+int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
+ u32 *id)
+{
+ int ret;
+
+ ret = sparx5_psfp_sf_get(id);
+ if (ret < 0)
+ return ret;
+
+ sparx5_psfp_sf_set(sparx5, *id, sf);
+
+ return 0;
+}
+
+int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id)
+{
+ const struct sparx5_psfp_sf sf = { 0 };
+
+ sparx5_psfp_sf_set(sparx5, id, &sf);
+
+ return sparx5_psfp_sf_put(id);
+}
+
+int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_sg *sg, u32 *id)
+{
+ ktime_t basetime;
+ int ret;
+
+ ret = sparx5_psfp_sg_get(uidx, id);
+ if (ret < 0)
+ return ret;
+ /* Was already in use, no need to reconfigure */
+ if (ret > 1)
+ return 0;
+
+ /* Calculate basetime for this stream gate */
+ sparx5_new_base_time(sparx5, sg->cycletime, 0, &basetime);
+ sg->basetime = ktime_to_timespec64(basetime);
+
+ sparx5_psfp_sg_set(sparx5, *id, sg);
+
+ /* Signal hardware to copy AdminPSFP values into OperPSFP values */
+ sparx5_psfp_sg_config_change(sparx5, *id);
+
+ return 0;
+}
+
+int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id)
+{
+ const struct sparx5_psfp_sg sg = { 0 };
+ int ret;
+
+ ret = sparx5_psfp_sg_put(id);
+ if (ret < 0)
+ return ret;
+ /* Stream gate still in use? */
+ if (ret > 0)
+ return 0;
+
+ return sparx5_psfp_sg_set(sparx5, id, &sg);
+}
+
+int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_fm *fm, u32 *id)
+{
+ struct sparx5_policer *pol = &fm->pol;
+ int ret;
+
+ /* Get flow meter */
+ ret = sparx5_psfp_fm_get(uidx, &fm->pol.idx);
+ if (ret < 0)
+ return ret;
+ /* Was already in use, no need to reconfigure */
+ if (ret > 1)
+ return 0;
+
+ ret = sparx5_sdlb_group_get_by_rate(sparx5, pol->rate, pol->burst);
+ if (ret < 0)
+ return ret;
+
+ fm->pol.group = ret;
+
+ ret = sparx5_sdlb_conf_set(sparx5, fm);
+ if (ret < 0)
+ return ret;
+
+ *id = fm->pol.idx;
+
+ return 0;
+}
+
+int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id)
+{
+ struct sparx5_psfp_fm fm = { .pol.idx = id,
+ .pol.type = SPX5_POL_SERVICE };
+ int ret;
+
+ /* Find the group that this lb belongs to */
+ ret = sparx5_sdlb_group_get_by_index(sparx5, id, &fm.pol.group);
+ if (ret < 0)
+ return ret;
+
+ ret = sparx5_psfp_fm_put(id);
+ if (ret < 0)
+ return ret;
+ /* Do not reset flow-meter if still in use. */
+ if (ret > 0)
+ return 0;
+
+ return sparx5_sdlb_conf_set(sparx5, &fm);
+}
+
+void sparx5_psfp_init(struct sparx5 *sparx5)
+{
+ const struct sparx5_sdlb_group *group;
+ int i;
+
+ for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ group = &sdlb_groups[i];
+ sparx5_sdlb_group_init(sparx5, group->max_rate,
+ group->min_burst, group->frame_size, i);
+ }
+
+ spx5_wr(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_SET(1),
+ sparx5, ANA_AC_SG_CYCLETIME_UPDATE_PERIOD);
+
+ spx5_rmw(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(1),
+ ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, sparx5, ANA_L2_FWD_CFG);
+}
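
A short worked example of the cumulative TimeInterval programming in sparx5_psfp_sg_set() above (illustrative values):

        /* Per-entry intervals from the gate action (ns): 30000, 20000, 50000
         * Written to ANA_AC_SG_GCL_TI_CONFIG(0..2):      30000, 50000, 100000
         *
         * i.e. each register holds the running sum up to and including its
         * entry, so the last value marks where the final entry ends within
         * the cycle.
         */
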
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 69e76634f9aa..0edb98cef7e4 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -476,8 +476,7 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
return 0;
}
-static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
index 379e540e5e6a..5f34febaee6b 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -9,6 +9,63 @@
#include "sparx5_main.h"
#include "sparx5_qos.h"
+/* Calculate new base_time based on cycle_time.
+ *
+ * The hardware requires a base_time that is always in the future.
+ * We define threshold_time as current_time + (2 * cycle_time).
+ * If base_time is below threshold_time this function recalculates it to be in
+ * the interval:
+ * threshold_time <= base_time < (threshold_time + cycle_time)
+ *
+ * A very simple algorithm could be like this:
+ * new_base_time = org_base_time + N * cycle_time
+ * using the lowest N so that new_base_time >= threshold_time.
+ */
+void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
+ const ktime_t org_base_time, ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time, new_time;
+ struct timespec64 ts;
+ u64 nr_of_cycles_p2;
+ u64 nr_of_cycles;
+ u64 diff_time;
+
+ new_time = org_base_time;
+
+ sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+ diff_time = threshold_time - new_time;
+ nr_of_cycles = div_u64(diff_time, cycle_time);
+ nr_of_cycles_p2 = 1; /* Use 2^0 as start value */
+
+ if (new_time >= threshold_time) {
+ *new_base_time = new_time;
+ return;
+ }
+
+ /* Calculate the smallest power of 2 (nr_of_cycles_p2)
+ * that is larger than nr_of_cycles.
+ */
+ while (nr_of_cycles_p2 < nr_of_cycles)
+ nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */
+
+ /* Add as big chunks (power of 2 * cycle_time)
+ * as possible for each power of 2
+ */
+ while (nr_of_cycles_p2) {
+ if (new_time < threshold_time) {
+ new_time += cycle_time * nr_of_cycles_p2;
+ while (new_time < threshold_time)
+ new_time += cycle_time * nr_of_cycles_p2;
+ new_time -= cycle_time * nr_of_cycles_p2;
+ }
+ nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
+ }
+ new_time += cycle_time;
+ *new_base_time = new_time;
+}
+
/* Max rates for leak groups */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
1048568, /* 1.049 Gbps */
@@ -393,6 +450,8 @@ int sparx5_qos_init(struct sparx5 *sparx5)
if (ret < 0)
return ret;
+ sparx5_psfp_init(sparx5);
+
return 0;
}
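
A worked example of sparx5_new_base_time() (illustrative numbers, in ms for readability): with cycle_time = 1, org_base_time = 0 and current_time = 100.25, threshold_time becomes 102.25:

        /* nr_of_cycles = 102, rounded up to the power of two 128.
         * The chunk loop advances new_time by 128, 64, 32, ... cycles while
         * staying below the threshold: 0 -> 64 -> 96 -> 100 -> 102.
         * The final "new_time += cycle_time" gives base_time = 103, the
         * first cycle start in [102.25, 103.25).
         */
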
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
new file mode 100644
index 000000000000..f5267218caeb
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT] = {
+ { SPX5_SDLB_GROUP_RATE_MAX, 8192 / 1, 64 }, /* 25 G */
+ { 15000000000ULL, 8192 / 1, 64 }, /* 15 G */
+ { 10000000000ULL, 8192 / 1, 64 }, /* 10 G */
+ { 5000000000ULL, 8192 / 1, 64 }, /* 5 G */
+ { 2500000000ULL, 8192 / 1, 64 }, /* 2.5 G */
+ { 1000000000ULL, 8192 / 2, 64 }, /* 1 G */
+ { 500000000ULL, 8192 / 2, 64 }, /* 500 M */
+ { 100000000ULL, 8192 / 4, 64 }, /* 100 M */
+ { 50000000ULL, 8192 / 4, 64 }, /* 50 M */
+ { 5000000ULL, 8192 / 8, 64 } /* 5 M */
+};
+
+int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5)
+{
+ u32 clk_per_100ps;
+ u64 clk_hz;
+
+ clk_per_100ps = HSCH_SYS_CLK_PER_100PS_GET(spx5_rd(sparx5,
+ HSCH_SYS_CLK_PER));
+ if (!clk_per_100ps)
+ clk_per_100ps = SPX5_CLK_PER_100PS_DEFAULT;
+
+ clk_hz = (10 * 1000 * 1000) / clk_per_100ps;
+ return clk_hz * 1000;
+}
+
+static int sparx5_sdlb_pup_interval_get(struct sparx5 *sparx5, u32 max_token,
+ u64 max_rate)
+{
+ u64 clk_hz;
+
+ clk_hz = sparx5_sdlb_clk_hz_get(sparx5);
+
+ return div64_u64((8 * clk_hz * max_token), max_rate);
+}
+
+int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval, u64 rate)
+{
+ u64 clk_hz;
+
+ if (!rate)
+ return SPX5_SDLB_PUP_TOKEN_DISABLE;
+
+ clk_hz = sparx5_sdlb_clk_hz_get(sparx5);
+
+ return DIV64_U64_ROUND_UP((rate * pup_interval), (clk_hz * 8));
+}
+
+static void sparx5_sdlb_group_disable(struct sparx5 *sparx5, u32 group)
+{
+ spx5_rmw(ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(0),
+ ANA_AC_SDLB_PUP_CTRL_PUP_ENA, sparx5,
+ ANA_AC_SDLB_PUP_CTRL(group));
+}
+
+static void sparx5_sdlb_group_enable(struct sparx5 *sparx5, u32 group)
+{
+ spx5_rmw(ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(1),
+ ANA_AC_SDLB_PUP_CTRL_PUP_ENA, sparx5,
+ ANA_AC_SDLB_PUP_CTRL(group));
+}
+
+static u32 sparx5_sdlb_group_get_first(struct sparx5 *sparx5, u32 group)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_START(group));
+
+ return ANA_AC_SDLB_XLB_START_LBSET_START_GET(val);
+}
+
+static u32 sparx5_sdlb_group_get_next(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_NEXT(lb));
+
+ return ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(val);
+}
+
+static bool sparx5_sdlb_group_is_first(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ return lb == sparx5_sdlb_group_get_first(sparx5, group);
+}
+
+static bool sparx5_sdlb_group_is_last(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ return lb == sparx5_sdlb_group_get_next(sparx5, group, lb);
+}
+
+static bool sparx5_sdlb_group_is_empty(struct sparx5 *sparx5, u32 group)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_PUP_CTRL(group));
+
+ return ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(val) == 0;
+}
+
+static u32 sparx5_sdlb_group_get_last(struct sparx5 *sparx5, u32 group)
+{
+ u32 itr, next;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, group);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
+
+static bool sparx5_sdlb_group_is_singular(struct sparx5 *sparx5, u32 group)
+{
+ if (sparx5_sdlb_group_is_empty(sparx5, group))
+ return false;
+
+ return sparx5_sdlb_group_get_first(sparx5, group) ==
+ sparx5_sdlb_group_get_last(sparx5, group);
+}
+
+static int sparx5_sdlb_group_get_adjacent(struct sparx5 *sparx5, u32 group,
+ u32 idx, u32 *prev, u32 *next,
+ u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_sdlb_group_get_first(sparx5, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -EINVAL; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+}
+
+static int sparx5_sdlb_group_get_count(struct sparx5 *sparx5, u32 group)
+{
+ u32 itr, next;
+ int count = 0;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, group);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+ if (itr == next)
+ return count;
+
+ itr = next;
+ count++;
+ }
+}
+
+int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst)
+{
+ const struct sparx5_sdlb_group *group;
+ u64 rate_bps;
+ int i, count;
+
+ rate_bps = (u64)rate * 1000; /* widen before multiply */
+
+ for (i = SPX5_SDLB_GROUP_CNT - 1; i >= 0; i--) {
+ group = &sdlb_groups[i];
+
+ count = sparx5_sdlb_group_get_count(sparx5, i);
+
+ /* Check that this group is not full.
+ * According to LB group configuration rules: the number of XLBs
+ * in a group must not exceed PUP_INTERVAL/4 - 1.
+ */
+ if (count > ((group->pup_interval / 4) - 1))
+ continue;
+
+ if (rate_bps < group->max_rate)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ if (sparx5_sdlb_group_is_empty(sparx5, i))
+ continue;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, i);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int sparx5_sdlb_group_link(struct sparx5 *sparx5, u32 group, u32 idx,
+ u32 first, u32 next, bool empty)
+{
+ /* Stop leaking */
+ sparx5_sdlb_group_disable(sparx5, group);
+
+ if (empty)
+ return 0;
+
+ /* Link insertion lb to next lb */
+ spx5_wr(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(next) |
+ ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(group),
+ sparx5, ANA_AC_SDLB_XLB_NEXT(idx));
+
+ /* Set the first lb */
+ spx5_wr(ANA_AC_SDLB_XLB_START_LBSET_START_SET(first), sparx5,
+ ANA_AC_SDLB_XLB_START(group));
+
+ /* Start leaking */
+ sparx5_sdlb_group_enable(sparx5, group);
+
+ return 0;
+}
+
+int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx)
+{
+ u32 first, next;
+
+ /* We always add to head of the list */
+ first = idx;
+
+ if (sparx5_sdlb_group_is_empty(sparx5, group))
+ next = idx;
+ else
+ next = sparx5_sdlb_group_get_first(sparx5, group);
+
+ return sparx5_sdlb_group_link(sparx5, group, idx, first, next, false);
+}
+
+int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ if (sparx5_sdlb_group_get_adjacent(sparx5, group, idx, &prev, &next,
+ &first) < 0) {
+ pr_err("%s:%d Could not find idx: %d in group: %d", __func__,
+ __LINE__, idx, group);
+ return -EINVAL;
+ }
+
+ if (sparx5_sdlb_group_is_singular(sparx5, group)) {
+ empty = true;
+ } else if (sparx5_sdlb_group_is_last(sparx5, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_sdlb_group_is_first(sparx5, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_sdlb_group_link(sparx5, group, idx, first, next, empty);
+}
+
+void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
+ u32 frame_size, u32 idx)
+{
+ u32 thres_shift, mask = 0x01, power = 0;
+ struct sparx5_sdlb_group *group;
+ u64 max_token;
+
+ group = &sdlb_groups[idx];
+
+ /* Number of positions to right-shift LB's threshold value. */
+ while ((min_burst & mask) == 0) {
+ power++;
+ mask <<= 1;
+ }
+ thres_shift = SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET - power;
+
+ max_token = (min_burst > SPX5_SDLB_PUP_TOKEN_MAX) ?
+ SPX5_SDLB_PUP_TOKEN_MAX :
+ min_burst;
+ group->pup_interval =
+ sparx5_sdlb_pup_interval_get(sparx5, max_token, max_rate);
+
+ group->frame_size = frame_size;
+
+ spx5_wr(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(group->pup_interval),
+ sparx5, ANA_AC_SDLB_PUP_INTERVAL(idx));
+
+ spx5_wr(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(frame_size),
+ sparx5, ANA_AC_SDLB_FRM_RATE_TOKENS(idx));
+
+ spx5_wr(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(thres_shift), sparx5,
+ ANA_AC_SDLB_LBGRP_MISC(idx));
+}
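
For intuition, the update math above with assumed numbers (the ~625 MHz system clock is illustrative, not taken from the patch), using the 25 G group from sdlb_groups (max_rate 25 Gbps, min_burst 8192 and thus max_token 8192):

        /* pup_interval = 8 * clk_hz * max_token / max_rate
         *              = 8 * 625e6 * 8192 / 25e9 ~= 1638 clock cycles
         *
         * Each LB in the group is then refilled every pup_interval cycles:
         *
         * pup_token = DIV64_U64_ROUND_UP(rate * pup_interval, clk_hz * 8)
         *
         * so a higher configured rate means more tokens per update, while
         * the group's update interval stays fixed.
         */
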
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
index 205246b5af82..e80f3166db7d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -5,6 +5,7 @@
*/
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "sparx5_tc.h"
#include "sparx5_main.h"
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
index adab88e6b21f..7ef470b28566 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
@@ -21,6 +21,80 @@ enum SPX5_PORT_MASK_MODE {
SPX5_PMM_OR_PGID_MASK,
};
+/* Controls ES0 forwarding */
+enum SPX5_FORWARDING_SEL {
+ SPX5_FWSEL_NO_ACTION,
+ SPX5_FWSEL_COPY_TO_LOOPBACK,
+ SPX5_FWSEL_REDIRECT_TO_LOOPBACK,
+ SPX5_FWSEL_DISCARD,
+};
+
+/* Controls tag A (outer tagging) */
+enum SPX5_OUTER_TAG_SEL {
+ SPX5_OTAG_PORT,
+ SPX5_OTAG_TAG_A,
+ SPX5_OTAG_FORCED_PORT,
+ SPX5_OTAG_UNTAG,
+};
+
+/* Selects TPID for ES0 tag A */
+enum SPX5_TPID_A_SEL {
+ SPX5_TPID_A_8100,
+ SPX5_TPID_A_88A8,
+ SPX5_TPID_A_CUST1,
+ SPX5_TPID_A_CUST2,
+ SPX5_TPID_A_CUST3,
+ SPX5_TPID_A_CLASSIFIED,
+};
+
+/* Selects VID for ES0 tag A */
+enum SPX5_VID_A_SEL {
+ SPX5_VID_A_CLASSIFIED,
+ SPX5_VID_A_VAL,
+ SPX5_VID_A_IFH,
+ SPX5_VID_A_RESERVED,
+};
+
+/* Select PCP source for ES0 tag A */
+enum SPX5_PCP_A_SEL {
+ SPX5_PCP_A_CLASSIFIED,
+ SPX5_PCP_A_VAL,
+ SPX5_PCP_A_RESERVED,
+ SPX5_PCP_A_POPPED,
+ SPX5_PCP_A_MAPPED_0,
+ SPX5_PCP_A_MAPPED_1,
+ SPX5_PCP_A_MAPPED_2,
+ SPX5_PCP_A_MAPPED_3,
+};
+
+/* Select DEI source for ES0 tag A */
+enum SPX5_DEI_A_SEL {
+ SPX5_DEI_A_CLASSIFIED,
+ SPX5_DEI_A_VAL,
+ SPX5_DEI_A_REW,
+ SPX5_DEI_A_POPPED,
+ SPX5_DEI_A_MAPPED_0,
+ SPX5_DEI_A_MAPPED_1,
+ SPX5_DEI_A_MAPPED_2,
+ SPX5_DEI_A_MAPPED_3,
+};
+
+/* Controls tag B (inner tagging) */
+enum SPX5_INNER_TAG_SEL {
+ SPX5_ITAG_NO_PUSH,
+ SPX5_ITAG_PUSH_B_TAG,
+};
+
+/* Selects TPID for ES0 tag B. */
+enum SPX5_TPID_B_SEL {
+ SPX5_TPID_B_8100,
+ SPX5_TPID_B_88A8,
+ SPX5_TPID_B_CUST1,
+ SPX5_TPID_B_CUST2,
+ SPX5_TPID_B_CUST3,
+ SPX5_TPID_B_CLASSIFIED,
+};
+
int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
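
As a reading aid, one consistent combination of these ES0 tag A selectors (mirroring the VLAN modify handling added to sparx5_tc_flower.c later in this patch):

        /* VCAP_AF_PUSH_OUTER_TAG = SPX5_OTAG_TAG_A        emit tag A
         * VCAP_AF_TAG_A_TPID_SEL = SPX5_TPID_A_8100       TPID 0x8100
         * VCAP_AF_TAG_A_VID_SEL  = SPX5_VID_A_VAL         VID from VID_A_VAL
         * VCAP_AF_TAG_A_PCP_SEL  = SPX5_PCP_A_VAL         PCP from PCP_A_VAL
         * VCAP_AF_TAG_A_DEI_SEL  = SPX5_DEI_A_CLASSIFIED  classified DEI
         */
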
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 1ed304a816cc..b36819aafaca 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -4,11 +4,13 @@
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*/
+#include <net/tc_act/tc_gate.h>
#include <net/tcp.h>
#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_tc.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
@@ -26,249 +28,33 @@ struct sparx5_multiple_rules {
struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};
-struct sparx5_tc_flower_parse_usage {
- struct flow_cls_offload *fco;
- struct flow_rule *frule;
- struct vcap_rule *vrule;
- u16 l3_proto;
- u8 l4_proto;
- unsigned int used_keys;
-};
-
-struct sparx5_tc_rule_pkt_cnt {
- u64 cookie;
- u32 pkts;
-};
-
-/* These protocols have dedicated keysets in IS2 and a TC dissector
- * ETH_P_ARP does not have a TC dissector
- */
-static u16 sparx5_tc_known_etypes[] = {
- ETH_P_ALL,
- ETH_P_ARP,
- ETH_P_IP,
- ETH_P_IPV6,
-};
-
-enum sparx5_is2_arp_opcode {
- SPX5_IS2_ARP_REQUEST,
- SPX5_IS2_ARP_REPLY,
- SPX5_IS2_RARP_REQUEST,
- SPX5_IS2_RARP_REPLY,
-};
-
-enum tc_arp_opcode {
- TC_ARP_OP_RESERVED,
- TC_ARP_OP_REQUEST,
- TC_ARP_OP_REPLY,
-};
-
-static bool sparx5_tc_is_known_etype(u16 etype)
-{
- int idx;
-
- /* For now this only knows about IS2 traffic classification */
- for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
- if (sparx5_tc_known_etypes[idx] == etype)
- return true;
-
- return false;
-}
-
-static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
- enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
- struct flow_match_eth_addrs match;
- struct vcap_u48_key smac, dmac;
- int err = 0;
-
- flow_rule_match_eth_addrs(st->frule, &match);
-
- if (!is_zero_ether_addr(match.mask->src)) {
- vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
- vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
- if (err)
- goto out;
- }
-
- if (!is_zero_ether_addr(match.mask->dst)) {
- vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
- vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- int err = 0;
-
- if (st->l3_proto == ETH_P_IP) {
- struct flow_match_ipv4_addrs mt;
-
- flow_rule_match_ipv4_addrs(st->frule, &mt);
- if (mt.mask->src) {
- err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_IP4_SIP,
- be32_to_cpu(mt.key->src),
- be32_to_cpu(mt.mask->src));
- if (err)
- goto out;
- }
- if (mt.mask->dst) {
- err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_IP4_DIP,
- be32_to_cpu(mt.key->dst),
- be32_to_cpu(mt.mask->dst));
- if (err)
- goto out;
- }
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- int err = 0;
-
- if (st->l3_proto == ETH_P_IPV6) {
- struct flow_match_ipv6_addrs mt;
- struct vcap_u128_key sip;
- struct vcap_u128_key dip;
-
- flow_rule_match_ipv6_addrs(st->frule, &mt);
- /* Check if address masks are non-zero */
- if (!ipv6_addr_any(&mt.mask->src)) {
- vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
- vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
- err = vcap_rule_add_key_u128(st->vrule,
- VCAP_KF_L3_IP6_SIP, &sip);
- if (err)
- goto out;
- }
- if (!ipv6_addr_any(&mt.mask->dst)) {
- vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
- vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
- err = vcap_rule_add_key_u128(st->vrule,
- VCAP_KF_L3_IP6_DIP, &dip);
- if (err)
- goto out;
- }
- }
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
- return err;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
- return err;
-}
-
static int
-sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_control mt;
- u32 value, mask;
int err = 0;
- flow_rule_match_control(st->frule, &mt);
-
- if (mt.mask->flags) {
- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
- value = 1; /* initial fragment */
- mask = 0x3;
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
-
+ switch (st->tpid) {
+ case ETH_P_8021Q:
err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_FRAGMENT_TYPE,
- value, mask);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- struct flow_match_ports mt;
- u16 value, mask;
- int err = 0;
-
- flow_rule_match_ports(st->frule, &mt);
-
- if (mt.mask->src) {
- value = be16_to_cpu(mt.key->src);
- mask = be16_to_cpu(mt.mask->src);
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
- mask);
- if (err)
- goto out;
- }
-
- if (mt.mask->dst) {
- value = be16_to_cpu(mt.key->dst);
- mask = be16_to_cpu(mt.mask->dst);
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
- mask);
- if (err)
- goto out;
+ VCAP_KF_8021Q_TPID,
+ SPX5_TPID_SEL_8100, ~0);
+ break;
+ case ETH_P_8021AD:
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_8021Q_TPID,
+ SPX5_TPID_SEL_88A8, ~0);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ break;
}
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
return err;
}
static int
-sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
{
struct flow_match_basic mt;
int err = 0;
@@ -277,7 +63,7 @@ sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
if (mt.mask->n_proto) {
st->l3_proto = be16_to_cpu(mt.key->n_proto);
- if (!sparx5_tc_is_known_etype(st->l3_proto)) {
+ if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
st->l3_proto, ~0);
if (err)
@@ -292,6 +78,13 @@ sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
VCAP_BIT_0);
if (err)
goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
}
}
@@ -309,6 +102,13 @@ sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
VCAP_BIT_0);
if (err)
goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
} else {
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_IP_PROTO,
@@ -328,253 +128,131 @@ out:
}
static int
-sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
- enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
- enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
- struct flow_match_vlan mt;
- int err;
-
- flow_rule_match_vlan(st->frule, &mt);
-
- if (mt.mask->vlan_id) {
- err = vcap_rule_add_key_u32(st->vrule, vid_key,
- mt.key->vlan_id,
- mt.mask->vlan_id);
- if (err)
- goto out;
- }
-
- if (mt.mask->vlan_priority) {
- err = vcap_rule_add_key_u32(st->vrule, pcp_key,
- mt.key->vlan_priority,
- mt.mask->vlan_priority);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
-
- return 0;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- struct flow_match_tcp mt;
- u16 tcp_flags_mask;
- u16 tcp_flags_key;
- enum vcap_bit val;
+ struct flow_match_control mt;
+ u32 value, mask;
int err = 0;
- flow_rule_match_tcp(st->frule, &mt);
- tcp_flags_key = be16_to_cpu(mt.key->flags);
- tcp_flags_mask = be16_to_cpu(mt.mask->flags);
-
- if (tcp_flags_mask & TCPHDR_FIN) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_FIN)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_SYN) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_SYN)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_RST) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_RST)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_PSH) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_PSH)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
- if (err)
- goto out;
- }
+ flow_rule_match_control(st->frule, &mt);
- if (tcp_flags_mask & TCPHDR_ACK) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_ACK)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
- if (err)
- goto out;
- }
+ if (mt.mask->flags) {
+ if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
+ value = 1; /* initial fragment */
+ mask = 0x3;
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
- if (tcp_flags_mask & TCPHDR_URG) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_URG)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ value, mask);
if (err)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
return err;
out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
static int
-sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_arp mt;
- u16 value, mask;
- u32 ipval, ipmsk;
- int err;
-
- flow_rule_match_arp(st->frule, &mt);
-
- if (mt.mask->op) {
- mask = 0x3;
- if (st->l3_proto == ETH_P_ARP) {
- value = mt.key->op == TC_ARP_OP_REQUEST ?
- SPX5_IS2_ARP_REQUEST :
- SPX5_IS2_ARP_REPLY;
- } else { /* RARP */
- value = mt.key->op == TC_ARP_OP_REQUEST ?
- SPX5_IS2_RARP_REQUEST :
- SPX5_IS2_RARP_REPLY;
- }
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
- value, mask);
- if (err)
- goto out;
- }
-
- /* The IS2 ARP keyset does not support ARP hardware addresses */
- if (!is_zero_ether_addr(mt.mask->sha) ||
- !is_zero_ether_addr(mt.mask->tha)) {
- err = -EINVAL;
- goto out;
- }
-
- if (mt.mask->sip) {
- ipval = be32_to_cpu((__force __be32)mt.key->sip);
- ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
-
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
- ipval, ipmsk);
- if (err)
- goto out;
- }
-
- if (mt.mask->tip) {
- ipval = be32_to_cpu((__force __be32)mt.key->tip);
- ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
-
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
- ipval, ipmsk);
- if (err)
- goto out;
+ if (st->admin->vtype != VCAP_TYPE_IS0) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "cvlan not supported in this VCAP");
+ return -EINVAL;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
-
- return 0;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
- return err;
+ return vcap_tc_flower_handler_cvlan_usage(st);
}
static int
-sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_ip mt;
- int err = 0;
-
- flow_rule_match_ip(st->frule, &mt);
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
+ int err;
- if (mt.mask->tos) {
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
- mt.key->tos,
- mt.mask->tos);
- if (err)
- goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ vid_key = VCAP_KF_8021Q_VID0;
+ pcp_key = VCAP_KF_8021Q_PCP0;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
+ err = vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
+ if (err)
+ return err;
- return err;
+ if (st->admin->vtype == VCAP_TYPE_ES0 && st->tpid)
+ err = sparx5_tc_flower_es0_tpid(st);
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
return err;
}
-static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
- [FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
- [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
- [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
+static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
- [FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
- [FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
- [FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
- [FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};
-static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
+static int sparx5_tc_use_dissectors(struct vcap_tc_flower_parse_usage *st,
struct vcap_admin *admin,
- struct vcap_rule *vrule,
- u16 *l3_proto)
+ struct vcap_rule *vrule)
{
- struct sparx5_tc_flower_parse_usage state = {
- .fco = fco,
- .vrule = vrule,
- .l3_proto = ETH_P_ALL,
- };
int idx, err = 0;
- state.frule = flow_cls_offload_flow_rule(fco);
for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
- if (!flow_rule_match_key(state.frule, idx))
+ if (!flow_rule_match_key(st->frule, idx))
continue;
if (!sparx5_tc_flower_usage_handlers[idx])
continue;
- err = sparx5_tc_flower_usage_handlers[idx](&state);
+ err = sparx5_tc_flower_usage_handlers[idx](st);
if (err)
return err;
}
- if (state.frule->match.dissector->used_keys ^ state.used_keys) {
- NL_SET_ERR_MSG_MOD(fco->common.extack,
+ if (st->frule->match.dissector->used_keys ^ st->used_keys) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
"Unsupported match item");
return -ENOENT;
}
- if (l3_proto)
- *l3_proto = state.l3_proto;
return err;
}
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
+ struct net_device *ndev,
struct flow_cls_offload *fco,
- struct vcap_admin *admin)
+ bool ingress)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
struct flow_action_entry *actent, *last_actent = NULL;
@@ -600,21 +278,24 @@ static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
last_actent = actent; /* Save last action for later check */
}
- /* Check that last action is a goto */
- if (last_actent->id != FLOW_ACTION_GOTO) {
+ /* Check if last action is a goto
+ * The last chain/lookup does not need to have a goto action
+ */
+ if (last_actent->id == FLOW_ACTION_GOTO) {
+ /* Check if the destination chain is in one of the VCAPs */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Last action must be 'goto'");
return -EINVAL;
}
- /* Check if the goto chain is in the next lookup */
- if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
- last_actent->chain_index)) {
- NL_SET_ERR_MSG_MOD(fco->common.extack,
- "Invalid goto chain");
- return -EINVAL;
- }
-
/* Catch unsupported combinations of actions */
if (action_mask & BIT(FLOW_ACTION_TRAP) &&
action_mask & BIT(FLOW_ACTION_ACCEPT)) {
@@ -623,21 +304,60 @@ static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
return -EOPNOTSUPP;
}
+ if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_POP)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan push and pop action");
+ return -EOPNOTSUPP;
+ }
+
+ if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan push and modify action");
+ return -EOPNOTSUPP;
+ }
+
+ if (action_mask & BIT(FLOW_ACTION_VLAN_POP) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan pop and modify action");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
-/* Add a rule counter action - only IS2 is considered for now */
+/* Add a rule counter action */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
struct vcap_rule *vrule)
{
int err;
- err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID, vrule->id);
- if (err)
- return err;
-
- vcap_rule_set_counter_id(vrule, vrule->id);
- return err;
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ break;
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
+ vrule->id);
+ if (err)
+ return err;
+ vcap_rule_set_counter_id(vrule, vrule->id);
+ break;
+ case VCAP_TYPE_IS2:
+ case VCAP_TYPE_ES2:
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
+ vrule->id);
+ if (err)
+ return err;
+ vcap_rule_set_counter_id(vrule, vrule->id);
+ break;
+ default:
+ pr_err("%s:%d: vcap type: %d not supported\n",
+ __func__, __LINE__, admin->vtype);
+ break;
+ }
+ return 0;
}
/* Collect all port keysets and apply the first of them, possibly wildcarded */
@@ -818,22 +538,490 @@ static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
return err;
}
+/* Add the actionset that is the default for the VCAP type */
+static int sparx5_tc_set_actionset(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ enum vcap_actionfield_set aset;
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ aset = VCAP_AFS_CLASSIFICATION;
+ break;
+ case VCAP_TYPE_IS2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ case VCAP_TYPE_ES0:
+ aset = VCAP_AFS_ES0;
+ break;
+ case VCAP_TYPE_ES2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ default:
+ pr_err("%s:%d: %s\n", __func__, __LINE__, "Invalid VCAP type");
+ return -EINVAL;
+ }
+ /* Do not overwrite any current actionset */
+ if (vrule->actionset == VCAP_AFS_NO_VALUE)
+ err = vcap_set_rule_set_actionset(vrule, aset);
+ return err;
+}
+
+/* Add the VCAP key to match on for a rule target value */
+static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int target_cid)
+{
+ int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
+ int err;
+
+ if (!link_val)
+ return 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ /* Add NXT_IDX key for chaining rules between IS0 instances */
+ err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ 1, /* enable */
+ ~0);
+ if (err)
+ return err;
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
+ link_val, /* target */
+ ~0);
+ case VCAP_TYPE_IS2:
+ /* Add PAG key for chaining rules from IS0 */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
+ link_val, /* target */
+ ~0);
+ case VCAP_TYPE_ES0:
+ case VCAP_TYPE_ES2:
+ /* Add ISDX key for chaining rules from IS0 */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
+ ~0);
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* Add the VCAP action that adds a target value to a rule */
+static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int from_cid, int to_cid)
+{
+ struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
+ int diff, err = 0;
+
+ if (!to_admin) {
+ pr_err("%s:%d: unsupported chain direction: %d\n",
+ __func__, __LINE__, to_cid);
+ return -EINVAL;
+ }
+
+ diff = vcap_chain_offset(vctrl, from_cid, to_cid);
+ if (!diff)
+ return 0;
+
+ if (admin->vtype == VCAP_TYPE_IS0 &&
+ to_admin->vtype == VCAP_TYPE_IS0) {
+ /* Between IS0 instances the G_IDX value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
+ 1); /* Replace */
+ if (err)
+ goto out;
+ } else if (admin->vtype == VCAP_TYPE_IS0 &&
+ to_admin->vtype == VCAP_TYPE_IS2) {
+ /* Between IS0 and IS2 the PAG value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PAG_OVERRIDE_MASK,
+ 0xff);
+ if (err)
+ goto out;
+ } else if (admin->vtype == VCAP_TYPE_IS0 &&
+ (to_admin->vtype == VCAP_TYPE_ES0 ||
+ to_admin->vtype == VCAP_TYPE_ES2)) {
+ /* Between IS0 and ES0/ES2 the ISDX value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
+ diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else {
+ pr_err("%s:%d: unsupported chain destination: %d\n",
+ __func__, __LINE__, to_cid);
+ err = -EOPNOTSUPP;
+ }
+out:
+ return err;
+}
+
+static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
+ struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");
+ return -EINVAL;
+ }
+
+ if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
+ act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");
+ return -EINVAL;
+ }
+
+ if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");
+ return -EINVAL;
+ }
+
+ if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");
+ return -EINVAL;
+ }
+
+ sg->gate_state = true;
+ sg->ipv = act->gate.prio;
+ sg->num_entries = act->gate.num_entries;
+ sg->cycletime = act->gate.cycletime;
+ sg->cycletimeext = act->gate.cycletimeext;
+
+ for (i = 0; i < sg->num_entries; i++) {
+ sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
+ sg->gce[i].interval = act->gate.entries[i].interval;
+ sg->gce[i].ipv = act->gate.entries[i].ipv;
+ sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
+ }
+
+ return 0;
+}
+
+static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
+ struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ pol->type = SPX5_POL_SERVICE;
+ pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol->burst = act->police.burst;
+ pol->idx = act->hw_index;
+
+ /* rate is now in kbit */
+ if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");
+ return -EINVAL;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
+ struct vcap_rule *vrule, int sg_idx,
+ int pol_idx, struct sparx5_psfp_sg *sg,
+ struct sparx5_psfp_fm *fm,
+ struct sparx5_psfp_sf *sf)
+{
+ u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;
+ int ret;
+
+ /* Must always have a stream gate - max sdu (filter option) is evaluated
+ * after frames have passed the gate, so in case of only a policer, we
+ * allocate a stream gate that is always open.
+ */
+ if (sg_idx < 0) {
+ sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
+ sg->ipv = 0; /* Disabled */
+ sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
+ sg->num_entries = 1;
+ sg->gate_state = 1; /* Open */
+ sg->gate_enabled = 1;
+ sg->gce[0].gate_state = 1;
+ sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
+ sg->gce[0].ipv = 0;
+ sg->gce[0].maxoctets = 0; /* Disabled */
+ }
+
+ ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);
+ if (ret < 0)
+ return ret;
+
+ if (pol_idx >= 0) {
+ /* Add new flow-meter */
+ ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Map stream filter to stream gate */
+ sf->sgid = psfp_sgid;
+
+ /* Add new stream filter and map it to a stream gate */
+ ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);
+ if (ret < 0)
+ return ret;
+
+ /* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
+ sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);
+
+ ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_BIT_1);
+ if (ret)
+ return ret;
+
+ ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Handle the action trap for a VCAP rule */
+static int sparx5_tc_action_trap(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS2:
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM, 0);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_MASK_MODE,
+ SPX5_PMM_REPLACE_ALL);
+ break;
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_FWD_SEL,
+ SPX5_FWSEL_REDIRECT_TO_LOOPBACK);
+ break;
+ case VCAP_TYPE_ES2:
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM, 0);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Trap action not supported in this VCAP");
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int sparx5_tc_action_vlan_pop(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ u16 tpid)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN pop action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_OUTER_TAG,
+ SPX5_OTAG_UNTAG);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int sparx5_tc_action_vlan_modify(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act,
+ u16 tpid)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_OUTER_TAG,
+ SPX5_OTAG_TAG_A);
+ if (err)
+ return err;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN modify action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_TPID_SEL,
+ SPX5_TPID_A_8100);
+ break;
+ case ETH_P_8021AD:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_TPID_SEL,
+ SPX5_TPID_A_88A8);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_VID_SEL,
+ SPX5_VID_A_VAL);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_VID_A_VAL,
+ act->vlan.vid);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_PCP_SEL,
+ SPX5_PCP_A_VAL);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PCP_A_VAL,
+ act->vlan.prio);
+ if (err)
+ return err;
+
+ return vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_DEI_SEL,
+ SPX5_DEI_A_CLASSIFIED);
+}
+
+static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act,
+ u16 tpid)
+{
+ u16 act_tpid = be16_to_cpu(act->vlan.proto);
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN push action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ if (tpid == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot push on double tagged frames");
+ return -EOPNOTSUPP;
+ }
+
+ err = sparx5_tc_action_vlan_modify(admin, vrule, fco, act, act_tpid);
+ if (err)
+ return err;
+
+ switch (act_tpid) {
+ case ETH_P_8021Q:
+ break;
+ case ETH_P_8021AD:
+ /* Push classified tag as inner tag */
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_INNER_TAG,
+ SPX5_ITAG_PUSH_B_TAG);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_B_TPID_SEL,
+ SPX5_TPID_B_CLASSIFIED);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ return err;
+}
+
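+/* Roughly the commands the three VLAN handlers above offload to the
+ * ES0 VCAP - a sketch assuming standard iproute2 tc-flower syntax;
+ * the device name and VLAN values are placeholders:
+ *
+ *	tc filter add dev eth0 egress protocol 802.1Q flower skip_sw \
+ *		action vlan pop
+ *	tc filter add dev eth0 egress protocol 802.1Q flower skip_sw \
+ *		action vlan modify id 20 priority 3
+ *	tc filter add dev eth0 egress protocol 802.1Q flower skip_sw \
+ *		action vlan push id 20 protocol 802.1ad
+ */
+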
static int sparx5_tc_flower_replace(struct net_device *ndev,
struct flow_cls_offload *fco,
- struct vcap_admin *admin)
+ struct vcap_admin *admin,
+ bool ingress)
{
+ struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
+ struct netlink_ext_ack *extack = fco->common.extack;
+ int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = fco,
+ .l3_proto = ETH_P_ALL,
+ .admin = admin,
+ };
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5_multiple_rules multi = {};
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_psfp_sg sg = { 0 };
+ struct sparx5_psfp_fm fm = { 0 };
struct flow_action_entry *act;
struct vcap_control *vctrl;
struct flow_rule *frule;
struct vcap_rule *vrule;
- u16 l3_proto;
- int err, idx;
vctrl = port->sparx5->vcap_ctrl;
- err = sparx5_tc_flower_action_check(vctrl, fco, admin);
+ err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
if (err)
return err;
@@ -844,8 +1032,9 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
vrule->cookie = fco->cookie;
- l3_proto = ETH_P_ALL;
- err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
+ state.vrule = vrule;
+ state.frule = flow_cls_offload_flow_rule(fco);
+ err = sparx5_tc_use_dissectors(&state, admin, vrule);
if (err)
goto out;
@@ -853,38 +1042,69 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
if (err)
goto out;
+ err = sparx5_tc_add_rule_link_target(admin, vrule,
+ fco->common.chain_index);
+ if (err)
+ goto out;
+
frule = flow_cls_offload_flow_rule(fco);
flow_action_for_each(idx, act, &frule->action) {
switch (act->id) {
+ case FLOW_ACTION_GATE: {
+ err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);
+ if (err < 0)
+ goto out;
+
+ tc_sg_idx = act->hw_index;
+
+ break;
+ }
+ case FLOW_ACTION_POLICE: {
+ err = sparx5_tc_flower_parse_act_police(&fm.pol, act,
+ extack);
+ if (err < 0)
+ goto out;
+
+ tc_pol_idx = fm.pol.idx;
+ sf.max_sdu = act->police.mtu;
+
+ break;
+ }
case FLOW_ACTION_TRAP:
- err = vcap_rule_add_action_bit(vrule,
- VCAP_AF_CPU_COPY_ENA,
- VCAP_BIT_1);
+ err = sparx5_tc_action_trap(admin, vrule, fco);
if (err)
goto out;
- err = vcap_rule_add_action_u32(vrule,
- VCAP_AF_CPU_QUEUE_NUM, 0);
+ break;
+ case FLOW_ACTION_ACCEPT:
+ err = sparx5_tc_set_actionset(admin, vrule);
if (err)
goto out;
- err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
- SPX5_PMM_REPLACE_ALL);
+ break;
+ case FLOW_ACTION_GOTO:
+ err = sparx5_tc_set_actionset(admin, vrule);
if (err)
goto out;
- /* For now the actionset is hardcoded */
- err = vcap_set_rule_set_actionset(vrule,
- VCAP_AFS_BASE_TYPE);
+ sparx5_tc_add_rule_link(vctrl, admin, vrule,
+ fco->common.chain_index,
+ act->chain_index);
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ err = sparx5_tc_action_vlan_pop(admin, vrule, fco,
+ state.tpid);
if (err)
goto out;
break;
- case FLOW_ACTION_ACCEPT:
- /* For now the actionset is hardcoded */
- err = vcap_set_rule_set_actionset(vrule,
- VCAP_AFS_BASE_TYPE);
+ case FLOW_ACTION_VLAN_PUSH:
+ err = sparx5_tc_action_vlan_push(admin, vrule, fco,
+ act, state.tpid);
if (err)
goto out;
break;
- case FLOW_ACTION_GOTO:
- /* Links between VCAPs will be added later */
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = sparx5_tc_action_vlan_modify(admin, vrule, fco,
+ act, state.tpid);
+ if (err)
+ goto out;
break;
default:
NL_SET_ERR_MSG_MOD(fco->common.extack,
@@ -894,8 +1114,16 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
}
}
- err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
- &multi);
+ /* Set up PSFP */
+ if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
+ err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
+ tc_pol_idx, &sg, &fm, &sf);
+ if (err)
+ goto out;
+ }
+
+ err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin,
+ state.l3_proto, &multi);
if (err) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"No matching port keyset for filter protocol and keys");
@@ -903,7 +1131,7 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
}
/* provide the l3 protocol to guide the keyset selection */
- err = vcap_val_rule(vrule, l3_proto);
+ err = vcap_val_rule(vrule, state.l3_proto);
if (err) {
vcap_set_tc_exterr(fco, vrule);
goto out;
@@ -913,7 +1141,7 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Could not add the filter");
- if (l3_proto == ETH_P_ALL)
+ if (state.l3_proto == ETH_P_ALL)
err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
&multi);
@@ -922,19 +1150,86 @@ out:
return err;
}
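+
+/* Free the PSFP stream filter, stream gate and flow meter that a rule
+ * references through its VCAP_AF_ISDX_VAL action, then clear the ISDX
+ * mapping. Deletion failures are only logged, since rule removal must
+ * proceed regardless.
+ */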
+static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
+ struct vcap_rule *vrule)
+{
+ struct vcap_client_actionfield *afield;
+ u32 isdx, sfid, sgid, fmid;
+
+ /* Check if the VCAP_AF_ISDX_VAL action is set for this rule, i.e.
+ * whether it is used for stream and/or flow-meter classification.
+ */
+ afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);
+ if (!afield)
+ return;
+
+ isdx = afield->data.u32.value;
+ sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);
+
+ if (!sfid)
+ return;
+
+ fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
+ sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);
+
+ if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
+ pr_err("%s:%d Could not delete fmid: %d\n", __func__,
+ __LINE__, fmid);
+
+ if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
+ pr_err("%s:%d Could not delete sgid: %d\n", __func__,
+ __LINE__, sgid);
+
+ if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
+ pr_err("%s:%d Could not delete sfid: %d\n", __func__,
+ __LINE__, sfid);
+
+ sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
+}
+
+static int sparx5_tc_free_rule_resources(struct net_device *ndev,
+ struct vcap_control *vctrl,
+ int rule_id)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct vcap_rule *vrule;
+
+ vrule = vcap_get_rule(vctrl, rule_id);
+ if (!vrule || IS_ERR(vrule))
+ return -EINVAL;
+
+ sparx5_tc_free_psfp_resources(sparx5, vrule);
+
+ vcap_free_rule(vrule);
+ return 0;
+}
+
static int sparx5_tc_flower_destroy(struct net_device *ndev,
struct flow_cls_offload *fco,
struct vcap_admin *admin)
{
struct sparx5_port *port = netdev_priv(ndev);
+ int err = -ENOENT, count = 0, rule_id;
struct vcap_control *vctrl;
- int err = -ENOENT, rule_id;
vctrl = port->sparx5->vcap_ctrl;
while (true) {
rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
if (rule_id <= 0)
break;
+ if (count == 0) {
+ /* Resources are attached to the first rule of
+ * a set of rules. This only works if the rules
+ * are iterated in the correct order.
+ */
+ err = sparx5_tc_free_rule_resources(ndev, vctrl,
+ rule_id);
+ if (err)
+ pr_err("%s:%d: could not free resources %d\n",
+ __func__, __LINE__, rule_id);
+ }
err = vcap_del_rule(vctrl, ndev, rule_id);
if (err) {
pr_err("%s:%d: could not delete rule %d\n",
@@ -945,44 +1240,21 @@ static int sparx5_tc_flower_destroy(struct net_device *ndev,
return err;
}
-/* Collect packet counts from all rules with the same cookie */
-static int sparx5_tc_rule_counter_cb(void *arg, struct vcap_rule *rule)
-{
- struct sparx5_tc_rule_pkt_cnt *rinfo = arg;
- struct vcap_counter counter;
- int err = 0;
-
- if (rule->cookie == rinfo->cookie) {
- err = vcap_rule_get_counter(rule, &counter);
- if (err)
- return err;
- rinfo->pkts += counter.value;
- /* Reset the rule counter */
- counter.value = 0;
- vcap_rule_set_counter(rule, &counter);
- }
- return err;
-}
-
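+/* Report packet counts for all rules sharing this tc cookie. It is
+ * assumed that vcap_get_rule_count_by_cookie() also resets the rule
+ * counters, as the iteration callback it replaces did.
+ */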
static int sparx5_tc_flower_stats(struct net_device *ndev,
struct flow_cls_offload *fco,
struct vcap_admin *admin)
{
struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5_tc_rule_pkt_cnt rinfo = {};
+ struct vcap_counter ctr = {};
struct vcap_control *vctrl;
ulong lastused = 0;
- u64 drops = 0;
- u32 pkts = 0;
int err;
- rinfo.cookie = fco->cookie;
vctrl = port->sparx5->vcap_ctrl;
- err = vcap_rule_iter(vctrl, sparx5_tc_rule_counter_cb, &rinfo);
+ err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
if (err)
return err;
- pkts = rinfo.pkts;
- flow_stats_update(&fco->stats, 0x0, pkts, drops, lastused,
+ flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return err;
}
@@ -1005,7 +1277,7 @@ int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
switch (fco->command) {
case FLOW_CLS_REPLACE:
- return sparx5_tc_flower_replace(ndev, fco, admin);
+ return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
case FLOW_CLS_DESTROY:
return sparx5_tc_flower_destroy(ndev, fco, admin);
case FLOW_CLS_STATS:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
index 30dd61e5d150..d88a93f22606 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
@@ -31,6 +31,7 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
switch (action->id) {
case FLOW_ACTION_GOTO:
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
+ tmo->common.chain_index,
action->chain_index, tmo->cookie,
true);
if (err == -EFAULT) {
@@ -43,6 +44,11 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
"VCAP already enabled");
return -EOPNOTSUPP;
}
+ if (err == -EADDRNOTAVAIL) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Already matching this chain");
+ return -EOPNOTSUPP;
+ }
if (err) {
NL_SET_ERR_MSG_MOD(tmo->common.extack,
"Could not enable VCAP lookups");
@@ -66,8 +72,8 @@ static int sparx5_tc_matchall_destroy(struct net_device *ndev,
sparx5 = port->sparx5;
if (!tmo->rule && tmo->cookie) {
- err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, 0,
- tmo->cookie, false);
+ err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
+ 0, 0, tmo->cookie, false);
if (err)
return err;
return 0;
@@ -80,12 +86,6 @@ int sparx5_tc_matchall(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress)
{
- if (!tc_cls_can_offload_and_chain0(ndev, &tmo->common)) {
- NL_SET_ERR_MSG_MOD(tmo->common.extack,
- "Only chain zero is supported");
- return -EOPNOTSUPP;
- }
-
switch (tmo->command) {
case TC_CLSMATCHALL_REPLACE:
return sparx5_tc_matchall_replace(ndev, tmo, ingress);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
index 1bd987c664e8..556d6ea0acd1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
* Microchip VCAP API
*/
-/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200.
- * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86
+/* This file is autogenerated by cml-utils 2023-02-10 11:15:56 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
*/
#include <linux/types.h>
@@ -14,6 +14,372 @@
#include "sparx5_vcap_ag_api.h"
/* keyfields */
+static const struct vcap_field is0_normal_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 110,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 113,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 114,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 129,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 132,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 133,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 145,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 193,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 241,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 242,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 243,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 260,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 261,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 263,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 264,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 265,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 271,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 399,
+ .width = 128,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 527,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 528,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 529,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 545,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 19,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 114,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 134,
+ .width = 12,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 148,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 158,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 190,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 222,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 232,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 240,
+ .width = 32,
+ },
+};
+
static const struct vcap_field is2_mac_etype_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_U32,
@@ -967,7 +1333,971 @@ static const struct vcap_field is2_ip_7tuple_keyfield[] = {
},
};
+static const struct vcap_field es0_isdx_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_IF_EGR_PORT_NO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 13,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 27,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 99,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 147,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 195,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 196,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 212,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 276,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 277,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 98,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 177,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 210,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 232,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 233,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 234,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 185,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 193,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 202,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 330,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 460,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 477,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 493,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 516,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 517,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 100,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 229,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 237,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 253,
+ .width = 40,
+ },
+};
+
/* keyfield_set */
+static const struct vcap_set is0_keyfield_set[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = {
+ .type_id = 0,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
static const struct vcap_set is2_keyfield_set[] = {
[VCAP_KFS_MAC_ETYPE] = {
.type_id = 0,
@@ -1001,7 +2331,53 @@ static const struct vcap_set is2_keyfield_set[] = {
},
};
+static const struct vcap_set es0_keyfield_set[] = {
+ [VCAP_KFS_ISDX] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
/* keyfield_set map */
+static const struct vcap_field *is0_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
+};
+
static const struct vcap_field *is2_keyfield_set_map[] = {
[VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
[VCAP_KFS_ARP] = is2_arp_keyfield,
@@ -1011,7 +2387,25 @@ static const struct vcap_field *is2_keyfield_set_map[] = {
[VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
};
+static const struct vcap_field *es0_keyfield_set_map[] = {
+ [VCAP_KFS_ISDX] = es0_isdx_keyfield,
+};
+
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
+};
+
/* keyfield_set map sizes */
+static int is0_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
+};
+
static int is2_keyfield_set_map_size[] = {
[VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
[VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
@@ -1021,7 +2415,319 @@ static int is2_keyfield_set_map_size[] = {
[VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
};
+static int es0_keyfield_set_map_size[] = {
+ [VCAP_KFS_ISDX] = ARRAY_SIZE(es0_isdx_keyfield),
+};
+
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
+};
+
/* actionfields */
+static const struct vcap_field is0_classification_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 68,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 69,
+ .width = 12,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 117,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 171,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_full_actionfield[] = {
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 12,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 80,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 83,
+ .width = 65,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 204,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 212,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 298,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 301,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_class_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 12,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 12,
+ },
+};
+
static const struct vcap_field is2_base_type_actionfield[] = {
[VCAP_AF_PIPELINE_FORCE_ENA] = {
.type = VCAP_FIELD_BIT,
@@ -1110,7 +2816,276 @@ static const struct vcap_field is2_base_type_actionfield[] = {
},
};
+static const struct vcap_field es0_es0_actionfield[] = {
+ [VCAP_AF_PUSH_OUTER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_AF_PUSH_INNER_TAG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 3,
+ },
+ [VCAP_AF_VID_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 34,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_A_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_AF_VID_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_B_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_VID_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_C_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_AF_POP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 2,
+ },
+ [VCAP_AF_UNTAG_VID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_C_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 2,
+ },
+ [VCAP_AF_DSCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 6,
+ },
+ [VCAP_AF_ESDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 323,
+ .width = 13,
+ },
+ [VCAP_AF_FWD_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 459,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_QU] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 3,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 464,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 466,
+ .width = 1,
+ },
+ [VCAP_AF_SWAP_MACS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 475,
+ .width = 1,
+ },
+ [VCAP_AF_LOOP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 476,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 16,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 7,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 33,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 34,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 6,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 11,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+};
+
/* actionfield_set */
+static const struct vcap_set is0_actionfield_set[] = {
+ [VCAP_AFS_CLASSIFICATION] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_FULL] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_AFS_CLASS_REDUCED] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+};
+
static const struct vcap_set is2_actionfield_set[] = {
[VCAP_AFS_BASE_TYPE] = {
.type_id = -1,
@@ -1119,17 +3094,196 @@ static const struct vcap_set is2_actionfield_set[] = {
},
};
+static const struct vcap_set es0_actionfield_set[] = {
+ [VCAP_AFS_ES0] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
/* actionfield_set map */
+static const struct vcap_field *is0_actionfield_set_map[] = {
+ [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
+ [VCAP_AFS_FULL] = is0_full_actionfield,
+ [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
+};
+
static const struct vcap_field *is2_actionfield_set_map[] = {
[VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
};
+static const struct vcap_field *es0_actionfield_set_map[] = {
+ [VCAP_AFS_ES0] = es0_es0_actionfield,
+};
+
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
/* actionfield_set map size */
+static int is0_actionfield_set_map_size[] = {
+ [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
+ [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
+ [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
+};
+
static int is2_actionfield_set_map_size[] = {
[VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
};
+static int es0_actionfield_set_map_size[] = {
+ [VCAP_AFS_ES0] = ARRAY_SIZE(es0_es0_actionfield),
+};
+
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
/* Type Groups */
+static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 5,
+ .value = 16,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 4,
+ .value = 0,
+ },
+ {
+ .offset = 364,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 416,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 520,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 572,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 4,
+ .value = 8,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 52,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
{
.offset = 0,
@@ -1176,6 +3330,70 @@ static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
{}
};
+static const struct vcap_typegroup es0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
+ [12] = is0_x12_keyfield_set_typegroups,
+ [6] = is0_x6_keyfield_set_typegroups,
+ [3] = is0_x3_keyfield_set_typegroups,
+ [2] = is0_x2_keyfield_set_typegroups,
+ [1] = is0_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
[12] = is2_x12_keyfield_set_typegroups,
[6] = is2_x6_keyfield_set_typegroups,
@@ -1184,6 +3402,61 @@ static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
[13] = NULL,
};
+static const struct vcap_typegroup *es0_keyfield_set_typegroups[] = {
+ [1] = es0_x1_keyfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 110,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
{
.offset = 0,
@@ -1207,36 +3480,122 @@ static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
{}
};
+static const struct vcap_typegroup es0_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 21,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 42,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
+ [3] = is0_x3_actionfield_set_typegroups,
+ [2] = is0_x2_actionfield_set_typegroups,
+ [1] = is0_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
[3] = is2_x3_actionfield_set_typegroups,
[1] = is2_x1_actionfield_set_typegroups,
[13] = NULL,
};
+static const struct vcap_typegroup *es0_actionfield_set_typegroups[] = {
+ [1] = es0_x1_actionfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
/* Keyfieldset names */
static const char * const vcap_keyfield_set_names[] = {
[VCAP_KFS_NO_VALUE] = "(None)",
[VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
[VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
[VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
[VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
[VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
[VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
};
/* Actionfieldset names */
static const char * const vcap_actionfield_set_names[] = {
[VCAP_AFS_NO_VALUE] = "(None)",
[VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
};
/* Keyfield names */
static const char * const vcap_keyfield_names[] = {
[VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
[VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
[VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
[VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
[VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
[VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
[VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
[VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
@@ -1244,25 +3603,46 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
[VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
[VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
[VCAP_KF_ETYPE] = "ETYPE",
[VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
[VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
[VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
[VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
[VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
[VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
[VCAP_KF_ISDX_CLS] = "ISDX_CLS",
[VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
[VCAP_KF_L2_BC_IS] = "L2_BC_IS",
[VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
[VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
[VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
[VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
[VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
[VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
[VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
[VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
[VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
[VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
[VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
[VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
@@ -1273,6 +3653,8 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L3_RT_IS] = "L3_RT_IS",
[VCAP_KF_L3_TOS] = "L3_TOS",
[VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
[VCAP_KF_L4_ACK] = "L4_ACK",
[VCAP_KF_L4_DPORT] = "L4_DPORT",
[VCAP_KF_L4_FIN] = "L4_FIN",
@@ -1286,9 +3668,19 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L4_SYN] = "L4_SYN",
[VCAP_KF_L4_URG] = "L4_URG",
[VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
[VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
[VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
[VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
[VCAP_KF_TCP_IS] = "TCP_IS",
[VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
[VCAP_KF_TYPE] = "TYPE",
@@ -1297,27 +3689,116 @@ static const char * const vcap_keyfield_names[] = {
/* Actionfield names */
static const char * const vcap_actionfield_names[] = {
[VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
[VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
[VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
[VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
[VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
[VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
[VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
[VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
[VCAP_AF_MASK_MODE] = "MASK_MODE",
[VCAP_AF_MATCH_ID] = "MATCH_ID",
[VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
[VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
[VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
[VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
[VCAP_AF_POLICE_ENA] = "POLICE_ENA",
[VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
[VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
[VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
};
/* VCAPs */
const struct vcap_info sparx5_vcaps[] = {
+ [VCAP_TYPE_IS0] = {
+ .name = "is0",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 140,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set),
+ .actionfield_set = is0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set),
+ .keyfield_set_map = is0_keyfield_set_map,
+ .keyfield_set_map_size = is0_keyfield_set_map_size,
+ .actionfield_set_map = is0_actionfield_set_map,
+ .actionfield_set_map_size = is0_actionfield_set_map_size,
+ .keyfield_set_typegroups = is0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is0_actionfield_set_typegroups,
+ },
[VCAP_TYPE_IS2] = {
.name = "is2",
.rows = 256,
@@ -1339,11 +3820,53 @@ const struct vcap_info sparx5_vcaps[] = {
.keyfield_set_typegroups = is2_keyfield_set_typegroups,
.actionfield_set_typegroups = is2_actionfield_set_typegroups,
},
+ [VCAP_TYPE_ES0] = {
+ .name = "es0",
+ .rows = 4096,
+ .sw_count = 1,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 489,
+ .default_cnt = 70,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es0_keyfield_set),
+ .actionfield_set = es0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es0_actionfield_set),
+ .keyfield_set_map = es0_keyfield_set_map,
+ .keyfield_set_map_size = es0_keyfield_set_map_size,
+ .actionfield_set_map = es0_actionfield_set_map,
+ .actionfield_set_map_size = es0_actionfield_set_map_size,
+ .keyfield_set_typegroups = es0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 21,
+ .default_cnt = 74,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
};
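[Reviewer cross-check of the geometry above (illustrative, not from the patch): a VCAP exposes rows * sw_count X1 addresses and its widest key is sw_count * sw_width bits. That gives ES2 1024 * 12 = 12288 addresses and ES0 4096 * 1 = 4096 — both matching the .count values used for the egress instances later in this patch — and a 624-bit X12 key. Hypothetical helpers expressing the same arithmetic:]

/* Hypothetical helpers; derive addressing from the vcap_info fields */
static inline u32 vcap_addr_count(const struct vcap_info *vcap)
{
	return vcap->rows * vcap->sw_count;	/* ES2: 1024 * 12 = 12288 */
}

static inline u32 vcap_max_key_width(const struct vcap_info *vcap)
{
	return vcap->sw_count * vcap->sw_width;	/* ES2: 12 * 52 = 624 bits */
}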
const struct vcap_statistics sparx5_vcap_stats = {
.name = "sparx5",
- .count = 1,
+ .count = 4,
.keyfield_set_names = vcap_keyfield_set_names,
.actionfield_set_names = vcap_actionfield_set_names,
.keyfield_names = vcap_keyfield_names,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
index b91e05ffe2f4..07b472c84a47 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
@@ -13,10 +13,113 @@
#include "sparx5_vcap_impl.h"
#include "sparx5_vcap_ag_api.h"
-static void sparx5_vcap_port_keys(struct sparx5 *sparx5,
- struct vcap_admin *admin,
- struct sparx5_port *port,
- struct vcap_output_print *out)
+static const char *sparx5_vcap_is0_etype_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ return "default";
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ return "normal_7tuple";
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ return "normal_5tuple_ip4";
+ case VCAP_IS0_PS_ETYPE_MLL:
+ return "mll";
+ case VCAP_IS0_PS_ETYPE_LL_FULL:
+ return "ll_full";
+ case VCAP_IS0_PS_ETYPE_PURE_5TUPLE_IP4:
+ return "pure_5tuple_ip4";
+ case VCAP_IS0_PS_ETYPE_ETAG:
+ return "etag";
+ case VCAP_IS0_PS_ETYPE_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *sparx5_vcap_is0_mpls_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_MPLS_FOLLOW_ETYPE:
+ return "follow_etype";
+ case VCAP_IS0_PS_MPLS_NORMAL_7TUPLE:
+ return "normal_7tuple";
+ case VCAP_IS0_PS_MPLS_NORMAL_5TUPLE_IP4:
+ return "normal_5tuple_ip4";
+ case VCAP_IS0_PS_MPLS_MLL:
+ return "mll";
+ case VCAP_IS0_PS_MPLS_LL_FULL:
+ return "ll_full";
+ case VCAP_IS0_PS_MPLS_PURE_5TUPLE_IP4:
+ return "pure_5tuple_ip4";
+ case VCAP_IS0_PS_MPLS_ETAG:
+ return "etag";
+ case VCAP_IS0_PS_MPLS_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *sparx5_vcap_is0_mlbs_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_MLBS_FOLLOW_ETYPE:
+ return "follow_etype";
+ case VCAP_IS0_PS_MLBS_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static void sparx5_vcap_is0_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value, val;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5,
+ ANA_CL_ADV_CL_CFG(port->portno, lookup));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+ val = ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n etype: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n ipv4: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n ipv6: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mpls_uc: %s",
+ sparx5_vcap_is0_mpls_str(val));
+ val = ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mpls_mc: %s",
+ sparx5_vcap_is0_mpls_str(val));
+ val = ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mlbs: %s",
+ sparx5_vcap_is0_mlbs_str(val));
+ }
+ out->prf(out->dst, "\n");
+}
+
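[For orientation, roughly the output shape this produces in the debugfs port-info file; the port number and netdev name here are made up:]

/* Illustrative output (indentation approximate):
 *   port[00] (eth0):
 *    Lookup 0:
 *      state: on
 *      etype: normal_7tuple
 *      ipv4: normal_7tuple
 *      ipv6: normal_7tuple
 *      mpls_uc: follow_etype
 *      mpls_mc: follow_etype
 *      mlbs: follow_etype
 *    Lookup 1: ...
 */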
+static void sparx5_vcap_is2_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
{
int lookup;
u32 value;
@@ -29,7 +132,7 @@ static void sparx5_vcap_port_keys(struct sparx5 *sparx5,
/* Get lookup state */
value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_CFG(port->portno));
out->prf(out->dst, "\n state: ");
- if (ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(value))
+ if (ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(value) & BIT(lookup))
out->prf(out->dst, "on");
else
out->prf(out->dst, "off");
@@ -126,9 +229,9 @@ static void sparx5_vcap_port_keys(struct sparx5 *sparx5,
out->prf(out->dst, "\n");
}
-static void sparx5_vcap_port_stickies(struct sparx5 *sparx5,
- struct vcap_admin *admin,
- struct vcap_output_print *out)
+static void sparx5_vcap_is2_port_stickies(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
{
int lookup;
u32 value;
@@ -181,6 +284,157 @@ static void sparx5_vcap_port_stickies(struct sparx5 *sparx5,
out->prf(out->dst, "\n");
}
+static void sparx5_vcap_es0_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ out->prf(out->dst, "\n Lookup 0: ");
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, REW_ES0_CTRL);
+ out->prf(out->dst, "\n state: ");
+ if (REW_ES0_CTRL_ES0_LU_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n keyset: ");
+ value = spx5_rd(sparx5, REW_RTAG_ETAG_CTRL(port->portno));
+ switch (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(value)) {
+ case VCAP_ES0_PS_NORMAL_SELECTION:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_ES0_PS_FORCE_ISDX_LOOKUPS:
+ out->prf(out->dst, "isdx");
+ break;
+ case VCAP_ES0_PS_FORCE_VID_LOOKUPS:
+ out->prf(out->dst, "vid");
+ break;
+ case VCAP_ES0_PS_RESERVED:
+ out->prf(out->dst, "reserved");
+ break;
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_es2_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, EACL_VCAP_ES2_KEY_SEL(port->portno,
+ lookup));
+ out->prf(out->dst, "\n state: ");
+ if (EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n arp: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_ARP_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_ARP_ARP:
+ out->prf(out->dst, "arp");
+ break;
+ }
+ out->prf(out->dst, "\n ipv4: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV4_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_IPV4_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID:
+ out->prf(out->dst, "ip4_tcp_udp ip4_vid");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_VID:
+ out->prf(out->dst, "ip4_vid");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_OTHER:
+ out->prf(out->dst, "ip4_other");
+ break;
+ }
+ out->prf(out->dst, "\n ipv6: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_VID:
+ out->prf(out->dst, "ip_7tuple ip6_vid");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_STD:
+ out->prf(out->dst, "ip_7tuple ip6_std");
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_VID:
+ out->prf(out->dst, "ip6_vid");
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_STD:
+ out->prf(out->dst, "ip6_std");
+ break;
+ case VCAP_ES2_PS_IPV6_IP4_DOWNGRADE:
+ out->prf(out->dst, "ip4_downgrade");
+ break;
+ }
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_es2_port_stickies(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " Sticky bits: ");
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ value = spx5_rd(sparx5, EACL_SEC_LOOKUP_STICKY(lookup));
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(value))
+ out->prf(out->dst, " ip_7tuple");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip6_vid");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(value))
+ out->prf(out->dst, " ip6_std");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(value))
+ out->prf(out->dst, " ip4_tcp_udp");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip4_vid");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(value))
+ out->prf(out->dst, " ip4_other");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(value))
+ out->prf(out->dst, " arp");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(value))
+ out->prf(out->dst, " mac_etype");
+ /* Clear stickies */
+ spx5_wr(value, sparx5, EACL_SEC_LOOKUP_STICKY(lookup));
+ }
+ out->prf(out->dst, "\n");
+}
+
/* Provide port information via a callback interface */
int sparx5_port_info(struct net_device *ndev,
struct vcap_admin *admin,
@@ -194,7 +448,24 @@ int sparx5_port_info(struct net_device *ndev,
vctrl = sparx5->vcap_ctrl;
vcap = &vctrl->vcaps[admin->vtype];
out->prf(out->dst, "%s:\n", vcap->name);
- sparx5_vcap_port_keys(sparx5, admin, port, out);
- sparx5_vcap_port_stickies(sparx5, admin, out);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_port_keys(sparx5, admin, port, out);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_port_keys(sparx5, admin, port, out);
+ sparx5_vcap_is2_port_stickies(sparx5, admin, out);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_port_keys(sparx5, admin, port, out);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_port_keys(sparx5, admin, port, out);
+ sparx5_vcap_es2_port_stickies(sparx5, admin, out);
+ break;
+ default:
+ out->prf(out->dst, " no info\n");
+ break;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index a0c126ba9a87..d0d4e0385ac7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -27,6 +27,28 @@
ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))
+#define SPARX5_IS0_LOOKUPS 6
+#define VCAP_IS0_KEYSEL(_ena, _etype, _ipv4, _ipv6, _mpls_uc, _mpls_mc, _mlbs) \
+ (ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(_ena) | \
+ ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(_etype) | \
+ ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(_ipv4) | \
+ ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_SET(_ipv6) | \
+ ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_SET(_mpls_uc) | \
+ ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(_mpls_mc) | \
+ ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(_mlbs))
+
+#define SPARX5_ES0_LOOKUPS 1
+#define VCAP_ES0_KEYSEL(_key) (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(_key))
+#define SPARX5_STAT_ESDX_GRN_PKTS 0x300
+#define SPARX5_STAT_ESDX_YEL_PKTS 0x301
+
+#define SPARX5_ES2_LOOKUPS 2
+#define VCAP_ES2_KEYSEL(_ena, _arp, _ipv4, _ipv6) \
+ (EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(_ena) | \
+ EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(_arp) | \
+ EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(_ipv4) | \
+ EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(_ipv6))
+
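[A hedged usage sketch for the key-selection macros; example_is0_keysel() is a hypothetical name, and the driver's actual port defaults are programmed elsewhere in this patch. The composed value goes to the per-port, per-lookup selection register:]

/* Sketch only: enable IS0 lookup 0 on a port with NORMAL_7TUPLE for
 * ETYPE/IPv4/IPv6 frames and follow_etype for the MPLS/MLBS selectors.
 */
static void example_is0_keysel(struct sparx5 *sparx5, int portno)
{
	u32 keysel = VCAP_IS0_KEYSEL(true,
				     VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
				     VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
				     VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
				     VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
				     VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
				     VCAP_IS0_PS_MLBS_FOLLOW_ETYPE);

	spx5_wr(keysel, sparx5, ANA_CL_ADV_CL_CFG(portno, 0));
}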
static struct sparx5_vcap_inst {
enum vcap_type vtype; /* type of vcap */
int vinst; /* instance number within the same type */
@@ -38,8 +60,45 @@ static struct sparx5_vcap_inst {
int map_id; /* id in the super vcap block mapping (if applicable) */
int blockno; /* starting block in super vcap (if applicable) */
int blocks; /* number of blocks in super vcap (if applicable) */
+ bool ingress; /* is vcap in the ingress path */
} sparx5_vcap_inst_cfg[] = {
{
+ .vtype = VCAP_TYPE_IS0, /* CLM-0 */
+ .vinst = 0,
+ .map_id = 1,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L0,
+ .last_cid = SPARX5_VCAP_CID_IS0_L2 - 1,
+ .blockno = 8, /* Maps block 8-9 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-1 */
+ .vinst = 1,
+ .map_id = 2,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L2,
+ .last_cid = SPARX5_VCAP_CID_IS0_L4 - 1,
+ .blockno = 6, /* Maps block 6-7 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-2 */
+ .vinst = 2,
+ .map_id = 3,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L4,
+ .last_cid = SPARX5_VCAP_CID_IS0_MAX,
+ .blockno = 4, /* Maps block 4-5 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
.vtype = VCAP_TYPE_IS2, /* IS2-0 */
.vinst = 0,
.map_id = 4,
@@ -49,6 +108,7 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
.blockno = 0, /* Maps block 0-1 */
.blocks = 2,
+ .ingress = true,
},
{
.vtype = VCAP_TYPE_IS2, /* IS2-1 */
@@ -60,9 +120,59 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS2_MAX,
.blockno = 2, /* Maps block 2-3 */
.blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_ES0,
+ .lookups = SPARX5_ES0_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES0_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES0_L0,
+ .last_cid = SPARX5_VCAP_CID_ES0_MAX,
+ .count = 4096, /* Addresses according to datasheet */
+ .ingress = false,
+ },
+ {
+ .vtype = VCAP_TYPE_ES2,
+ .lookups = SPARX5_ES2_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES2_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES2_L0,
+ .last_cid = SPARX5_VCAP_CID_ES2_MAX,
+ .count = 12288, /* Addresses according to datasheet */
+ .ingress = false,
},
};
+/* These protocols have dedicated keysets in IS0 and a TC dissector */
+static u16 sparx5_vcap_is0_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+/* These protocols have dedicated keysets in IS2 and a TC dissector */
+static u16 sparx5_vcap_is2_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_ARP,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+/* These protocols have dedicated keysets in ES2 and a TC dissector */
+static u16 sparx5_vcap_es2_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_ARP,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+static void sparx5_vcap_type_err(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ const char *fname)
+{
+ pr_err("%s: vcap type: %s not supported\n",
+ fname, sparx5_vcaps[admin->vtype].name);
+}
+
/* Await the super VCAP completion of the current operation */
static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5)
{
@@ -73,25 +183,81 @@ static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5)
false, sparx5, VCAP_SUPER_CTRL);
}
-/* Initializing a VCAP address range: only IS2 for now */
+/* Await the ES0 VCAP completion of the current operation */
+static void sparx5_vcap_wait_es0_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_ES0_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_ES0_CTRL);
+}
+
+/* Await the ES2 VCAP completion of the current operation */
+static void sparx5_vcap_wait_es2_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_ES2_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_ES2_CTRL);
+}
+
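[Background note, not from the patch: read_poll_timeout() re-reads the register until the SHOT bit clears, sampling every 500 us for at most 10 ms, with no sleep before the first read. Roughly equivalent open-coded logic — example_wait_shot() is hypothetical and omits the timeout bookkeeping the real helper provides:]

static void example_wait_shot(struct sparx5 *sparx5)
{
	u32 value;

	/* read first (sleep_before_read is false above), then poll */
	value = spx5_rd(sparx5, VCAP_ES2_CTRL);
	while (VCAP_ES2_CTRL_UPDATE_SHOT_GET(value)) {
		usleep_range(500, 550);
		value = spx5_rd(sparx5, VCAP_ES2_CTRL);
	}
}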
+/* Initializing a VCAP address range */
static void _sparx5_vcap_range_init(struct sparx5 *sparx5,
struct vcap_admin *admin,
u32 addr, u32 count)
{
u32 size = count - 1;
- spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
- VCAP_SUPER_CFG_MV_SIZE_SET(size),
- sparx5, VCAP_SUPER_CFG);
- spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
- VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
- VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
- VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
- sparx5, VCAP_SUPER_CTRL);
- sparx5_vcap_wait_super_update(sparx5);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+ break;
+ case VCAP_TYPE_ES0:
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES0_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+ break;
+ case VCAP_TYPE_ES2:
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES2_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
/* Initializing VCAP rule data area */
@@ -112,6 +278,17 @@ static const char *sparx5_vcap_keyset_name(struct net_device *ndev,
return vcap_keyset_name(port->sparx5->vcap_ctrl, keyset);
}
+/* Check if this is the first lookup of IS0 */
+static bool sparx5_vcap_is0_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L1) ||
+ ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L2 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L3)) ||
+ ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L4 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L5));
+}
+
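[Reviewer annotation, inferred from the instance table earlier in this patch rather than stated in it: each of the three IS0 (CLM) instances hosts two of the six lookups, so the three chain-id windows above select the first lookup of each instance.]

/* Chain-id windows vs. lookups (illustrative):
 *   IS0_L0 .. IS0_L1-1  ->  lookup 0, first in CLM-0
 *   IS0_L2 .. IS0_L3-1  ->  lookup 2, first in CLM-1
 *   IS0_L4 .. IS0_L5-1  ->  lookup 4, first in CLM-2
 */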
/* Check if this is the first lookup of IS2 */
static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule)
{
@@ -121,9 +298,15 @@ static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule)
rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L3));
}
+static bool sparx5_vcap_es2_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_ES2_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_ES2_L1);
+}
+
/* Set the narrow range ingress port mask on a rule */
-static void sparx5_vcap_add_range_port_mask(struct vcap_rule *rule,
- struct net_device *ndev)
+static void sparx5_vcap_add_ingress_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
{
struct sparx5_port *port = netdev_priv(ndev);
u32 port_mask;
@@ -153,12 +336,51 @@ static void sparx5_vcap_add_wide_port_mask(struct vcap_rule *rule,
vcap_rule_add_key_u72(rule, VCAP_KF_IF_IGR_PORT_MASK, &port_mask);
}
-/* Convert chain id to vcap lookup id */
-static int sparx5_vcap_cid_to_lookup(int cid)
+static void sparx5_vcap_add_egress_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 port_mask;
+ u32 range;
+
+ /* Mask range selects:
+ * 0-2: Physical/Logical egress port number 0-31, 32-63, 64.
+ * 3-5: Virtual Interface Number 0-31, 32-63, 64.
+ * 6: CPU queue Number 0-7.
+ *
+ * Use physical/logical port ranges (0-2)
+ */
+ range = port->portno / BITS_PER_TYPE(u32);
+ /* Port bit set to match-any */
+ port_mask = ~BIT(port->portno % BITS_PER_TYPE(u32));
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK_RNG, range, 0xf);
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK, 0, port_mask);
+}
+
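[A worked example of the arithmetic above, using a made-up port number:]

/* Illustrative: for egress port 40,
 *   range     = 40 / 32       = 1         (the 32-63 window)
 *   port_mask = ~BIT(40 % 32) = ~BIT(8)
 * With value 0 and that mask, every other bit in the selected 32-bit
 * window must be zero while the port's own bit stays don't-care
 * ("match any"), so only frames egressing via this port can hit.
 */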
+/* Convert IS0 chain id to vcap lookup id */
+static int sparx5_vcap_is0_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= SPARX5_VCAP_CID_IS0_L1 && cid < SPARX5_VCAP_CID_IS0_L2)
+ lookup = 1;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L2 && cid < SPARX5_VCAP_CID_IS0_L3)
+ lookup = 2;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L3 && cid < SPARX5_VCAP_CID_IS0_L4)
+ lookup = 3;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L4 && cid < SPARX5_VCAP_CID_IS0_L5)
+ lookup = 4;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L5 && cid < SPARX5_VCAP_CID_IS0_MAX)
+ lookup = 5;
+
+ return lookup;
+}
+
+/* Convert IS2 chain id to vcap lookup id */
+static int sparx5_vcap_is2_cid_to_lookup(int cid)
{
int lookup = 0;
- /* For now only handle IS2 */
if (cid >= SPARX5_VCAP_CID_IS2_L1 && cid < SPARX5_VCAP_CID_IS2_L2)
lookup = 1;
else if (cid >= SPARX5_VCAP_CID_IS2_L2 && cid < SPARX5_VCAP_CID_IS2_L3)
@@ -169,6 +391,86 @@ static int sparx5_vcap_cid_to_lookup(int cid)
return lookup;
}
+/* Convert ES2 chain id to vcap lookup id */
+static int sparx5_vcap_es2_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= SPARX5_VCAP_CID_ES2_L1)
+ lookup = 1;
+
+ return lookup;
+}
+
+/* Add ethernet type IS0 keyset to a list */
+static void
+sparx5_vcap_is0_get_port_etype_keysets(struct vcap_keyset_list *keysetlist,
+ u32 value)
+{
+ switch (ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_is0_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, ANA_CL_ADV_CL_CFG(portno, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL)
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist, value);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP)
+ switch (ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist,
+ value);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6)
+ switch (ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist,
+ value);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+
+ if (l3_proto != ETH_P_IP && l3_proto != ETH_P_IPV6)
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist, value);
+ return 0;
+}
+
/* Return the list of keysets for the vcap port configuration */
static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
int lookup,
@@ -180,10 +482,7 @@ static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
int portno = port->portno;
u32 value;
- /* Check if the port keyset selection is enabled */
value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
- if (!ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_GET(value))
- return -ENOENT;
/* Collect all keysets for the port in a list */
if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
@@ -274,6 +573,121 @@ static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
return 0;
}
+/* Return the keysets for the vcap port IP4 traffic class configuration */
+static void
+sparx5_vcap_es2_get_port_ipv4_keysets(struct vcap_keyset_list *keysetlist,
+ u32 value)
+{
+ switch (EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV4_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_IPV4_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_VID:
+ /* Not used */
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ }
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_es0_get_port_keysets(struct net_device *ndev,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, REW_RTAG_ETAG_CTRL(portno));
+
+ /* Collect all keysets for the port in a list */
+ switch (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(value)) {
+ case VCAP_ES0_PS_NORMAL_SELECTION:
+ case VCAP_ES0_PS_FORCE_ISDX_LOOKUPS:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ISDX);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_es2_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ switch (EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_ARP_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_ARP_ARP:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP)
+ sparx5_vcap_es2_get_port_ipv4_keysets(keysetlist, value);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV6_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_VID:
+ /* Not used */
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_ES2_PS_IPV6_IP4_DOWNGRADE:
+ sparx5_vcap_es2_get_port_ipv4_keysets(keysetlist,
+ value);
+ break;
+ }
+ }
+
+ if (l3_proto != ETH_P_ARP && l3_proto != ETH_P_IP &&
+ l3_proto != ETH_P_IPV6) {
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ }
+ return 0;
+}
+
/* Get the port keyset for the vcap lookup */
int sparx5_vcap_get_port_keyset(struct net_device *ndev,
struct vcap_admin *admin,
@@ -281,10 +695,64 @@ int sparx5_vcap_get_port_keyset(struct net_device *ndev,
u16 l3_proto,
struct vcap_keyset_list *kslist)
{
- int lookup;
+ int lookup, err = -EINVAL;
+ struct sparx5_port *port;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ lookup = sparx5_vcap_is0_cid_to_lookup(cid);
+ err = sparx5_vcap_is0_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = sparx5_vcap_is2_cid_to_lookup(cid);
+ err = sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ err = sparx5_vcap_es0_get_port_keysets(ndev, kslist, l3_proto);
+ break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(cid);
+ err = sparx5_vcap_es2_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
+ return err;
+}
+
+/* Check if the ethertype is supported by the vcap port classification */
+bool sparx5_vcap_is_known_etype(struct vcap_admin *admin, u16 etype)
+{
+ const u16 *known_etypes;
+ int size, idx;
- lookup = sparx5_vcap_cid_to_lookup(cid);
- return sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist, l3_proto);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ known_etypes = sparx5_vcap_is0_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_is0_known_etypes);
+ break;
+ case VCAP_TYPE_IS2:
+ known_etypes = sparx5_vcap_is2_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_is2_known_etypes);
+ break;
+ case VCAP_TYPE_ES0:
+ return true;
+ case VCAP_TYPE_ES2:
+ known_etypes = sparx5_vcap_es2_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_es2_known_etypes);
+ break;
+ default:
+ return false;
+ }
+ for (idx = 0; idx < size; ++idx)
+ if (known_etypes[idx] == etype)
+ return true;
+ return false;
}
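[A minimal caller sketch for the new helper; example_check_proto() and the extack plumbing are hypothetical here, though this mirrors typical TC offload validation:]

/* Sketch only: reject rules whose ethertype has no dedicated keyset
 * in this VCAP instance.
 */
static int example_check_proto(struct vcap_admin *admin, u16 l3_proto,
			       struct netlink_ext_ack *extack)
{
	if (!sparx5_vcap_is_known_etype(admin, l3_proto)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
		return -EOPNOTSUPP;
	}
	return 0;
}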
/* API callback used for validating a field keyset (check the port keysets) */
@@ -297,16 +765,40 @@ sparx5_vcap_validate_keyset(struct net_device *ndev,
{
struct vcap_keyset_list keysetlist = {};
enum vcap_keyfield_set keysets[10] = {};
+ struct sparx5_port *port;
int idx, jdx, lookup;
if (!kslist || kslist->cnt == 0)
return VCAP_KFS_NO_VALUE;
- /* Get a list of currently configured keysets in the lookups */
- lookup = sparx5_vcap_cid_to_lookup(rule->vcap_chain_id);
keysetlist.max = ARRAY_SIZE(keysets);
keysetlist.keysets = keysets;
- sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist, l3_proto);
+
+ /* Get a list of currently configured keysets in the lookups */
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ lookup = sparx5_vcap_is0_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_is0_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = sparx5_vcap_is2_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_get_port_keysets(ndev, &keysetlist, l3_proto);
+ break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_es2_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
/* Check if there is a match and return the match */
for (idx = 0; idx < kslist->cnt; ++idx)
@@ -321,27 +813,97 @@ sparx5_vcap_validate_keyset(struct net_device *ndev,
return -ENOENT;
}
-/* API callback used for adding default fields to a rule */
-static void sparx5_vcap_add_default_fields(struct net_device *ndev,
- struct vcap_admin *admin,
- struct vcap_rule *rule)
+static void sparx5_vcap_ingress_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
{
const struct vcap_field *field;
+ bool is_first;
+ /* Add ingress port mask matching the net device */
field = vcap_lookup_keyfield(rule, VCAP_KF_IF_IGR_PORT_MASK);
if (field && field->width == SPX5_PORTS)
sparx5_vcap_add_wide_port_mask(rule, ndev);
else if (field && field->width == BITS_PER_TYPE(u32))
- sparx5_vcap_add_range_port_mask(rule, ndev);
+ sparx5_vcap_add_ingress_range_port_mask(rule, ndev);
else
pr_err("%s:%d: %s: could not add an ingress port mask for: %s\n",
__func__, __LINE__, netdev_name(ndev),
sparx5_vcap_keyset_name(ndev, rule->keyset));
- /* add the lookup bit */
- if (sparx5_vcap_is2_is_first_chain(rule))
- vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+
+ if (admin->vtype == VCAP_TYPE_IS0)
+ is_first = sparx5_vcap_is0_is_first_chain(rule);
else
- vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
+ is_first = sparx5_vcap_is2_is_first_chain(rule);
+
+ /* Add key that selects the first/second lookup */
+ if (is_first)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+static void sparx5_vcap_es0_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_NO, port->portno, ~0);
+ /* Match untagged frames if there was no VLAN key */
+ vcap_rule_add_key_u32(rule, VCAP_KF_8021Q_TPID, SPX5_TPID_SEL_UNTAGGED,
+ ~0);
+}
+
+static void sparx5_vcap_es2_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ const struct vcap_field *field;
+ bool is_first;
+
+ /* Add egress port mask matching the net device */
+ field = vcap_lookup_keyfield(rule, VCAP_KF_IF_EGR_PORT_MASK);
+ if (field)
+ sparx5_vcap_add_egress_range_port_mask(rule, ndev);
+
+ /* Add key that selects the first/second lookup */
+ is_first = sparx5_vcap_es2_is_first_chain(rule);
+
+ if (is_first)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+/* API callback used for adding default fields to a rule */
+static void sparx5_vcap_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct sparx5_port *port;
+
+ /* add the default fields for the vcap type */
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_ingress_add_default_fields(ndev, admin, rule);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_add_default_fields(ndev, admin, rule);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_add_default_fields(ndev, admin, rule);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
}
/* API callback used for erasing the vcap cache area (not the register area) */
@@ -353,21 +915,60 @@ static void sparx5_vcap_cache_erase(struct vcap_admin *admin)
memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
}
-/* API callback used for writing to the VCAP cache */
-static void sparx5_vcap_cache_write(struct net_device *ndev,
- struct vcap_admin *admin,
- enum vcap_selection sel,
- u32 start,
- u32 count)
+static void sparx5_vcap_is0_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+
+ if (sel & VCAP_SEL_COUNTER)
+ spx5_wr(admin->cache.counter, sparx5,
+ VCAP_SUPER_VCAP_CNT_DAT(0));
+}
+
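[Editorial gloss on the 'match-off' comment, not author text: the driver cache keeps a care-mask (1 = bit must match) while the hardware MASK_DAT register uses the inverse sense, and an entry bit of 1 paired with a don't-care mask bit is the never-matching "match-off" cell state. Writing value & mask keeps don't-care bits out of that state:]

/* Per-bit encoding (illustrative):
 *   cache value  cache mask | ENTRY_DAT = v&m  MASK_DAT = ~m | meaning
 *        x            0     |        0               1       | don't care
 *        0            1     |        0               0       | match 0
 *        1            1     |        1               0       | match 1
 * The (ENTRY=1, MASK=1) combination -- "match-off" -- is never produced.
 */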
+static void sparx5_vcap_is2_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
{
- struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5 = port->sparx5;
u32 *keystr, *mskstr, *actstr;
int idx;
keystr = &admin->cache.keystream[start];
mskstr = &admin->cache.maskstream[start];
actstr = &admin->cache.actionstream[start];
+
switch (sel) {
case VCAP_SEL_ENTRY:
for (idx = 0; idx < count; ++idx) {
@@ -403,21 +1004,143 @@ static void sparx5_vcap_cache_write(struct net_device *ndev,
}
}
-/* API callback used for reading from the VCAP into the VCAP cache */
-static void sparx5_vcap_cache_read(struct net_device *ndev,
- struct vcap_admin *admin,
- enum vcap_selection sel,
- u32 start,
- u32 count)
+/* Use ESDX counters located in the XQS */
+static void sparx5_es0_write_esdx_counter(struct sparx5 *sparx5,
+ struct vcap_admin *admin, u32 id)
+{
+ mutex_lock(&sparx5->queue_stats_lock);
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(id), sparx5, XQS_STAT_CFG);
+ spx5_wr(admin->cache.counter, sparx5,
+ XQS_CNT(SPARX5_STAT_ESDX_GRN_PKTS));
+ spx5_wr(0, sparx5, XQS_CNT(SPARX5_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&sparx5->queue_stats_lock);
+}
+
+static void sparx5_vcap_es0_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_ES0_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_ES0_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_ES0_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ spx5_wr(admin->cache.counter, sparx5, VCAP_ES0_VCAP_CNT_DAT(0));
+ sparx5_es0_write_esdx_counter(sparx5, admin, start);
+ }
+}
+
+static void sparx5_vcap_es2_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_ES2_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_ES2_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_ES2_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0x7ff; /* counter limit */
+ spx5_wr(admin->cache.counter, sparx5, EACL_ES2_CNT(start));
+ spx5_wr(admin->cache.sticky, sparx5, VCAP_ES2_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for writing to the VCAP cache */
+static void sparx5_vcap_cache_write(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
{
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_cache_write(sparx5, admin, sel, start, count);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+static void sparx5_vcap_is0_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
u32 *keystr, *mskstr, *actstr;
int idx;
keystr = &admin->cache.keystream[start];
mskstr = &admin->cache.maskstream[start];
actstr = &admin->cache.actionstream[start];
+
if (sel & VCAP_SEL_ENTRY) {
for (idx = 0; idx < count; ++idx) {
keystr[idx] = spx5_rd(sparx5,
@@ -426,11 +1149,47 @@ static void sparx5_vcap_cache_read(struct net_device *ndev,
VCAP_SUPER_VCAP_MASK_DAT(idx));
}
}
- if (sel & VCAP_SEL_ACTION) {
+
+ if (sel & VCAP_SEL_ACTION)
for (idx = 0; idx < count; ++idx)
actstr[idx] = spx5_rd(sparx5,
VCAP_SUPER_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+static void sparx5_vcap_is2_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] = ~spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
}
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+
if (sel & VCAP_SEL_COUNTER) {
start = start & 0xfff; /* counter limit */
if (admin->vinst == 0)
@@ -444,6 +1203,121 @@ static void sparx5_vcap_cache_read(struct net_device *ndev,
}
}
+/* Use ESDX counters located in the XQS */
+static void sparx5_es0_read_esdx_counter(struct sparx5 *sparx5,
+ struct vcap_admin *admin, u32 id)
+{
+ u32 counter;
+
+ mutex_lock(&sparx5->queue_stats_lock);
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(id), sparx5, XQS_STAT_CFG);
+ counter = spx5_rd(sparx5, XQS_CNT(SPARX5_STAT_ESDX_GRN_PKTS)) +
+ spx5_rd(sparx5, XQS_CNT(SPARX5_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&sparx5->queue_stats_lock);
+ if (counter)
+ admin->cache.counter = counter;
+}
+
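[One design point worth calling out (reviewer inference): XQS_STAT_CFG is a single global view selector shared by all queue-statistics users, hence the lock around the select-then-access sequence; green and yellow ESDX packet counts are folded into one rule counter, and a zero read leaves the cached counter untouched.]

/* Sequence (illustrative): select the ESDX "view", after which the
 * XQS_CNT window exposes that ESDX's statistics:
 *   1. mutex_lock(&sparx5->queue_stats_lock)
 *   2. XQS_STAT_CFG.STAT_VIEW = esdx id
 *   3. access XQS_CNT(0x300) green and XQS_CNT(0x301) yellow
 *   4. mutex_unlock(&sparx5->queue_stats_lock)
 */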
+static void sparx5_vcap_es0_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] =
+ ~spx5_rd(sparx5, VCAP_ES0_VCAP_MASK_DAT(idx));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_CNT_DAT(0));
+ admin->cache.sticky = admin->cache.counter;
+ sparx5_es0_read_esdx_counter(sparx5, admin, start);
+ }
+}
+
+static void sparx5_vcap_es2_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] =
+ ~spx5_rd(sparx5, VCAP_ES2_VCAP_MASK_DAT(idx));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0x7ff; /* counter limit */
+ admin->cache.counter =
+ spx5_rd(sparx5, EACL_ES2_CNT(start));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for reading from the VCAP into the VCAP cache */
+static void sparx5_vcap_cache_read(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_cache_read(sparx5, admin, sel, start, count);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
/* API callback used for initializing a VCAP address range */
static void sparx5_vcap_range_init(struct net_device *ndev,
struct vcap_admin *admin, u32 addr,
@@ -455,16 +1329,12 @@ static void sparx5_vcap_range_init(struct net_device *ndev,
_sparx5_vcap_range_init(sparx5, admin, addr, count);
}
-/* API callback used for updating the VCAP cache */
-static void sparx5_vcap_update(struct net_device *ndev,
- struct vcap_admin *admin, enum vcap_command cmd,
- enum vcap_selection sel, u32 addr)
+static void sparx5_vcap_super_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
{
- struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5 = port->sparx5;
- bool clear;
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
- clear = (cmd == VCAP_CMD_INITIALIZE);
spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
VCAP_SUPER_CFG_MV_SIZE_SET(0), sparx5, VCAP_SUPER_CFG);
spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
@@ -478,24 +1348,75 @@ static void sparx5_vcap_update(struct net_device *ndev,
sparx5_vcap_wait_super_update(sparx5);
}
-/* API callback used for moving a block of rules in the VCAP */
-static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
- u32 addr, int offset, int count)
+static void sparx5_vcap_es0_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES0_CFG_MV_SIZE_SET(0), sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+}
+
+static void sparx5_vcap_es2_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES2_CFG_MV_SIZE_SET(0), sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+}
+
+/* API callback used for updating the VCAP cache */
+static void sparx5_vcap_update(struct net_device *ndev,
+ struct vcap_admin *admin, enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
{
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5 *sparx5 = port->sparx5;
- enum vcap_command cmd;
- u16 mv_num_pos;
- u16 mv_size;
- mv_size = count - 1;
- if (offset > 0) {
- mv_num_pos = offset - 1;
- cmd = VCAP_CMD_MOVE_DOWN;
- } else {
- mv_num_pos = -offset - 1;
- cmd = VCAP_CMD_MOVE_UP;
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_super_update(sparx5, cmd, sel, addr);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_update(sparx5, cmd, sel, addr);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_update(sparx5, cmd, sel, addr);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
}
+}
+
+static void sparx5_vcap_super_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(mv_num_pos) |
VCAP_SUPER_CFG_MV_SIZE_SET(mv_size),
sparx5, VCAP_SUPER_CFG);
@@ -510,29 +1431,82 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
sparx5_vcap_wait_super_update(sparx5);
}
-/* Enable all lookups in the VCAP instance */
-static int sparx5_vcap_enable(struct net_device *ndev,
- struct vcap_admin *admin,
- bool enable)
+static void sparx5_vcap_es0_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_ES0_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+}
+
+static void sparx5_vcap_es2_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_ES2_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+}
+
+/* API callback used for moving a block of rules in the VCAP */
+static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
+ u32 addr, int offset, int count)
{
struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5;
- int portno;
+ struct sparx5 *sparx5 = port->sparx5;
+ enum vcap_command cmd;
+ u16 mv_num_pos;
+ u16 mv_size;
- sparx5 = port->sparx5;
- portno = port->portno;
+ mv_size = count - 1;
+ if (offset > 0) {
+ mv_num_pos = offset - 1;
+ cmd = VCAP_CMD_MOVE_DOWN;
+ } else {
+ mv_num_pos = -offset - 1;
+ cmd = VCAP_CMD_MOVE_UP;
+ }
- /* For now we only consider IS2 */
- if (enable)
- spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), sparx5,
- ANA_ACL_VCAP_S2_CFG(portno));
- else
- spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0), sparx5,
- ANA_ACL_VCAP_S2_CFG(portno));
- return 0;
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_super_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
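
The move command encodes the block size and the move distance both minus one, with the direction carried by the command itself. A worked example with made-up numbers shows the translation done above:

    /* Sketch: moving a block of count = 3 rules by 2 addresses */
    u16 mv_size = 3 - 1;	/* encoded block size: 2 */
    u16 mv_num_pos = 2 - 1;	/* encoded distance: 1 */
    /* offset = +2 selects VCAP_CMD_MOVE_DOWN,
     * offset = -2 selects VCAP_CMD_MOVE_UP with the same mv_num_pos
     */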
-/* API callback operations: only IS2 is supported for now */
static struct vcap_operations sparx5_vcap_ops = {
.validate_keyset = sparx5_vcap_validate_keyset,
.add_default_fields = sparx5_vcap_add_default_fields,
@@ -543,19 +1517,41 @@ static struct vcap_operations sparx5_vcap_ops = {
.update = sparx5_vcap_update,
.move = sparx5_vcap_move,
.port_info = sparx5_port_info,
- .enable = sparx5_vcap_enable,
};
-/* Enable lookups per port and set the keyset generation: only IS2 for now */
-static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
- struct vcap_admin *admin)
+/* Enable IS0 lookups per port and set the keyset generation */
+static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+ keysel = VCAP_IS0_KEYSEL(false,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MLBS_FOLLOW_ETYPE);
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ spx5_wr(keysel, sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ }
+ }
+}
+
+/* Enable IS2 lookups per port and set the keyset generation */
+static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
{
int portno, lookup;
u32 keysel;
- /* all traffic types generate the MAC_ETYPE keyset for now in all
- * lookups on all ports
- */
keysel = VCAP_IS2_KEYSEL(true, VCAP_IS2_PS_NONETH_MAC_ETYPE,
VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER,
VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER,
@@ -568,19 +1564,107 @@ static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
}
}
+ /* IS2 lookups are in bits 0:3 */
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf),
+ ANA_ACL_VCAP_S2_CFG_SEC_ENA,
+ sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
}
-/* Disable lookups per port and set the keyset generation: only IS2 for now */
-static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
- struct vcap_admin *admin)
+/* Enable ES0 lookups per port and set the keyset generation */
+static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
{
int portno;
+ u32 keysel;
+ keysel = VCAP_ES0_KEYSEL(VCAP_ES0_PS_FORCE_ISDX_LOOKUPS);
for (portno = 0; portno < SPX5_PORTS; ++portno)
- spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
- ANA_ACL_VCAP_S2_CFG_SEC_ENA,
- sparx5,
- ANA_ACL_VCAP_S2_CFG(portno));
+ spx5_rmw(keysel, REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA,
+ sparx5, REW_RTAG_ETAG_CTRL(portno));
+
+ spx5_rmw(REW_ES0_CTRL_ES0_LU_ENA_SET(1), REW_ES0_CTRL_ES0_LU_ENA,
+ sparx5, REW_ES0_CTRL);
+}
+
+/* Enable ES2 lookups per port and set the keyset generation */
+static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+ keysel = VCAP_ES2_KEYSEL(true, VCAP_ES2_PS_ARP_MAC_ETYPE,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE);
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_wr(keysel, sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+}
+
+/* Enable lookups per port and set the keyset generation */
+static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_port_key_selection(sparx5, admin);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+/* Disable lookups per port */
+static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(0),
+ ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ case VCAP_TYPE_IS2:
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
+ ANA_ACL_VCAP_S2_CFG_SEC_ENA,
+ sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
+ break;
+ case VCAP_TYPE_ES0:
+ spx5_rmw(REW_ES0_CTRL_ES0_LU_ENA_SET(0),
+ REW_ES0_CTRL_ES0_LU_ENA, sparx5, REW_ES0_CTRL);
+ break;
+ case VCAP_TYPE_ES2:
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(0),
+ EACL_VCAP_ES2_KEY_SEL_KEY_ENA,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
static void sparx5_vcap_admin_free(struct vcap_admin *admin)
@@ -610,6 +1694,7 @@ sparx5_vcap_admin_alloc(struct sparx5 *sparx5, struct vcap_control *ctrl,
mutex_init(&admin->lock);
admin->vtype = cfg->vtype;
admin->vinst = cfg->vinst;
+ admin->ingress = cfg->ingress;
admin->lookups = cfg->lookups;
admin->lookups_per_instance = cfg->lookups_per_instance;
admin->first_cid = cfg->first_cid;
@@ -633,22 +1718,55 @@ static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
struct vcap_admin *admin,
const struct sparx5_vcap_inst *cfg)
{
- int idx;
-
- /* Super VCAP block mapping and address configuration. Block 0
- * is assigned addresses 0 through 3071, block 1 is assigned
- * addresses 3072 though 6143, and so on.
- */
- for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks; ++idx) {
- spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
- VCAP_SUPER_IDX);
- spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id), sparx5,
- VCAP_SUPER_MAP);
- }
- admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
- admin->last_used_addr = admin->first_valid_addr +
- cfg->blocks * SUPER_VCAP_BLK_SIZE;
- admin->last_valid_addr = admin->last_used_addr - 1;
+ int idx, cores;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ /* Super VCAP block mapping and address configuration. Block 0
+ * is assigned addresses 0 through 3071, block 1 is assigned
+ * addresses 3072 though 6143, and so on.
+ */
+ for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks;
+ ++idx) {
+ spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_SUPER_IDX);
+ spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id),
+ sparx5, VCAP_SUPER_MAP);
+ }
+ admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
+ admin->last_used_addr = admin->first_valid_addr +
+ cfg->blocks * SUPER_VCAP_BLK_SIZE;
+ admin->last_valid_addr = admin->last_used_addr - 1;
+ break;
+ case VCAP_TYPE_ES0:
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+ cores = spx5_rd(sparx5, VCAP_ES0_CORE_CNT);
+ for (idx = 0; idx < cores; ++idx) {
+ spx5_wr(VCAP_ES0_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_ES0_IDX);
+ spx5_wr(VCAP_ES0_MAP_CORE_MAP_SET(1), sparx5,
+ VCAP_ES0_MAP);
+ }
+ break;
+ case VCAP_TYPE_ES2:
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+ cores = spx5_rd(sparx5, VCAP_ES2_CORE_CNT);
+ for (idx = 0; idx < cores; ++idx) {
+ spx5_wr(VCAP_ES2_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_ES2_IDX);
+ spx5_wr(VCAP_ES2_MAP_CORE_MAP_SET(1), sparx5,
+ VCAP_ES2_MAP);
+ }
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
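
The address bookkeeping follows directly from the block comment above: each super-VCAP block spans 3072 addresses. Checking with hypothetical cfg values:

    /* Sketch: cfg->blockno = 2, cfg->blocks = 2, SUPER_VCAP_BLK_SIZE = 3072 */
    admin->first_valid_addr = 2 * 3072;		/* 6144 */
    admin->last_used_addr = 6144 + 2 * 3072;	/* 12288, one past the range */
    admin->last_valid_addr = 12288 - 1;		/* 12287 */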
/* Allocate a vcap control and vcap instances and configure the system */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
index 0a0f2412c980..3260ab5e3a82 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -16,6 +16,15 @@
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define SPARX5_VCAP_CID_IS0_L0 VCAP_CID_INGRESS_L0 /* IS0/CLM lookup 0 */
+#define SPARX5_VCAP_CID_IS0_L1 VCAP_CID_INGRESS_L1 /* IS0/CLM lookup 1 */
+#define SPARX5_VCAP_CID_IS0_L2 VCAP_CID_INGRESS_L2 /* IS0/CLM lookup 2 */
+#define SPARX5_VCAP_CID_IS0_L3 VCAP_CID_INGRESS_L3 /* IS0/CLM lookup 3 */
+#define SPARX5_VCAP_CID_IS0_L4 VCAP_CID_INGRESS_L4 /* IS0/CLM lookup 4 */
+#define SPARX5_VCAP_CID_IS0_L5 VCAP_CID_INGRESS_L5 /* IS0/CLM lookup 5 */
+#define SPARX5_VCAP_CID_IS0_MAX \
+ (VCAP_CID_INGRESS_L5 + VCAP_CID_LOOKUP_SIZE - 1) /* IS0/CLM Max */
+
#define SPARX5_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
#define SPARX5_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
#define SPARX5_VCAP_CID_IS2_L2 VCAP_CID_INGRESS_STAGE2_L2 /* IS2 lookup 2 */
@@ -23,6 +32,63 @@
#define SPARX5_VCAP_CID_IS2_MAX \
(VCAP_CID_INGRESS_STAGE2_L3 + VCAP_CID_LOOKUP_SIZE - 1) /* IS2 Max */
+#define SPARX5_VCAP_CID_ES0_L0 VCAP_CID_EGRESS_L0 /* ES0 lookup 0 */
+#define SPARX5_VCAP_CID_ES0_MAX (VCAP_CID_EGRESS_L1 - 1) /* ES0 Max */
+
+#define SPARX5_VCAP_CID_ES2_L0 VCAP_CID_EGRESS_STAGE2_L0 /* ES2 lookup 0 */
+#define SPARX5_VCAP_CID_ES2_L1 VCAP_CID_EGRESS_STAGE2_L1 /* ES2 lookup 1 */
+#define SPARX5_VCAP_CID_ES2_MAX \
+ (VCAP_CID_EGRESS_STAGE2_L1 + VCAP_CID_LOOKUP_SIZE - 1) /* ES2 Max */
+
+/* IS0 port keyset selection control */
+
+/* IS0 ethernet, IPv4, IPv6 traffic type keyset generation */
+enum vcap_is0_port_sel_etype {
+ VCAP_IS0_PS_ETYPE_DEFAULT, /* None or follow depending on class */
+ VCAP_IS0_PS_ETYPE_MLL,
+ VCAP_IS0_PS_ETYPE_SGL_MLBS,
+ VCAP_IS0_PS_ETYPE_DBL_MLBS,
+ VCAP_IS0_PS_ETYPE_TRI_MLBS,
+ VCAP_IS0_PS_ETYPE_TRI_VID,
+ VCAP_IS0_PS_ETYPE_LL_FULL,
+ VCAP_IS0_PS_ETYPE_NORMAL_SRC,
+ VCAP_IS0_PS_ETYPE_NORMAL_DST,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_PURE_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_DBL_VID_IDX,
+ VCAP_IS0_PS_ETYPE_ETAG,
+ VCAP_IS0_PS_ETYPE_NO_LOOKUP,
+};
+
+/* IS0 MPLS traffic type keyset generation */
+enum vcap_is0_port_sel_mpls_uc_mc {
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MPLS_MLL,
+ VCAP_IS0_PS_MPLS_SGL_MLBS,
+ VCAP_IS0_PS_MPLS_DBL_MLBS,
+ VCAP_IS0_PS_MPLS_TRI_MLBS,
+ VCAP_IS0_PS_MPLS_TRI_VID,
+ VCAP_IS0_PS_MPLS_LL_FULL,
+ VCAP_IS0_PS_MPLS_NORMAL_SRC,
+ VCAP_IS0_PS_MPLS_NORMAL_DST,
+ VCAP_IS0_PS_MPLS_NORMAL_7TUPLE,
+ VCAP_IS0_PS_MPLS_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_MPLS_PURE_5TUPLE_IP4,
+ VCAP_IS0_PS_MPLS_DBL_VID_IDX,
+ VCAP_IS0_PS_MPLS_ETAG,
+ VCAP_IS0_PS_MPLS_NO_LOOKUP,
+};
+
+/* IS0 MLBS traffic type keyset generation */
+enum vcap_is0_port_sel_mlbs {
+ VCAP_IS0_PS_MLBS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MLBS_SGL_MLBS,
+ VCAP_IS0_PS_MLBS_DBL_MLBS,
+ VCAP_IS0_PS_MLBS_TRI_MLBS,
+ VCAP_IS0_PS_MLBS_NO_LOOKUP = 17,
+};
+
/* IS2 port keyset selection control */
/* IS2 non-ethernet traffic type keyset generation */
@@ -71,6 +137,57 @@ enum vcap_is2_port_sel_arp {
VCAP_IS2_PS_ARP_ARP,
};
+/* ES0 port keyset selection control */
+
+/* ES0 Egress port traffic type classification */
+enum vcap_es0_port_sel {
+ VCAP_ES0_PS_NORMAL_SELECTION,
+ VCAP_ES0_PS_FORCE_ISDX_LOOKUPS,
+ VCAP_ES0_PS_FORCE_VID_LOOKUPS,
+ VCAP_ES0_PS_RESERVED,
+};
+
+/* ES2 port keyset selection control */
+
+/* ES2 IPv4 traffic type keyset generation */
+enum vcap_es2_port_sel_ipv4 {
+ VCAP_ES2_PS_IPV4_MAC_ETYPE,
+ VCAP_ES2_PS_IPV4_IP_7TUPLE,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
+ VCAP_ES2_PS_IPV4_IP4_VID,
+ VCAP_ES2_PS_IPV4_IP4_OTHER,
+};
+
+/* ES2 IPv6 traffic type keyset generation */
+enum vcap_es2_port_sel_ipv6 {
+ VCAP_ES2_PS_IPV6_MAC_ETYPE,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE_VID,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE_STD,
+ VCAP_ES2_PS_IPV6_IP6_VID,
+ VCAP_ES2_PS_IPV6_IP6_STD,
+ VCAP_ES2_PS_IPV6_IP4_DOWNGRADE,
+};
+
+/* ES2 ARP traffic type keyset generation */
+enum vcap_es2_port_sel_arp {
+ VCAP_ES2_PS_ARP_MAC_ETYPE,
+ VCAP_ES2_PS_ARP_ARP,
+};
+
+/* Selects TPID for ES0 matching */
+enum SPX5_TPID_SEL {
+ SPX5_TPID_SEL_UNTAGGED,
+ SPX5_TPID_SEL_8100,
+ SPX5_TPID_SEL_UNUSED_0,
+ SPX5_TPID_SEL_UNUSED_1,
+ SPX5_TPID_SEL_88A8,
+ SPX5_TPID_SEL_TPIDCFG_1,
+ SPX5_TPID_SEL_TPIDCFG_2,
+ SPX5_TPID_SEL_TPIDCFG_3,
+};
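
For orientation: the two fixed selectors correspond to the well-known C-tag and S-tag TPIDs, while the TPIDCFG entries resolve through programmable switch registers. A hypothetical decode helper, not part of the driver:

    /* Sketch: map the fixed selectors to their TPID value; untagged,
     * unused and programmable (TPIDCFG_x) selections have no constant here.
     */
    static u16 spx5_tpid_fixed(enum SPX5_TPID_SEL sel)
    {
    	switch (sel) {
    	case SPX5_TPID_SEL_8100:
    		return 0x8100;	/* C-tag */
    	case SPX5_TPID_SEL_88A8:
    		return 0x88a8;	/* S-tag */
    	default:
    		return 0;
    	}
    }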
+
/* Get the port keyset for the vcap lookup */
int sparx5_vcap_get_port_keyset(struct net_device *ndev,
struct vcap_admin *admin,
@@ -78,4 +195,7 @@ int sparx5_vcap_get_port_keyset(struct net_device *ndev,
u16 l3_proto,
struct vcap_keyset_list *kslist);
+/* Check if the ethertype is supported by the vcap port classification */
+bool sparx5_vcap_is_known_etype(struct vcap_admin *admin, u16 etype);
+
#endif /* __SPARX5_VCAP_IMPL_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index 34f954bbf815..ac001ae59a38 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -219,8 +219,8 @@ void sparx5_vlan_port_apply(struct sparx5 *sparx5,
spx5_wr(val, sparx5,
ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));
- /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
- val = REW_TAG_CTRL_TAG_TPID_CFG_SET(0);
+ /* Egress configuration (REW_TAG_CFG): VLAN tag selected via IFH */
+ val = REW_TAG_CTRL_TAG_TPID_CFG_SET(5);
if (port->vlan_aware) {
if (port->vid)
/* Tag all frames except when VID == DEFAULT_VLAN */
diff --git a/drivers/net/ethernet/microchip/vcap/Makefile b/drivers/net/ethernet/microchip/vcap/Makefile
index 0adb8f5a8735..c86f20e6491f 100644
--- a/drivers/net/ethernet/microchip/vcap/Makefile
+++ b/drivers/net/ethernet/microchip/vcap/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_VCAP) += vcap.o
obj-$(CONFIG_VCAP_KUNIT_TEST) += vcap_model_kunit.o
vcap-$(CONFIG_DEBUG_FS) += vcap_api_debugfs.o
-vcap-y += vcap_api.o
+vcap-y += vcap_api.o vcap_tc.o
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
index 84de2aee4169..0844fcaeee68 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -1,16 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
* Microchip VCAP API
*/
-/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200.
- * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86
+/* This file is autogenerated by cml-utils 2023-02-10 11:15:56 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
*/
#ifndef __VCAP_AG_API__
#define __VCAP_AG_API__
enum vcap_type {
+ VCAP_TYPE_ES0,
VCAP_TYPE_ES2,
VCAP_TYPE_IS0,
VCAP_TYPE_IS2,
@@ -20,27 +21,25 @@ enum vcap_type {
/* Keyfieldset names with origin information */
enum vcap_keyfield_set {
VCAP_KFS_NO_VALUE, /* initial value */
- VCAP_KFS_ARP, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_ARP, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
VCAP_KFS_ETAG, /* sparx5 is0 X2 */
- VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6, sparx5 es2 X6 */
- VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
VCAP_KFS_IP4_VID, /* sparx5 es2 X3 */
- VCAP_KFS_IP6_STD, /* sparx5 is2 X6 */
- VCAP_KFS_IP6_VID, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_IP6_OTHER, /* lan966x is2 X4 */
+ VCAP_KFS_IP6_STD, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_IP6_TCP_UDP, /* lan966x is2 X4 */
+ VCAP_KFS_IP6_VID, /* sparx5 es2 X6 */
VCAP_KFS_IP_7TUPLE, /* sparx5 is2 X12, sparx5 es2 X12 */
+ VCAP_KFS_ISDX, /* sparx5 es0 X1 */
VCAP_KFS_LL_FULL, /* sparx5 is0 X6 */
- VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6, sparx5 es2 X6 */
- VCAP_KFS_MLL, /* sparx5 is0 X3 */
- VCAP_KFS_NORMAL, /* sparx5 is0 X6 */
- VCAP_KFS_NORMAL_5TUPLE_IP4, /* sparx5 is0 X6 */
- VCAP_KFS_NORMAL_7TUPLE, /* sparx5 is0 X12 */
- VCAP_KFS_PURE_5TUPLE_IP4, /* sparx5 is0 X3 */
- VCAP_KFS_TRI_VID, /* sparx5 is0 X2 */
+ VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
VCAP_KFS_MAC_LLC, /* lan966x is2 X2 */
VCAP_KFS_MAC_SNAP, /* lan966x is2 X2 */
+ VCAP_KFS_NORMAL_5TUPLE_IP4, /* sparx5 is0 X6 */
+ VCAP_KFS_NORMAL_7TUPLE, /* sparx5 is0 X12 */
VCAP_KFS_OAM, /* lan966x is2 X2 */
- VCAP_KFS_IP6_TCP_UDP, /* lan966x is2 X4 */
- VCAP_KFS_IP6_OTHER, /* lan966x is2 X4 */
+ VCAP_KFS_PURE_5TUPLE_IP4, /* sparx5 is0 X3 */
VCAP_KFS_SMAC_SIP4, /* lan966x is2 X1 */
VCAP_KFS_SMAC_SIP6, /* lan966x is2 X2 */
};
@@ -78,6 +77,8 @@ enum vcap_keyfield_set {
* Third PCP in multiple vlan tags (not always available)
* VCAP_KF_8021Q_PCP_CLS: W3, sparx5: is2/es2, lan966x: is2
* Classified PCP
+ * VCAP_KF_8021Q_TPID: W3, sparx5: es0
 * TPID for outer tag: 0: Customer TPID. 1: Service TPID (88A8 or programmable)
* VCAP_KF_8021Q_TPID0: W3, sparx5: is0
* First TPIC in multiple vlan tags (outer tag or default port tag)
* VCAP_KF_8021Q_TPID1: W3, sparx5: is0
@@ -90,7 +91,8 @@ enum vcap_keyfield_set {
* Second VID in multiple vlan tags (inner tag)
* VCAP_KF_8021Q_VID2: W12, sparx5: is0
* Third VID in multiple vlan tags (not always available)
- * VCAP_KF_8021Q_VID_CLS: W13, sparx5: is2/es2, lan966x is2 W12
+ * VCAP_KF_8021Q_VID_CLS: sparx5 is2 W13, sparx5 es0 W13, sparx5 es2 W13,
+ * lan966x is2 W12
* Classified VID
* VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2/es2, lan966x: is2
* Sparx5: Set if frame was received with a VLAN tag, LAN966x: Set if frame has
@@ -104,7 +106,7 @@ enum vcap_keyfield_set {
* Set if hardware address is Ethernet
* VCAP_KF_ARP_LEN_OK_IS: W1, sparx5: is2/es2, lan966x: is2
* Set if hardware address length = 6 (Ethernet) and IP address length = 4 (IP).
- * VCAP_KF_ARP_OPCODE: W2, sparx5: is2/es2, lan966x: i2
+ * VCAP_KF_ARP_OPCODE: W2, sparx5: is2/es2, lan966x: is2
* ARP opcode
* VCAP_KF_ARP_OPCODE_UNKNOWN_IS: W1, sparx5: is2/es2, lan966x: is2
* Set if not one of the codes defined in VCAP_KF_ARP_OPCODE
@@ -114,25 +116,25 @@ enum vcap_keyfield_set {
* Sender Hardware Address = SMAC (ARP)
* VCAP_KF_ARP_TGT_MATCH_IS: W1, sparx5: is2/es2, lan966x: is2
* Target Hardware Address = SMAC (RARP)
- * VCAP_KF_COSID_CLS: W3, sparx5: es2
+ * VCAP_KF_COSID_CLS: W3, sparx5: es0/es2
* Class of service
- * VCAP_KF_DST_ENTRY: W1, sparx5: is0
- * Selects whether the frame’s destination or source information is used for
- * fields L2_SMAC and L3_IP4_SIP
* VCAP_KF_ES0_ISDX_KEY_ENA: W1, sparx5: es2
* The value taken from the IFH .FWD.ES0_ISDX_KEY_ENA
* VCAP_KF_ETYPE: W16, sparx5: is0/is2/es2, lan966x: is2
* Ethernet type
* VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is0/is2/es2
* Set if frame has EtherType >= 0x600
- * VCAP_KF_ETYPE_MPLS: W2, sparx5: is0
- * Type of MPLS Ethertype (or not)
+ * VCAP_KF_HOST_MATCH: W1, lan966x: is2
+ * The action from the SMAC_SIP4 or SMAC_SIP6 lookups. Used for IP source
+ * guarding.
* VCAP_KF_IF_EGR_PORT_MASK: W32, sparx5: es2
* Egress port mask, one bit per port
* VCAP_KF_IF_EGR_PORT_MASK_RNG: W3, sparx5: es2
* Select which 32 port group is available in IF_EGR_PORT (or virtual ports or
* CPU queue)
- * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9
+ * VCAP_KF_IF_EGR_PORT_NO: W7, sparx5: es0
+ * Egress port number
+ * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9, lan966x is2 W4
* Sparx5: Logical ingress port number retrieved from
 * ANA_CL::PORT_ID_CFG.LPORT_NUM or ERLEG, LAN966x: ingress port number
* VCAP_KF_IF_IGR_PORT_MASK: sparx5 is0 W65, sparx5 is2 W32, sparx5 is2 W65,
@@ -152,45 +154,61 @@ enum vcap_keyfield_set {
* VCAP_KF_IP4_IS: W1, sparx5: is0/is2/es2, lan966x: is2
* Set if frame has EtherType = 0x800 and IP version = 4
* VCAP_KF_IP_MC_IS: W1, sparx5: is0
- * Set if frame is IPv4 frame and frame’s destination MAC address is an IPv4
- * multicast address (0x01005E0 /25). Set if frame is IPv6 frame and frame’s
+ * Set if frame is IPv4 frame and frame's destination MAC address is an IPv4
+ * multicast address (0x01005E0 /25). Set if frame is IPv6 frame and frame's
* destination MAC address is an IPv6 multicast address (0x3333/16).
* VCAP_KF_IP_PAYLOAD_5TUPLE: W32, sparx5: is0
* Payload bytes after IP header
* VCAP_KF_IP_SNAP_IS: W1, sparx5: is0
* Set if frame is IPv4, IPv6, or SNAP frame
- * VCAP_KF_ISDX_CLS: W12, sparx5: is2/es2
+ * VCAP_KF_ISDX_CLS: W12, sparx5: is2/es0/es2
* Classified ISDX
- * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2/es2, lan966x: is2
+ * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2/es0/es2, lan966x: is2
* Set if classified ISDX > 0
* VCAP_KF_L2_BC_IS: W1, sparx5: is0/is2/es2, lan966x: is2
- * Set if frame’s destination MAC address is the broadcast address
+ * Set if frame's destination MAC address is the broadcast address
* (FF-FF-FF-FF-FF-FF).
* VCAP_KF_L2_DMAC: W48, sparx5: is0/is2/es2, lan966x: is2
* Destination MAC address
+ * VCAP_KF_L2_FRM_TYPE: W4, lan966x: is2
+ * Frame subtype for specific EtherTypes (MRP, DLR)
* VCAP_KF_L2_FWD_IS: W1, sparx5: is2
* Set if the frame is allowed to be forwarded to front ports
- * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2, lan9966x is2
- * Set if frame’s destination MAC address is a multicast address (bit 40 = 1).
+ * VCAP_KF_L2_LLC: W40, lan966x: is2
+ * LLC header and data after up to two VLAN tags and the type/length field
+ * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if frame's destination MAC address is a multicast address (bit 40 = 1).
+ * VCAP_KF_L2_PAYLOAD0: W16, lan966x: is2
+ * Payload bytes 0-1 after the frame's EtherType
+ * VCAP_KF_L2_PAYLOAD1: W8, lan966x: is2
+ * Payload byte 4 after the frame's EtherType. This is specifically for PTP
+ * frames.
+ * VCAP_KF_L2_PAYLOAD2: W3, lan966x: is2
+ * Bits 7, 2, and 1 from payload byte 6 after the frame's EtherType. This is
+ * specifically for PTP frames.
* VCAP_KF_L2_PAYLOAD_ETYPE: W64, sparx5: is2/es2
* Byte 0-7 of L2 payload after Type/Len field and overloading for OAM
- * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2, lan966x is2
+ * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2, lan966x: is2
* Source MAC address
+ * VCAP_KF_L2_SNAP: W40, lan966x: is2
+ * SNAP header after LLC header (AA-AA-03)
* VCAP_KF_L3_DIP_EQ_SIP_IS: W1, sparx5: is2/es2, lan966x: is2
* Set if Src IP matches Dst IP address
- * VCAP_KF_L3_DMAC_DIP_MATCH: W1, sparx5: is2
- * Match found in DIP security lookup in ANA_L3
- * VCAP_KF_L3_DPL_CLS: W1, sparx5: es2
+ * VCAP_KF_L3_DPL_CLS: W1, sparx5: es0/es2
* The frames drop precedence level
* VCAP_KF_L3_DSCP: W6, sparx5: is0
- * Frame’s DSCP value
+ * Frame's DSCP value
* VCAP_KF_L3_DST_IS: W1, sparx5: is2
* Set if lookup is done for egress router leg
+ * VCAP_KF_L3_FRAGMENT: W1, lan966x: is2
+ * Set if IPv4 frame is fragmented
* VCAP_KF_L3_FRAGMENT_TYPE: W2, sparx5: is0/is2/es2
* L3 Fragmentation type (none, initial, suspicious, valid follow up)
* VCAP_KF_L3_FRAG_INVLD_L4_LEN: W1, sparx5: is0/is2
* Set if frame's L4 length is less than ANA_CL:COMMON:CLM_FRAGMENT_CFG.L4_MIN_L
* EN
+ * VCAP_KF_L3_FRAG_OFS_GT0: W1, lan966x: is2
+ * Set if IPv4 frame is fragmented and it is not the first fragment
* VCAP_KF_L3_IP4_DIP: W32, sparx5: is0/is2/es2, lan966x: is2
* Destination IPv4 Address
* VCAP_KF_L3_IP4_SIP: W32, sparx5: is0/is2/es2, lan966x: is2
@@ -205,36 +223,38 @@ enum vcap_keyfield_set {
* IPv4 frames: IP protocol. IPv6 frames: Next header, same as for IPV4
* VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is0/is2/es2, lan966x: is2
* Set if IPv4 frame contains options (IP len > 5)
- * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40, sparx5 es2 W96,
- * lan966x is2 W56
+ * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40, sparx5 es2 W96, sparx5
+ * es2 W40, lan966x is2 W56
* Sparx5: Payload bytes after IP header. IPv4: IPv4 options are not parsed so
* payload is always taken 20 bytes after the start of the IPv4 header, LAN966x:
* Bytes 0-6 after IP header
* VCAP_KF_L3_RT_IS: W1, sparx5: is2/es2
* Set if frame has hit a router leg
- * VCAP_KF_L3_SMAC_SIP_MATCH: W1, sparx5: is2
- * Match found in SIP security lookup in ANA_L3
* VCAP_KF_L3_TOS: W8, sparx5: is2/es2, lan966x: is2
* Sparx5: Frame's IPv4/IPv6 DSCP and ECN fields, LAN966x: IP TOS field
* VCAP_KF_L3_TTL_GT0: W1, sparx5: is2/es2, lan966x: is2
* Set if IPv4 TTL / IPv6 hop limit is greater than 0
+ * VCAP_KF_L4_1588_DOM: W8, lan966x: is2
+ * PTP over UDP: domainNumber
+ * VCAP_KF_L4_1588_VER: W4, lan966x: is2
+ * PTP over UDP: version
* VCAP_KF_L4_ACK: W1, sparx5: is2/es2, lan966x: is2
* Sparx5 and LAN966x: TCP flag ACK, LAN966x only: PTP over UDP: flagField bit 2
* (unicastFlag)
* VCAP_KF_L4_DPORT: W16, sparx5: is2/es2, lan966x: is2
* Sparx5: TCP/UDP destination port. Overloading for IP_7TUPLE: Non-TCP/UDP IP
* frames: L4_DPORT = L3_IP_PROTO, LAN966x: TCP/UDP destination port
- * VCAP_KF_L4_FIN: W1, sparx5: is2/es2
+ * VCAP_KF_L4_FIN: W1, sparx5: is2/es2, lan966x: is2
* TCP flag FIN, LAN966x: TCP flag FIN, and for PTP over UDP: messageType bit 1
* VCAP_KF_L4_PAYLOAD: W64, sparx5: is2/es2
* Payload bytes after TCP/UDP header Overloading for IP_7TUPLE: Non TCP/UDP
- * frames: Payload bytes 0–7 after IP header. IPv4 options are not parsed so
+ * frames: Payload bytes 0-7 after IP header. IPv4 options are not parsed so
* payload is always taken 20 bytes after the start of the IPv4 header for non
* TCP/UDP IPv4 frames
* VCAP_KF_L4_PSH: W1, sparx5: is2/es2, lan966x: is2
* Sparx5: TCP flag PSH, LAN966x: TCP: TCP flag PSH. PTP over UDP: flagField bit
* 1 (twoStepFlag)
- * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16, lan966x: is2
+ * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16, lan966x is2 W8
* Range checker bitmask (one for each range checker). Input into range checkers
* is taken from classified results (VID, DSCP) and frame (SPORT, DPORT, ETYPE,
* outer VID, inner VID)
@@ -263,58 +283,35 @@ enum vcap_keyfield_set {
* Select the mode of the Generic Index
* VCAP_KF_LOOKUP_PAG: W8, sparx5: is2, lan966x: is2
* Classified Policy Association Group: chains rules from IS1/CLM to IS2
+ * VCAP_KF_MIRROR_PROBE: W2, sparx5: es2
+ * Identifies frame copies generated as a result of mirroring
* VCAP_KF_OAM_CCM_CNTS_EQ0: W1, sparx5: is2/es2, lan966x: is2
* Dual-ended loss measurement counters in CCM frames are all zero
- * VCAP_KF_OAM_MEL_FLAGS: W7, sparx5: is0, lan966x: is2
+ * VCAP_KF_OAM_DETECTED: W1, lan966x: is2
+ * This is missing in the datasheet, but present in the OAM keyset in XML
+ * VCAP_KF_OAM_FLAGS: W8, lan966x: is2
+ * Frame's OAM flags
+ * VCAP_KF_OAM_MEL_FLAGS: W7, lan966x: is2
* Encoding of MD level/MEG level (MEL)
- * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is0/is2/es2, lan966x: is2
- * Set if frame’s EtherType = 0x8902
- * VCAP_KF_PROT_ACTIVE: W1, sparx5: es2
+ * VCAP_KF_OAM_MEPID: W16, lan966x: is2
+ * CCM frame's OAM MEP ID
+ * VCAP_KF_OAM_OPCODE: W8, lan966x: is2
+ * Frame's OAM opcode
+ * VCAP_KF_OAM_VER: W5, lan966x: is2
+ * Frame's OAM version
+ * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if frame's EtherType = 0x8902
+ * VCAP_KF_PROT_ACTIVE: W1, sparx5: es0/es2
* Protection is active
* VCAP_KF_TCP_IS: W1, sparx5: is0/is2/es2, lan966x: is2
* Set if frame is IPv4 TCP frame (IP protocol = 6) or IPv6 TCP frames (Next
* header = 6)
- * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2
* Set if frame is IPv4/IPv6 TCP or UDP frame (IP protocol/next header equals 6
* or 17)
* VCAP_KF_TYPE: sparx5 is0 W2, sparx5 is0 W1, sparx5 is2 W4, sparx5 is2 W2,
- * sparx5 es2 W3, lan966x: is2
+ * sparx5 es0 W1, sparx5 es2 W3, lan966x is2 W4, lan966x is2 W2
* Keyset type id - set by the API
- * VCAP_KF_HOST_MATCH: W1, lan966x: is2
- * The action from the SMAC_SIP4 or SMAC_SIP6 lookups. Used for IP source
- * guarding.
- * VCAP_KF_L2_FRM_TYPE: W4, lan966x: is2
- * Frame subtype for specific EtherTypes (MRP, DLR)
- * VCAP_KF_L2_PAYLOAD0: W16, lan966x: is2
- * Payload bytes 0-1 after the frame’s EtherType
- * VCAP_KF_L2_PAYLOAD1: W8, lan966x: is2
- * Payload byte 4 after the frame’s EtherType. This is specifically for PTP
- * frames.
- * VCAP_KF_L2_PAYLOAD2: W3, lan966x: is2
- * Bits 7, 2, and 1 from payload byte 6 after the frame’s EtherType. This is
- * specifically for PTP frames.
- * VCAP_KF_L2_LLC: W40, lan966x: is2
- * LLC header and data after up to two VLAN tags and the type/length field
- * VCAP_KF_L3_FRAGMENT: W1, lan966x: is2
- * Set if IPv4 frame is fragmented
- * VCAP_KF_L3_FRAG_OFS_GT0: W1, lan966x: is2
- * Set if IPv4 frame is fragmented and it is not the first fragment
- * VCAP_KF_L2_SNAP: W40, lan966x: is2
- * SNAP header after LLC header (AA-AA-03)
- * VCAP_KF_L4_1588_DOM: W8, lan966x: is2
- * PTP over UDP: domainNumber
- * VCAP_KF_L4_1588_VER: W4, lan966x: is2
- * PTP over UDP: version
- * VCAP_KF_OAM_MEPID: W16, lan966x: is2
- * CCM frame’s OAM MEP ID
- * VCAP_KF_OAM_OPCODE: W8, lan966x: is2
- * Frame’s OAM opcode
- * VCAP_KF_OAM_VER: W5, lan966x: is2
- * Frame’s OAM version
- * VCAP_KF_OAM_FLAGS: W8, lan966x: is2
- * Frame’s OAM flags
- * VCAP_KF_OAM_DETECTED: W1, lan966x: is2
- * This is missing in the datasheet, but present in the OAM keyset in XML
*/
/* Keyfield names */
@@ -334,6 +331,7 @@ enum vcap_key_field {
VCAP_KF_8021Q_PCP1,
VCAP_KF_8021Q_PCP2,
VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_TPID,
VCAP_KF_8021Q_TPID0,
VCAP_KF_8021Q_TPID1,
VCAP_KF_8021Q_TPID2,
@@ -352,13 +350,13 @@ enum vcap_key_field {
VCAP_KF_ARP_SENDER_MATCH_IS,
VCAP_KF_ARP_TGT_MATCH_IS,
VCAP_KF_COSID_CLS,
- VCAP_KF_DST_ENTRY,
VCAP_KF_ES0_ISDX_KEY_ENA,
VCAP_KF_ETYPE,
VCAP_KF_ETYPE_LEN_IS,
- VCAP_KF_ETYPE_MPLS,
+ VCAP_KF_HOST_MATCH,
VCAP_KF_IF_EGR_PORT_MASK,
VCAP_KF_IF_EGR_PORT_MASK_RNG,
+ VCAP_KF_IF_EGR_PORT_NO,
VCAP_KF_IF_IGR_PORT,
VCAP_KF_IF_IGR_PORT_MASK,
VCAP_KF_IF_IGR_PORT_MASK_L3,
@@ -373,17 +371,24 @@ enum vcap_key_field {
VCAP_KF_ISDX_GT0_IS,
VCAP_KF_L2_BC_IS,
VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_FRM_TYPE,
VCAP_KF_L2_FWD_IS,
+ VCAP_KF_L2_LLC,
VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_PAYLOAD0,
+ VCAP_KF_L2_PAYLOAD1,
+ VCAP_KF_L2_PAYLOAD2,
VCAP_KF_L2_PAYLOAD_ETYPE,
VCAP_KF_L2_SMAC,
+ VCAP_KF_L2_SNAP,
VCAP_KF_L3_DIP_EQ_SIP_IS,
- VCAP_KF_L3_DMAC_DIP_MATCH,
VCAP_KF_L3_DPL_CLS,
VCAP_KF_L3_DSCP,
VCAP_KF_L3_DST_IS,
+ VCAP_KF_L3_FRAGMENT,
VCAP_KF_L3_FRAGMENT_TYPE,
VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_FRAG_OFS_GT0,
VCAP_KF_L3_IP4_DIP,
VCAP_KF_L3_IP4_SIP,
VCAP_KF_L3_IP6_DIP,
@@ -392,9 +397,10 @@ enum vcap_key_field {
VCAP_KF_L3_OPTIONS_IS,
VCAP_KF_L3_PAYLOAD,
VCAP_KF_L3_RT_IS,
- VCAP_KF_L3_SMAC_SIP_MATCH,
VCAP_KF_L3_TOS,
VCAP_KF_L3_TTL_GT0,
+ VCAP_KF_L4_1588_DOM,
+ VCAP_KF_L4_1588_VER,
VCAP_KF_L4_ACK,
VCAP_KF_L4_DPORT,
VCAP_KF_L4_FIN,
@@ -411,30 +417,19 @@ enum vcap_key_field {
VCAP_KF_LOOKUP_GEN_IDX,
VCAP_KF_LOOKUP_GEN_IDX_SEL,
VCAP_KF_LOOKUP_PAG,
- VCAP_KF_MIRROR_ENA,
+ VCAP_KF_MIRROR_PROBE,
VCAP_KF_OAM_CCM_CNTS_EQ0,
+ VCAP_KF_OAM_DETECTED,
+ VCAP_KF_OAM_FLAGS,
VCAP_KF_OAM_MEL_FLAGS,
+ VCAP_KF_OAM_MEPID,
+ VCAP_KF_OAM_OPCODE,
+ VCAP_KF_OAM_VER,
VCAP_KF_OAM_Y1731_IS,
VCAP_KF_PROT_ACTIVE,
VCAP_KF_TCP_IS,
VCAP_KF_TCP_UDP_IS,
VCAP_KF_TYPE,
- VCAP_KF_HOST_MATCH,
- VCAP_KF_L2_FRM_TYPE,
- VCAP_KF_L2_PAYLOAD0,
- VCAP_KF_L2_PAYLOAD1,
- VCAP_KF_L2_PAYLOAD2,
- VCAP_KF_L2_LLC,
- VCAP_KF_L3_FRAGMENT,
- VCAP_KF_L3_FRAG_OFS_GT0,
- VCAP_KF_L2_SNAP,
- VCAP_KF_L4_1588_DOM,
- VCAP_KF_L4_1588_VER,
- VCAP_KF_OAM_MEPID,
- VCAP_KF_OAM_OPCODE,
- VCAP_KF_OAM_VER,
- VCAP_KF_OAM_FLAGS,
- VCAP_KF_OAM_DETECTED,
};
/* Actionset names with origin information */
@@ -443,14 +438,17 @@ enum vcap_actionfield_set {
VCAP_AFS_BASE_TYPE, /* sparx5 is2 X3, sparx5 es2 X3, lan966x is2 X2 */
VCAP_AFS_CLASSIFICATION, /* sparx5 is0 X2 */
VCAP_AFS_CLASS_REDUCED, /* sparx5 is0 X1 */
+ VCAP_AFS_ES0, /* sparx5 es0 X1 */
VCAP_AFS_FULL, /* sparx5 is0 X3 */
- VCAP_AFS_MLBS, /* sparx5 is0 X2 */
- VCAP_AFS_MLBS_REDUCED, /* sparx5 is0 X1 */
- VCAP_AFS_SMAC_SIP, /* lan966x is2 x1 */
+ VCAP_AFS_SMAC_SIP, /* lan966x is2 X1 */
};
/* List of actionfields with description
*
+ * VCAP_AF_ACL_ID: W6, lan966x: is2
+ * Logical ID for the entry. This ID is extracted together with the frame in the
+ * CPU extraction header. Only applicable to actions with CPU_COPY_ENA or
+ * HIT_ME_ONCE set.
* VCAP_AF_CLS_VID_SEL: W3, sparx5: is0
* Controls the classified VID: 0: VID_NONE: No action. 1: VID_ADD: New VID =
* old VID + VID_VAL. 2: VID_REPLACE: New VID = VID_VAL. 3: VID_FIRST_TAG: New
@@ -468,8 +466,16 @@ enum vcap_actionfield_set {
* VCAP_AF_CPU_COPY_ENA: W1, sparx5: is2/es2, lan966x: is2
* Setting this bit to 1 causes all frames that hit this action to be copied to
* the CPU extraction queue specified in CPU_QUEUE_NUM.
+ * VCAP_AF_CPU_QU: W3, sparx5: es0
+ * CPU extraction queue. Used when FWD_SEL >0 and PIPELINE_ACT = XTR.
* VCAP_AF_CPU_QUEUE_NUM: W3, sparx5: is2/es2, lan966x: is2
* CPU queue number. Used when CPU_COPY_ENA is set.
+ * VCAP_AF_DEI_A_VAL: W1, sparx5: es0
+ * DEI used in ES0 tag A. See TAG_A_DEI_SEL.
+ * VCAP_AF_DEI_B_VAL: W1, sparx5: es0
+ * DEI used in ES0 tag B. See TAG_B_DEI_SEL.
+ * VCAP_AF_DEI_C_VAL: W1, sparx5: es0
+ * DEI used in ES0 tag C. See TAG_C_DEI_SEL.
* VCAP_AF_DEI_ENA: W1, sparx5: is0
* If set, use DEI_VAL as classified DEI value. Otherwise, DEI from basic
* classification is used
@@ -483,19 +489,38 @@ enum vcap_actionfield_set {
* VCAP_AF_DSCP_ENA: W1, sparx5: is0
* If set, use DSCP_VAL as classified DSCP value. Otherwise, DSCP value from
* basic classification is used.
- * VCAP_AF_DSCP_VAL: W6, sparx5: is0
+ * VCAP_AF_DSCP_SEL: W3, sparx5: es0
+ * Selects source for DSCP. 0: Controlled by port configuration and IFH. 1:
+ * Classified DSCP via IFH. 2: DSCP_VAL. 3: Reserved. 4: Mapped using mapping
+ * table 0, otherwise use DSCP_VAL. 5: Mapped using mapping table 1, otherwise
+ * use mapping table 0. 6: Mapped using mapping table 2, otherwise use DSCP_VAL.
+ * 7: Mapped using mapping table 3, otherwise use mapping table 2
+ * VCAP_AF_DSCP_VAL: W6, sparx5: is0/es0
* See DSCP_ENA.
* VCAP_AF_ES2_REW_CMD: W3, sparx5: es2
* Command forwarded to REW: 0: No action. 1: SWAP MAC addresses. 2: Do L2CP
* DMAC translation when entering or leaving a tunnel.
+ * VCAP_AF_ESDX: W13, sparx5: es0
+ * Egress counter index. Used to index egress counter set as defined in
+ * REW::STAT_CFG.
+ * VCAP_AF_FWD_KILL_ENA: W1, lan966x: is2
+ * Setting this bit to 1 denies forwarding of the frame to any front
+ * port. The frame can still be copied to the CPU by other actions.
* VCAP_AF_FWD_MODE: W2, sparx5: es2
* Forward selector: 0: Forward. 1: Discard. 2: Redirect. 3: Copy.
+ * VCAP_AF_FWD_SEL: W2, sparx5: es0
+ * ES0 Forward selector. 0: No action. 1: Copy to loopback interface. 2:
+ * Redirect to loopback interface. 3: Discard
* VCAP_AF_HIT_ME_ONCE: W1, sparx5: is2/es2, lan966x: is2
* Setting this bit to 1 causes the first frame that hits this action where the
* HIT_CNT counter is zero to be copied to the CPU extraction queue specified in
* CPU_QUEUE_NUM. The HIT_CNT counter is then incremented and any frames that
* hit this action later are not copied to the CPU. To re-enable the HIT_ME_ONCE
* functionality, the HIT_CNT counter must be cleared.
+ * VCAP_AF_HOST_MATCH: W1, lan966x: is2
+ * Used for IP source guarding. If set, it signals that the host is valid (for
+ * instance a valid combination of source MAC address and source IP address).
+ * HOST_MATCH is input to the IS2 keys.
* VCAP_AF_IGNORE_PIPELINE_CTRL: W1, sparx5: is2/es2
* Ignore ingress pipeline control. This enforces the use of the VCAP IS2 action
* even when the pipeline control has terminated the frame before VCAP IS2.
@@ -504,8 +529,13 @@ enum vcap_actionfield_set {
* VCAP_AF_ISDX_ADD_REPLACE_SEL: W1, sparx5: is0
* Controls the classified ISDX. 0: New ISDX = old ISDX + ISDX_VAL. 1: New ISDX
* = ISDX_VAL.
+ * VCAP_AF_ISDX_ENA: W1, lan966x: is2
+ * Setting this bit to 1 causes the classified ISDX to be set to the value of
+ * POLICE_IDX[8:0].
* VCAP_AF_ISDX_VAL: W12, sparx5: is0
* See isdx_add_replace_sel
+ * VCAP_AF_LOOP_ENA: W1, sparx5: es0
+ * 0: Forward based on PIPELINE_PT and FWD_SEL
* VCAP_AF_LRN_DIS: W1, sparx5: is2, lan966x: is2
* Setting this bit to 1 disables learning of frames hitting this action.
* VCAP_AF_MAP_IDX: W9, sparx5: is0
@@ -521,136 +551,190 @@ enum vcap_actionfield_set {
* are applied to. 0: No changes to the QoS Mapping Table lookup. 1: Update key
* type and index for QoS Mapping Table lookup #0. 2: Update key type and index
* for QoS Mapping Table lookup #1. 3: Reserved.
- * VCAP_AF_MASK_MODE: W3, sparx5: is0/is2, lan966x is2 W2
+ * VCAP_AF_MASK_MODE: sparx5 is0 W3, sparx5 is2 W3, lan966x is2 W2
* Controls the PORT_MASK use. Sparx5: 0: OR_DSTMASK, 1: AND_VLANMASK, 2:
* REPLACE_PGID, 3: REPLACE_ALL, 4: REDIR_PGID, 5: OR_PGID_MASK, 6: VSTAX, 7:
* Not applicable. LAN966X: 0: No action, 1: Permit/deny (AND), 2: Policy
* forwarding (DMAC lookup), 3: Redirect. The CPU port is untouched by
* MASK_MODE.
- * VCAP_AF_MATCH_ID: W16, sparx5: is0/is2
+ * VCAP_AF_MATCH_ID: W16, sparx5: is2
* Logical ID for the entry. The MATCH_ID is extracted together with the frame
* if the frame is forwarded to the CPU (CPU_COPY_ENA). The result is placed in
* IFH.CL_RSLT.
- * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is0/is2
+ * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is2
* Mask used by MATCH_ID.
+ * VCAP_AF_MIRROR_ENA: W1, lan966x: is2
+ * Setting this bit to 1 causes frames to be mirrored to the mirror target port
+ * (ANA::MIRRORPORTS).
* VCAP_AF_MIRROR_PROBE: W2, sparx5: is2
* Mirroring performed according to configuration of a mirror probe. 0: No
* mirroring. 1: Mirror probe 0. 2: Mirror probe 1. 3: Mirror probe 2
* VCAP_AF_MIRROR_PROBE_ID: W2, sparx5: es2
* Signals a mirror probe to be placed in the IFH. Only possible when FWD_MODE
- * is copy. 0: No mirroring. 1–3: Use mirror probe 0-2.
+ * is copy. 0: No mirroring. 1-3: Use mirror probe 0-2.
* VCAP_AF_NXT_IDX: W12, sparx5: is0
* Index used as part of key (field G_IDX) in the next lookup.
* VCAP_AF_NXT_IDX_CTRL: W3, sparx5: is0
* Controls the generation of the G_IDX used in the VCAP CLM next lookup
* VCAP_AF_PAG_OVERRIDE_MASK: W8, sparx5: is0
- * Bits set in this mask will override PAG_VAL from port profile.  New PAG =
- * (PAG (input) AND ~PAG_OVERRIDE_MASK) OR (PAG_VAL AND PAG_OVERRIDE_MASK)
+ * Bits set in this mask will override PAG_VAL from port profile. New PAG = (PAG
+ * (input) AND ~PAG_OVERRIDE_MASK) OR (PAG_VAL AND PAG_OVERRIDE_MASK)
* VCAP_AF_PAG_VAL: W8, sparx5: is0
* See PAG_OVERRIDE_MASK.
+ * VCAP_AF_PCP_A_VAL: W3, sparx5: es0
+ * PCP used in ES0 tag A. See TAG_A_PCP_SEL.
+ * VCAP_AF_PCP_B_VAL: W3, sparx5: es0
+ * PCP used in ES0 tag B. See TAG_B_PCP_SEL.
+ * VCAP_AF_PCP_C_VAL: W3, sparx5: es0
+ * PCP used in ES0 tag C. See TAG_C_PCP_SEL.
* VCAP_AF_PCP_ENA: W1, sparx5: is0
* If set, use PCP_VAL as classified PCP value. Otherwise, PCP from basic
* classification is used.
* VCAP_AF_PCP_VAL: W3, sparx5: is0
* See PCP_ENA.
- * VCAP_AF_PIPELINE_FORCE_ENA: sparx5 is0 W2, sparx5 is2 W1
+ * VCAP_AF_PIPELINE_ACT: W1, sparx5: es0
+ * Pipeline action when FWD_SEL > 0. 0: XTR. CPU_QU selects CPU extraction queue.
+ * 1: LBK_ASM.
+ * VCAP_AF_PIPELINE_FORCE_ENA: W1, sparx5: is2
* If set, use PIPELINE_PT unconditionally and set PIPELINE_ACT = NONE if
* PIPELINE_PT == NONE. Overrules previous settings of pipeline point.
- * VCAP_AF_PIPELINE_PT: W5, sparx5: is0/is2
+ * VCAP_AF_PIPELINE_PT: sparx5 is2 W5, sparx5 es0 W2
* Pipeline point used if PIPELINE_FORCE_ENA is set
* VCAP_AF_POLICE_ENA: W1, sparx5: is2/es2, lan966x: is2
* Setting this bit to 1 causes frames that hit this action to be policed by the
* ACL policer specified in POLICE_IDX. Only applies to the first lookup.
- * VCAP_AF_POLICE_IDX: W6, sparx5: is2/es2, lan966x: is2 W9
+ * VCAP_AF_POLICE_IDX: sparx5 is2 W6, sparx5 es2 W6, lan966x is2 W9
* Selects VCAP policer used when policing frames (POLICE_ENA)
* VCAP_AF_POLICE_REMARK: W1, sparx5: es2
* If set, frames exceeding policer rates are marked as yellow but not
* discarded.
+ * VCAP_AF_POLICE_VCAP_ONLY: W1, lan966x: is2
+ * Disable policing from QoS, and port policers. Only the VCAP policer selected
+ * by POLICE_IDX is active. Only applies to the second lookup.
+ * VCAP_AF_POP_VAL: W2, sparx5: es0
+ * Controls popping of Q-tags. The final number of Q-tags popped is calculated
+ * as shown in section 4.28.7.2 VLAN Pop Decision.
* VCAP_AF_PORT_MASK: sparx5 is0 W65, sparx5 is2 W68, lan966x is2 W8
* Port mask applied to the forwarding decision based on MASK_MODE.
+ * VCAP_AF_PUSH_CUSTOMER_TAG: W2, sparx5: es0
+ * Selects tag C mode: 0: Do not push tag C. 1: Push tag C if
+ * IFH.VSTAX.TAG.WAS_TAGGED = 1. 2: Push tag C if IFH.VSTAX.TAG.WAS_TAGGED = 0.
+ * 3: Push tag C if UNTAG_VID_ENA = 0 or (C-TAG.VID != VID_C_VAL).
+ * VCAP_AF_PUSH_INNER_TAG: W1, sparx5: es0
+ * Controls inner tagging. 0: Do not push ES0 tag B as inner tag. 1: Push ES0
+ * tag B as inner tag.
+ * VCAP_AF_PUSH_OUTER_TAG: W2, sparx5: es0
+ * Controls outer tagging. 0: No ES0 tag A: Port tag is allowed if enabled on
+ * port. 1: ES0 tag A: Push ES0 tag A. No port tag. 2: Force port tag: Always
+ * push port tag. No ES0 tag A. 3: Force untag: Never push port tag or ES0 tag
+ * A.
* VCAP_AF_QOS_ENA: W1, sparx5: is0
* If set, use QOS_VAL as classified QoS class. Otherwise, QoS class from basic
* classification is used.
* VCAP_AF_QOS_VAL: W3, sparx5: is0
* See QOS_ENA.
+ * VCAP_AF_REW_OP: W16, lan966x: is2
+ * Rewriter operation command.
* VCAP_AF_RT_DIS: W1, sparx5: is2
* If set, routing is disallowed. Only applies when IS_INNER_ACL is 0. See also
* IGR_ACL_ENA, EGR_ACL_ENA, and RLEG_STAT_IDX.
+ * VCAP_AF_SWAP_MACS_ENA: W1, sparx5: es0
+ * This setting is only active when FWD_SEL = 1 or FWD_SEL = 2 and PIPELINE_ACT
+ * = LBK_ASM. 0: No action. 1: Swap MACs and clear bit 40 in new SMAC.
+ * VCAP_AF_TAG_A_DEI_SEL: W3, sparx5: es0
+ * Selects DEI for ES0 tag A. 0: Classified DEI. 1: DEI_A_VAL. 2: DP and QoS
+ * mapped to DEI (per port table). 3: DP.
+ * VCAP_AF_TAG_A_PCP_SEL: W3, sparx5: es0
+ * Selects PCP for ES0 tag A. 0: Classified PCP. 1: PCP_A_VAL. 2: DP and QoS
+ * mapped to PCP (per port table). 3: QoS class.
+ * VCAP_AF_TAG_A_TPID_SEL: W3, sparx5: es0
+ * Selects TPID for ES0 tag A: 0: 0x8100. 1: 0x88A8. 2: Custom
+ * (REW:PORT:PORT_VLAN_CFG.PORT_TPID). 3: If IFH.TAG_TYPE = 0 then 0x8100 else
+ * custom.
+ * VCAP_AF_TAG_A_VID_SEL: W2, sparx5: es0
+ * Selects VID for ES0 tag A. 0: Classified VID + VID_A_VAL. 1: VID_A_VAL.
+ * VCAP_AF_TAG_B_DEI_SEL: W3, sparx5: es0
+ * Selects DEI for ES0 tag B. 0: Classified DEI. 1: DEI_B_VAL. 2: DP and QoS
+ * mapped to DEI (per port table). 3: DP.
+ * VCAP_AF_TAG_B_PCP_SEL: W3, sparx5: es0
+ * Selects PCP for ES0 tag B. 0: Classified PCP. 1: PCP_B_VAL. 2: DP and QoS
+ * mapped to PCP (per port table). 3: QoS class.
+ * VCAP_AF_TAG_B_TPID_SEL: W3, sparx5: es0
+ * Selects TPID for ES0 tag B. 0: 0x8100. 1: 0x88A8. 2: Custom
+ * (REW:PORT:PORT_VLAN_CFG.PORT_TPID). 3: If IFH.TAG_TYPE = 0 then 0x8100 else
+ * custom.
+ * VCAP_AF_TAG_B_VID_SEL: W2, sparx5: es0
+ * Selects VID for ES0 tag B. 0: Classified VID + VID_B_VAL. 1: VID_B_VAL.
+ * VCAP_AF_TAG_C_DEI_SEL: W3, sparx5: es0
+ * Selects DEI source for ES0 tag C. 0: Classified DEI. 1: DEI_C_VAL. 2:
+ * REW::DP_MAP.DP [IFH.VSTAX.QOS.DP]. 3: DEI of popped VLAN tag if available
+ * (IFH.VSTAX.TAG.WAS_TAGGED = 1 and tot_pop_cnt>0) else DEI_C_VAL. 4: Mapped
+ * using mapping table 0, otherwise use DEI_C_VAL. 5: Mapped using mapping table
+ * 1, otherwise use mapping table 0. 6: Mapped using mapping table 2, otherwise
+ * use DEI_C_VAL. 7: Mapped using mapping table 3, otherwise use mapping table
+ * 2.
+ * VCAP_AF_TAG_C_PCP_SEL: W3, sparx5: es0
+ * Selects PCP source for ES0 tag C. 0: Classified PCP. 1: PCP_C_VAL. 2:
+ * Reserved. 3: PCP of popped VLAN tag if available (IFH.VSTAX.TAG.WAS_TAGGED=1
+ * and tot_pop_cnt>0) else PCP_C_VAL. 4: Mapped using mapping table 0, otherwise
+ * use PCP_C_VAL. 5: Mapped using mapping table 1, otherwise use mapping table
+ * 0. 6: Mapped using mapping table 2, otherwise use PCP_C_VAL. 7: Mapped using
+ * mapping table 3, otherwise use mapping table 2.
+ * VCAP_AF_TAG_C_TPID_SEL: W3, sparx5: es0
+ * Selects TPID for ES0 tag C. 0: 0x8100. 1: 0x88A8. 2: Custom 1. 3: Custom 2.
+ * 4: Custom 3. 5: See TAG_A_TPID_SEL.
+ * VCAP_AF_TAG_C_VID_SEL: W2, sparx5: es0
+ * Selects VID for ES0 tag C. The resulting VID is termed C-TAG.VID. 0:
+ * Classified VID. 1: VID_C_VAL. 2: IFH.ENCAP.GVID. 3: Reserved.
* VCAP_AF_TYPE: W1, sparx5: is0
* Actionset type id - Set by the API
+ * VCAP_AF_UNTAG_VID_ENA: W1, sparx5: es0
+ * Controls insertion of tag C. Untag or insert mode can be selected. See
+ * PUSH_CUSTOMER_TAG.
+ * VCAP_AF_VID_A_VAL: W12, sparx5: es0
+ * VID used in ES0 tag A. See TAG_A_VID_SEL.
+ * VCAP_AF_VID_B_VAL: W12, sparx5: es0
+ * VID used in ES0 tag B. See TAG_B_VID_SEL.
+ * VCAP_AF_VID_C_VAL: W12, sparx5: es0
+ * VID used in ES0 tag C. See TAG_C_VID_SEL.
* VCAP_AF_VID_VAL: W13, sparx5: is0
* New VID Value
- * VCAP_AF_MIRROR_ENA: W1, lan966x: is2
- * Setting this bit to 1 causes frames to be mirrored to the mirror target
- * port (ANA::MIRRPORPORTS).
- * VCAP_AF_POLICE_VCAP_ONLY: W1, lan966x: is2
- * Disable policing from QoS, and port policers. Only the VCAP policer
- * selected by POLICE_IDX is active. Only applies to the second lookup.
- * VCAP_AF_REW_OP: W16, lan966x: is2
- * Rewriter operation command.
- * VCAP_AF_ISDX_ENA: W1, lan966x: is2
- * Setting this bit to 1 causes the classified ISDX to be set to the value of
- * POLICE_IDX[8:0].
- * VCAP_AF_ACL_ID: W6, lan966x: is2
- * Logical ID for the entry. This ID is extracted together with the frame in
- * the CPU extraction header. Only applicable to actions with CPU_COPY_ENA or
- * HIT_ME_ONCE set.
- * VCAP_AF_FWD_KILL_ENA: W1, lan966x: is2
- * Setting this bit to 1 denies forwarding of the frame forwarding to any
- * front port. The frame can still be copied to the CPU by other actions.
- * VCAP_AF_HOST_MATCH: W1, lan966x: is2
- * Used for IP source guarding. If set, it signals that the host is a valid
- * (for instance a valid combination of source MAC address and source IP
- * address). HOST_MATCH is input to the IS2 keys.
*/
/* Actionfield names */
enum vcap_action_field {
VCAP_AF_NO_VALUE, /* initial value */
- VCAP_AF_ACL_MAC,
- VCAP_AF_ACL_RT_MODE,
+ VCAP_AF_ACL_ID,
VCAP_AF_CLS_VID_SEL,
VCAP_AF_CNT_ID,
VCAP_AF_COPY_PORT_NUM,
VCAP_AF_COPY_QUEUE_NUM,
- VCAP_AF_COSID_ENA,
- VCAP_AF_COSID_VAL,
VCAP_AF_CPU_COPY_ENA,
- VCAP_AF_CPU_DIS,
- VCAP_AF_CPU_ENA,
- VCAP_AF_CPU_Q,
+ VCAP_AF_CPU_QU,
VCAP_AF_CPU_QUEUE_NUM,
- VCAP_AF_CUSTOM_ACE_ENA,
- VCAP_AF_CUSTOM_ACE_OFFSET,
+ VCAP_AF_DEI_A_VAL,
+ VCAP_AF_DEI_B_VAL,
+ VCAP_AF_DEI_C_VAL,
VCAP_AF_DEI_ENA,
VCAP_AF_DEI_VAL,
- VCAP_AF_DLB_OFFSET,
- VCAP_AF_DMAC_OFFSET_ENA,
VCAP_AF_DP_ENA,
VCAP_AF_DP_VAL,
VCAP_AF_DSCP_ENA,
+ VCAP_AF_DSCP_SEL,
VCAP_AF_DSCP_VAL,
- VCAP_AF_EGR_ACL_ENA,
VCAP_AF_ES2_REW_CMD,
- VCAP_AF_FWD_DIS,
+ VCAP_AF_ESDX,
+ VCAP_AF_FWD_KILL_ENA,
VCAP_AF_FWD_MODE,
- VCAP_AF_FWD_TYPE,
- VCAP_AF_GVID_ADD_REPLACE_SEL,
+ VCAP_AF_FWD_SEL,
VCAP_AF_HIT_ME_ONCE,
+ VCAP_AF_HOST_MATCH,
VCAP_AF_IGNORE_PIPELINE_CTRL,
- VCAP_AF_IGR_ACL_ENA,
- VCAP_AF_INJ_MASQ_ENA,
- VCAP_AF_INJ_MASQ_LPORT,
- VCAP_AF_INJ_MASQ_PORT,
VCAP_AF_INTR_ENA,
VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_AF_ISDX_ENA,
VCAP_AF_ISDX_VAL,
- VCAP_AF_IS_INNER_ACL,
- VCAP_AF_L3_MAC_UPDATE_DIS,
- VCAP_AF_LOG_MSG_INTERVAL,
- VCAP_AF_LPM_AFFIX_ENA,
- VCAP_AF_LPM_AFFIX_VAL,
- VCAP_AF_LPORT_ENA,
+ VCAP_AF_LOOP_ENA,
VCAP_AF_LRN_DIS,
VCAP_AF_MAP_IDX,
VCAP_AF_MAP_KEY,
@@ -658,78 +742,53 @@ enum vcap_action_field {
VCAP_AF_MASK_MODE,
VCAP_AF_MATCH_ID,
VCAP_AF_MATCH_ID_MASK,
- VCAP_AF_MIP_SEL,
+ VCAP_AF_MIRROR_ENA,
VCAP_AF_MIRROR_PROBE,
VCAP_AF_MIRROR_PROBE_ID,
- VCAP_AF_MPLS_IP_CTRL_ENA,
- VCAP_AF_MPLS_MEP_ENA,
- VCAP_AF_MPLS_MIP_ENA,
- VCAP_AF_MPLS_OAM_FLAVOR,
- VCAP_AF_MPLS_OAM_TYPE,
- VCAP_AF_NUM_VLD_LABELS,
VCAP_AF_NXT_IDX,
VCAP_AF_NXT_IDX_CTRL,
- VCAP_AF_NXT_KEY_TYPE,
- VCAP_AF_NXT_NORMALIZE,
- VCAP_AF_NXT_NORM_W16_OFFSET,
- VCAP_AF_NXT_NORM_W32_OFFSET,
- VCAP_AF_NXT_OFFSET_FROM_TYPE,
- VCAP_AF_NXT_TYPE_AFTER_OFFSET,
- VCAP_AF_OAM_IP_BFD_ENA,
- VCAP_AF_OAM_TWAMP_ENA,
- VCAP_AF_OAM_Y1731_SEL,
VCAP_AF_PAG_OVERRIDE_MASK,
VCAP_AF_PAG_VAL,
+ VCAP_AF_PCP_A_VAL,
+ VCAP_AF_PCP_B_VAL,
+ VCAP_AF_PCP_C_VAL,
VCAP_AF_PCP_ENA,
VCAP_AF_PCP_VAL,
- VCAP_AF_PIPELINE_ACT_SEL,
+ VCAP_AF_PIPELINE_ACT,
VCAP_AF_PIPELINE_FORCE_ENA,
VCAP_AF_PIPELINE_PT,
- VCAP_AF_PIPELINE_PT_REDUCED,
VCAP_AF_POLICE_ENA,
VCAP_AF_POLICE_IDX,
VCAP_AF_POLICE_REMARK,
+ VCAP_AF_POLICE_VCAP_ONLY,
+ VCAP_AF_POP_VAL,
VCAP_AF_PORT_MASK,
- VCAP_AF_PTP_MASTER_SEL,
+ VCAP_AF_PUSH_CUSTOMER_TAG,
+ VCAP_AF_PUSH_INNER_TAG,
+ VCAP_AF_PUSH_OUTER_TAG,
VCAP_AF_QOS_ENA,
VCAP_AF_QOS_VAL,
- VCAP_AF_REW_CMD,
- VCAP_AF_RLEG_DMAC_CHK_DIS,
- VCAP_AF_RLEG_STAT_IDX,
- VCAP_AF_RSDX_ENA,
- VCAP_AF_RSDX_VAL,
- VCAP_AF_RSVD_LBL_VAL,
+ VCAP_AF_REW_OP,
VCAP_AF_RT_DIS,
- VCAP_AF_RT_SEL,
- VCAP_AF_S2_KEY_SEL_ENA,
- VCAP_AF_S2_KEY_SEL_IDX,
- VCAP_AF_SAM_SEQ_ENA,
- VCAP_AF_SIP_IDX,
- VCAP_AF_SWAP_MAC_ENA,
- VCAP_AF_TCP_UDP_DPORT,
- VCAP_AF_TCP_UDP_ENA,
- VCAP_AF_TCP_UDP_SPORT,
- VCAP_AF_TC_ENA,
- VCAP_AF_TC_LABEL,
- VCAP_AF_TPID_SEL,
- VCAP_AF_TTL_DECR_DIS,
- VCAP_AF_TTL_ENA,
- VCAP_AF_TTL_LABEL,
- VCAP_AF_TTL_UPDATE_ENA,
+ VCAP_AF_SWAP_MACS_ENA,
+ VCAP_AF_TAG_A_DEI_SEL,
+ VCAP_AF_TAG_A_PCP_SEL,
+ VCAP_AF_TAG_A_TPID_SEL,
+ VCAP_AF_TAG_A_VID_SEL,
+ VCAP_AF_TAG_B_DEI_SEL,
+ VCAP_AF_TAG_B_PCP_SEL,
+ VCAP_AF_TAG_B_TPID_SEL,
+ VCAP_AF_TAG_B_VID_SEL,
+ VCAP_AF_TAG_C_DEI_SEL,
+ VCAP_AF_TAG_C_PCP_SEL,
+ VCAP_AF_TAG_C_TPID_SEL,
+ VCAP_AF_TAG_C_VID_SEL,
VCAP_AF_TYPE,
+ VCAP_AF_UNTAG_VID_ENA,
+ VCAP_AF_VID_A_VAL,
+ VCAP_AF_VID_B_VAL,
+ VCAP_AF_VID_C_VAL,
VCAP_AF_VID_VAL,
- VCAP_AF_VLAN_POP_CNT,
- VCAP_AF_VLAN_POP_CNT_ENA,
- VCAP_AF_VLAN_PUSH_CNT,
- VCAP_AF_VLAN_PUSH_CNT_ENA,
- VCAP_AF_VLAN_WAS_TAGGED,
- VCAP_AF_MIRROR_ENA,
- VCAP_AF_POLICE_VCAP_ONLY,
- VCAP_AF_REW_OP,
- VCAP_AF_ISDX_ENA,
- VCAP_AF_ACL_ID,
- VCAP_AF_FWD_KILL_ENA,
- VCAP_AF_HOST_MATCH,
};
#endif /* __VCAP_AG_API__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index 664aae3e2acd..4847d0d99ec9 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -37,11 +37,13 @@ struct vcap_rule_move {
int count; /* blocksize of addresses to move */
};
-/* Stores the filter cookie that enabled the port */
+/* Stores the filter cookie and chain id that enabled the port */
struct vcap_enabled_port {
struct list_head list; /* for insertion in enabled ports list */
struct net_device *ndev; /* the enabled port */
unsigned long cookie; /* filter that enabled the port */
+ int src_cid; /* source chain id */
+ int dst_cid; /* destination chain id */
};
void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width,
@@ -508,10 +510,133 @@ static void vcap_encode_keyfield_typegroups(struct vcap_control *vctrl,
vcap_encode_typegroups(cache->maskstream, sw_width, tgt, true);
}
+/* Copy data from src to dst but reverse the data in chunks of 32bits.
+ * For example, if src is 00:11:22:33:44:55 where 55 is the LSB, dst will
+ * hold the value 44:55:00:11:22:33.
+ */
+static void vcap_copy_to_w32be(u8 *dst, const u8 *src, int size)
+{
+ for (int idx = 0; idx < size; ++idx) {
+ int first_byte_index = 0;
+ int nidx;
+
+ first_byte_index = size - (((idx >> 2) + 1) << 2);
+ if (first_byte_index < 0)
+ first_byte_index = 0;
+ nidx = idx + first_byte_index - (idx & ~0x3);
+ dst[nidx] = src[idx];
+ }
+}
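A minimal sketch (not part of the patch) of what the helper produces for a 6-byte field such as a MAC address:

	const u8 src[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 dst[6];

	vcap_copy_to_w32be(dst, src, sizeof(src));
	/* dst now holds 44:55:00:11:22:33 - the trailing partial 32-bit
	 * chunk (44 55) moves to the front, followed by the full chunk
	 * 00 11 22 33
	 */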
+
+static void
+vcap_copy_from_client_keyfield(struct vcap_rule *rule,
+ struct vcap_client_keyfield *dst,
+ const struct vcap_client_keyfield *src)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_keyfield_data *sdata;
+ struct vcap_client_keyfield_data *ddata;
+ int size;
+
+ dst->ctrl.type = src->ctrl.type;
+ dst->ctrl.key = src->ctrl.key;
+ INIT_LIST_HEAD(&dst->ctrl.list);
+ sdata = &src->data;
+ ddata = &dst->data;
+
+ if (!ri->admin->w32be) {
+ memcpy(ddata, sdata, sizeof(dst->data));
+ return;
+ }
+
+ size = keyfield_size_table[dst->ctrl.type] / 2;
+
+ switch (dst->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ case VCAP_FIELD_U32:
+ memcpy(ddata, sdata, sizeof(dst->data));
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_to_w32be(ddata->u48.value, src->data.u48.value, size);
+ vcap_copy_to_w32be(ddata->u48.mask, src->data.u48.mask, size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_to_w32be(ddata->u56.value, sdata->u56.value, size);
+ vcap_copy_to_w32be(ddata->u56.mask, sdata->u56.mask, size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_to_w32be(ddata->u64.value, sdata->u64.value, size);
+ vcap_copy_to_w32be(ddata->u64.mask, sdata->u64.mask, size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_to_w32be(ddata->u72.value, sdata->u72.value, size);
+ vcap_copy_to_w32be(ddata->u72.mask, sdata->u72.mask, size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_to_w32be(ddata->u112.value, sdata->u112.value, size);
+ vcap_copy_to_w32be(ddata->u112.mask, sdata->u112.mask, size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_to_w32be(ddata->u128.value, sdata->u128.value, size);
+ vcap_copy_to_w32be(ddata->u128.mask, sdata->u128.mask, size);
+ break;
+ }
+}
+
+static void
+vcap_copy_from_client_actionfield(struct vcap_rule *rule,
+ struct vcap_client_actionfield *dst,
+ const struct vcap_client_actionfield *src)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_actionfield_data *sdata;
+ struct vcap_client_actionfield_data *ddata;
+ int size;
+
+ dst->ctrl.type = src->ctrl.type;
+ dst->ctrl.action = src->ctrl.action;
+ INIT_LIST_HEAD(&dst->ctrl.list);
+ sdata = &src->data;
+ ddata = &dst->data;
+
+ if (!ri->admin->w32be) {
+ memcpy(ddata, sdata, sizeof(dst->data));
+ return;
+ }
+
+ size = actionfield_size_table[dst->ctrl.type];
+
+ switch (dst->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ case VCAP_FIELD_U32:
+ memcpy(ddata, sdata, sizeof(dst->data));
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_to_w32be(ddata->u48.value, sdata->u48.value, size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_to_w32be(ddata->u56.value, sdata->u56.value, size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_to_w32be(ddata->u64.value, sdata->u64.value, size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_to_w32be(ddata->u72.value, sdata->u72.value, size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_to_w32be(ddata->u112.value, sdata->u112.value, size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_to_w32be(ddata->u128.value, sdata->u128.value, size);
+ break;
+ }
+}
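Unlike the keyfield variant above, an actionfield carries only a value and no mask, which is why just the value buffers are reordered here.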
+
static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri)
{
const struct vcap_client_keyfield *ckf;
const struct vcap_typegroup *tg_table;
+ struct vcap_client_keyfield tempkf;
const struct vcap_field *kf_table;
int keyset_size;
@@ -552,7 +677,9 @@ static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri)
__func__, __LINE__, ckf->ctrl.key);
return -EINVAL;
}
- vcap_encode_keyfield(ri, ckf, &kf_table[ckf->ctrl.key], tg_table);
+ vcap_copy_from_client_keyfield(&ri->data, &tempkf, ckf);
+ vcap_encode_keyfield(ri, &tempkf, &kf_table[ckf->ctrl.key],
+ tg_table);
}
/* Add typegroup bits to the key/mask bitstreams */
vcap_encode_keyfield_typegroups(ri->vctrl, ri, tg_table);
@@ -667,6 +794,7 @@ static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri)
{
const struct vcap_client_actionfield *caf;
const struct vcap_typegroup *tg_table;
+ struct vcap_client_actionfield tempaf;
const struct vcap_field *af_table;
int actionset_size;
@@ -707,8 +835,9 @@ static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri)
__func__, __LINE__, caf->ctrl.action);
return -EINVAL;
}
- vcap_encode_actionfield(ri, caf, &af_table[caf->ctrl.action],
- tg_table);
+ vcap_copy_from_client_actionfield(&ri->data, &tempaf, caf);
+ vcap_encode_actionfield(ri, &tempaf,
+ &af_table[caf->ctrl.action], tg_table);
}
/* Add typegroup bits to the entry bitstreams */
vcap_encode_actionfield_typegroups(ri, tg_table);
@@ -738,7 +867,7 @@ int vcap_api_check(struct vcap_control *ctrl)
!ctrl->ops->add_default_fields || !ctrl->ops->cache_erase ||
!ctrl->ops->cache_write || !ctrl->ops->cache_read ||
!ctrl->ops->init || !ctrl->ops->update || !ctrl->ops->move ||
- !ctrl->ops->port_info || !ctrl->ops->enable) {
+ !ctrl->ops->port_info) {
pr_err("%s:%d: client operations are missing\n",
__func__, __LINE__);
return -ENOENT;
@@ -791,9 +920,8 @@ int vcap_set_rule_set_actionset(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_set_rule_set_actionset);
-/* Find a rule with a provided rule id */
-static struct vcap_rule_internal *vcap_lookup_rule(struct vcap_control *vctrl,
- u32 id)
+/* Check if a rule with this id exists */
+static bool vcap_rule_exists(struct vcap_control *vctrl, u32 id)
{
struct vcap_rule_internal *ri;
struct vcap_admin *admin;
@@ -802,7 +930,25 @@ static struct vcap_rule_internal *vcap_lookup_rule(struct vcap_control *vctrl,
list_for_each_entry(admin, &vctrl->list, list)
list_for_each_entry(ri, &admin->rules, list)
if (ri->data.id == id)
+ return true;
+ return false;
+}
+
+/* Find a rule with a provided rule id and return it with the VCAP locked */
+static struct vcap_rule_internal *
+vcap_get_locked_rule(struct vcap_control *vctrl, u32 id)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+
+ /* Look for the rule id in all vcaps */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list)
+ if (ri->data.id == id)
return ri;
+ mutex_unlock(&admin->lock);
+ }
return NULL;
}
@@ -811,19 +957,31 @@ int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie)
{
struct vcap_rule_internal *ri;
struct vcap_admin *admin;
+ int id = 0;
/* Look for the rule id in all vcaps */
- list_for_each_entry(admin, &vctrl->list, list)
- list_for_each_entry(ri, &admin->rules, list)
- if (ri->data.cookie == cookie)
- return ri->data.id;
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.cookie == cookie) {
+ id = ri->data.id;
+ break;
+ }
+ }
+ mutex_unlock(&admin->lock);
+ if (id)
+ return id;
+ }
return -ENOENT;
}
EXPORT_SYMBOL_GPL(vcap_lookup_rule_by_cookie);
-/* Make a shallow copy of the rule without the fields */
-struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri)
+/* Make a copy of the rule, shallow or full */
+static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
+ bool full)
{
+ struct vcap_client_actionfield *caf, *newcaf;
+ struct vcap_client_keyfield *ckf, *newckf;
struct vcap_rule_internal *duprule;
/* Allocate the client part */
@@ -836,6 +994,25 @@ struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri)
/* No elements in these lists */
INIT_LIST_HEAD(&duprule->data.keyfields);
INIT_LIST_HEAD(&duprule->data.actionfields);
+
+ /* A full rule copy includes keys and actions */
+ if (!full)
+ return duprule;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
+ if (!newckf)
+ return ERR_PTR(-ENOMEM);
+ list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
+ }
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
+ if (!newcaf)
+ return ERR_PTR(-ENOMEM);
+ list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
+ }
+
return duprule;
}
@@ -1424,39 +1601,65 @@ struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid)
}
EXPORT_SYMBOL_GPL(vcap_find_admin);
-/* Is the next chain id in the following lookup, possible in another VCAP */
-bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid)
+/* Is this the last admin instance ordered by chain id and direction */
+static bool vcap_admin_is_last(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ bool ingress)
{
- struct vcap_admin *admin, *next_admin;
- int lookup, next_lookup;
+ struct vcap_admin *iter, *last = NULL;
+ int max_cid = 0;
- /* The offset must be at least one lookup */
- if (next_cid < cur_cid + VCAP_CID_LOOKUP_SIZE)
+ list_for_each_entry(iter, &vctrl->list, list) {
+ if (iter->first_cid > max_cid &&
+ iter->ingress == ingress) {
+ last = iter;
+ max_cid = iter->first_cid;
+ }
+ }
+ if (!last)
return false;
- if (vcap_api_check(vctrl))
- return false;
+ return admin == last;
+}
- admin = vcap_find_admin(vctrl, cur_cid);
- if (!admin)
+/* Calculate the value used for chaining VCAP rules */
+int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid)
+{
+ int diff = to_cid - from_cid;
+
+ if (diff < 0) /* Wrong direction */
+ return diff;
+ to_cid %= VCAP_CID_LOOKUP_SIZE;
+ if (to_cid == 0) /* Destination aligned to a lookup == no chaining */
+ return 0;
+ diff %= VCAP_CID_LOOKUP_SIZE; /* Limit to a value within a lookup */
+ return diff;
+}
+EXPORT_SYMBOL_GPL(vcap_chain_offset);
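The arithmetic is easiest to follow with concrete chain ids; an illustrative sketch, assuming the VCAP_CID_LOOKUP_SIZE of 100000 used by these drivers:

	/* vcap_chain_offset(vctrl, 8000000, 8000010) -> 10 (chain into the
	 *   same lookup, 10 rules in)
	 * vcap_chain_offset(vctrl, 8000000, 8100000) -> 0 (destination
	 *   starts a new lookup, no chaining value needed)
	 * vcap_chain_offset(vctrl, 8100000, 8000000) -> negative (wrong
	 *   direction)
	 */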
+
+/* Is the next chain id in one of the following lookups?
+ * For now this does not support filters linked to other filters using
+ * keys and actions. That will be added later.
+ */
+bool vcap_is_next_lookup(struct vcap_control *vctrl, int src_cid, int dst_cid)
+{
+ struct vcap_admin *admin;
+ int next_cid;
+
+ if (vcap_api_check(vctrl))
return false;
- /* If no VCAP contains the next chain, the next chain must be beyond
- * the last chain in the current VCAP
- */
- next_admin = vcap_find_admin(vctrl, next_cid);
- if (!next_admin)
- return next_cid > admin->last_cid;
+ /* The offset must be at least one lookup, so round up to the next lookup boundary */
+ next_cid = roundup(src_cid + 1, VCAP_CID_LOOKUP_SIZE);
- lookup = vcap_chain_id_to_lookup(admin, cur_cid);
- next_lookup = vcap_chain_id_to_lookup(next_admin, next_cid);
+ if (dst_cid < next_cid)
+ return false;
- /* Next lookup must be the following lookup */
- if (admin == next_admin || admin->vtype == next_admin->vtype)
- return next_lookup == lookup + 1;
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return false;
- /* Must be the first lookup in the next VCAP instance */
- return next_lookup == 0;
+ return true;
}
EXPORT_SYMBOL_GPL(vcap_is_next_lookup);
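With this rewrite the check reduces to "is dst_cid at least one full lookup ahead, and does some VCAP instance cover it". Assuming VCAP_CID_LOOKUP_SIZE is 100000, src_cid 8300000 gives next_cid = roundup(8300001, 100000) = 8400000, so dst_cid 8301000 is rejected, and 8401000 is rejected as well when no instance covers it (in the kunit layout further below the last IS2 instance ends at 8399999, hence the updated expectations).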
@@ -1504,6 +1707,39 @@ static int vcap_add_type_keyfield(struct vcap_rule *rule)
return 0;
}
+/* Add the actionset typefield to the list of rule actionfields */
+static int vcap_add_type_actionfield(struct vcap_rule *rule)
+{
+ enum vcap_actionfield_set actionset = rule->actionset;
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+ const struct vcap_set *aset;
+ int ret = -EINVAL;
+
+ aset = vcap_actionfieldset(ri->vctrl, vt, actionset);
+ if (!aset)
+ return ret;
+ if (aset->type_id == (u8)-1) /* No type field is needed */
+ return 0;
+
+ fields = vcap_actionfields(ri->vctrl, vt, actionset);
+ if (!fields)
+ return -EINVAL;
+ if (fields[VCAP_AF_TYPE].width > 1) {
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE,
+ aset->type_id);
+ } else {
+ if (aset->type_id)
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_TYPE,
+ VCAP_BIT_1);
+ else
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_TYPE,
+ VCAP_BIT_0);
+ }
+ return ret;
+}
+
/* Add a keyset to a keyset list */
bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
enum vcap_keyfield_set keyset)
@@ -1521,6 +1757,22 @@ bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
}
EXPORT_SYMBOL_GPL(vcap_keyset_list_add);
+/* Add an actionset to an actionset list */
+static bool vcap_actionset_list_add(struct vcap_actionset_list *actionsetlist,
+ enum vcap_actionfield_set actionset)
+{
+ int idx;
+
+ if (actionsetlist->cnt < actionsetlist->max) {
+ /* Avoid duplicates */
+ for (idx = 0; idx < actionsetlist->cnt; ++idx)
+ if (actionsetlist->actionsets[idx] == actionset)
+ return actionsetlist->cnt < actionsetlist->max;
+ actionsetlist->actionsets[actionsetlist->cnt++] = actionset;
+ }
+ return actionsetlist->cnt < actionsetlist->max;
+}
+
/* map keyset id to a string with the keyset name */
const char *vcap_keyset_name(struct vcap_control *vctrl,
enum vcap_keyfield_set keyset)
@@ -1629,6 +1881,75 @@ bool vcap_rule_find_keysets(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_rule_find_keysets);
+/* Return the actionfield that matches an action in an actionset */
+static const struct vcap_field *
+vcap_find_actionset_actionfield(struct vcap_control *vctrl,
+ enum vcap_type vtype,
+ enum vcap_actionfield_set actionset,
+ enum vcap_action_field action)
+{
+ const struct vcap_field *fields;
+ int idx, count;
+
+ fields = vcap_actionfields(vctrl, vtype, actionset);
+ if (!fields)
+ return NULL;
+
+ /* Iterate the actionfields of the actionset */
+ count = vcap_actionfield_count(vctrl, vtype, actionset);
+ for (idx = 0; idx < count; ++idx) {
+ if (fields[idx].width == 0)
+ continue;
+
+ if (action == idx)
+ return &fields[idx];
+ }
+
+ return NULL;
+}
+
+/* Match a list of actions against the actionsets available in a vcap type */
+static bool vcap_rule_find_actionsets(struct vcap_rule_internal *ri,
+ struct vcap_actionset_list *matches)
+{
+ int actionset, found, actioncount, map_size;
+ const struct vcap_client_actionfield *caf;
+ const struct vcap_field **map;
+ enum vcap_type vtype;
+
+ vtype = ri->admin->vtype;
+ map = ri->vctrl->vcaps[vtype].actionfield_set_map;
+ map_size = ri->vctrl->vcaps[vtype].actionfield_set_size;
+
+ /* Get a count of the actionfields we want to match */
+ actioncount = 0;
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list)
+ ++actioncount;
+
+ matches->cnt = 0;
+ /* Iterate the actionsets of the VCAP */
+ for (actionset = 0; actionset < map_size; ++actionset) {
+ if (!map[actionset])
+ continue;
+
+ /* Iterate the actions in the rule */
+ found = 0;
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list)
+ if (vcap_find_actionset_actionfield(ri->vctrl, vtype,
+ actionset,
+ caf->ctrl.action))
+ ++found;
+
+ /* Save the actionset if all actionfields were found */
+ if (found == actioncount)
+ if (!vcap_actionset_list_add(matches, actionset))
+ /* bail out when the quota is filled */
+ break;
+ }
+
+ return matches->cnt > 0;
+}
+
/* Validate a rule with respect to available port keys */
int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
{
@@ -1680,13 +2001,26 @@ int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
return ret;
}
if (ri->data.actionset == VCAP_AFS_NO_VALUE) {
- /* Later also actionsets will be matched against actions in
- * the rule, and the type will be set accordingly
- */
- ri->data.exterr = VCAP_ERR_NO_ACTIONSET_MATCH;
- return -EINVAL;
+ struct vcap_actionset_list matches = {};
+ enum vcap_actionfield_set actionsets[10];
+
+ matches.actionsets = actionsets;
+ matches.max = ARRAY_SIZE(actionsets);
+
+ /* Find an actionset that fits the rule actions */
+ if (!vcap_rule_find_actionsets(ri, &matches)) {
+ ri->data.exterr = VCAP_ERR_NO_ACTIONSET_MATCH;
+ return -EINVAL;
+ }
+ ret = vcap_set_rule_set_actionset(rule, actionsets[0]);
+ if (ret < 0) {
+ pr_err("%s:%d: actionset was not updated: %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
}
vcap_add_type_keyfield(rule);
+ vcap_add_type_actionfield(rule);
/* Add default fields to this rule */
ri->vctrl->ops->add_default_fields(ri->ndev, ri->admin, rule);
@@ -1721,7 +2055,7 @@ static u32 vcap_set_rule_id(struct vcap_rule_internal *ri)
return ri->data.id;
for (u32 next_id = 1; next_id < ~0; ++next_id) {
- if (!vcap_lookup_rule(ri->vctrl, next_id)) {
+ if (!vcap_rule_exists(ri->vctrl, next_id)) {
ri->data.id = next_id;
break;
}
@@ -1756,8 +2090,8 @@ static int vcap_insert_rule(struct vcap_rule_internal *ri,
ri->addr = vcap_next_rule_addr(admin->last_used_addr, ri);
admin->last_used_addr = ri->addr;
- /* Add a shallow copy of the rule to the VCAP list */
- duprule = vcap_dup_rule(ri);
+ /* Add a copy of the rule to the VCAP list */
+ duprule = vcap_dup_rule(ri, ri->state == VCAP_RS_DISABLED);
if (IS_ERR(duprule))
return PTR_ERR(duprule);
@@ -1770,8 +2104,8 @@ static int vcap_insert_rule(struct vcap_rule_internal *ri,
ri->addr = vcap_next_rule_addr(addr, ri);
addr = ri->addr;
- /* Add a shallow copy of the rule to the VCAP list */
- duprule = vcap_dup_rule(ri);
+ /* Add a copy of the rule to the VCAP list */
+ duprule = vcap_dup_rule(ri, ri->state == VCAP_RS_DISABLED);
if (IS_ERR(duprule))
return PTR_ERR(duprule);
@@ -1803,11 +2137,96 @@ static void vcap_move_rules(struct vcap_rule_internal *ri,
move->offset, move->count);
}
+/* Check if the chain is already used to enable a VCAP lookup for this port */
+static bool vcap_is_chain_used(struct vcap_control *vctrl,
+ struct net_device *ndev, int src_cid)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(eport, &admin->enabled, list)
+ if (eport->src_cid == src_cid && eport->ndev == ndev)
+ return true;
+
+ return false;
+}
+
+/* Fetch the next chain in the enabled list for the port */
+static int vcap_get_next_chain(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ int dst_cid)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(eport, &admin->enabled, list) {
+ if (eport->ndev != ndev)
+ continue;
+ if (eport->src_cid == dst_cid)
+ return eport->dst_cid;
+ }
+ }
+
+ return 0;
+}
+
+static bool vcap_path_exist(struct vcap_control *vctrl, struct net_device *ndev,
+ int dst_cid)
+{
+ int cid = rounddown(dst_cid, VCAP_CID_LOOKUP_SIZE);
+ struct vcap_enabled_port *eport = NULL;
+ struct vcap_enabled_port *elem;
+ struct vcap_admin *admin;
+ int tmp;
+
+ if (cid == 0) /* Chain zero is always available */
+ return true;
+
+ /* Find the first entry that starts from chain 0 */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(elem, &admin->enabled, list) {
+ if (elem->src_cid == 0 && elem->ndev == ndev) {
+ eport = elem;
+ break;
+ }
+ }
+ if (eport)
+ break;
+ }
+
+ if (!eport)
+ return false;
+
+ tmp = eport->dst_cid;
+ while (tmp != cid && tmp != 0)
+ tmp = vcap_get_next_chain(vctrl, ndev, tmp);
+
+ return !!tmp;
+}
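A walk-through sketch with hypothetical enabled entries for the port:

	/* { .src_cid = 0,       .dst_cid = 8000000 }
	 * { .src_cid = 8000000, .dst_cid = 8100000 }
	 *
	 * For dst_cid 8100000 the walk follows 0 -> 8000000 -> 8100000 and
	 * succeeds; a chain that is never reached from chain 0 ends the
	 * loop with tmp == 0, so the path does not exist.
	 */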
+
+/* Internal clients can always store their rules in HW
+ * External clients can store their rules if the chain is enabled all
+ * the way from chain 0, otherwise the rule will be cached until
+ * the chain is enabled.
+ */
+static void vcap_rule_set_state(struct vcap_rule_internal *ri)
+{
+ if (ri->data.user <= VCAP_USER_QOS)
+ ri->state = VCAP_RS_PERMANENT;
+ else if (vcap_path_exist(ri->vctrl, ri->ndev, ri->data.vcap_chain_id))
+ ri->state = VCAP_RS_ENABLED;
+ else
+ ri->state = VCAP_RS_DISABLED;
+}
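Together with vcap_dup_rule() above and vcap_enable_rules() further below, this gives each rule a small life cycle:

	/* VCAP_RS_PERMANENT: internal users (user <= VCAP_USER_QOS),
	 *   always written to HW
	 * VCAP_RS_ENABLED: written to HW, the SW copy keeps no key/action
	 *   fields
	 * VCAP_RS_DISABLED: fully cached in SW (keys and actions are
	 *   duplicated), written to HW by vcap_enable_rules() once a chain
	 *   path from chain 0 exists
	 */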
+
/* Encode and write a validated rule to the VCAP */
int vcap_add_rule(struct vcap_rule *rule)
{
struct vcap_rule_internal *ri = to_intrule(rule);
struct vcap_rule_move move = {0};
+ struct vcap_counter ctr = {0};
int ret;
ret = vcap_api_check(ri->vctrl);
@@ -1815,6 +2234,8 @@ int vcap_add_rule(struct vcap_rule *rule)
return ret;
/* Insert the new rule in the list of vcap rules */
mutex_lock(&ri->admin->lock);
+
+ vcap_rule_set_state(ri);
ret = vcap_insert_rule(ri, &move);
if (ret < 0) {
pr_err("%s:%d: could not insert rule in vcap list: %d\n",
@@ -1823,6 +2244,19 @@ int vcap_add_rule(struct vcap_rule *rule)
}
if (move.count > 0)
vcap_move_rules(ri, &move);
+
+ /* Set the counter to zero */
+ ret = vcap_write_counter(ri, &ctr);
+ if (ret)
+ goto out;
+
+ if (ri->state == VCAP_RS_DISABLED) {
+ /* Erase the rule area */
+ ri->vctrl->ops->init(ri->ndev, ri->admin, ri->addr, ri->size);
+ goto out;
+ }
+
+ vcap_erase_cache(ri);
ret = vcap_encode_rule(ri);
if (ret) {
pr_err("%s:%d: rule encoding error: %d\n", __func__, __LINE__, ret);
@@ -1830,8 +2264,10 @@ int vcap_add_rule(struct vcap_rule *rule)
}
ret = vcap_write_rule(ri);
- if (ret)
+ if (ret) {
pr_err("%s:%d: rule write error: %d\n", __func__, __LINE__, ret);
+ goto out;
+ }
out:
mutex_unlock(&ri->admin->lock);
return ret;
@@ -1860,17 +2296,28 @@ struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
/* Sanity check that this VCAP is supported on this platform */
if (vctrl->vcaps[admin->vtype].rows == 0)
return ERR_PTR(-EINVAL);
+
+ mutex_lock(&admin->lock);
/* Check if a rule with this id already exists */
- if (vcap_lookup_rule(vctrl, id))
- return ERR_PTR(-EEXIST);
+ if (vcap_rule_exists(vctrl, id)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
/* Check if there is room for the rule in the block(s) of the VCAP */
maxsize = vctrl->vcaps[admin->vtype].sw_count; /* worst case rule size */
- if (vcap_rule_space(admin, maxsize))
- return ERR_PTR(-ENOSPC);
+ if (vcap_rule_space(admin, maxsize)) {
+ err = -ENOSPC;
+ goto out_unlock;
+ }
+
/* Create a container for the rule and return it */
ri = kzalloc(sizeof(*ri), GFP_KERNEL);
- if (!ri)
- return ERR_PTR(-ENOMEM);
+ if (!ri) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
ri->data.vcap_chain_id = vcap_chain_id;
ri->data.user = user;
ri->data.priority = priority;
@@ -1883,14 +2330,21 @@ struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
ri->ndev = ndev;
ri->admin = admin; /* refer to the vcap instance */
ri->vctrl = vctrl; /* refer to the client */
- if (vcap_set_rule_id(ri) == 0)
+
+ if (vcap_set_rule_id(ri) == 0) {
+ err = -EINVAL;
goto out_free;
- vcap_erase_cache(ri);
+ }
+
+ mutex_unlock(&admin->lock);
return (struct vcap_rule *)ri;
out_free:
kfree(ri);
- return ERR_PTR(-EINVAL);
+out_unlock:
+ mutex_unlock(&admin->lock);
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(vcap_alloc_rule);
@@ -1915,43 +2369,52 @@ void vcap_free_rule(struct vcap_rule *rule)
}
EXPORT_SYMBOL_GPL(vcap_free_rule);
-struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id)
+/* Decode a rule from the VCAP cache and return a copy */
+struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem)
{
- struct vcap_rule_internal *elem;
struct vcap_rule_internal *ri;
int err;
- ri = NULL;
+ ri = vcap_dup_rule(elem, elem->state == VCAP_RS_DISABLED);
+ if (IS_ERR(ri))
+ return ERR_CAST(ri);
+
+ if (ri->state == VCAP_RS_DISABLED)
+ goto out;
+
+ err = vcap_read_rule(ri);
+ if (err)
+ return ERR_PTR(err);
+
+ err = vcap_decode_keyset(ri);
+ if (err)
+ return ERR_PTR(err);
+
+ err = vcap_decode_actionset(ri);
+ if (err)
+ return ERR_PTR(err);
+
+out:
+ return &ri->data;
+}
+
+struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id)
+{
+ struct vcap_rule_internal *elem;
+ struct vcap_rule *rule;
+ int err;
err = vcap_api_check(vctrl);
if (err)
return ERR_PTR(err);
- elem = vcap_lookup_rule(vctrl, id);
+
+ elem = vcap_get_locked_rule(vctrl, id);
if (!elem)
return NULL;
- mutex_lock(&elem->admin->lock);
- ri = vcap_dup_rule(elem);
- if (IS_ERR(ri))
- goto unlock;
- err = vcap_read_rule(ri);
- if (err) {
- ri = ERR_PTR(err);
- goto unlock;
- }
- err = vcap_decode_keyset(ri);
- if (err) {
- ri = ERR_PTR(err);
- goto unlock;
- }
- err = vcap_decode_actionset(ri);
- if (err) {
- ri = ERR_PTR(err);
- goto unlock;
- }
-unlock:
+ rule = vcap_decode_rule(elem);
mutex_unlock(&elem->admin->lock);
- return (struct vcap_rule *)ri;
+ return rule;
}
EXPORT_SYMBOL_GPL(vcap_get_rule);
@@ -1966,10 +2429,13 @@ int vcap_mod_rule(struct vcap_rule *rule)
if (err)
return err;
- if (!vcap_lookup_rule(ri->vctrl, ri->data.id))
+ if (!vcap_get_locked_rule(ri->vctrl, ri->data.id))
return -ENOENT;
- mutex_lock(&ri->admin->lock);
+ vcap_rule_set_state(ri);
+ if (ri->state == VCAP_RS_DISABLED)
+ goto out;
+
/* Encode the bitstreams to the VCAP cache */
vcap_erase_cache(ri);
err = vcap_encode_rule(ri);
@@ -1982,8 +2448,6 @@ int vcap_mod_rule(struct vcap_rule *rule)
memset(&ctr, 0, sizeof(ctr));
err = vcap_write_counter(ri, &ctr);
- if (err)
- goto out;
out:
mutex_unlock(&ri->admin->lock);
@@ -2050,20 +2514,19 @@ int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id)
if (err)
return err;
/* Look for the rule id in all vcaps */
- ri = vcap_lookup_rule(vctrl, id);
+ ri = vcap_get_locked_rule(vctrl, id);
if (!ri)
- return -EINVAL;
+ return -ENOENT;
+
admin = ri->admin;
if (ri->addr > admin->last_used_addr)
gap = vcap_fill_rule_gap(ri);
/* Delete the rule from the list of rules and the cache */
- mutex_lock(&admin->lock);
list_del(&ri->list);
vctrl->ops->init(ndev, admin, admin->last_used_addr, ri->size + gap);
- kfree(ri);
- mutex_unlock(&admin->lock);
+ vcap_free_rule(&ri->data);
/* Update the last used address, set to default when no rules */
if (list_empty(&admin->rules)) {
@@ -2073,7 +2536,9 @@ int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id)
list);
admin->last_used_addr = elem->addr;
}
- return 0;
+
+ mutex_unlock(&admin->lock);
+ return err;
}
EXPORT_SYMBOL_GPL(vcap_del_rule);
@@ -2091,7 +2556,7 @@ int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin)
list_for_each_entry_safe(ri, next_ri, &admin->rules, list) {
vctrl->ops->init(ri->ndev, admin, ri->addr, ri->size);
list_del(&ri->list);
- kfree(ri);
+ vcap_free_rule(&ri->data);
}
admin->last_used_addr = admin->last_valid_addr;
@@ -2137,69 +2602,6 @@ const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_lookup_keyfield);
-/* Copy data from src to dst but reverse the data in chunks of 32bits.
- * For example if src is 00:11:22:33:44:55 where 55 is LSB the dst will
- * have the value 22:33:44:55:00:11.
- */
-static void vcap_copy_to_w32be(u8 *dst, u8 *src, int size)
-{
- for (int idx = 0; idx < size; ++idx) {
- int first_byte_index = 0;
- int nidx;
-
- first_byte_index = size - (((idx >> 2) + 1) << 2);
- if (first_byte_index < 0)
- first_byte_index = 0;
- nidx = idx + first_byte_index - (idx & ~0x3);
- dst[nidx] = src[idx];
- }
-}
-
-static void vcap_copy_from_client_keyfield(struct vcap_rule *rule,
- struct vcap_client_keyfield *field,
- struct vcap_client_keyfield_data *data)
-{
- struct vcap_rule_internal *ri = to_intrule(rule);
- int size;
-
- if (!ri->admin->w32be) {
- memcpy(&field->data, data, sizeof(field->data));
- return;
- }
-
- size = keyfield_size_table[field->ctrl.type] / 2;
- switch (field->ctrl.type) {
- case VCAP_FIELD_BIT:
- case VCAP_FIELD_U32:
- memcpy(&field->data, data, sizeof(field->data));
- break;
- case VCAP_FIELD_U48:
- vcap_copy_to_w32be(field->data.u48.value, data->u48.value, size);
- vcap_copy_to_w32be(field->data.u48.mask, data->u48.mask, size);
- break;
- case VCAP_FIELD_U56:
- vcap_copy_to_w32be(field->data.u56.value, data->u56.value, size);
- vcap_copy_to_w32be(field->data.u56.mask, data->u56.mask, size);
- break;
- case VCAP_FIELD_U64:
- vcap_copy_to_w32be(field->data.u64.value, data->u64.value, size);
- vcap_copy_to_w32be(field->data.u64.mask, data->u64.mask, size);
- break;
- case VCAP_FIELD_U72:
- vcap_copy_to_w32be(field->data.u72.value, data->u72.value, size);
- vcap_copy_to_w32be(field->data.u72.mask, data->u72.mask, size);
- break;
- case VCAP_FIELD_U112:
- vcap_copy_to_w32be(field->data.u112.value, data->u112.value, size);
- vcap_copy_to_w32be(field->data.u112.mask, data->u112.mask, size);
- break;
- case VCAP_FIELD_U128:
- vcap_copy_to_w32be(field->data.u128.value, data->u128.value, size);
- vcap_copy_to_w32be(field->data.u128.mask, data->u128.mask, size);
- break;
- }
-}
-
/* Check if the keyfield is already in the rule */
static bool vcap_keyfield_unique(struct vcap_rule *rule,
enum vcap_key_field key)
@@ -2257,9 +2659,9 @@ static int vcap_rule_add_key(struct vcap_rule *rule,
field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field)
return -ENOMEM;
+ memcpy(&field->data, data, sizeof(field->data));
field->ctrl.key = key;
field->ctrl.type = ftype;
- vcap_copy_from_client_keyfield(rule, field, data);
list_add_tail(&field->ctrl.list, &rule->keyfields);
return 0;
}
@@ -2355,7 +2757,7 @@ int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
EXPORT_SYMBOL_GPL(vcap_rule_get_key_u32);
/* Find a client action field in a rule */
-static struct vcap_client_actionfield *
+struct vcap_client_actionfield *
vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
{
struct vcap_rule_internal *ri = (struct vcap_rule_internal *)rule;
@@ -2366,45 +2768,7 @@ vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
return caf;
return NULL;
}
-
-static void vcap_copy_from_client_actionfield(struct vcap_rule *rule,
- struct vcap_client_actionfield *field,
- struct vcap_client_actionfield_data *data)
-{
- struct vcap_rule_internal *ri = to_intrule(rule);
- int size;
-
- if (!ri->admin->w32be) {
- memcpy(&field->data, data, sizeof(field->data));
- return;
- }
-
- size = actionfield_size_table[field->ctrl.type];
- switch (field->ctrl.type) {
- case VCAP_FIELD_BIT:
- case VCAP_FIELD_U32:
- memcpy(&field->data, data, sizeof(field->data));
- break;
- case VCAP_FIELD_U48:
- vcap_copy_to_w32be(field->data.u48.value, data->u48.value, size);
- break;
- case VCAP_FIELD_U56:
- vcap_copy_to_w32be(field->data.u56.value, data->u56.value, size);
- break;
- case VCAP_FIELD_U64:
- vcap_copy_to_w32be(field->data.u64.value, data->u64.value, size);
- break;
- case VCAP_FIELD_U72:
- vcap_copy_to_w32be(field->data.u72.value, data->u72.value, size);
- break;
- case VCAP_FIELD_U112:
- vcap_copy_to_w32be(field->data.u112.value, data->u112.value, size);
- break;
- case VCAP_FIELD_U128:
- vcap_copy_to_w32be(field->data.u128.value, data->u128.value, size);
- break;
- }
-}
+EXPORT_SYMBOL_GPL(vcap_find_actionfield);
/* Check if the actionfield is already in the rule */
static bool vcap_actionfield_unique(struct vcap_rule *rule,
@@ -2463,9 +2827,9 @@ static int vcap_rule_add_action(struct vcap_rule *rule,
field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field)
return -ENOMEM;
+ memcpy(&field->data, data, sizeof(field->data));
field->ctrl.action = action;
field->ctrl.type = ftype;
- vcap_copy_from_client_actionfield(rule, field, data);
list_add_tail(&field->ctrl.list, &rule->actionfields);
return 0;
}
@@ -2564,24 +2928,157 @@ void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule)
}
EXPORT_SYMBOL_GPL(vcap_set_tc_exterr);
+/* Write a rule to VCAP HW to enable it */
+static int vcap_enable_rule(struct vcap_rule_internal *ri)
+{
+ struct vcap_client_actionfield *af, *naf;
+ struct vcap_client_keyfield *kf, *nkf;
+ int err;
+
+ vcap_erase_cache(ri);
+ err = vcap_encode_rule(ri);
+ if (err)
+ goto out;
+ err = vcap_write_rule(ri);
+ if (err)
+ goto out;
+
+ /* Deallocate the list of keys and actions */
+ list_for_each_entry_safe(kf, nkf, &ri->data.keyfields, ctrl.list) {
+ list_del(&kf->ctrl.list);
+ kfree(kf);
+ }
+ list_for_each_entry_safe(af, naf, &ri->data.actionfields, ctrl.list) {
+ list_del(&af->ctrl.list);
+ kfree(af);
+ }
+ ri->state = VCAP_RS_ENABLED;
+out:
+ return err;
+}
+
+/* Enable all disabled rules for a specific chain/port in the VCAP HW */
+static int vcap_enable_rules(struct vcap_control *vctrl,
+ struct net_device *ndev, int chain)
+{
+ int next_chain = chain + VCAP_CID_LOOKUP_SIZE;
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int err = 0;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ if (!(chain >= admin->first_cid && chain <= admin->last_cid))
+ continue;
+
+ /* Found the admin, now find the offloadable rules */
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ /* Is the rule in the lookup defined by the chain */
+ if (!(ri->data.vcap_chain_id >= chain &&
+ ri->data.vcap_chain_id < next_chain)) {
+ continue;
+ }
+
+ if (ri->ndev != ndev)
+ continue;
+
+ if (ri->state != VCAP_RS_DISABLED)
+ continue;
+
+ err = vcap_enable_rule(ri);
+ if (err)
+ break;
+ }
+ mutex_unlock(&admin->lock);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/* Read and erase a rule from VCAP HW to disable it */
+static int vcap_disable_rule(struct vcap_rule_internal *ri)
+{
+ int err;
+
+ err = vcap_read_rule(ri);
+ if (err)
+ return err;
+ err = vcap_decode_keyset(ri);
+ if (err)
+ return err;
+ err = vcap_decode_actionset(ri);
+ if (err)
+ return err;
+
+ ri->state = VCAP_RS_DISABLED;
+ ri->vctrl->ops->init(ri->ndev, ri->admin, ri->addr, ri->size);
+ return 0;
+}
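Disabling mirrors vcap_enable_rule(): the rule is read back and decoded so the SW copy regains its keys and actions, after which the HW address range is erased through the init() callback.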
+
+/* Disable all enabled rules for a specific chain/port in the VCAP HW */
+static int vcap_disable_rules(struct vcap_control *vctrl,
+ struct net_device *ndev, int chain)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int err = 0;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ if (!(chain >= admin->first_cid && chain <= admin->last_cid))
+ continue;
+
+ /* Found the admin, now find the rules on the chain */
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.vcap_chain_id != chain)
+ continue;
+
+ if (ri->ndev != ndev)
+ continue;
+
+ if (ri->state != VCAP_RS_ENABLED)
+ continue;
+
+ err = vcap_disable_rule(ri);
+ if (err)
+ break;
+ }
+ mutex_unlock(&admin->lock);
+ if (err)
+ break;
+ }
+ return err;
+}
+
-/* Check if this port is already enabled for this VCAP instance */
+/* Check if this port and chain id are already enabled */
-static bool vcap_is_enabled(struct vcap_admin *admin, struct net_device *ndev,
- unsigned long cookie)
+static bool vcap_is_enabled(struct vcap_control *vctrl, struct net_device *ndev,
+ int dst_cid)
{
struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
- list_for_each_entry(eport, &admin->enabled, list)
- if (eport->cookie == cookie || eport->ndev == ndev)
- return true;
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(eport, &admin->enabled, list)
+ if (eport->dst_cid == dst_cid && eport->ndev == ndev)
+ return true;
return false;
}
-/* Enable this port for this VCAP instance */
-static int vcap_enable(struct vcap_admin *admin, struct net_device *ndev,
- unsigned long cookie)
+/* Enable this port and chain id in a VCAP instance */
+static int vcap_enable(struct vcap_control *vctrl, struct net_device *ndev,
+ unsigned long cookie, int src_cid, int dst_cid)
{
struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ if (src_cid >= dst_cid)
+ return -EFAULT;
+
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
eport = kzalloc(sizeof(*eport), GFP_KERNEL);
if (!eport)
@@ -2589,48 +3086,72 @@ static int vcap_enable(struct vcap_admin *admin, struct net_device *ndev,
eport->ndev = ndev;
eport->cookie = cookie;
+ eport->src_cid = src_cid;
+ eport->dst_cid = dst_cid;
+ mutex_lock(&admin->lock);
list_add_tail(&eport->list, &admin->enabled);
+ mutex_unlock(&admin->lock);
+ if (vcap_path_exist(vctrl, ndev, src_cid)) {
+ /* Enable chained lookups */
+ while (dst_cid) {
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
+
+ vcap_enable_rules(vctrl, ndev, dst_cid);
+ dst_cid = vcap_get_next_chain(vctrl, ndev, dst_cid);
+ }
+ }
return 0;
}
-/* Disable this port for this VCAP instance */
-static int vcap_disable(struct vcap_admin *admin, struct net_device *ndev,
+/* Disable this port and chain id for a VCAP instance */
+static int vcap_disable(struct vcap_control *vctrl, struct net_device *ndev,
unsigned long cookie)
{
- struct vcap_enabled_port *eport;
+ struct vcap_enabled_port *elem, *eport = NULL;
+ struct vcap_admin *found = NULL, *admin;
+ int dst_cid;
- list_for_each_entry(eport, &admin->enabled, list) {
- if (eport->cookie == cookie && eport->ndev == ndev) {
- list_del(&eport->list);
- kfree(eport);
- return 0;
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(elem, &admin->enabled, list) {
+ if (elem->cookie == cookie && elem->ndev == ndev) {
+ eport = elem;
+ found = admin;
+ break;
+ }
}
+ if (eport)
+ break;
}
- return -ENOENT;
-}
+ if (!eport)
+ return -ENOENT;
-/* Find the VCAP instance that enabled the port using a specific filter */
-static struct vcap_admin *vcap_find_admin_by_cookie(struct vcap_control *vctrl,
- unsigned long cookie)
-{
- struct vcap_enabled_port *eport;
- struct vcap_admin *admin;
+ /* Disable chained lookups */
+ dst_cid = eport->dst_cid;
+ while (dst_cid) {
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
- list_for_each_entry(admin, &vctrl->list, list)
- list_for_each_entry(eport, &admin->enabled, list)
- if (eport->cookie == cookie)
- return admin;
+ vcap_disable_rules(vctrl, ndev, dst_cid);
+ dst_cid = vcap_get_next_chain(vctrl, ndev, dst_cid);
+ }
- return NULL;
+ mutex_lock(&found->lock);
+ list_del(&eport->list);
+ mutex_unlock(&found->lock);
+ kfree(eport);
+ return 0;
}
-/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */
+/* Enable/Disable the VCAP instance lookups */
int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
- int chain_id, unsigned long cookie, bool enable)
+ int src_cid, int dst_cid, unsigned long cookie,
+ bool enable)
{
- struct vcap_admin *admin;
int err;
err = vcap_api_check(vctrl);
@@ -2640,36 +3161,48 @@ int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
if (!ndev)
return -ENODEV;
- if (chain_id)
- admin = vcap_find_admin(vctrl, chain_id);
- else
- admin = vcap_find_admin_by_cookie(vctrl, cookie);
- if (!admin)
- return -ENOENT;
-
- /* first instance and first chain */
- if (admin->vinst || chain_id > admin->first_cid)
+ /* Source and destination chain ids must be aligned to a lookup */
+ if (src_cid % VCAP_CID_LOOKUP_SIZE)
+ return -EFAULT;
+ if (dst_cid % VCAP_CID_LOOKUP_SIZE)
return -EFAULT;
- err = vctrl->ops->enable(ndev, admin, enable);
- if (err)
- return err;
-
- if (chain_id) {
- if (vcap_is_enabled(admin, ndev, cookie))
+ if (enable) {
+ if (vcap_is_enabled(vctrl, ndev, dst_cid))
return -EADDRINUSE;
- mutex_lock(&admin->lock);
- vcap_enable(admin, ndev, cookie);
+ if (vcap_is_chain_used(vctrl, ndev, src_cid))
+ return -EADDRNOTAVAIL;
+ err = vcap_enable(vctrl, ndev, cookie, src_cid, dst_cid);
} else {
- mutex_lock(&admin->lock);
- vcap_disable(admin, ndev, cookie);
+ err = vcap_disable(vctrl, ndev, cookie);
}
- mutex_unlock(&admin->lock);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(vcap_enable_lookups);
+/* Is this chain id the last lookup of all VCAPs */
+bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress)
+{
+ struct vcap_admin *admin;
+ int lookup;
+
+ if (vcap_api_check(vctrl))
+ return false;
+
+ admin = vcap_find_admin(vctrl, cid);
+ if (!admin)
+ return false;
+
+ if (!vcap_admin_is_last(vctrl, admin, ingress))
+ return false;
+
+ /* This must be the last lookup in this VCAP instance */
+ lookup = vcap_chain_id_to_lookup(admin, cid);
+ return lookup == admin->lookups - 1;
+}
+EXPORT_SYMBOL_GPL(vcap_is_last_chain);
+
/* Set a rule counter id (for certain vcaps only) */
void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id)
{
@@ -2679,31 +3212,6 @@ void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id)
}
EXPORT_SYMBOL_GPL(vcap_rule_set_counter_id);
-/* Provide all rules via a callback interface */
-int vcap_rule_iter(struct vcap_control *vctrl,
- int (*callback)(void *, struct vcap_rule *), void *arg)
-{
- struct vcap_rule_internal *ri;
- struct vcap_admin *admin;
- int ret;
-
- ret = vcap_api_check(vctrl);
- if (ret)
- return ret;
-
- /* Iterate all rules in each VCAP instance */
- list_for_each_entry(admin, &vctrl->list, list) {
- list_for_each_entry(ri, &admin->rules, list) {
- ret = callback(arg, &ri->data);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vcap_rule_iter);
-
int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
{
struct vcap_rule_internal *ri = to_intrule(rule);
@@ -2716,7 +3224,12 @@ int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
pr_err("%s:%d: counter is missing\n", __func__, __LINE__);
return -EINVAL;
}
- return vcap_write_counter(ri, ctr);
+
+ mutex_lock(&ri->admin->lock);
+ err = vcap_write_counter(ri, ctr);
+ mutex_unlock(&ri->admin->lock);
+
+ return err;
}
EXPORT_SYMBOL_GPL(vcap_rule_set_counter);
@@ -2732,10 +3245,138 @@ int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
pr_err("%s:%d: counter is missing\n", __func__, __LINE__);
return -EINVAL;
}
- return vcap_read_counter(ri, ctr);
+
+ mutex_lock(&ri->admin->lock);
+ err = vcap_read_counter(ri, ctr);
+ mutex_unlock(&ri->admin->lock);
+
+ return err;
}
EXPORT_SYMBOL_GPL(vcap_rule_get_counter);
+/* Get a copy of a client key field */
+static int vcap_rule_get_key(struct vcap_rule *rule,
+ enum vcap_key_field key,
+ struct vcap_client_keyfield *ckf)
+{
+ struct vcap_client_keyfield *field;
+
+ field = vcap_find_keyfield(rule, key);
+ if (!field)
+ return -EINVAL;
+ memcpy(ckf, field, sizeof(*ckf));
+ INIT_LIST_HEAD(&ckf->ctrl.list);
+ return 0;
+}
+
+/* Find a keyset having the same size as the provided rule, where the keyset
+ * does not have a type id.
+ */
+static int vcap_rule_get_untyped_keyset(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_set *keyfield_set;
+ int idx;
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item == ri->keyset_sw &&
+ keyfield_set[idx].type_id == (u8)-1) {
+ vcap_keyset_list_add(matches, idx);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+/* Get the keysets that match the rule key type/mask */
+int vcap_rule_get_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_set *keyfield_set;
+ struct vcap_client_keyfield kf = {};
+ u32 value, mask;
+ int err, idx;
+
+ err = vcap_rule_get_key(&ri->data, VCAP_KF_TYPE, &kf);
+ if (err)
+ return vcap_rule_get_untyped_keyset(ri, matches);
+
+ if (kf.ctrl.type == VCAP_FIELD_BIT) {
+ value = kf.data.u1.value;
+ mask = kf.data.u1.mask;
+ } else if (kf.ctrl.type == VCAP_FIELD_U32) {
+ value = kf.data.u32.value;
+ mask = kf.data.u32.mask;
+ } else {
+ return -EINVAL;
+ }
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item != ri->keyset_sw)
+ continue;
+
+ if (keyfield_set[idx].type_id == (u8)-1) {
+ vcap_keyset_list_add(matches, idx);
+ continue;
+ }
+
+ if ((keyfield_set[idx].type_id & mask) == value)
+ vcap_keyset_list_add(matches, idx);
+ }
+ if (matches->cnt > 0)
+ return 0;
+
+ return -EINVAL;
+}
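A small matching sketch (values hypothetical): a rule whose VCAP_KF_TYPE key carries value 0 and mask 0xf selects every keyset of the same subword size for which (type_id & 0xf) == 0, while untyped keysets (type_id == (u8)-1) always match.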
+
+/* Collect packet counts from all rules with the same cookie */
+int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl,
+ struct vcap_counter *ctr, u64 cookie)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_counter temp = {};
+ struct vcap_admin *admin;
+ int err;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return err;
+
+ /* Iterate all rules in each VCAP instance */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.cookie != cookie)
+ continue;
+
+ err = vcap_read_counter(ri, &temp);
+ if (err)
+ goto unlock;
+ ctr->value += temp.value;
+
+ /* Reset the rule counter */
+ temp.value = 0;
+ temp.sticky = 0;
+ err = vcap_write_counter(ri, &temp);
+ if (err)
+ goto unlock;
+ }
+ mutex_unlock(&admin->lock);
+ }
+ return err;
+
+unlock:
+ mutex_unlock(&admin->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_get_rule_count_by_cookie);
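A minimal caller sketch, assuming a tc statistics path (the function name is hypothetical):

	static void example_report_stats(struct vcap_control *vctrl, u64 cookie)
	{
		struct vcap_counter ctr = {};

		/* Sums and clears the HW counter of every rule sharing the
		 * cookie, e.g. all rules created from one tc filter
		 */
		if (vcap_get_rule_count_by_cookie(vctrl, &ctr, cookie) == 0)
			pr_info("packets: %llu\n",
				(unsigned long long)ctr.value);
	}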
+
static int vcap_rule_mod_key(struct vcap_rule *rule,
enum vcap_key_field key,
enum vcap_field_type ftype,
@@ -2746,7 +3387,7 @@ static int vcap_rule_mod_key(struct vcap_rule *rule,
field = vcap_find_keyfield(rule, key);
if (!field)
return vcap_rule_add_key(rule, key, ftype, data);
- vcap_copy_from_client_keyfield(rule, field, data);
+ memcpy(&field->data, data, sizeof(field->data));
return 0;
}
@@ -2772,7 +3413,7 @@ static int vcap_rule_mod_action(struct vcap_rule *rule,
field = vcap_find_actionfield(rule, action);
if (!field)
return vcap_rule_add_action(rule, action, ftype, data);
- vcap_copy_from_client_actionfield(rule, field, data);
+ memcpy(&field->data, data, sizeof(field->data));
return 0;
}
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
index 689c7270f2a8..62db270f65af 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -176,6 +176,7 @@ struct vcap_admin {
int first_valid_addr; /* bottom of address range to be used */
int last_used_addr; /* address of lowest added rule */
bool w32be; /* vcap uses "32bit-word big-endian" encoding */
+ bool ingress; /* chain traffic direction */
struct vcap_cache_data cache; /* encoded rule data */
};
@@ -201,6 +202,13 @@ struct vcap_keyset_list {
enum vcap_keyfield_set *keysets; /* the list of keysets */
};
+/* List of actionsets */
+struct vcap_actionset_list {
+ int max; /* size of the actionset list */
+ int cnt; /* count of actionsets actually in the list */
+ enum vcap_actionfield_set *actionsets; /* the list of actionsets */
+};
+
/* Client output printf-like function with destination */
struct vcap_output_print {
__printf(2, 3)
@@ -259,11 +267,6 @@ struct vcap_operations {
(struct net_device *ndev,
struct vcap_admin *admin,
struct vcap_output_print *out);
- /* enable/disable the lookups in a vcap instance */
- int (*enable)
- (struct net_device *ndev,
- struct vcap_admin *admin,
- bool enable);
};
/* VCAP API Client control interface */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index 0319866f9c94..417af9754bcc 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -148,9 +148,10 @@ struct vcap_counter {
bool sticky;
};
-/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */
+/* Enable/Disable the VCAP instance lookups */
int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
- int chain_id, unsigned long cookie, bool enable);
+ int from_cid, int to_cid, unsigned long cookie,
+ bool enable);
/* VCAP rule operations */
/* Allocate a rule and fill in the basic information */
@@ -201,6 +202,8 @@ int vcap_rule_add_action_u32(struct vcap_rule *rule,
enum vcap_action_field action, u32 value);
/* VCAP rule counter operations */
+int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl,
+ struct vcap_counter *ctr, u64 cookie);
int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr);
int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr);
@@ -214,8 +217,12 @@ const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
enum vcap_key_field key);
/* Find a rule id with a provided cookie */
int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie);
+/* Calculate the value used for chaining VCAP rules */
+int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid);
/* Is the next chain id in the following lookup, possible in another VCAP */
bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid);
+/* Is this chain id the last lookup of all VCAPs */
+bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress);
-/* Provide all rules via a callback interface */
-int vcap_rule_iter(struct vcap_control *vctrl,
- int (*callback)(void *, struct vcap_rule *), void *arg);
@@ -262,4 +269,6 @@ int vcap_rule_mod_action_u32(struct vcap_rule *rule,
int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
u32 *value, u32 *mask);
+struct vcap_client_actionfield *
+vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act);
#endif /* __VCAP_API_CLIENT__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
index e0b206247f2e..c2c3397c5898 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
@@ -44,11 +44,14 @@ static void vcap_debugfs_show_rule_keyfield(struct vcap_control *vctrl,
out->prf(out->dst, "%pI4h/%pI4h", &data->u32.value,
&data->u32.mask);
} else if (key == VCAP_KF_ETYPE ||
- key == VCAP_KF_IF_IGR_PORT_MASK) {
+ key == VCAP_KF_IF_IGR_PORT_MASK ||
+ key == VCAP_KF_IF_EGR_PORT_MASK) {
hex = true;
} else {
-u32 fmsk = (1 << keyfield[key].width) - 1;
+u32 fmsk = ~0;
+
+if (keyfield[key].width < 32)
+ fmsk = (1 << keyfield[key].width) - 1;
out->prf(out->dst, "%u/%u", data->u32.value & fmsk,
data->u32.mask & fmsk);
}
@@ -152,37 +155,48 @@ vcap_debugfs_show_rule_actionfield(struct vcap_control *vctrl,
out->prf(out->dst, "\n");
}
-static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri,
- struct vcap_output_print *out)
+static int vcap_debugfs_show_keysets(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
{
- struct vcap_control *vctrl = ri->vctrl;
struct vcap_admin *admin = ri->admin;
enum vcap_keyfield_set keysets[10];
- const struct vcap_field *keyfield;
- enum vcap_type vt = admin->vtype;
- struct vcap_client_keyfield *ckf;
struct vcap_keyset_list matches;
- u32 *maskstream;
- u32 *keystream;
- int res;
+ int err;
- keystream = admin->cache.keystream;
- maskstream = admin->cache.maskstream;
matches.keysets = keysets;
matches.cnt = 0;
matches.max = ARRAY_SIZE(keysets);
- res = vcap_find_keystream_keysets(vctrl, vt, keystream, maskstream,
- false, 0, &matches);
- if (res < 0) {
+
+ if (ri->state == VCAP_RS_DISABLED)
+ err = vcap_rule_get_keysets(ri, &matches);
+ else
+ err = vcap_find_keystream_keysets(ri->vctrl, admin->vtype,
+ admin->cache.keystream,
+ admin->cache.maskstream,
+ false, 0, &matches);
+ if (err) {
pr_err("%s:%d: could not find valid keysets: %d\n",
- __func__, __LINE__, res);
- return -EINVAL;
+ __func__, __LINE__, err);
+ return err;
}
+
out->prf(out->dst, " keysets:");
for (int idx = 0; idx < matches.cnt; ++idx)
out->prf(out->dst, " %s",
- vcap_keyset_name(vctrl, matches.keysets[idx]));
+ vcap_keyset_name(ri->vctrl, matches.keysets[idx]));
out->prf(out->dst, "\n");
+ return 0;
+}
+
+static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_admin *admin = ri->admin;
+ const struct vcap_field *keyfield;
+ struct vcap_client_keyfield *ckf;
+
+ vcap_debugfs_show_keysets(ri, out);
out->prf(out->dst, " keyset_sw: %d\n", ri->keyset_sw);
out->prf(out->dst, " keyset_sw_regs: %d\n", ri->keyset_sw_regs);
@@ -233,6 +247,18 @@ static void vcap_show_admin_rule(struct vcap_control *vctrl,
out->prf(out->dst, " chain_id: %d\n", ri->data.vcap_chain_id);
out->prf(out->dst, " user: %d\n", ri->data.user);
out->prf(out->dst, " priority: %d\n", ri->data.priority);
+ out->prf(out->dst, " state: ");
+ switch (ri->state) {
+ case VCAP_RS_PERMANENT:
+ out->prf(out->dst, "permanent\n");
+ break;
+ case VCAP_RS_DISABLED:
+ out->prf(out->dst, "disabled\n");
+ break;
+ case VCAP_RS_ENABLED:
+ out->prf(out->dst, "enabled\n");
+ break;
+ }
vcap_debugfs_show_rule_keyset(ri, out);
vcap_debugfs_show_rule_actionset(ri, out);
}
@@ -254,6 +280,7 @@ static void vcap_show_admin_info(struct vcap_control *vctrl,
out->prf(out->dst, "version: %d\n", vcap->version);
out->prf(out->dst, "vtype: %d\n", admin->vtype);
out->prf(out->dst, "vinst: %d\n", admin->vinst);
+ out->prf(out->dst, "ingress: %d\n", admin->ingress);
out->prf(out->dst, "first_cid: %d\n", admin->first_cid);
out->prf(out->dst, "last_cid: %d\n", admin->last_cid);
out->prf(out->dst, "lookups: %d\n", admin->lookups);
@@ -272,7 +299,7 @@ static int vcap_show_admin(struct vcap_control *vctrl,
vcap_show_admin_info(vctrl, admin, out);
list_for_each_entry(elem, &admin->rules, list) {
- vrule = vcap_get_rule(vctrl, elem->data.id);
+ vrule = vcap_decode_rule(elem);
if (IS_ERR_OR_NULL(vrule)) {
ret = PTR_ERR(vrule);
break;
@@ -381,8 +408,12 @@ static int vcap_debugfs_show(struct seq_file *m, void *unused)
.prf = (void *)seq_printf,
.dst = m,
};
+ int ret;
- return vcap_show_admin(info->vctrl, info->admin, &out);
+ mutex_lock(&info->admin->lock);
+ ret = vcap_show_admin(info->vctrl, info->admin, &out);
+ mutex_unlock(&info->admin->lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(vcap_debugfs);
@@ -394,8 +425,12 @@ static int vcap_raw_debugfs_show(struct seq_file *m, void *unused)
.prf = (void *)seq_printf,
.dst = m,
};
+ int ret;
- return vcap_show_admin_raw(info->vctrl, info->admin, &out);
+ mutex_lock(&info->admin->lock);
+ ret = vcap_show_admin_raw(info->vctrl, info->admin, &out);
+ mutex_unlock(&info->admin->lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(vcap_raw_debugfs);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
index cf594668d5d9..0de3f677135a 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -221,13 +221,6 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static int vcap_test_enable(struct net_device *ndev,
- struct vcap_admin *admin,
- bool enable)
-{
- return 0;
-}
-
static struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
@@ -238,7 +231,6 @@ static struct vcap_operations test_callbacks = {
.update = test_cache_update,
.move = test_cache_move,
.port_info = vcap_test_port_info,
- .enable = vcap_test_enable,
};
static struct vcap_control test_vctrl = {
@@ -253,6 +245,8 @@ static void vcap_test_api_init(struct vcap_admin *admin)
INIT_LIST_HEAD(&test_vctrl.list);
INIT_LIST_HEAD(&admin->list);
INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+ mutex_init(&admin->lock);
list_add_tail(&admin->list, &test_vctrl.list);
memset(test_updateaddr, 0, sizeof(test_updateaddr));
test_updateaddridx = 0;
@@ -393,8 +387,9 @@ static const char * const test_admin_info_expect[] = {
"default_cnt: 73\n",
"require_cnt_dis: 0\n",
"version: 1\n",
- "vtype: 2\n",
+ "vtype: 3\n",
"vinst: 0\n",
+ "ingress: 1\n",
"first_cid: 10000\n",
"last_cid: 19999\n",
"lookups: 4\n",
@@ -413,6 +408,7 @@ static void vcap_api_show_admin_test(struct kunit *test)
.last_valid_addr = 3071,
.first_valid_addr = 0,
.last_used_addr = 794,
+ .ingress = true,
};
struct vcap_output_print out = {
.prf = (void *)test_prf,
@@ -439,8 +435,9 @@ static const char * const test_admin_expect[] = {
"default_cnt: 73\n",
"require_cnt_dis: 0\n",
"version: 1\n",
- "vtype: 2\n",
+ "vtype: 3\n",
"vinst: 0\n",
+ "ingress: 1\n",
"first_cid: 8000000\n",
"last_cid: 8199999\n",
"lookups: 4\n",
@@ -452,6 +449,7 @@ static const char * const test_admin_expect[] = {
" chain_id: 0\n",
" user: 0\n",
" priority: 0\n",
+ " state: permanent\n",
" keysets: VCAP_KFS_MAC_ETYPE\n",
" keyset_sw: 6\n",
" keyset_sw_regs: 2\n",
@@ -501,6 +499,7 @@ static void vcap_api_show_admin_rule_test(struct kunit *test)
.last_valid_addr = 3071,
.first_valid_addr = 0,
.last_used_addr = 794,
+ .ingress = true,
.cache = {
.keystream = keydata,
.maskstream = mskdata,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index 76a31215ebfb..c07f25e791c7 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -211,13 +211,6 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static int vcap_test_enable(struct net_device *ndev,
- struct vcap_admin *admin,
- bool enable)
-{
- return 0;
-}
-
static struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
@@ -228,7 +221,6 @@ static struct vcap_operations test_callbacks = {
.update = test_cache_update,
.move = test_cache_move,
.port_info = vcap_test_port_info,
- .enable = vcap_test_enable,
};
static struct vcap_control test_vctrl = {
@@ -243,6 +235,8 @@ static void vcap_test_api_init(struct vcap_admin *admin)
INIT_LIST_HEAD(&test_vctrl.list);
INIT_LIST_HEAD(&admin->list);
INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+ mutex_init(&admin->lock);
list_add_tail(&admin->list, &test_vctrl.list);
memset(test_updateaddr, 0, sizeof(test_updateaddr));
test_updateaddridx = 0;
@@ -302,7 +296,7 @@ test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
ret = vcap_set_rule_set_keyset(rule, keyset);
 /* Add rule actions: there must be at least one action */
- ret = vcap_rule_add_action_u32(rule, VCAP_AF_COSID_VAL, 0);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_ISDX_VAL, 0);
/* Override rule actionset */
ret = vcap_set_rule_set_actionset(rule, actionset);
@@ -1312,8 +1306,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
struct vcap_admin is2_admin = {
.vtype = VCAP_TYPE_IS2,
- .first_cid = 10000,
- .last_cid = 19999,
+ .first_cid = 8000000,
+ .last_cid = 8099999,
.lookups = 4,
.last_valid_addr = 3071,
.first_valid_addr = 0,
@@ -1326,7 +1320,7 @@ static void vcap_api_encode_rule_test(struct kunit *test)
};
struct vcap_rule *rule;
struct vcap_rule_internal *ri;
- int vcap_chain_id = 10005;
+ int vcap_chain_id = 8000000;
enum vcap_user user = VCAP_USER_VCAP_UTIL;
u16 priority = 10;
int id = 100;
@@ -1343,8 +1337,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
u32 port_mask_rng_mask = 0x0f;
u32 igr_port_mask_value = 0xffabcd01;
u32 igr_port_mask_mask = ~0;
- /* counter is not written yet, so it is not in expwriteaddr */
- u32 expwriteaddr[] = {792, 793, 794, 795, 796, 797, 0};
+ /* counter is written as the first operation */
+ u32 expwriteaddr[] = {792, 792, 793, 794, 795, 796, 797};
int idx;
vcap_test_api_init(&is2_admin);
@@ -1398,6 +1392,11 @@ static void vcap_api_encode_rule_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 2, ri->keyset_sw_regs);
KUNIT_EXPECT_EQ(test, 4, ri->actionset_sw_regs);
+ /* Enable the lookup so the rule will be written */
+ ret = vcap_enable_lookups(&test_vctrl, &test_netdev, 0,
+ rule->vcap_chain_id, rule->cookie, true);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
/* Add rule with write callback */
ret = vcap_add_rule(rule);
KUNIT_EXPECT_EQ(test, 0, ret);
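Under the new rule states (see the vcap_api_private.h hunk further down), a rule only reaches hardware once an enabled chain covers it, which is why the test enables the lookup before calling vcap_add_rule(). A hedged stub of that ordering — illustrative stand-ins, not the kernel API:

static int lookup_enabled;	/* set by the enable call below */

static int enable_lookup_stub(int chain_id, int enable)
{
	lookup_enabled = enable;
	return 0;
}

/* returns 0 when the rule would reach hardware (the enabled state),
 * 1 when it would be parked on the software rule list (disabled) */
static int add_rule_stub(int chain_id)
{
	return lookup_enabled ? 0 : 1;
}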
@@ -1872,58 +1871,56 @@ static void vcap_api_next_lookup_basic_test(struct kunit *test)
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
- KUNIT_EXPECT_EQ(test, true, ret);
+ KUNIT_EXPECT_EQ(test, false, ret);
}
static void vcap_api_next_lookup_advanced_test(struct kunit *test)
{
- struct vcap_admin admin1 = {
+ struct vcap_admin admin[] = {
+ {
.vtype = VCAP_TYPE_IS0,
.vinst = 0,
.first_cid = 1000000,
.last_cid = 1199999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin2 = {
+ }, {
.vtype = VCAP_TYPE_IS0,
.vinst = 1,
.first_cid = 1200000,
.last_cid = 1399999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin3 = {
+ }, {
.vtype = VCAP_TYPE_IS0,
.vinst = 2,
.first_cid = 1400000,
.last_cid = 1599999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin4 = {
+ }, {
.vtype = VCAP_TYPE_IS2,
.vinst = 0,
.first_cid = 8000000,
.last_cid = 8199999,
.lookups = 4,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin5 = {
+ }, {
.vtype = VCAP_TYPE_IS2,
.vinst = 1,
.first_cid = 8200000,
.last_cid = 8399999,
.lookups = 4,
.lookups_per_instance = 2,
+ }
};
bool ret;
- vcap_test_api_init(&admin1);
- list_add_tail(&admin2.list, &test_vctrl.list);
- list_add_tail(&admin3.list, &test_vctrl.list);
- list_add_tail(&admin4.list, &test_vctrl.list);
- list_add_tail(&admin5.list, &test_vctrl.list);
+ vcap_test_api_init(&admin[0]);
+ list_add_tail(&admin[1].list, &test_vctrl.list);
+ list_add_tail(&admin[2].list, &test_vctrl.list);
+ list_add_tail(&admin[3].list, &test_vctrl.list);
+ list_add_tail(&admin[4].list, &test_vctrl.list);
ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1001000);
KUNIT_EXPECT_EQ(test, false, ret);
@@ -1933,9 +1930,9 @@ static void vcap_api_next_lookup_advanced_test(struct kunit *test)
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1201000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1301000);
- KUNIT_EXPECT_EQ(test, false, ret);
+ KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 8101000);
- KUNIT_EXPECT_EQ(test, false, ret);
+ KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1300000, 1401000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1400000, 1501000);
@@ -1951,7 +1948,7 @@ static void vcap_api_next_lookup_advanced_test(struct kunit *test)
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
- KUNIT_EXPECT_EQ(test, true, ret);
+ KUNIT_EXPECT_EQ(test, false, ret);
}
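The flipped expectations suggest the updated vcap_is_next_lookup() accepts a destination in any later lookup, but only when some configured instance actually covers that chain id (8401000 lies past the last IS2 instance, so it now fails). A self-contained model of that reading, assuming 100000 chain ids per lookup — an inference from the test values above, not the kernel code:

struct cid_range { int first_cid, last_cid; };

static int covered(const struct cid_range *r, int n, int cid)
{
	for (int i = 0; i < n; i++)
		if (cid >= r[i].first_cid && cid <= r[i].last_cid)
			return 1;
	return 0;
}

/* dst must land in a later 100000-wide lookup block and be covered
 * by a configured instance; this matches every expectation in the
 * basic and advanced tests above */
static int is_next_lookup(const struct cid_range *r, int n, int src, int dst)
{
	return covered(r, n, dst) && dst / 100000 > src / 100000;
}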
static void vcap_api_filter_unsupported_keys_test(struct kunit *test)
@@ -2146,6 +2143,71 @@ static void vcap_api_filter_keylist_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 26, idx);
}
+static void vcap_api_rule_chain_path_test(struct kunit *test)
+{
+ struct vcap_admin admin1 = {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 0,
+ .first_cid = 1000000,
+ .last_cid = 1199999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_enabled_port eport3 = {
+ .ndev = &test_netdev,
+ .cookie = 0x100,
+ .src_cid = 0,
+ .dst_cid = 1000000,
+ };
+ struct vcap_enabled_port eport2 = {
+ .ndev = &test_netdev,
+ .cookie = 0x200,
+ .src_cid = 1000000,
+ .dst_cid = 1100000,
+ };
+ struct vcap_enabled_port eport1 = {
+ .ndev = &test_netdev,
+ .cookie = 0x300,
+ .src_cid = 1100000,
+ .dst_cid = 8000000,
+ };
+ bool ret;
+ int chain;
+
+ vcap_test_api_init(&admin1);
+ list_add_tail(&eport1.list, &admin1.enabled);
+ list_add_tail(&eport2.list, &admin1.enabled);
+ list_add_tail(&eport3.list, &admin1.enabled);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1000000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1100000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1200000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 0);
+ KUNIT_EXPECT_EQ(test, 1000000, chain);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 1000000);
+ KUNIT_EXPECT_EQ(test, 1100000, chain);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 1100000);
+ KUNIT_EXPECT_EQ(test, 8000000, chain);
+}
+
+static struct kunit_case vcap_api_rule_enable_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_chain_path_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_enable_test_suite = {
+ .name = "VCAP_API_Rule_Enable_Testsuite",
+ .test_cases = vcap_api_rule_enable_test_cases,
+};
+
static struct kunit_suite vcap_api_rule_remove_test_suite = {
.name = "VCAP_API_Rule_Remove_Testsuite",
.test_cases = vcap_api_rule_remove_test_cases,
@@ -2236,6 +2298,7 @@ static struct kunit_suite vcap_api_encoding_test_suite = {
.test_cases = vcap_api_encoding_test_cases,
};
+kunit_test_suite(vcap_api_rule_enable_test_suite);
kunit_test_suite(vcap_api_rule_remove_test_suite);
kunit_test_suite(vcap_api_rule_insert_test_suite);
kunit_test_suite(vcap_api_rule_counter_test_suite);
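The new chain path test models each vcap_enabled_port entry as a hop from src_cid to dst_cid; a chain id is reachable when following hops from chain 0 arrives at it. A hedged sketch of that traversal, assuming an acyclic hop list (simplified stand-ins, not the kernel's list_head walk):

struct hop { int src_cid, dst_cid; };

/* first hop leaving @src, or -1 when the path ends */
static int next_chain(const struct hop *h, int n, int src)
{
	for (int i = 0; i < n; i++)
		if (h[i].src_cid == src)
			return h[i].dst_cid;
	return -1;
}

/* walk from chain 0; with the three hops in the test this yields
 * 0 -> 1000000 -> 1100000 -> 8000000, so 1200000 is unreachable */
static int path_exist(const struct hop *h, int n, int dst)
{
	for (int cid = 0; cid >= 0; cid = next_chain(h, n, cid))
		if (cid == dst)
			return 1;
	return 0;
}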
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
index 4fd21da97679..df81d9ff502b 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
@@ -13,6 +13,12 @@
#define to_intrule(rule) container_of((rule), struct vcap_rule_internal, data)
+enum vcap_rule_state {
+ VCAP_RS_PERMANENT, /* the rule is always stored in HW */
+ VCAP_RS_ENABLED, /* enabled in HW but can be disabled */
+ VCAP_RS_DISABLED, /* disabled (stored in SW) and can be enabled */
+};
+
/* Private VCAP API rule data */
struct vcap_rule_internal {
struct vcap_rule data; /* provided by the client */
@@ -29,6 +35,7 @@ struct vcap_rule_internal {
u32 addr; /* address in the VCAP at insertion */
u32 counter_id; /* counter id (if a dedicated counter is available) */
struct vcap_counter counter; /* last read counter value */
+ enum vcap_rule_state state; /* rule storage state */
};
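The state member makes the software rule list the source of truth: permanent rules always occupy a VCAP address, enabled rules occupy one but can be pulled back, and disabled rules exist only in software. A one-line helper expressing that split — illustrative only, with local names; the kernel folds this decision into its add/enable paths:

enum rule_state { RS_PERMANENT, RS_ENABLED, RS_DISABLED };

/* permanent and enabled rules occupy a hardware address;
 * disabled rules live only on the software rule list */
static int rule_in_hw(enum rule_state state)
{
	return state != RS_DISABLED;
}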
/* Bit iterator for the VCAP cache streams */
@@ -43,8 +50,6 @@ struct vcap_stream_iter {
/* Check that the control has a valid set of callbacks */
int vcap_api_check(struct vcap_control *ctrl);
-/* Make a shallow copy of the rule without the fields */
-struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri);
/* Erase the VCAP cache area used for encoding and decoding */
void vcap_erase_cache(struct vcap_rule_internal *ri);
@@ -110,4 +115,10 @@ int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt,
u32 *keystream, u32 *mskstream, bool mask,
int sw_max, struct vcap_keyset_list *kslist);
+/* Get the keysets that match the rule key type/mask */
+int vcap_rule_get_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches);
+/* Decode a rule from the VCAP cache and return a copy */
+struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem);
+
#endif /* __VCAP_API_PRIVATE__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
index 5d681d2697cd..5dbfc0d0c369 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
- * Microchip VCAP API Test VCAP Model Data
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP test model interface for kunit testing
+ */
+
+/* This file is autogenerated by cml-utils 2023-02-10 11:16:00 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
*/
#include <linux/types.h>
@@ -10,177 +14,6 @@
#include "vcap_model_kunit.h"
/* keyfields */
-static const struct vcap_field is0_mll_keyfield[] = {
- [VCAP_KF_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 0,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_FIRST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 2,
- .width = 1,
- },
- [VCAP_KF_IF_IGR_PORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 3,
- .width = 7,
- },
- [VCAP_KF_8021Q_VLAN_TAGS] = {
- .type = VCAP_FIELD_U32,
- .offset = 10,
- .width = 3,
- },
- [VCAP_KF_8021Q_TPID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 13,
- .width = 3,
- },
- [VCAP_KF_8021Q_VID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 16,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 28,
- .width = 3,
- },
- [VCAP_KF_8021Q_VID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 31,
- .width = 12,
- },
- [VCAP_KF_L2_DMAC] = {
- .type = VCAP_FIELD_U48,
- .offset = 43,
- .width = 48,
- },
- [VCAP_KF_L2_SMAC] = {
- .type = VCAP_FIELD_U48,
- .offset = 91,
- .width = 48,
- },
- [VCAP_KF_ETYPE_MPLS] = {
- .type = VCAP_FIELD_U32,
- .offset = 139,
- .width = 2,
- },
- [VCAP_KF_L4_RNG] = {
- .type = VCAP_FIELD_U32,
- .offset = 141,
- .width = 8,
- },
-};
-
-static const struct vcap_field is0_tri_vid_keyfield[] = {
- [VCAP_KF_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 0,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_FIRST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 2,
- .width = 1,
- },
- [VCAP_KF_IF_IGR_PORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 3,
- .width = 7,
- },
- [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 10,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_GEN_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 12,
- .width = 12,
- },
- [VCAP_KF_8021Q_VLAN_TAGS] = {
- .type = VCAP_FIELD_U32,
- .offset = 24,
- .width = 3,
- },
- [VCAP_KF_8021Q_TPID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 27,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP0] = {
- .type = VCAP_FIELD_U32,
- .offset = 30,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI0] = {
- .type = VCAP_FIELD_BIT,
- .offset = 33,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 34,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 46,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP1] = {
- .type = VCAP_FIELD_U32,
- .offset = 49,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI1] = {
- .type = VCAP_FIELD_BIT,
- .offset = 52,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 53,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID2] = {
- .type = VCAP_FIELD_U32,
- .offset = 65,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP2] = {
- .type = VCAP_FIELD_U32,
- .offset = 68,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI2] = {
- .type = VCAP_FIELD_BIT,
- .offset = 71,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID2] = {
- .type = VCAP_FIELD_U32,
- .offset = 72,
- .width = 12,
- },
- [VCAP_KF_L4_RNG] = {
- .type = VCAP_FIELD_U32,
- .offset = 84,
- .width = 8,
- },
- [VCAP_KF_OAM_Y1731_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 92,
- .width = 1,
- },
- [VCAP_KF_OAM_MEL_FLAGS] = {
- .type = VCAP_FIELD_U32,
- .offset = 93,
- .width = 7,
- },
-};
-
static const struct vcap_field is0_ll_full_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_U32,
@@ -344,194 +177,6 @@ static const struct vcap_field is0_ll_full_keyfield[] = {
},
};
-static const struct vcap_field is0_normal_keyfield[] = {
- [VCAP_KF_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 0,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_FIRST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 2,
- .width = 1,
- },
- [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 3,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_GEN_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 5,
- .width = 12,
- },
- [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 17,
- .width = 2,
- },
- [VCAP_KF_IF_IGR_PORT_MASK] = {
- .type = VCAP_FIELD_U72,
- .offset = 19,
- .width = 65,
- },
- [VCAP_KF_L2_MC_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 84,
- .width = 1,
- },
- [VCAP_KF_L2_BC_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 85,
- .width = 1,
- },
- [VCAP_KF_8021Q_VLAN_TAGS] = {
- .type = VCAP_FIELD_U32,
- .offset = 86,
- .width = 3,
- },
- [VCAP_KF_8021Q_TPID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 89,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP0] = {
- .type = VCAP_FIELD_U32,
- .offset = 92,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI0] = {
- .type = VCAP_FIELD_BIT,
- .offset = 95,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 96,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 108,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP1] = {
- .type = VCAP_FIELD_U32,
- .offset = 111,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI1] = {
- .type = VCAP_FIELD_BIT,
- .offset = 114,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 115,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID2] = {
- .type = VCAP_FIELD_U32,
- .offset = 127,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP2] = {
- .type = VCAP_FIELD_U32,
- .offset = 130,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI2] = {
- .type = VCAP_FIELD_BIT,
- .offset = 133,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID2] = {
- .type = VCAP_FIELD_U32,
- .offset = 134,
- .width = 12,
- },
- [VCAP_KF_DST_ENTRY] = {
- .type = VCAP_FIELD_BIT,
- .offset = 146,
- .width = 1,
- },
- [VCAP_KF_L2_SMAC] = {
- .type = VCAP_FIELD_U48,
- .offset = 147,
- .width = 48,
- },
- [VCAP_KF_IP_MC_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 195,
- .width = 1,
- },
- [VCAP_KF_ETYPE_LEN_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 196,
- .width = 1,
- },
- [VCAP_KF_ETYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 197,
- .width = 16,
- },
- [VCAP_KF_IP_SNAP_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 213,
- .width = 1,
- },
- [VCAP_KF_IP4_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 214,
- .width = 1,
- },
- [VCAP_KF_L3_FRAGMENT_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 215,
- .width = 2,
- },
- [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
- .type = VCAP_FIELD_BIT,
- .offset = 217,
- .width = 1,
- },
- [VCAP_KF_L3_OPTIONS_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 218,
- .width = 1,
- },
- [VCAP_KF_L3_DSCP] = {
- .type = VCAP_FIELD_U32,
- .offset = 219,
- .width = 6,
- },
- [VCAP_KF_L3_IP4_SIP] = {
- .type = VCAP_FIELD_U32,
- .offset = 225,
- .width = 32,
- },
- [VCAP_KF_TCP_UDP_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 257,
- .width = 1,
- },
- [VCAP_KF_TCP_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 258,
- .width = 1,
- },
- [VCAP_KF_L4_SPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 259,
- .width = 16,
- },
- [VCAP_KF_L4_RNG] = {
- .type = VCAP_FIELD_U32,
- .offset = 275,
- .width = 8,
- },
-};
-
static const struct vcap_field is0_normal_7tuple_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_BIT,
@@ -1095,16 +740,6 @@ static const struct vcap_field is2_mac_etype_keyfield[] = {
.offset = 85,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 86,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 87,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 88,
@@ -1381,16 +1016,6 @@ static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
.offset = 85,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 86,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 87,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 88,
@@ -1594,16 +1219,6 @@ static const struct vcap_field is2_ip4_other_keyfield[] = {
.offset = 85,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 86,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 87,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 88,
@@ -1757,26 +1372,11 @@ static const struct vcap_field is2_ip6_std_keyfield[] = {
.offset = 85,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 86,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 87,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 88,
.width = 1,
},
- [VCAP_KF_L3_DST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 89,
- .width = 1,
- },
[VCAP_KF_L3_TTL_GT0] = {
.type = VCAP_FIELD_BIT,
.offset = 90,
@@ -1890,16 +1490,6 @@ static const struct vcap_field is2_ip_7tuple_keyfield[] = {
.offset = 116,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 117,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 118,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 119,
@@ -2022,69 +1612,6 @@ static const struct vcap_field is2_ip_7tuple_keyfield[] = {
},
};
-static const struct vcap_field is2_ip6_vid_keyfield[] = {
- [VCAP_KF_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 0,
- .width = 4,
- },
- [VCAP_KF_LOOKUP_FIRST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 4,
- .width = 1,
- },
- [VCAP_KF_LOOKUP_PAG] = {
- .type = VCAP_FIELD_U32,
- .offset = 5,
- .width = 8,
- },
- [VCAP_KF_ISDX_GT0_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 13,
- .width = 1,
- },
- [VCAP_KF_ISDX_CLS] = {
- .type = VCAP_FIELD_U32,
- .offset = 14,
- .width = 12,
- },
- [VCAP_KF_8021Q_VID_CLS] = {
- .type = VCAP_FIELD_U32,
- .offset = 26,
- .width = 13,
- },
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 39,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 40,
- .width = 1,
- },
- [VCAP_KF_L3_RT_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 41,
- .width = 1,
- },
- [VCAP_KF_L3_DST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 42,
- .width = 1,
- },
- [VCAP_KF_L3_IP6_DIP] = {
- .type = VCAP_FIELD_U128,
- .offset = 43,
- .width = 128,
- },
- [VCAP_KF_L3_IP6_SIP] = {
- .type = VCAP_FIELD_U128,
- .offset = 171,
- .width = 128,
- },
-};
-
static const struct vcap_field es2_mac_etype_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_U32,
@@ -2096,16 +1623,6 @@ static const struct vcap_field es2_mac_etype_keyfield[] = {
.offset = 3,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 4,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 12,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 13,
@@ -2181,16 +1698,6 @@ static const struct vcap_field es2_mac_etype_keyfield[] = {
.offset = 95,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 96,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 97,
- .width = 2,
- },
[VCAP_KF_L2_DMAC] = {
.type = VCAP_FIELD_U48,
.offset = 99,
@@ -2239,16 +1746,6 @@ static const struct vcap_field es2_arp_keyfield[] = {
.offset = 3,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 4,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 12,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 13,
@@ -2319,16 +1816,6 @@ static const struct vcap_field es2_arp_keyfield[] = {
.offset = 94,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 95,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 96,
- .width = 2,
- },
[VCAP_KF_L2_SMAC] = {
.type = VCAP_FIELD_U48,
.offset = 98,
@@ -2397,16 +1884,6 @@ static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
.offset = 3,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 4,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 12,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 13,
@@ -2482,16 +1959,6 @@ static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
.offset = 95,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 96,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 97,
- .width = 2,
- },
[VCAP_KF_IP4_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 99,
@@ -2610,16 +2077,6 @@ static const struct vcap_field es2_ip4_other_keyfield[] = {
.offset = 3,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 4,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 12,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 13,
@@ -2695,16 +2152,6 @@ static const struct vcap_field es2_ip4_other_keyfield[] = {
.offset = 95,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 96,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 97,
- .width = 2,
- },
[VCAP_KF_IP4_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 99,
@@ -2763,16 +2210,6 @@ static const struct vcap_field es2_ip_7tuple_keyfield[] = {
.offset = 0,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 1,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 9,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 10,
@@ -2848,16 +2285,6 @@ static const struct vcap_field es2_ip_7tuple_keyfield[] = {
.offset = 92,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 93,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 94,
- .width = 2,
- },
[VCAP_KF_L2_DMAC] = {
.type = VCAP_FIELD_U48,
.offset = 96,
@@ -2970,6 +2397,124 @@ static const struct vcap_field es2_ip_7tuple_keyfield[] = {
},
};
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 100,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 229,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 237,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 253,
+ .width = 40,
+ },
+};
+
static const struct vcap_field es2_ip4_vid_keyfield[] = {
[VCAP_KF_LOOKUP_FIRST_IS] = {
.type = VCAP_FIELD_BIT,
@@ -3046,7 +2591,7 @@ static const struct vcap_field es2_ip4_vid_keyfield[] = {
.offset = 48,
.width = 1,
},
- [VCAP_KF_MIRROR_ENA] = {
+ [VCAP_KF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 49,
.width = 2,
@@ -3143,26 +2688,11 @@ static const struct vcap_field es2_ip6_vid_keyfield[] = {
/* keyfield_set */
static const struct vcap_set is0_keyfield_set[] = {
- [VCAP_KFS_MLL] = {
- .type_id = 0,
- .sw_per_item = 3,
- .sw_cnt = 4,
- },
- [VCAP_KFS_TRI_VID] = {
- .type_id = 0,
- .sw_per_item = 2,
- .sw_cnt = 6,
- },
[VCAP_KFS_LL_FULL] = {
.type_id = 0,
.sw_per_item = 6,
.sw_cnt = 2,
},
- [VCAP_KFS_NORMAL] = {
- .type_id = 1,
- .sw_per_item = 6,
- .sw_cnt = 2,
- },
[VCAP_KFS_NORMAL_7TUPLE] = {
.type_id = 0,
.sw_per_item = 12,
@@ -3216,11 +2746,6 @@ static const struct vcap_set is2_keyfield_set[] = {
.sw_per_item = 12,
.sw_cnt = 1,
},
- [VCAP_KFS_IP6_VID] = {
- .type_id = 9,
- .sw_per_item = 6,
- .sw_cnt = 2,
- },
};
static const struct vcap_set es2_keyfield_set[] = {
@@ -3249,6 +2774,11 @@ static const struct vcap_set es2_keyfield_set[] = {
.sw_per_item = 12,
.sw_cnt = 1,
},
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
[VCAP_KFS_IP4_VID] = {
.type_id = -1,
.sw_per_item = 3,
@@ -3263,10 +2793,7 @@ static const struct vcap_set es2_keyfield_set[] = {
/* keyfield_set map */
static const struct vcap_field *is0_keyfield_set_map[] = {
- [VCAP_KFS_MLL] = is0_mll_keyfield,
- [VCAP_KFS_TRI_VID] = is0_tri_vid_keyfield,
[VCAP_KFS_LL_FULL] = is0_ll_full_keyfield,
- [VCAP_KFS_NORMAL] = is0_normal_keyfield,
[VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
[VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
[VCAP_KFS_PURE_5TUPLE_IP4] = is0_pure_5tuple_ip4_keyfield,
@@ -3280,7 +2807,6 @@ static const struct vcap_field *is2_keyfield_set_map[] = {
[VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
[VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
[VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
- [VCAP_KFS_IP6_VID] = is2_ip6_vid_keyfield,
};
static const struct vcap_field *es2_keyfield_set_map[] = {
@@ -3289,16 +2815,14 @@ static const struct vcap_field *es2_keyfield_set_map[] = {
[VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
[VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
[VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
[VCAP_KFS_IP4_VID] = es2_ip4_vid_keyfield,
[VCAP_KFS_IP6_VID] = es2_ip6_vid_keyfield,
};
/* keyfield_set map sizes */
static int is0_keyfield_set_map_size[] = {
- [VCAP_KFS_MLL] = ARRAY_SIZE(is0_mll_keyfield),
- [VCAP_KFS_TRI_VID] = ARRAY_SIZE(is0_tri_vid_keyfield),
[VCAP_KFS_LL_FULL] = ARRAY_SIZE(is0_ll_full_keyfield),
- [VCAP_KFS_NORMAL] = ARRAY_SIZE(is0_normal_keyfield),
[VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
[VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
[VCAP_KFS_PURE_5TUPLE_IP4] = ARRAY_SIZE(is0_pure_5tuple_ip4_keyfield),
@@ -3312,7 +2836,6 @@ static int is2_keyfield_set_map_size[] = {
[VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
[VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
[VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
- [VCAP_KFS_IP6_VID] = ARRAY_SIZE(is2_ip6_vid_keyfield),
};
static int es2_keyfield_set_map_size[] = {
@@ -3321,387 +2844,12 @@ static int es2_keyfield_set_map_size[] = {
[VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
[VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
[VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
[VCAP_KFS_IP4_VID] = ARRAY_SIZE(es2_ip4_vid_keyfield),
[VCAP_KFS_IP6_VID] = ARRAY_SIZE(es2_ip6_vid_keyfield),
};
/* actionfields */
-static const struct vcap_field is0_mlbs_actionfield[] = {
- [VCAP_AF_TYPE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 0,
- .width = 1,
- },
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 1,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 2,
- .width = 3,
- },
- [VCAP_AF_QOS_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 5,
- .width = 1,
- },
- [VCAP_AF_QOS_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 6,
- .width = 3,
- },
- [VCAP_AF_DP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 9,
- .width = 1,
- },
- [VCAP_AF_DP_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 10,
- .width = 2,
- },
- [VCAP_AF_MAP_LOOKUP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 12,
- .width = 2,
- },
- [VCAP_AF_MAP_KEY] = {
- .type = VCAP_FIELD_U32,
- .offset = 14,
- .width = 3,
- },
- [VCAP_AF_MAP_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 17,
- .width = 9,
- },
- [VCAP_AF_CLS_VID_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 26,
- .width = 3,
- },
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 29,
- .width = 3,
- },
- [VCAP_AF_VID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 32,
- .width = 13,
- },
- [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 45,
- .width = 1,
- },
- [VCAP_AF_ISDX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 46,
- .width = 12,
- },
- [VCAP_AF_FWD_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 58,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 59,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 60,
- .width = 3,
- },
- [VCAP_AF_OAM_Y1731_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 63,
- .width = 3,
- },
- [VCAP_AF_OAM_TWAMP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 66,
- .width = 1,
- },
- [VCAP_AF_OAM_IP_BFD_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 67,
- .width = 1,
- },
- [VCAP_AF_TC_LABEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 68,
- .width = 3,
- },
- [VCAP_AF_TTL_LABEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 71,
- .width = 3,
- },
- [VCAP_AF_NUM_VLD_LABELS] = {
- .type = VCAP_FIELD_U32,
- .offset = 74,
- .width = 2,
- },
- [VCAP_AF_FWD_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 76,
- .width = 3,
- },
- [VCAP_AF_MPLS_OAM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 79,
- .width = 3,
- },
- [VCAP_AF_MPLS_MEP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 82,
- .width = 1,
- },
- [VCAP_AF_MPLS_MIP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 83,
- .width = 1,
- },
- [VCAP_AF_MPLS_OAM_FLAVOR] = {
- .type = VCAP_FIELD_BIT,
- .offset = 84,
- .width = 1,
- },
- [VCAP_AF_MPLS_IP_CTRL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 85,
- .width = 1,
- },
- [VCAP_AF_PAG_OVERRIDE_MASK] = {
- .type = VCAP_FIELD_U32,
- .offset = 86,
- .width = 8,
- },
- [VCAP_AF_PAG_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 94,
- .width = 8,
- },
- [VCAP_AF_S2_KEY_SEL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 102,
- .width = 1,
- },
- [VCAP_AF_S2_KEY_SEL_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 103,
- .width = 6,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 109,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 111,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT] = {
- .type = VCAP_FIELD_U32,
- .offset = 112,
- .width = 5,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 117,
- .width = 5,
- },
- [VCAP_AF_NXT_NORM_W16_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 122,
- .width = 5,
- },
- [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 127,
- .width = 2,
- },
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 129,
- .width = 2,
- },
- [VCAP_AF_NXT_NORMALIZE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 131,
- .width = 1,
- },
- [VCAP_AF_NXT_IDX_CTRL] = {
- .type = VCAP_FIELD_U32,
- .offset = 132,
- .width = 3,
- },
- [VCAP_AF_NXT_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 135,
- .width = 12,
- },
-};
-
-static const struct vcap_field is0_mlbs_reduced_actionfield[] = {
- [VCAP_AF_TYPE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 0,
- .width = 1,
- },
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 1,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 2,
- .width = 3,
- },
- [VCAP_AF_QOS_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 5,
- .width = 1,
- },
- [VCAP_AF_QOS_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 6,
- .width = 3,
- },
- [VCAP_AF_DP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 9,
- .width = 1,
- },
- [VCAP_AF_DP_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 10,
- .width = 2,
- },
- [VCAP_AF_MAP_LOOKUP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 12,
- .width = 2,
- },
- [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 14,
- .width = 1,
- },
- [VCAP_AF_ISDX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 15,
- .width = 12,
- },
- [VCAP_AF_FWD_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 27,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 28,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 29,
- .width = 3,
- },
- [VCAP_AF_TC_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 32,
- .width = 1,
- },
- [VCAP_AF_TTL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 33,
- .width = 1,
- },
- [VCAP_AF_FWD_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 34,
- .width = 3,
- },
- [VCAP_AF_MPLS_OAM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 37,
- .width = 3,
- },
- [VCAP_AF_MPLS_MEP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 40,
- .width = 1,
- },
- [VCAP_AF_MPLS_MIP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 41,
- .width = 1,
- },
- [VCAP_AF_MPLS_OAM_FLAVOR] = {
- .type = VCAP_FIELD_BIT,
- .offset = 42,
- .width = 1,
- },
- [VCAP_AF_MPLS_IP_CTRL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 43,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 44,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 46,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT_REDUCED] = {
- .type = VCAP_FIELD_U32,
- .offset = 47,
- .width = 3,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 50,
- .width = 5,
- },
- [VCAP_AF_NXT_NORM_W32_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 55,
- .width = 2,
- },
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 57,
- .width = 2,
- },
- [VCAP_AF_NXT_NORMALIZE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 59,
- .width = 1,
- },
- [VCAP_AF_NXT_IDX_CTRL] = {
- .type = VCAP_FIELD_U32,
- .offset = 60,
- .width = 3,
- },
- [VCAP_AF_NXT_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 63,
- .width = 12,
- },
-};
-
static const struct vcap_field is0_classification_actionfield[] = {
[VCAP_AF_TYPE] = {
.type = VCAP_FIELD_BIT,
@@ -3718,16 +2866,6 @@ static const struct vcap_field is0_classification_actionfield[] = {
.offset = 2,
.width = 6,
},
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 8,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 9,
- .width = 3,
- },
[VCAP_AF_QOS_ENA] = {
.type = VCAP_FIELD_BIT,
.offset = 12,
@@ -3788,46 +2926,11 @@ static const struct vcap_field is0_classification_actionfield[] = {
.offset = 39,
.width = 3,
},
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 42,
- .width = 3,
- },
[VCAP_AF_VID_VAL] = {
.type = VCAP_FIELD_U32,
.offset = 45,
.width = 13,
},
- [VCAP_AF_VLAN_POP_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 58,
- .width = 1,
- },
- [VCAP_AF_VLAN_POP_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 59,
- .width = 2,
- },
- [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 61,
- .width = 1,
- },
- [VCAP_AF_VLAN_PUSH_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 62,
- .width = 2,
- },
- [VCAP_AF_TPID_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 64,
- .width = 2,
- },
- [VCAP_AF_VLAN_WAS_TAGGED] = {
- .type = VCAP_FIELD_U32,
- .offset = 66,
- .width = 2,
- },
[VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
.type = VCAP_FIELD_BIT,
.offset = 68,
@@ -3838,71 +2941,6 @@ static const struct vcap_field is0_classification_actionfield[] = {
.offset = 69,
.width = 12,
},
- [VCAP_AF_RT_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 81,
- .width = 2,
- },
- [VCAP_AF_LPM_AFFIX_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 83,
- .width = 1,
- },
- [VCAP_AF_LPM_AFFIX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 84,
- .width = 10,
- },
- [VCAP_AF_RLEG_DMAC_CHK_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 94,
- .width = 1,
- },
- [VCAP_AF_TTL_DECR_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 95,
- .width = 1,
- },
- [VCAP_AF_L3_MAC_UPDATE_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 96,
- .width = 1,
- },
- [VCAP_AF_FWD_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 97,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 98,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 99,
- .width = 3,
- },
- [VCAP_AF_MIP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 102,
- .width = 2,
- },
- [VCAP_AF_OAM_Y1731_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 104,
- .width = 3,
- },
- [VCAP_AF_OAM_TWAMP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 107,
- .width = 1,
- },
- [VCAP_AF_OAM_IP_BFD_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 108,
- .width = 1,
- },
[VCAP_AF_PAG_OVERRIDE_MASK] = {
.type = VCAP_FIELD_U32,
.offset = 109,
@@ -3913,76 +2951,6 @@ static const struct vcap_field is0_classification_actionfield[] = {
.offset = 117,
.width = 8,
},
- [VCAP_AF_S2_KEY_SEL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 125,
- .width = 1,
- },
- [VCAP_AF_S2_KEY_SEL_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 126,
- .width = 6,
- },
- [VCAP_AF_INJ_MASQ_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 132,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_PORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 133,
- .width = 7,
- },
- [VCAP_AF_LPORT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 140,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_LPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 141,
- .width = 7,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 148,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 150,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT] = {
- .type = VCAP_FIELD_U32,
- .offset = 151,
- .width = 5,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 156,
- .width = 5,
- },
- [VCAP_AF_NXT_NORM_W16_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 161,
- .width = 5,
- },
- [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 166,
- .width = 2,
- },
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 168,
- .width = 2,
- },
- [VCAP_AF_NXT_NORMALIZE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 170,
- .width = 1,
- },
[VCAP_AF_NXT_IDX_CTRL] = {
.type = VCAP_FIELD_U32,
.offset = 171,
@@ -4006,16 +2974,6 @@ static const struct vcap_field is0_full_actionfield[] = {
.offset = 1,
.width = 6,
},
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 7,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 8,
- .width = 3,
- },
[VCAP_AF_QOS_ENA] = {
.type = VCAP_FIELD_BIT,
.offset = 11,
@@ -4076,46 +3034,11 @@ static const struct vcap_field is0_full_actionfield[] = {
.offset = 38,
.width = 3,
},
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 41,
- .width = 3,
- },
[VCAP_AF_VID_VAL] = {
.type = VCAP_FIELD_U32,
.offset = 44,
.width = 13,
},
- [VCAP_AF_VLAN_POP_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 57,
- .width = 1,
- },
- [VCAP_AF_VLAN_POP_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 58,
- .width = 2,
- },
- [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 60,
- .width = 1,
- },
- [VCAP_AF_VLAN_PUSH_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 61,
- .width = 2,
- },
- [VCAP_AF_TPID_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 63,
- .width = 2,
- },
- [VCAP_AF_VLAN_WAS_TAGGED] = {
- .type = VCAP_FIELD_U32,
- .offset = 65,
- .width = 2,
- },
[VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
.type = VCAP_FIELD_BIT,
.offset = 67,
@@ -4136,126 +3059,6 @@ static const struct vcap_field is0_full_actionfield[] = {
.offset = 83,
.width = 65,
},
- [VCAP_AF_RT_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 148,
- .width = 2,
- },
- [VCAP_AF_LPM_AFFIX_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 150,
- .width = 1,
- },
- [VCAP_AF_LPM_AFFIX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 151,
- .width = 10,
- },
- [VCAP_AF_RLEG_DMAC_CHK_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 161,
- .width = 1,
- },
- [VCAP_AF_TTL_DECR_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 162,
- .width = 1,
- },
- [VCAP_AF_L3_MAC_UPDATE_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 163,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 164,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 165,
- .width = 3,
- },
- [VCAP_AF_MIP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 168,
- .width = 2,
- },
- [VCAP_AF_OAM_Y1731_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 170,
- .width = 3,
- },
- [VCAP_AF_OAM_TWAMP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 173,
- .width = 1,
- },
- [VCAP_AF_OAM_IP_BFD_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 174,
- .width = 1,
- },
- [VCAP_AF_RSVD_LBL_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 175,
- .width = 4,
- },
- [VCAP_AF_TC_LABEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 179,
- .width = 3,
- },
- [VCAP_AF_TTL_LABEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 182,
- .width = 3,
- },
- [VCAP_AF_NUM_VLD_LABELS] = {
- .type = VCAP_FIELD_U32,
- .offset = 185,
- .width = 2,
- },
- [VCAP_AF_FWD_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 187,
- .width = 3,
- },
- [VCAP_AF_MPLS_OAM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 190,
- .width = 3,
- },
- [VCAP_AF_MPLS_MEP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 193,
- .width = 1,
- },
- [VCAP_AF_MPLS_MIP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 194,
- .width = 1,
- },
- [VCAP_AF_MPLS_OAM_FLAVOR] = {
- .type = VCAP_FIELD_BIT,
- .offset = 195,
- .width = 1,
- },
- [VCAP_AF_MPLS_IP_CTRL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 196,
- .width = 1,
- },
- [VCAP_AF_CUSTOM_ACE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 197,
- .width = 5,
- },
- [VCAP_AF_CUSTOM_ACE_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 202,
- .width = 2,
- },
[VCAP_AF_PAG_OVERRIDE_MASK] = {
.type = VCAP_FIELD_U32,
.offset = 204,
@@ -4266,86 +3069,6 @@ static const struct vcap_field is0_full_actionfield[] = {
.offset = 212,
.width = 8,
},
- [VCAP_AF_S2_KEY_SEL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 220,
- .width = 1,
- },
- [VCAP_AF_S2_KEY_SEL_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 221,
- .width = 6,
- },
- [VCAP_AF_INJ_MASQ_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 227,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_PORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 228,
- .width = 7,
- },
- [VCAP_AF_LPORT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 235,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_LPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 236,
- .width = 7,
- },
- [VCAP_AF_MATCH_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 243,
- .width = 16,
- },
- [VCAP_AF_MATCH_ID_MASK] = {
- .type = VCAP_FIELD_U32,
- .offset = 259,
- .width = 16,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 275,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 277,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT] = {
- .type = VCAP_FIELD_U32,
- .offset = 278,
- .width = 5,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 283,
- .width = 5,
- },
- [VCAP_AF_NXT_NORM_W16_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 288,
- .width = 5,
- },
- [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 293,
- .width = 2,
- },
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 295,
- .width = 2,
- },
- [VCAP_AF_NXT_NORMALIZE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 297,
- .width = 1,
- },
[VCAP_AF_NXT_IDX_CTRL] = {
.type = VCAP_FIELD_U32,
.offset = 298,
@@ -4364,16 +3087,6 @@ static const struct vcap_field is0_class_reduced_actionfield[] = {
.offset = 0,
.width = 1,
},
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 1,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 2,
- .width = 3,
- },
[VCAP_AF_QOS_ENA] = {
.type = VCAP_FIELD_BIT,
.offset = 5,
@@ -4409,46 +3122,11 @@ static const struct vcap_field is0_class_reduced_actionfield[] = {
.offset = 17,
.width = 3,
},
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 20,
- .width = 3,
- },
[VCAP_AF_VID_VAL] = {
.type = VCAP_FIELD_U32,
.offset = 23,
.width = 13,
},
- [VCAP_AF_VLAN_POP_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 36,
- .width = 1,
- },
- [VCAP_AF_VLAN_POP_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 37,
- .width = 2,
- },
- [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 39,
- .width = 1,
- },
- [VCAP_AF_VLAN_PUSH_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 40,
- .width = 2,
- },
- [VCAP_AF_TPID_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 42,
- .width = 2,
- },
- [VCAP_AF_VLAN_WAS_TAGGED] = {
- .type = VCAP_FIELD_U32,
- .offset = 44,
- .width = 2,
- },
[VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
.type = VCAP_FIELD_BIT,
.offset = 46,
@@ -4459,61 +3137,6 @@ static const struct vcap_field is0_class_reduced_actionfield[] = {
.offset = 47,
.width = 12,
},
- [VCAP_AF_FWD_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 59,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 60,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 61,
- .width = 3,
- },
- [VCAP_AF_MIP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 64,
- .width = 2,
- },
- [VCAP_AF_OAM_Y1731_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 66,
- .width = 3,
- },
- [VCAP_AF_LPORT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 69,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_LPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 70,
- .width = 7,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 77,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 79,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT] = {
- .type = VCAP_FIELD_U32,
- .offset = 80,
- .width = 5,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 85,
- .width = 5,
- },
[VCAP_AF_NXT_IDX_CTRL] = {
.type = VCAP_FIELD_U32,
.offset = 90,
@@ -4527,11 +3150,6 @@ static const struct vcap_field is0_class_reduced_actionfield[] = {
};
static const struct vcap_field is2_base_type_actionfield[] = {
- [VCAP_AF_IS_INNER_ACL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 0,
- .width = 1,
- },
[VCAP_AF_PIPELINE_FORCE_ENA] = {
.type = VCAP_FIELD_BIT,
.offset = 1,
@@ -4562,11 +3180,6 @@ static const struct vcap_field is2_base_type_actionfield[] = {
.offset = 10,
.width = 3,
},
- [VCAP_AF_CPU_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 13,
- .width = 1,
- },
[VCAP_AF_LRN_DIS] = {
.type = VCAP_FIELD_BIT,
.offset = 14,
@@ -4592,11 +3205,6 @@ static const struct vcap_field is2_base_type_actionfield[] = {
.offset = 23,
.width = 1,
},
- [VCAP_AF_DLB_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 24,
- .width = 3,
- },
[VCAP_AF_MASK_MODE] = {
.type = VCAP_FIELD_U32,
.offset = 27,
@@ -4607,51 +3215,11 @@ static const struct vcap_field is2_base_type_actionfield[] = {
.offset = 30,
.width = 68,
},
- [VCAP_AF_RSDX_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 98,
- .width = 1,
- },
- [VCAP_AF_RSDX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 99,
- .width = 12,
- },
[VCAP_AF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 111,
.width = 2,
},
- [VCAP_AF_REW_CMD] = {
- .type = VCAP_FIELD_U32,
- .offset = 113,
- .width = 11,
- },
- [VCAP_AF_TTL_UPDATE_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 124,
- .width = 1,
- },
- [VCAP_AF_SAM_SEQ_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 125,
- .width = 1,
- },
- [VCAP_AF_TCP_UDP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 126,
- .width = 1,
- },
- [VCAP_AF_TCP_UDP_DPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 127,
- .width = 16,
- },
- [VCAP_AF_TCP_UDP_SPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 143,
- .width = 16,
- },
[VCAP_AF_MATCH_ID] = {
.type = VCAP_FIELD_U32,
.offset = 159,
@@ -4667,56 +3235,6 @@ static const struct vcap_field is2_base_type_actionfield[] = {
.offset = 191,
.width = 12,
},
- [VCAP_AF_SWAP_MAC_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 203,
- .width = 1,
- },
- [VCAP_AF_ACL_RT_MODE] = {
- .type = VCAP_FIELD_U32,
- .offset = 204,
- .width = 4,
- },
- [VCAP_AF_ACL_MAC] = {
- .type = VCAP_FIELD_U48,
- .offset = 208,
- .width = 48,
- },
- [VCAP_AF_DMAC_OFFSET_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 256,
- .width = 1,
- },
- [VCAP_AF_PTP_MASTER_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 257,
- .width = 2,
- },
- [VCAP_AF_LOG_MSG_INTERVAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 259,
- .width = 4,
- },
- [VCAP_AF_SIP_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 263,
- .width = 5,
- },
- [VCAP_AF_RLEG_STAT_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 268,
- .width = 3,
- },
- [VCAP_AF_IGR_ACL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 271,
- .width = 1,
- },
- [VCAP_AF_EGR_ACL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 272,
- .width = 1,
- },
};
static const struct vcap_field es2_base_type_actionfield[] = {
@@ -4794,16 +3312,6 @@ static const struct vcap_field es2_base_type_actionfield[] = {
/* actionfield_set */
static const struct vcap_set is0_actionfield_set[] = {
- [VCAP_AFS_MLBS] = {
- .type_id = 0,
- .sw_per_item = 2,
- .sw_cnt = 6,
- },
- [VCAP_AFS_MLBS_REDUCED] = {
- .type_id = 0,
- .sw_per_item = 1,
- .sw_cnt = 12,
- },
[VCAP_AFS_CLASSIFICATION] = {
.type_id = 1,
.sw_per_item = 2,
@@ -4839,8 +3347,6 @@ static const struct vcap_set es2_actionfield_set[] = {
/* actionfield_set map */
static const struct vcap_field *is0_actionfield_set_map[] = {
- [VCAP_AFS_MLBS] = is0_mlbs_actionfield,
- [VCAP_AFS_MLBS_REDUCED] = is0_mlbs_reduced_actionfield,
[VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
[VCAP_AFS_FULL] = is0_full_actionfield,
[VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
@@ -4856,8 +3362,6 @@ static const struct vcap_field *es2_actionfield_set_map[] = {
/* actionfield_set map size */
static int is0_actionfield_set_map_size[] = {
- [VCAP_AFS_MLBS] = ARRAY_SIZE(is0_mlbs_actionfield),
- [VCAP_AFS_MLBS_REDUCED] = ARRAY_SIZE(is0_mlbs_reduced_actionfield),
[VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
[VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
[VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
@@ -5244,17 +3748,22 @@ static const char * const vcap_keyfield_set_names[] = {
[VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
[VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
[VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
[VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
[VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
[VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
[VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
[VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
- [VCAP_KFS_MLL] = "VCAP_KFS_MLL",
- [VCAP_KFS_NORMAL] = "VCAP_KFS_NORMAL",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
[VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
[VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
[VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
- [VCAP_KFS_TRI_VID] = "VCAP_KFS_TRI_VID",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
};
/* Actionfieldset names */
@@ -5263,9 +3772,9 @@ static const char * const vcap_actionfield_set_names[] = {
[VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
[VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
[VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
[VCAP_AFS_FULL] = "VCAP_AFS_FULL",
- [VCAP_AFS_MLBS] = "VCAP_AFS_MLBS",
- [VCAP_AFS_MLBS_REDUCED] = "VCAP_AFS_MLBS_REDUCED",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
};
/* Keyfield names */
@@ -5285,6 +3794,7 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
[VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
[VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
[VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
[VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
[VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
@@ -5303,13 +3813,13 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
[VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
[VCAP_KF_COSID_CLS] = "COSID_CLS",
- [VCAP_KF_DST_ENTRY] = "DST_ENTRY",
[VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
[VCAP_KF_ETYPE] = "ETYPE",
[VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
- [VCAP_KF_ETYPE_MPLS] = "ETYPE_MPLS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
[VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
[VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
[VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
[VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
[VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
@@ -5324,17 +3834,24 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
[VCAP_KF_L2_BC_IS] = "L2_BC_IS",
[VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
[VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
[VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
[VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
[VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
[VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
- [VCAP_KF_L3_DMAC_DIP_MATCH] = "L3_DMAC_DIP_MATCH",
[VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
[VCAP_KF_L3_DSCP] = "L3_DSCP",
[VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
[VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
[VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
[VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
[VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
[VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
@@ -5343,9 +3860,10 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
[VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
[VCAP_KF_L3_RT_IS] = "L3_RT_IS",
- [VCAP_KF_L3_SMAC_SIP_MATCH] = "L3_SMAC_SIP_MATCH",
[VCAP_KF_L3_TOS] = "L3_TOS",
[VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
[VCAP_KF_L4_ACK] = "L4_ACK",
[VCAP_KF_L4_DPORT] = "L4_DPORT",
[VCAP_KF_L4_FIN] = "L4_FIN",
@@ -5362,9 +3880,14 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
[VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
[VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
- [VCAP_KF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
[VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
[VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
[VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
[VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
[VCAP_KF_TCP_IS] = "TCP_IS",
@@ -5375,50 +3898,37 @@ static const char * const vcap_keyfield_names[] = {
/* Actionfield names */
static const char * const vcap_actionfield_names[] = {
[VCAP_AF_NO_VALUE] = "(None)",
- [VCAP_AF_ACL_MAC] = "ACL_MAC",
- [VCAP_AF_ACL_RT_MODE] = "ACL_RT_MODE",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
[VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
[VCAP_AF_CNT_ID] = "CNT_ID",
[VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
[VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
- [VCAP_AF_COSID_ENA] = "COSID_ENA",
- [VCAP_AF_COSID_VAL] = "COSID_VAL",
[VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
- [VCAP_AF_CPU_DIS] = "CPU_DIS",
- [VCAP_AF_CPU_ENA] = "CPU_ENA",
- [VCAP_AF_CPU_Q] = "CPU_Q",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
[VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
- [VCAP_AF_CUSTOM_ACE_ENA] = "CUSTOM_ACE_ENA",
- [VCAP_AF_CUSTOM_ACE_OFFSET] = "CUSTOM_ACE_OFFSET",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
[VCAP_AF_DEI_ENA] = "DEI_ENA",
[VCAP_AF_DEI_VAL] = "DEI_VAL",
- [VCAP_AF_DLB_OFFSET] = "DLB_OFFSET",
- [VCAP_AF_DMAC_OFFSET_ENA] = "DMAC_OFFSET_ENA",
[VCAP_AF_DP_ENA] = "DP_ENA",
[VCAP_AF_DP_VAL] = "DP_VAL",
[VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
[VCAP_AF_DSCP_VAL] = "DSCP_VAL",
- [VCAP_AF_EGR_ACL_ENA] = "EGR_ACL_ENA",
[VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
- [VCAP_AF_FWD_DIS] = "FWD_DIS",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
[VCAP_AF_FWD_MODE] = "FWD_MODE",
- [VCAP_AF_FWD_TYPE] = "FWD_TYPE",
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = "GVID_ADD_REPLACE_SEL",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
[VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
[VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
- [VCAP_AF_IGR_ACL_ENA] = "IGR_ACL_ENA",
- [VCAP_AF_INJ_MASQ_ENA] = "INJ_MASQ_ENA",
- [VCAP_AF_INJ_MASQ_LPORT] = "INJ_MASQ_LPORT",
- [VCAP_AF_INJ_MASQ_PORT] = "INJ_MASQ_PORT",
[VCAP_AF_INTR_ENA] = "INTR_ENA",
[VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
[VCAP_AF_ISDX_VAL] = "ISDX_VAL",
- [VCAP_AF_IS_INNER_ACL] = "IS_INNER_ACL",
- [VCAP_AF_L3_MAC_UPDATE_DIS] = "L3_MAC_UPDATE_DIS",
- [VCAP_AF_LOG_MSG_INTERVAL] = "LOG_MSG_INTERVAL",
- [VCAP_AF_LPM_AFFIX_ENA] = "LPM_AFFIX_ENA",
- [VCAP_AF_LPM_AFFIX_VAL] = "LPM_AFFIX_VAL",
- [VCAP_AF_LPORT_ENA] = "LPORT_ENA",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
[VCAP_AF_LRN_DIS] = "LRN_DIS",
[VCAP_AF_MAP_IDX] = "MAP_IDX",
[VCAP_AF_MAP_KEY] = "MAP_KEY",
@@ -5426,71 +3936,53 @@ static const char * const vcap_actionfield_names[] = {
[VCAP_AF_MASK_MODE] = "MASK_MODE",
[VCAP_AF_MATCH_ID] = "MATCH_ID",
[VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
- [VCAP_AF_MIP_SEL] = "MIP_SEL",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
[VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
[VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
- [VCAP_AF_MPLS_IP_CTRL_ENA] = "MPLS_IP_CTRL_ENA",
- [VCAP_AF_MPLS_MEP_ENA] = "MPLS_MEP_ENA",
- [VCAP_AF_MPLS_MIP_ENA] = "MPLS_MIP_ENA",
- [VCAP_AF_MPLS_OAM_FLAVOR] = "MPLS_OAM_FLAVOR",
- [VCAP_AF_MPLS_OAM_TYPE] = "MPLS_OAM_TYPE",
- [VCAP_AF_NUM_VLD_LABELS] = "NUM_VLD_LABELS",
[VCAP_AF_NXT_IDX] = "NXT_IDX",
[VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
- [VCAP_AF_NXT_KEY_TYPE] = "NXT_KEY_TYPE",
- [VCAP_AF_NXT_NORMALIZE] = "NXT_NORMALIZE",
- [VCAP_AF_NXT_NORM_W16_OFFSET] = "NXT_NORM_W16_OFFSET",
- [VCAP_AF_NXT_NORM_W32_OFFSET] = "NXT_NORM_W32_OFFSET",
- [VCAP_AF_NXT_OFFSET_FROM_TYPE] = "NXT_OFFSET_FROM_TYPE",
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = "NXT_TYPE_AFTER_OFFSET",
- [VCAP_AF_OAM_IP_BFD_ENA] = "OAM_IP_BFD_ENA",
- [VCAP_AF_OAM_TWAMP_ENA] = "OAM_TWAMP_ENA",
- [VCAP_AF_OAM_Y1731_SEL] = "OAM_Y1731_SEL",
[VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
[VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
[VCAP_AF_PCP_ENA] = "PCP_ENA",
[VCAP_AF_PCP_VAL] = "PCP_VAL",
- [VCAP_AF_PIPELINE_ACT_SEL] = "PIPELINE_ACT_SEL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
[VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
[VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
- [VCAP_AF_PIPELINE_PT_REDUCED] = "PIPELINE_PT_REDUCED",
[VCAP_AF_POLICE_ENA] = "POLICE_ENA",
[VCAP_AF_POLICE_IDX] = "POLICE_IDX",
[VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
[VCAP_AF_PORT_MASK] = "PORT_MASK",
- [VCAP_AF_PTP_MASTER_SEL] = "PTP_MASTER_SEL",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
[VCAP_AF_QOS_ENA] = "QOS_ENA",
[VCAP_AF_QOS_VAL] = "QOS_VAL",
- [VCAP_AF_REW_CMD] = "REW_CMD",
- [VCAP_AF_RLEG_DMAC_CHK_DIS] = "RLEG_DMAC_CHK_DIS",
- [VCAP_AF_RLEG_STAT_IDX] = "RLEG_STAT_IDX",
- [VCAP_AF_RSDX_ENA] = "RSDX_ENA",
- [VCAP_AF_RSDX_VAL] = "RSDX_VAL",
- [VCAP_AF_RSVD_LBL_VAL] = "RSVD_LBL_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
[VCAP_AF_RT_DIS] = "RT_DIS",
- [VCAP_AF_RT_SEL] = "RT_SEL",
- [VCAP_AF_S2_KEY_SEL_ENA] = "S2_KEY_SEL_ENA",
- [VCAP_AF_S2_KEY_SEL_IDX] = "S2_KEY_SEL_IDX",
- [VCAP_AF_SAM_SEQ_ENA] = "SAM_SEQ_ENA",
- [VCAP_AF_SIP_IDX] = "SIP_IDX",
- [VCAP_AF_SWAP_MAC_ENA] = "SWAP_MAC_ENA",
- [VCAP_AF_TCP_UDP_DPORT] = "TCP_UDP_DPORT",
- [VCAP_AF_TCP_UDP_ENA] = "TCP_UDP_ENA",
- [VCAP_AF_TCP_UDP_SPORT] = "TCP_UDP_SPORT",
- [VCAP_AF_TC_ENA] = "TC_ENA",
- [VCAP_AF_TC_LABEL] = "TC_LABEL",
- [VCAP_AF_TPID_SEL] = "TPID_SEL",
- [VCAP_AF_TTL_DECR_DIS] = "TTL_DECR_DIS",
- [VCAP_AF_TTL_ENA] = "TTL_ENA",
- [VCAP_AF_TTL_LABEL] = "TTL_LABEL",
- [VCAP_AF_TTL_UPDATE_ENA] = "TTL_UPDATE_ENA",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
[VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
[VCAP_AF_VID_VAL] = "VID_VAL",
- [VCAP_AF_VLAN_POP_CNT] = "VLAN_POP_CNT",
- [VCAP_AF_VLAN_POP_CNT_ENA] = "VLAN_POP_CNT_ENA",
- [VCAP_AF_VLAN_PUSH_CNT] = "VLAN_PUSH_CNT",
- [VCAP_AF_VLAN_PUSH_CNT_ENA] = "VLAN_PUSH_CNT_ENA",
- [VCAP_AF_VLAN_WAS_TAGGED] = "VLAN_WAS_TAGGED",
};
/* VCAPs */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
index b5a74f0eef9b..55762f24e196 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
@@ -1,10 +1,18 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
* Microchip VCAP test model interface for kunit testing
*/
+/* This file is autogenerated by cml-utils 2023-02-10 11:16:00 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+ */
+
#ifndef __VCAP_MODEL_KUNIT_H__
#define __VCAP_MODEL_KUNIT_H__
+
+/* VCAPs */
extern const struct vcap_info kunit_test_vcaps[];
extern const struct vcap_statistics kunit_test_vcap_stats;
+
#endif /* __VCAP_MODEL_KUNIT_H__ */
+
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.c b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
new file mode 100644
index 000000000000..09abe7944af6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP TC
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/flow_offload.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+
+#include "vcap_api_client.h"
+#include "vcap_tc.h"
+
+enum vcap_is2_arp_opcode {
+ VCAP_IS2_ARP_REQUEST,
+ VCAP_IS2_ARP_REPLY,
+ VCAP_IS2_RARP_REQUEST,
+ VCAP_IS2_RARP_REPLY,
+};
+
+enum vcap_arp_opcode {
+ VCAP_ARP_OP_RESERVED,
+ VCAP_ARP_OP_REQUEST,
+ VCAP_ARP_OP_REPLY,
+};
+
+int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
+ enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
+ struct flow_match_eth_addrs match;
+ struct vcap_u48_key smac, dmac;
+ int err = 0;
+
+ flow_rule_match_eth_addrs(st->frule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
+ vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ if (err)
+ goto out;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
+ vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ethaddr_usage);
+
+int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IP) {
+ struct flow_match_ipv4_addrs mt;
+
+ flow_rule_match_ipv4_addrs(st->frule, &mt);
+ if (mt.mask->src) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_SIP,
+ be32_to_cpu(mt.key->src),
+ be32_to_cpu(mt.mask->src));
+ if (err)
+ goto out;
+ }
+ if (mt.mask->dst) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_DIP,
+ be32_to_cpu(mt.key->dst),
+ be32_to_cpu(mt.mask->dst));
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv4_usage);
+
+int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IPV6) {
+ struct flow_match_ipv6_addrs mt;
+ struct vcap_u128_key sip;
+ struct vcap_u128_key dip;
+
+ flow_rule_match_ipv6_addrs(st->frule, &mt);
+ /* Check if address masks are non-zero */
+ if (!ipv6_addr_any(&mt.mask->src)) {
+ vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
+ vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_SIP, &sip);
+ if (err)
+ goto out;
+ }
+ if (!ipv6_addr_any(&mt.mask->dst)) {
+ vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
+ vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_DIP, &dip);
+ if (err)
+ goto out;
+ }
+ }
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv6_usage);
+
+int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_ports mt;
+ u16 value, mask;
+ int err = 0;
+
+ flow_rule_match_ports(st->frule, &mt);
+
+ if (mt.mask->src) {
+ value = be16_to_cpu(mt.key->src);
+ mask = be16_to_cpu(mt.mask->src);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->dst) {
+ value = be16_to_cpu(mt.key->dst);
+ mask = be16_to_cpu(mt.mask->dst);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_portnum_usage);
+
+int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
+ struct flow_match_vlan mt;
+ u16 tpid;
+ int err;
+
+ flow_rule_match_cvlan(st->frule, &mt);
+
+ tpid = be16_to_cpu(mt.key->vlan_tpid);
+
+ if (tpid == ETH_P_8021Q) {
+ vid_key = VCAP_KF_8021Q_VID1;
+ pcp_key = VCAP_KF_8021Q_PCP1;
+ }
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_cvlan_usage);
+
+int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
+ enum vcap_key_field vid_key,
+ enum vcap_key_field pcp_key)
+{
+ struct flow_match_vlan mt;
+ int err;
+
+ flow_rule_match_vlan(st->frule, &mt);
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_tpid)
+ st->tpid = be16_to_cpu(mt.key->vlan_tpid);
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_vlan_usage);
+
+int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_tcp mt;
+ u16 tcp_flags_mask;
+ u16 tcp_flags_key;
+ enum vcap_bit val;
+ int err = 0;
+
+ flow_rule_match_tcp(st->frule, &mt);
+ tcp_flags_key = be16_to_cpu(mt.key->flags);
+ tcp_flags_mask = be16_to_cpu(mt.mask->flags);
+
+ if (tcp_flags_mask & TCPHDR_FIN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_FIN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_SYN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_SYN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_RST) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_RST)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_PSH) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_PSH)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_ACK) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_ACK)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_URG) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_URG)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_tcp_usage);
+
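Each TCP flag above is translated independently into a VCAP tri-state bit: a flag present in the mask pins the key bit to 0 or 1, while a clear mask bit leaves it as a wildcard. A minimal sketch of that mapping, assuming the vcap_api enum also carries a VCAP_BIT_ANY "don't care" value (the helper name is illustrative, not part of this file):

        static enum vcap_bit vcap_bit_from_flower(u16 key, u16 mask, u16 flag)
        {
                if (!(mask & flag))
                        return VCAP_BIT_ANY;    /* flag not matched on: wildcard */
                return (key & flag) ? VCAP_BIT_1 : VCAP_BIT_0;
        }

With such a helper, the six if-blocks collapse into one loop over the TCPHDR_* flags and their VCAP_KF_L4_* counterparts.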
+int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_arp mt;
+ u16 value, mask;
+ u32 ipval, ipmsk;
+ int err;
+
+ flow_rule_match_arp(st->frule, &mt);
+
+ if (mt.mask->op) {
+ mask = 0x3;
+ if (st->l3_proto == ETH_P_ARP) {
+ value = mt.key->op == VCAP_ARP_OP_REQUEST ?
+ VCAP_IS2_ARP_REQUEST :
+ VCAP_IS2_ARP_REPLY;
+ } else { /* RARP */
+ value = mt.key->op == VCAP_ARP_OP_REQUEST ?
+ VCAP_IS2_RARP_REQUEST :
+ VCAP_IS2_RARP_REPLY;
+ }
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
+ value, mask);
+ if (err)
+ goto out;
+ }
+
+ /* The IS2 ARP keyset does not support ARP hardware addresses */
+ if (!is_zero_ether_addr(mt.mask->sha) ||
+ !is_zero_ether_addr(mt.mask->tha)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mt.mask->sip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->sip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->tip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->tip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
+
+ return 0;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_arp_usage);
+
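The opcode handling above folds flower's single ARP op field plus the EtherType into the IS2 keyset's four code points; spelled out against the enums at the top of this file:

        /* flower op 1 (request), l3_proto ETH_P_ARP  -> VCAP_IS2_ARP_REQUEST  (0)
         * flower op 2 (reply),   l3_proto ETH_P_ARP  -> VCAP_IS2_ARP_REPLY    (1)
         * flower op 1 (request), l3_proto ETH_P_RARP -> VCAP_IS2_RARP_REQUEST (2)
         * flower op 2 (reply),   l3_proto ETH_P_RARP -> VCAP_IS2_RARP_REPLY   (3)
         * The mask is fixed at 0x3 because all four code points are significant.
         */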
+int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_ip mt;
+ int err = 0;
+
+ flow_rule_match_ip(st->frule, &mt);
+
+ if (mt.mask->tos) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
+ mt.key->tos,
+ mt.mask->tos);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ip_usage);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.h b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
new file mode 100644
index 000000000000..071f892f9aa4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP TC
+ */
+
+#ifndef __VCAP_TC__
+#define __VCAP_TC__
+
+struct vcap_tc_flower_parse_usage {
+ struct flow_cls_offload *fco;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ struct vcap_admin *admin;
+ u16 l3_proto;
+ u8 l4_proto;
+ u16 tpid;
+ unsigned int used_keys;
+};
+
+int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
+ enum vcap_key_field vid_key,
+ enum vcap_key_field pcp_key);
+int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st);
+
+#endif /* __VCAP_TC__ */
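These shared handlers are meant to be fanned out from a per-driver dissector dispatch table rather than called ad hoc. A minimal sketch of the expected call pattern (the table is illustrative; the VLAN handler takes per-driver key arguments, so drivers wrap it instead of listing it directly):

        static int (*const usage_handlers[])(struct vcap_tc_flower_parse_usage *) = {
                [FLOW_DISSECTOR_KEY_ETH_ADDRS]  = vcap_tc_flower_handler_ethaddr_usage,
                [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
                [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
                [FLOW_DISSECTOR_KEY_PORTS]      = vcap_tc_flower_handler_portnum_usage,
                [FLOW_DISSECTOR_KEY_CVLAN]      = vcap_tc_flower_handler_cvlan_usage,
                [FLOW_DISSECTOR_KEY_TCP]        = vcap_tc_flower_handler_tcp_usage,
                [FLOW_DISSECTOR_KEY_ARP]        = vcap_tc_flower_handler_arp_usage,
                [FLOW_DISSECTOR_KEY_IP]         = vcap_tc_flower_handler_ip_usage,
        };

The caller walks the rule's dissector keys, invokes the matching handler, and finally compares st->used_keys against what the rule actually carried in order to reject unsupported matches.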
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 2f6a048dee90..6120f2b6684f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2160,6 +2160,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->hw_features |= NETIF_F_RXHASH;
ndev->features = ndev->hw_features;
ndev->vlan_features = 0;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
err = register_netdev(ndev);
if (err) {
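Populating ndev->xdp_features before register_netdev() is what exports the device's XDP capabilities to user space. A sketch of how a loader could check them before attaching, assuming libbpf's bpf_xdp_query() and its feature_flags field (the helper below is illustrative, not part of this patch):

        #include <bpf/libbpf.h>
        #include <linux/netdev.h>

        static bool dev_supports_xdp_redirect(int ifindex)
        {
                LIBBPF_OPTS(bpf_xdp_query_opts, opts);

                if (bpf_xdp_query(ifindex, 0, &opts))
                        return false;   /* kernel or libbpf too old */

                return opts.feature_flags & NETDEV_XDP_ACT_REDIRECT;
        }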
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index 8dd8c7f425d2..81e605691bb8 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -13,6 +13,7 @@ if NET_VENDOR_MICROSEMI
# Users should depend on NET_SWITCHDEV, HAS_IOMEM, BRIDGE
config MSCC_OCELOT_SWITCH_LIB
+ depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select REGMAP_MMIO
select PACKING
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 5d435a565d4c..16987b72dfc0 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -5,6 +5,7 @@ mscc_ocelot_switch_lib-y := \
ocelot_devlink.o \
ocelot_flower.o \
ocelot_io.o \
+ ocelot_mm.o \
ocelot_police.o \
ocelot_ptp.o \
ocelot_stats.o \
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index da56f9bfeaf0..08acb7b89086 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -6,12 +6,16 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
-#define TABLE_UPDATE_SLEEP_US 10
-#define TABLE_UPDATE_TIMEOUT_US 100000
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+#define MEM_INIT_SLEEP_US 1000
+#define MEM_INIT_TIMEOUT_US 100000
+
#define OCELOT_RSV_VLAN_RANGE_START 4000
struct ocelot_mact_entry {
@@ -2713,6 +2717,46 @@ static void ocelot_detect_features(struct ocelot *ocelot)
ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
}
+static int ocelot_mem_init_status(struct ocelot *ocelot)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+
+ return err ?: val;
+}
+
+int ocelot_reset(struct ocelot *ocelot)
+{
+ int err;
+ u32 val;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ if (err)
+ return err;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
+
+ /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
+ * 100us) before enabling the switch core.
+ */
+ err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
+ MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
+ if (err)
+ return err;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
+
+ return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+}
+EXPORT_SYMBOL(ocelot_reset);
+
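readx_poll_timeout() here keeps re-reading ocelot_mem_init_status(), which folds a regmap read error into its return value via err ?: val, until the MEM_INIT bit reads back as zero or 100 ms pass. A simplified model of the loop the macro expands to (sketch only; the real <linux/iopoll.h> macro also does a final re-read after the deadline):

        unsigned long deadline = jiffies + usecs_to_jiffies(MEM_INIT_TIMEOUT_US);

        for (;;) {
                val = ocelot_mem_init_status(ocelot);   /* err ?: MEM_INIT bit */
                if (!val)
                        break;                          /* memories initialized */
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;
                usleep_range(MEM_INIT_SLEEP_US, MEM_INIT_SLEEP_US * 2);
        }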
int ocelot_init(struct ocelot *ocelot)
{
int i, ret;
@@ -2738,10 +2782,8 @@ int ocelot_init(struct ocelot *ocelot)
return -ENOMEM;
ret = ocelot_stats_init(ocelot);
- if (ret) {
- destroy_workqueue(ocelot->owq);
- return ret;
- }
+ if (ret)
+ goto err_stats_init;
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
@@ -2756,6 +2798,12 @@ int ocelot_init(struct ocelot *ocelot)
if (ocelot->ops->psfp_init)
ocelot->ops->psfp_init(ocelot);
+ if (ocelot->mm_supported) {
+ ret = ocelot_mm_init(ocelot);
+ if (ret)
+ goto err_mm_init;
+ }
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
@@ -2853,6 +2901,12 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG, i);
return 0;
+
+err_mm_init:
+ ocelot_stats_deinit(ocelot);
+err_stats_init:
+ destroy_workqueue(ocelot->owq);
+ return ret;
}
EXPORT_SYMBOL(ocelot_init);
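The new error labels unwind in reverse order of setup: a failure in ocelot_mm_init() first tears down the stats, then the workqueue, following the usual kernel goto-ladder idiom and replacing the inline cleanup that the stats path used before.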
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 70dbd9c4e512..e9a0179448bf 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -109,6 +109,8 @@ void ocelot_mirror_put(struct ocelot *ocelot);
int ocelot_stats_init(struct ocelot *ocelot);
void ocelot_stats_deinit(struct ocelot *ocelot);
+int ocelot_mm_init(struct ocelot *ocelot);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
index b8737efd2a85..d9ea75a14f2f 100644
--- a/drivers/net/ethernet/mscc/ocelot_devlink.c
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -487,6 +487,37 @@ static void ocelot_watermark_init(struct ocelot *ocelot)
ocelot_setup_sharing_watermarks(ocelot);
}
+/* Watermark encode
+ * Bit 8: Unit; 0:1, 1:16
+ * Bit 7-0: Value to be multiplied with unit
+ */
+u16 ocelot_wm_enc(u16 value)
+{
+ WARN_ON(value >= 16 * BIT(8));
+
+ if (value >= BIT(8))
+ return BIT(8) | (value / 16);
+
+ return value;
+}
+EXPORT_SYMBOL(ocelot_wm_enc);
+
+u16 ocelot_wm_dec(u16 wm)
+{
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+EXPORT_SYMBOL(ocelot_wm_dec);
+
+void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+EXPORT_SYMBOL(ocelot_wm_stat);
+
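The 9-bit watermark format stores values below 256 exactly and larger ones in units of 16, so encoding is lossy above 255. A few worked values, relying only on the helpers above:

        /* ocelot_wm_enc(200)         == 200          (unit 1, exact)
         * ocelot_wm_enc(1000)        == BIT(8) | 62  (unit 16: 1000 / 16 = 62)
         * ocelot_wm_dec(BIT(8) | 62) == 992          (62 * 16: 8 cells rounded away)
         * ocelot_wm_enc(4096) trips the WARN_ON; 255 * 16 = 4080 is the ceiling.
         */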
/* Pool size and type are fixed up at runtime. Keeping this structure to
* look up the cell size multipliers.
*/
diff --git a/drivers/net/ethernet/mscc/ocelot_mm.c b/drivers/net/ethernet/mscc/ocelot_mm.c
new file mode 100644
index 000000000000..0a8f21ae23f0
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_mm.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Hardware library for MAC Merge Layer and Frame Preemption on TSN-capable
+ * switches (VSC9959)
+ *
+ * Copyright 2022-2023 NXP
+ */
+#include <linux/ethtool.h>
+#include <soc/mscc/ocelot.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_qsys.h>
+
+#include "ocelot.h"
+
+static const char *
+mm_verify_state_to_string(enum ethtool_mm_verify_status state)
+{
+ switch (state) {
+ case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ return "INITIAL";
+ case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ return "VERIFYING";
+ case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ return "SUCCEEDED";
+ case ETHTOOL_MM_VERIFY_STATUS_FAILED:
+ return "FAILED";
+ case ETHTOOL_MM_VERIFY_STATUS_DISABLED:
+ return "DISABLED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static enum ethtool_mm_verify_status ocelot_mm_verify_status(u32 val)
+{
+ switch (DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_X(val)) {
+ case 0:
+ return ETHTOOL_MM_VERIFY_STATUS_INITIAL;
+ case 1:
+ return ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ case 2:
+ return ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ case 3:
+ return ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ case 4:
+ return ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ default:
+ return ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+ }
+}
+
+void ocelot_port_mm_irq(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm = &ocelot->mm[port];
+ enum ethtool_mm_verify_status verify_status;
+ u32 val;
+
+ mutex_lock(&mm->lock);
+
+ val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS);
+
+ verify_status = ocelot_mm_verify_status(val);
+ if (mm->verify_status != verify_status) {
+ dev_dbg(ocelot->dev,
+ "Port %d MAC Merge verification state %s\n",
+ port, mm_verify_state_to_string(verify_status));
+ mm->verify_status = verify_status;
+ }
+
+ if (val & DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY) {
+ mm->tx_active = !!(val & DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STATUS);
+
+ dev_dbg(ocelot->dev, "Port %d TX preemption %s\n",
+ port, mm->tx_active ? "active" : "inactive");
+ }
+
+ if (val & DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY) {
+ dev_err(ocelot->dev,
+			"Unexpected P-frame received on port %d while verification had failed or had not yet completed\n",
+ port);
+ }
+
+ if (val & DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY) {
+ dev_err(ocelot->dev,
+			"Unexpected request to transmit a P-frame on port %d while verification had failed or had not yet completed, or while MM_TX_ENA=0\n",
+ port);
+ }
+
+ ocelot_port_writel(ocelot_port, val, DEV_MM_STATUS);
+
+ mutex_unlock(&mm->lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mm_irq);
+
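The final writel pushes the just-read value back to DEV_MM_STATUS, which (assuming the sticky fields are write-one-to-clear, as the _STICKY suffixes suggest) acknowledges the events so that the next interrupt only reports fresh transitions.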
+int ocelot_port_set_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 mm_enable = 0, verify_disable = 0, add_frag_size;
+ struct ocelot_mm_state *mm;
+ int err;
+
+ if (!ocelot->mm_supported)
+ return -EOPNOTSUPP;
+
+ mm = &ocelot->mm[port];
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &add_frag_size, extack);
+ if (err)
+ return err;
+
+ if (cfg->pmac_enabled)
+ mm_enable |= DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA;
+
+ if (cfg->tx_enabled)
+ mm_enable |= DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA;
+
+ if (!cfg->verify_enabled)
+ verify_disable = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS;
+
+ mutex_lock(&mm->lock);
+
+ ocelot_port_rmwl(ocelot_port, mm_enable,
+ DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA |
+ DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA,
+ DEV_MM_ENABLE_CONFIG);
+
+ ocelot_port_rmwl(ocelot_port, verify_disable |
+ DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(cfg->verify_time),
+ DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS |
+ DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M,
+ DEV_MM_VERIF_CONFIG);
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(add_frag_size),
+ QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M,
+ QSYS_PREEMPTION_CFG,
+ port);
+
+ mutex_unlock(&mm->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_set_mm);
+
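ocelot_port_set_mm() is the backend for the ethtool MAC Merge ops, so user space reaches it through something like "ethtool --set-mm swp0 tx-enabled on verify-enabled on verify-time 10"; the option spellings are taken from ethtool's MM support as an assumption, not from this patch. Note that the hardware register exposes a verify-disable bit while ethtool models a verify-enable flag, hence the inversion around verify_disable above.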
+int ocelot_port_get_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_state *state)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm;
+ u32 val, add_frag_size;
+
+ if (!ocelot->mm_supported)
+ return -EOPNOTSUPP;
+
+ mm = &ocelot->mm[port];
+
+ mutex_lock(&mm->lock);
+
+ val = ocelot_port_readl(ocelot_port, DEV_MM_ENABLE_CONFIG);
+ state->pmac_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA);
+ state->tx_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA);
+
+ val = ocelot_port_readl(ocelot_port, DEV_MM_VERIF_CONFIG);
+ state->verify_enabled = !(val & DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS);
+ state->verify_time = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(val);
+ state->max_verify_time = 128;
+
+ val = ocelot_read_rix(ocelot, QSYS_PREEMPTION_CFG, port);
+ add_frag_size = QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(val);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(add_frag_size);
+ state->rx_min_frag_size = ETH_ZLEN;
+
+ state->verify_status = mm->verify_status;
+ state->tx_active = mm->tx_active;
+
+ mutex_unlock(&mm->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_mm);
+
+int ocelot_mm_init(struct ocelot *ocelot)
+{
+ struct ocelot_port *ocelot_port;
+ struct ocelot_mm_state *mm;
+ int port;
+
+ if (!ocelot->mm_supported)
+ return 0;
+
+ ocelot->mm = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(*ocelot->mm), GFP_KERNEL);
+ if (!ocelot->mm)
+ return -ENOMEM;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ u32 val;
+
+ mm = &ocelot->mm[port];
+ mutex_init(&mm->lock);
+ ocelot_port = ocelot->ports[port];
+
+ /* Update initial status variable for the
+ * verification state machine
+ */
+ val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS);
+ mm->verify_status = ocelot_mm_verify_status(val);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index 1478c3b21af1..bdb893476832 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -4,6 +4,7 @@
* Copyright (c) 2017 Microsemi Corporation
* Copyright 2022 NXP
*/
+#include <linux/ethtool_netlink.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
@@ -54,6 +55,29 @@ enum ocelot_stat {
OCELOT_STAT_RX_GREEN_PRIO_5,
OCELOT_STAT_RX_GREEN_PRIO_6,
OCELOT_STAT_RX_GREEN_PRIO_7,
+ OCELOT_STAT_RX_ASSEMBLY_ERRS,
+ OCELOT_STAT_RX_SMD_ERRS,
+ OCELOT_STAT_RX_ASSEMBLY_OK,
+ OCELOT_STAT_RX_MERGE_FRAGMENTS,
+ OCELOT_STAT_RX_PMAC_OCTETS,
+ OCELOT_STAT_RX_PMAC_UNICAST,
+ OCELOT_STAT_RX_PMAC_MULTICAST,
+ OCELOT_STAT_RX_PMAC_BROADCAST,
+ OCELOT_STAT_RX_PMAC_SHORTS,
+ OCELOT_STAT_RX_PMAC_FRAGMENTS,
+ OCELOT_STAT_RX_PMAC_JABBERS,
+ OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS,
+ OCELOT_STAT_RX_PMAC_SYM_ERRS,
+ OCELOT_STAT_RX_PMAC_64,
+ OCELOT_STAT_RX_PMAC_65_127,
+ OCELOT_STAT_RX_PMAC_128_255,
+ OCELOT_STAT_RX_PMAC_256_511,
+ OCELOT_STAT_RX_PMAC_512_1023,
+ OCELOT_STAT_RX_PMAC_1024_1526,
+ OCELOT_STAT_RX_PMAC_1527_MAX,
+ OCELOT_STAT_RX_PMAC_PAUSE,
+ OCELOT_STAT_RX_PMAC_CONTROL,
+ OCELOT_STAT_RX_PMAC_LONGS,
OCELOT_STAT_TX_OCTETS,
OCELOT_STAT_TX_UNICAST,
OCELOT_STAT_TX_MULTICAST,
@@ -85,6 +109,20 @@ enum ocelot_stat {
OCELOT_STAT_TX_GREEN_PRIO_6,
OCELOT_STAT_TX_GREEN_PRIO_7,
OCELOT_STAT_TX_AGED,
+ OCELOT_STAT_TX_MM_HOLD,
+ OCELOT_STAT_TX_MERGE_FRAGMENTS,
+ OCELOT_STAT_TX_PMAC_OCTETS,
+ OCELOT_STAT_TX_PMAC_UNICAST,
+ OCELOT_STAT_TX_PMAC_MULTICAST,
+ OCELOT_STAT_TX_PMAC_BROADCAST,
+ OCELOT_STAT_TX_PMAC_PAUSE,
+ OCELOT_STAT_TX_PMAC_64,
+ OCELOT_STAT_TX_PMAC_65_127,
+ OCELOT_STAT_TX_PMAC_128_255,
+ OCELOT_STAT_TX_PMAC_256_511,
+ OCELOT_STAT_TX_PMAC_512_1023,
+ OCELOT_STAT_TX_PMAC_1024_1526,
+ OCELOT_STAT_TX_PMAC_1527_MAX,
OCELOT_STAT_DROP_LOCAL,
OCELOT_STAT_DROP_TAIL,
OCELOT_STAT_DROP_YELLOW_PRIO_0,
@@ -228,6 +266,55 @@ static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
OCELOT_COMMON_STATS,
};
+static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
+ OCELOT_STAT(RX_ASSEMBLY_ERRS),
+ OCELOT_STAT(RX_SMD_ERRS),
+ OCELOT_STAT(RX_ASSEMBLY_OK),
+ OCELOT_STAT(RX_MERGE_FRAGMENTS),
+ OCELOT_STAT(TX_MERGE_FRAGMENTS),
+ OCELOT_STAT(RX_PMAC_OCTETS),
+ OCELOT_STAT(RX_PMAC_UNICAST),
+ OCELOT_STAT(RX_PMAC_MULTICAST),
+ OCELOT_STAT(RX_PMAC_BROADCAST),
+ OCELOT_STAT(RX_PMAC_SHORTS),
+ OCELOT_STAT(RX_PMAC_FRAGMENTS),
+ OCELOT_STAT(RX_PMAC_JABBERS),
+ OCELOT_STAT(RX_PMAC_CRC_ALIGN_ERRS),
+ OCELOT_STAT(RX_PMAC_SYM_ERRS),
+ OCELOT_STAT(RX_PMAC_64),
+ OCELOT_STAT(RX_PMAC_65_127),
+ OCELOT_STAT(RX_PMAC_128_255),
+ OCELOT_STAT(RX_PMAC_256_511),
+ OCELOT_STAT(RX_PMAC_512_1023),
+ OCELOT_STAT(RX_PMAC_1024_1526),
+ OCELOT_STAT(RX_PMAC_1527_MAX),
+ OCELOT_STAT(RX_PMAC_PAUSE),
+ OCELOT_STAT(RX_PMAC_CONTROL),
+ OCELOT_STAT(RX_PMAC_LONGS),
+ OCELOT_STAT(TX_PMAC_OCTETS),
+ OCELOT_STAT(TX_PMAC_UNICAST),
+ OCELOT_STAT(TX_PMAC_MULTICAST),
+ OCELOT_STAT(TX_PMAC_BROADCAST),
+ OCELOT_STAT(TX_PMAC_PAUSE),
+ OCELOT_STAT(TX_PMAC_64),
+ OCELOT_STAT(TX_PMAC_65_127),
+ OCELOT_STAT(TX_PMAC_128_255),
+ OCELOT_STAT(TX_PMAC_256_511),
+ OCELOT_STAT(TX_PMAC_512_1023),
+ OCELOT_STAT(TX_PMAC_1024_1526),
+ OCELOT_STAT(TX_PMAC_1527_MAX),
+};
+
+static const struct ocelot_stat_layout *
+ocelot_get_stats_layout(struct ocelot *ocelot)
+{
+ if (ocelot->mm_supported)
+ return ocelot_mm_stats_layout;
+
+ return ocelot_stats_layout;
+}
+
/* Read the counters from hardware and keep them in region->buf.
* Caller must hold &ocelot->stat_view_lock.
*/
@@ -306,17 +393,20 @@ static void ocelot_check_stats_work(struct work_struct *work)
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
{
+ const struct ocelot_stat_layout *layout;
int i;
if (sset != ETH_SS_STATS)
return;
+ layout = ocelot_get_stats_layout(ocelot);
+
for (i = 0; i < OCELOT_NUM_STATS; i++) {
- if (ocelot_stats_layout[i].name[0] == '\0')
+ if (layout[i].name[0] == '\0')
continue;
- memcpy(data + i * ETH_GSTRING_LEN, ocelot_stats_layout[i].name,
- ETH_GSTRING_LEN);
+ memcpy(data, layout[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
}
}
EXPORT_SYMBOL(ocelot_get_strings);
@@ -350,13 +440,16 @@ out_unlock:
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
+ const struct ocelot_stat_layout *layout;
int i, num_stats = 0;
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
+ layout = ocelot_get_stats_layout(ocelot);
+
for (i = 0; i < OCELOT_NUM_STATS; i++)
- if (ocelot_stats_layout[i].name[0] != '\0')
+ if (layout[i].name[0] != '\0')
num_stats++;
return num_stats;
@@ -366,14 +459,17 @@ EXPORT_SYMBOL(ocelot_get_sset_count);
static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
+ const struct ocelot_stat_layout *layout;
u64 *data = priv;
int i;
+ layout = ocelot_get_stats_layout(ocelot);
+
/* Copy all supported counters */
for (i = 0; i < OCELOT_NUM_STATS; i++) {
int index = port * OCELOT_NUM_STATS + i;
- if (ocelot_stats_layout[i].name[0] == '\0')
+ if (layout[i].name[0] == '\0')
continue;
*data++ = ocelot->stats[index];
@@ -395,14 +491,63 @@ static void ocelot_port_pause_stats_cb(struct ocelot *ocelot, int port, void *pr
pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PAUSE];
}
+static void ocelot_port_pmac_pause_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_pause_stats *pause_stats = priv;
+
+ pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PMAC_PAUSE];
+ pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PMAC_PAUSE];
+}
+
+static void ocelot_port_mm_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_mm_stats *stats = priv;
+
+ stats->MACMergeFrameAssErrorCount = s[OCELOT_STAT_RX_ASSEMBLY_ERRS];
+ stats->MACMergeFrameSmdErrorCount = s[OCELOT_STAT_RX_SMD_ERRS];
+ stats->MACMergeFrameAssOkCount = s[OCELOT_STAT_RX_ASSEMBLY_OK];
+ stats->MACMergeFragCountRx = s[OCELOT_STAT_RX_MERGE_FRAGMENTS];
+ stats->MACMergeFragCountTx = s[OCELOT_STAT_TX_MERGE_FRAGMENTS];
+ stats->MACMergeHoldCount = s[OCELOT_STAT_TX_MM_HOLD];
+}
+
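The MACMerge* member names mirror the IEEE 802.3 clause 30 managed objects for the MAC Merge sublayer (aMACMergeFrameAssErrorCount and friends), so each hardware counter maps one-to-one onto what "ethtool --include-statistics --show-mm" reports (command shape assumed from ethtool's MM support rather than stated in this patch).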
void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
struct ethtool_pause_stats *pause_stats)
{
- ocelot_port_stats_run(ocelot, port, pause_stats,
- ocelot_port_pause_stats_cb);
+ struct net_device *dev;
+
+ switch (pause_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, pause_stats,
+ ocelot_port_pause_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, pause_stats,
+ ocelot_port_pmac_pause_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_pause_stats(dev, pause_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_pause_stats);
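The same three-way dispatch recurs below for the RMON, ctrl, MAC and PHY stats: the ethtool core pre-fills ->src from the user request, EMAC reads the express MAC counters, PMAC reads the preemptible MAC ones (silently skipped when !mm_supported), and AGGREGATE defers to the ethtool_aggregate_*_stats() helpers, which, as an assumption about their implementation, re-invoke the driver once per MAC source and sum the two sets.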
+void ocelot_port_get_mm_stats(struct ocelot *ocelot, int port,
+ struct ethtool_mm_stats *stats)
+{
+ if (!ocelot->mm_supported)
+ return;
+
+ ocelot_port_stats_run(ocelot, port, stats, ocelot_port_mm_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_mm_stats);
+
static const struct ethtool_rmon_hist_range ocelot_rmon_ranges[] = {
{ 64, 64 },
{ 65, 127 },
@@ -441,14 +586,57 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri
rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526];
}
+static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_rmon_stats *rmon_stats = priv;
+
+ rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_PMAC_SHORTS];
+ rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_PMAC_LONGS];
+ rmon_stats->fragments = s[OCELOT_STAT_RX_PMAC_FRAGMENTS];
+ rmon_stats->jabbers = s[OCELOT_STAT_RX_PMAC_JABBERS];
+
+ rmon_stats->hist[0] = s[OCELOT_STAT_RX_PMAC_64];
+ rmon_stats->hist[1] = s[OCELOT_STAT_RX_PMAC_65_127];
+ rmon_stats->hist[2] = s[OCELOT_STAT_RX_PMAC_128_255];
+ rmon_stats->hist[3] = s[OCELOT_STAT_RX_PMAC_256_511];
+ rmon_stats->hist[4] = s[OCELOT_STAT_RX_PMAC_512_1023];
+ rmon_stats->hist[5] = s[OCELOT_STAT_RX_PMAC_1024_1526];
+ rmon_stats->hist[6] = s[OCELOT_STAT_RX_PMAC_1527_MAX];
+
+ rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64];
+ rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127];
+ rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255];
+	rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511];
+	rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023];
+	rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526];
+	rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX];
+}
+
void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges)
{
+ struct net_device *dev;
+
*ranges = ocelot_rmon_ranges;
- ocelot_port_stats_run(ocelot, port, rmon_stats,
- ocelot_port_rmon_stats_cb);
+ switch (rmon_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, rmon_stats,
+ ocelot_port_rmon_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, rmon_stats,
+ ocelot_port_pmac_rmon_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_rmon_stats(dev, rmon_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_rmon_stats);
@@ -460,11 +648,35 @@ static void ocelot_port_ctrl_stats_cb(struct ocelot *ocelot, int port, void *pri
ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_CONTROL];
}
+static void ocelot_port_pmac_ctrl_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_ctrl_stats *ctrl_stats = priv;
+
+ ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_PMAC_CONTROL];
+}
+
void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- ocelot_port_stats_run(ocelot, port, ctrl_stats,
- ocelot_port_ctrl_stats_cb);
+ struct net_device *dev;
+
+ switch (ctrl_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, ctrl_stats,
+ ocelot_port_ctrl_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, ctrl_stats,
+ ocelot_port_pmac_ctrl_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_ctrl_stats(dev, ctrl_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_ctrl_stats);
@@ -510,11 +722,60 @@ static void ocelot_port_mac_stats_cb(struct ocelot *ocelot, int port, void *priv
mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
}
+static void ocelot_port_pmac_mac_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_mac_stats *mac_stats = priv;
+
+ mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_PMAC_OCTETS];
+ mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_PMAC_64] +
+ s[OCELOT_STAT_TX_PMAC_65_127] +
+ s[OCELOT_STAT_TX_PMAC_128_255] +
+ s[OCELOT_STAT_TX_PMAC_256_511] +
+ s[OCELOT_STAT_TX_PMAC_512_1023] +
+ s[OCELOT_STAT_TX_PMAC_1024_1526] +
+ s[OCELOT_STAT_TX_PMAC_1527_MAX];
+ mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_PMAC_OCTETS];
+ mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_PMAC_64] +
+ s[OCELOT_STAT_RX_PMAC_65_127] +
+ s[OCELOT_STAT_RX_PMAC_128_255] +
+ s[OCELOT_STAT_RX_PMAC_256_511] +
+ s[OCELOT_STAT_RX_PMAC_512_1023] +
+ s[OCELOT_STAT_RX_PMAC_1024_1526] +
+ s[OCELOT_STAT_RX_PMAC_1527_MAX];
+ mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_MULTICAST];
+ mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_BROADCAST];
+ mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_MULTICAST];
+ mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_BROADCAST];
+ mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_PMAC_LONGS];
+	/* Sadly, C_RX_CRC is the sum of FCS and alignment errors; they are not
+ * counted individually.
+ */
+ mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS];
+ mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS];
+}
+
void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_mac_stats *mac_stats)
{
- ocelot_port_stats_run(ocelot, port, mac_stats,
- ocelot_port_mac_stats_cb);
+ struct net_device *dev;
+
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, mac_stats,
+ ocelot_port_mac_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, mac_stats,
+ ocelot_port_pmac_mac_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_mac_stats(dev, mac_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_mac_stats);
@@ -526,11 +787,35 @@ static void ocelot_port_phy_stats_cb(struct ocelot *ocelot, int port, void *priv
phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_SYM_ERRS];
}
+static void ocelot_port_pmac_phy_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_phy_stats *phy_stats = priv;
+
+ phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_PMAC_SYM_ERRS];
+}
+
void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
- ocelot_port_stats_run(ocelot, port, phy_stats,
- ocelot_port_phy_stats_cb);
+ struct net_device *dev;
+
+ switch (phy_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, phy_stats,
+ ocelot_port_phy_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, phy_stats,
+ ocelot_port_pmac_phy_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_phy_stats(dev, phy_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
@@ -602,16 +887,19 @@ EXPORT_SYMBOL(ocelot_port_get_stats64);
static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
{
struct ocelot_stats_region *region = NULL;
+ const struct ocelot_stat_layout *layout;
unsigned int last = 0;
int i;
INIT_LIST_HEAD(&ocelot->stats_regions);
+ layout = ocelot_get_stats_layout(ocelot);
+
for (i = 0; i < OCELOT_NUM_STATS; i++) {
- if (!ocelot_stats_layout[i].reg)
+ if (!layout[i].reg)
continue;
- if (region && ocelot_stats_layout[i].reg == last + 4) {
+ if (region && layout[i].reg == last + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -620,17 +908,17 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
return -ENOMEM;
/* enum ocelot_stat must be kept sorted in the same
- * order as ocelot_stats_layout[i].reg in order to have
- * efficient bulking
+ * order as layout[i].reg in order to have efficient
+ * bulking
*/
- WARN_ON(last >= ocelot_stats_layout[i].reg);
+ WARN_ON(last >= layout[i].reg);
- region->base = ocelot_stats_layout[i].reg;
+ region->base = layout[i].reg;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
- last = ocelot_stats_layout[i].reg;
+ last = layout[i].reg;
}
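The coalescing above turns every run of counters whose registers sit 4 bytes apart into one bulk-readable region, which is why the WARN_ON insists that the enum order tracks the register order. A worked example under no assumptions beyond the loop itself:

        /* layout regs 0x100, 0x104, 0x108, 0x200 yield two regions:
         *   { .base = 0x100, .count = 3 }   one three-word bulk regmap read
         *   { .base = 0x200, .count = 1 }
         * An unsorted or holed enum would fragment this into per-counter reads.
         */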
list_for_each_entry(region, &ocelot->stats_regions, node) {
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index b097fd4a4061..7388c3b0535c 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -6,7 +6,6 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
@@ -17,6 +16,7 @@
#include <linux/skbuff.h>
#include <net/switchdev.h>
+#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_hsio.h>
#include <soc/mscc/vsc7514_regs.h>
@@ -26,80 +26,6 @@
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
-#define MEM_INIT_SLEEP_US 1000
-#define MEM_INIT_TIMEOUT_US 100000
-
-static const u32 *ocelot_regmap[TARGET_MAX] = {
- [ANA] = vsc7514_ana_regmap,
- [QS] = vsc7514_qs_regmap,
- [QSYS] = vsc7514_qsys_regmap,
- [REW] = vsc7514_rew_regmap,
- [SYS] = vsc7514_sys_regmap,
- [S0] = vsc7514_vcap_regmap,
- [S1] = vsc7514_vcap_regmap,
- [S2] = vsc7514_vcap_regmap,
- [PTP] = vsc7514_ptp_regmap,
- [DEV_GMII] = vsc7514_dev_gmii_regmap,
-};
-
-static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
- [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
- [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
- [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
- [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
- [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
- [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
- [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
- [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
- [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
- [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
- [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
- [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
- [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
- [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
- [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
- [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
- [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
- [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
- [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
- [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
- [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
- [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
- [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
- [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
- [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
- [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
- [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
- [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
- [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
- [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
- [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
- [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
- [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
- [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
- [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
- [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
- /* Replicated per number of ports (12), register size 4 per port */
- [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
- [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
- [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
- [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
- [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
- [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
- [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
- [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
- [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
- [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
-};
-
static void ocelot_pll5_init(struct ocelot *ocelot)
{
/* Configure PLL5. This will need a proper CCF driver
@@ -133,11 +59,11 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
{
int ret;
- ocelot->map = ocelot_regmap;
+ ocelot->map = vsc7514_regmap;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
- ret = ocelot_regfields_init(ocelot, ocelot_regfields);
+ ret = ocelot_regfields_init(ocelot, vsc7514_regfields);
if (ret)
return ret;
@@ -190,73 +116,6 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
-static int ocelot_mem_init_status(struct ocelot *ocelot)
-{
- unsigned int val;
- int err;
-
- err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
-
- return err ?: val;
-}
-
-static int ocelot_reset(struct ocelot *ocelot)
-{
- int err;
- u32 val;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- if (err)
- return err;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- if (err)
- return err;
-
- /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
- * 100us) before enabling the switch core.
- */
- err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
- MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
- if (err)
- return err;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- if (err)
- return err;
-
- return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
-}
-
-/* Watermark encode
- * Bit 8: Unit; 0:1, 1:16
- * Bit 7-0: Value to be multiplied with unit
- */
-static u16 ocelot_wm_enc(u16 value)
-{
- WARN_ON(value >= 16 * BIT(8));
-
- if (value >= BIT(8))
- return BIT(8) | (value / 16);
-
- return value;
-}
-
-static u16 ocelot_wm_dec(u16 wm)
-{
- if (wm & BIT(8))
- return (wm & GENMASK(7, 0)) * 16;
-
- return wm;
-}
-
-static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
-{
- *inuse = (val & GENMASK(23, 12)) >> 12;
- *maxuse = val & GENMASK(11, 0);
-}
-
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
@@ -266,49 +125,6 @@ static const struct ocelot_ops ocelot_ops = {
.netdev_to_port = ocelot_netdev_to_port,
};
-static struct vcap_props vsc7514_vcap_props[] = {
- [VCAP_ES0] = {
- .action_type_width = 0,
- .action_table = {
- [ES0_ACTION_TYPE_NORMAL] = {
- .width = 73, /* HIT_STICKY not included */
- .count = 1,
- },
- },
- .target = S0,
- .keys = vsc7514_vcap_es0_keys,
- .actions = vsc7514_vcap_es0_actions,
- },
- [VCAP_IS1] = {
- .action_type_width = 0,
- .action_table = {
- [IS1_ACTION_TYPE_NORMAL] = {
- .width = 78, /* HIT_STICKY not included */
- .count = 4,
- },
- },
- .target = S1,
- .keys = vsc7514_vcap_is1_keys,
- .actions = vsc7514_vcap_is1_actions,
- },
- [VCAP_IS2] = {
- .action_type_width = 1,
- .action_table = {
- [IS2_ACTION_TYPE_NORMAL] = {
- .width = 49,
- .count = 2
- },
- [IS2_ACTION_TYPE_SMAC_SIP] = {
- .width = 6,
- .count = 4
- },
- },
- .target = S2,
- .keys = vsc7514_vcap_is2_keys,
- .actions = vsc7514_vcap_is2_actions,
- },
-};
-
static struct ptp_clock_info ocelot_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "ocelot ptp",
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index 9d2d3e13cacf..ef6fd3f6be30 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -9,7 +9,66 @@
#include <soc/mscc/vsc7514_regs.h>
#include "ocelot.h"
-const u32 vsc7514_ana_regmap[] = {
+const struct reg_field vsc7514_regfields[REGFIELD_MAX] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
+ [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
+ [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
+ [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
+ [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+ /* Replicated per number of ports (12), register size 4 per port */
+ [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
+ [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
+ [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
+ [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
+ [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
+};
+EXPORT_SYMBOL(vsc7514_regfields);
+
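These reg_field descriptors are consumed through the standard <linux/regmap.h> field API. A minimal, hypothetical sketch of how a caller binds one table entry and writes it; the probe context and the "map" handle are assumptions, while devm_regmap_field_alloc() and regmap_field_write() are stock regmap calls:

/* Hypothetical probe-time usage of one vsc7514_regfields entry via
 * the stock <linux/regmap.h> field API; "map" is an assumed handle.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <soc/mscc/vsc7514_regs.h>

static int example_mem_enable(struct device *dev, struct regmap *map)
{
	struct regmap_field *mem_ena;

	/* Bind the descriptor to a live handle... */
	mem_ena = devm_regmap_field_alloc(dev, map,
					  vsc7514_regfields[SYS_RESET_CFG_MEM_ENA]);
	if (IS_ERR(mem_ena))
		return PTR_ERR(mem_ena);

	/* ...then read-modify-write just bit 1 of SYS_RESET_CFG. */
	return regmap_field_write(mem_ena, 1);
}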
+static const u32 vsc7514_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
REG(ANA_VLANMASK, 0x009004),
REG(ANA_PORT_B_DOMAIN, 0x009008),
@@ -89,9 +148,8 @@ const u32 vsc7514_ana_regmap[] = {
REG(ANA_POL_HYST, 0x008bec),
REG(ANA_POL_MISC_CFG, 0x008bf0),
};
-EXPORT_SYMBOL(vsc7514_ana_regmap);
-const u32 vsc7514_qs_regmap[] = {
+static const u32 vsc7514_qs_regmap[] = {
REG(QS_XTR_GRP_CFG, 0x000000),
REG(QS_XTR_RD, 0x000008),
REG(QS_XTR_FRM_PRUNING, 0x000010),
@@ -105,9 +163,8 @@ const u32 vsc7514_qs_regmap[] = {
REG(QS_INJ_ERR, 0x000040),
REG(QS_INH_DBG, 0x000048),
};
-EXPORT_SYMBOL(vsc7514_qs_regmap);
-const u32 vsc7514_qsys_regmap[] = {
+static const u32 vsc7514_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x011200),
REG(QSYS_SWITCH_PORT_MODE, 0x011234),
REG(QSYS_STAT_CNT_CFG, 0x011264),
@@ -150,9 +207,8 @@ const u32 vsc7514_qsys_regmap[] = {
REG(QSYS_SE_STATE, 0x00004c),
REG(QSYS_HSCH_MISC_CFG, 0x011388),
};
-EXPORT_SYMBOL(vsc7514_qsys_regmap);
-const u32 vsc7514_rew_regmap[] = {
+static const u32 vsc7514_rew_regmap[] = {
REG(REW_PORT_VLAN_CFG, 0x000000),
REG(REW_TAG_CFG, 0x000004),
REG(REW_PORT_CFG, 0x000008),
@@ -165,9 +221,8 @@ const u32 vsc7514_rew_regmap[] = {
REG(REW_STAT_CFG, 0x000890),
REG(REW_PPT, 0x000680),
};
-EXPORT_SYMBOL(vsc7514_rew_regmap);
-const u32 vsc7514_sys_regmap[] = {
+static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
@@ -288,9 +343,8 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_PTP_NXT, 0x0006c0),
REG(SYS_PTP_CFG, 0x0006c4),
};
-EXPORT_SYMBOL(vsc7514_sys_regmap);
-const u32 vsc7514_vcap_regmap[] = {
+static const u32 vsc7514_vcap_regmap[] = {
/* VCAP_CORE_CFG */
REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
REG(VCAP_CORE_MV_CFG, 0x000004),
@@ -312,9 +366,8 @@ const u32 vsc7514_vcap_regmap[] = {
REG(VCAP_CONST_CORE_CNT, 0x0003b8),
REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
-EXPORT_SYMBOL(vsc7514_vcap_regmap);
-const u32 vsc7514_ptp_regmap[] = {
+static const u32 vsc7514_ptp_regmap[] = {
REG(PTP_PIN_CFG, 0x000000),
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
@@ -325,9 +378,8 @@ const u32 vsc7514_ptp_regmap[] = {
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
};
-EXPORT_SYMBOL(vsc7514_ptp_regmap);
-const u32 vsc7514_dev_gmii_regmap[] = {
+static const u32 vsc7514_dev_gmii_regmap[] = {
REG(DEV_CLOCK_CFG, 0x0),
REG(DEV_PORT_MISC, 0x4),
REG(DEV_EVENTS, 0x8),
@@ -368,9 +420,22 @@ const u32 vsc7514_dev_gmii_regmap[] = {
REG(DEV_PCS_FX100_CFG, 0x94),
REG(DEV_PCS_FX100_STATUS, 0x98),
};
-EXPORT_SYMBOL(vsc7514_dev_gmii_regmap);
-const struct vcap_field vsc7514_vcap_es0_keys[] = {
+const u32 *vsc7514_regmap[TARGET_MAX] = {
+ [ANA] = vsc7514_ana_regmap,
+ [QS] = vsc7514_qs_regmap,
+ [QSYS] = vsc7514_qsys_regmap,
+ [REW] = vsc7514_rew_regmap,
+ [SYS] = vsc7514_sys_regmap,
+ [S0] = vsc7514_vcap_regmap,
+ [S1] = vsc7514_vcap_regmap,
+ [S2] = vsc7514_vcap_regmap,
+ [PTP] = vsc7514_ptp_regmap,
+ [DEV_GMII] = vsc7514_dev_gmii_regmap,
+};
+EXPORT_SYMBOL(vsc7514_regmap);
+
+static const struct vcap_field vsc7514_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 4 },
[VCAP_ES0_IGR_PORT] = { 4, 4 },
[VCAP_ES0_RSV] = { 8, 2 },
@@ -380,9 +445,8 @@ const struct vcap_field vsc7514_vcap_es0_keys[] = {
[VCAP_ES0_DP] = { 24, 1 },
[VCAP_ES0_PCP] = { 25, 3 },
};
-EXPORT_SYMBOL(vsc7514_vcap_es0_keys);
-const struct vcap_field vsc7514_vcap_es0_actions[] = {
+static const struct vcap_field vsc7514_vcap_es0_actions[] = {
[VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 },
[VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 },
[VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 },
@@ -402,9 +466,8 @@ const struct vcap_field vsc7514_vcap_es0_actions[] = {
[VCAP_ES0_ACT_RSV] = { 49, 24 },
[VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_es0_actions);
-const struct vcap_field vsc7514_vcap_is1_keys[] = {
+static const struct vcap_field vsc7514_vcap_is1_keys[] = {
[VCAP_IS1_HK_TYPE] = { 0, 1 },
[VCAP_IS1_HK_LOOKUP] = { 1, 2 },
[VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 },
@@ -454,9 +517,8 @@ const struct vcap_field vsc7514_vcap_is1_keys[] = {
[VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 },
[VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is1_keys);
-const struct vcap_field vsc7514_vcap_is1_actions[] = {
+static const struct vcap_field vsc7514_vcap_is1_actions[] = {
[VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 },
[VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 },
[VCAP_IS1_ACT_QOS_ENA] = { 7, 1 },
@@ -479,9 +541,8 @@ const struct vcap_field vsc7514_vcap_is1_actions[] = {
[VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 },
[VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is1_actions);
-const struct vcap_field vsc7514_vcap_is2_keys[] = {
+static const struct vcap_field vsc7514_vcap_is2_keys[] = {
/* Common: 46 bits */
[VCAP_IS2_TYPE] = { 0, 4 },
[VCAP_IS2_HK_FIRST] = { 4, 1 },
@@ -560,9 +621,8 @@ const struct vcap_field vsc7514_vcap_is2_keys[] = {
[VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 },
[VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is2_keys);
-const struct vcap_field vsc7514_vcap_is2_actions[] = {
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 },
[VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 },
[VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 },
@@ -579,4 +639,47 @@ const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_ACL_ID] = { 43, 6 },
[VCAP_IS2_ACT_HIT_CNT] = { 49, 32 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is2_actions);
+
+struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc7514_vcap_es0_keys,
+ .actions = vsc7514_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc7514_vcap_is1_keys,
+ .actions = vsc7514_vcap_is1_actions,
+ },
+ [VCAP_IS2] = {
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 49,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .target = S2,
+ .keys = vsc7514_vcap_is2_keys,
+ .actions = vsc7514_vcap_is2_actions,
+ },
+};
+EXPORT_SYMBOL(vsc7514_vcap_props);
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index e785c00b5845..d03d6e96f730 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_NETRONOME
config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 8a250214e289..808599b8066e 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -80,6 +80,8 @@ nfp-objs += \
abm/main.o
endif
-nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o
+nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o nfdk/ipsec.o
nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
+
+nfp-$(CONFIG_DCB) += nic/dcb.o
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 063cd371033a..c0dcce8ae437 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -10,6 +10,7 @@
#include <linux/ktime.h>
#include <net/xfrm.h>
+#include "../nfpcore/nfp_dev.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
#include "crypto.h"
@@ -265,7 +266,8 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
}
}
-static int nfp_net_xfrm_add_state(struct xfrm_state *x)
+static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct net_device *netdev = x->xso.dev;
struct nfp_ipsec_cfg_mssg msg = {};
@@ -286,7 +288,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
cfg->ctrl_word.mode = NFP_IPSEC_PROTMODE_TRANSPORT;
break;
default:
- nn_err(nn, "Unsupported mode for xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for xfrm offload");
return -EINVAL;
}
@@ -298,17 +300,17 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH;
break;
default:
- nn_err(nn, "Unsupported protocol for xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for xfrm offload");
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN) {
- nn_err(nn, "Unsupported XFRM_REPLAY_MODE_ESN for xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported XFRM_REPLAY_MODE_ESN for xfrm offload");
return -EINVAL;
}
if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- nn_err(nn, "Unsupported xfrm offload tyoe\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
return -EINVAL;
}
@@ -325,7 +327,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
if (x->aead) {
trunc_len = -1;
} else {
- nn_err(nn, "Unsupported authentication algorithm\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
return -EINVAL;
}
break;
@@ -334,6 +336,10 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
trunc_len = -1;
break;
case SADB_AALG_MD5HMAC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
set_md5hmac(cfg, &trunc_len);
break;
case SADB_AALG_SHA1HMAC:
@@ -349,19 +355,19 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
set_sha2_512hmac(cfg, &trunc_len);
break;
default:
- nn_err(nn, "Unsupported authentication algorithm\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
return -EINVAL;
}
if (!trunc_len) {
- nn_err(nn, "Unsupported authentication algorithm trunc length\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm trunc length");
return -EINVAL;
}
if (x->aalg) {
key_len = DIV_ROUND_UP(x->aalg->alg_key_len, BITS_PER_BYTE);
if (key_len > sizeof(cfg->auth_key)) {
- nn_err(nn, "Insufficient space for offloaded auth key\n");
+ NL_SET_ERR_MSG_MOD(extack, "Insufficient space for offloaded auth key");
return -EINVAL;
}
for (i = 0; i < key_len / sizeof(cfg->auth_key[0]) ; i++)
@@ -377,18 +383,22 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL;
break;
case SADB_EALG_3DESCBC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
+ return -EINVAL;
+ }
cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_3DES;
break;
case SADB_X_EALG_AES_GCM_ICV16:
case SADB_X_EALG_NULL_AES_GMAC:
if (!x->aead) {
- nn_err(nn, "Invalid AES key data\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid AES key data");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128) {
- nn_err(nn, "ICV must be 128bit with SADB_X_EALG_AES_GCM_ICV16\n");
+ NL_SET_ERR_MSG_MOD(extack, "ICV must be 128bit with SADB_X_EALG_AES_GCM_ICV16");
return -EINVAL;
}
cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CTR;
@@ -396,23 +406,23 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
/* Aead->alg_key_len includes 32-bit salt */
if (set_aes_keylen(cfg, x->props.ealgo, x->aead->alg_key_len - 32)) {
- nn_err(nn, "Unsupported AES key length %d\n", x->aead->alg_key_len);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported AES key length");
return -EINVAL;
}
break;
case SADB_X_EALG_AESCBC:
cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
if (!x->ealg) {
- nn_err(nn, "Invalid AES key data\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid AES key data");
return -EINVAL;
}
if (set_aes_keylen(cfg, x->props.ealgo, x->ealg->alg_key_len) < 0) {
- nn_err(nn, "Unsupported AES key length %d\n", x->ealg->alg_key_len);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported AES key length");
return -EINVAL;
}
break;
default:
- nn_err(nn, "Unsupported encryption algorithm for offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
return -EINVAL;
}
@@ -423,7 +433,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
key_len -= salt_len;
if (key_len > sizeof(cfg->ciph_key)) {
- nn_err(nn, "aead: Insufficient space for offloaded key\n");
+ NL_SET_ERR_MSG_MOD(extack, "aead: Insufficient space for offloaded key");
return -EINVAL;
}
@@ -439,7 +449,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
key_len = DIV_ROUND_UP(x->ealg->alg_key_len, BITS_PER_BYTE);
if (key_len > sizeof(cfg->ciph_key)) {
- nn_err(nn, "ealg: Insufficient space for offloaded key\n");
+ NL_SET_ERR_MSG_MOD(extack, "ealg: Insufficient space for offloaded key");
return -EINVAL;
}
for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]) ; i++)
@@ -462,7 +472,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
}
break;
default:
- nn_err(nn, "Unsupported address family\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported address family");
return -EINVAL;
}
@@ -477,7 +487,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
err = xa_alloc(&nn->xa_ipsec, &saidx, x,
XA_LIMIT(0, NFP_NET_IPSEC_MAX_SA_CNT - 1), GFP_KERNEL);
if (err < 0) {
- nn_err(nn, "Unable to get sa_data number for IPsec\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unable to get sa_data number for IPsec");
return err;
}
@@ -488,7 +498,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
sizeof(msg), nfp_net_ipsec_cfg);
if (err) {
xa_erase(&nn->xa_ipsec, saidx);
- nn_err(nn, "Failed to issue IPsec command err ret=%d\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue IPsec command");
return err;
}
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index db297ee4d7ad..a655f9e69a7b 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -233,8 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf)
if (err <= 0)
return err;
- return devlink_params_register(devlink, nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
+ return devl_params_register(devlink, nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
void nfp_devlink_params_unregister(struct nfp_pf *pf)
@@ -245,6 +245,6 @@ void nfp_devlink_params_unregister(struct nfp_pf *pf)
if (err <= 0)
return;
- devlink_params_unregister(priv_to_devlink(pf), nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
+ devl_params_unregister(priv_to_devlink(pf), nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index f693119541d5..d23830b5bcb8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1964,6 +1964,27 @@ int nfp_fl_ct_stats(struct flow_cls_offload *flow,
return 0;
}
+static bool
+nfp_fl_ct_offload_nft_supported(struct flow_cls_offload *flow)
+{
+ struct flow_rule *flow_rule = flow->rule;
+ struct flow_action *flow_action =
+ &flow_rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT_METADATA) {
+ enum ip_conntrack_info ctinfo =
+ act->ct_metadata.cookie & NFCT_INFOMASK;
+
+ return ctinfo != IP_CT_NEW;
+ }
+ }
+
+ return false;
+}
+
static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
@@ -1976,6 +1997,9 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
extack = flow->common.extack;
switch (flow->command) {
case FLOW_CLS_REPLACE:
+ if (!nfp_fl_ct_offload_nft_supported(flow))
+ return -EOPNOTSUPP;
+
/* Netfilter can request offload multiple times for the same
* flow - protect against adding duplicates.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 861082c5dbff..59fb0583cc08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -192,10 +192,10 @@ static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
return 0;
md_bytes = sizeof(meta_id) +
- !!md_dst * NFP_NET_META_PORTID_SIZE +
- !!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE +
- *ipsec * NFP_NET_META_IPSEC_FIELD_SIZE; /* IPsec has 12 bytes of metadata */
+ (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
+ (!!tls_handle ? NFP_NET_META_CONN_HANDLE_SIZE : 0) +
+ (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
+ (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
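
The md_bytes rewrite in this hunk is purely stylistic: for any pointer or boolean x and size s, "!!x * s" and "(x ? s : 0)" evaluate to the same value. A two-assert stand-alone check:

/* The md_bytes rewrite above is behavior-preserving: for any pointer
 * or boolean x and size s, "!!x * s" equals "(x ? s : 0)".
 */
#include <assert.h>
#include <stddef.h>

int main(void)
{
	const void *x = "present";
	const int s = 4;

	assert(!!x * s == (x ? s : 0)); /* 4 == 4 */
	x = NULL;
	assert(!!x * s == (x ? s : 0)); /* 0 == 0 */
	return 0;
}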
@@ -226,9 +226,6 @@ static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
meta_id |= NFP_NET_META_VLAN;
}
if (*ipsec) {
- /* IPsec has three consecutive 4-bit IPsec metadata types,
- * so in total IPsec has three 4 bytes of metadata.
- */
data -= NFP_NET_META_IPSEC_SIZE;
put_unaligned_be32(offload_info.seq_hi, data);
data -= NFP_NET_META_IPSEC_SIZE;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ccacb6ab6c39..d60c0e991a91 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -6,6 +6,7 @@
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/bitfield.h>
+#include <net/xfrm.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -172,25 +173,32 @@ close_block:
static int
nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool *ipsec)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct nfp_ipsec_offload offload_info;
unsigned char *data;
bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (xfrm_offload(skb))
+ *ipsec = nfp_net_ipsec_tx_prep(dp, skb, &offload_info);
+#endif
+
if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
md_dst = NULL;
vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
- if (!(md_dst || vlan_insert))
+ if (!(md_dst || vlan_insert || *ipsec))
return 0;
md_bytes = sizeof(meta_id) +
- !!md_dst * NFP_NET_META_PORTID_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE;
+ (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
+ (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
+ (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
@@ -212,6 +220,17 @@ nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
meta_id |= NFP_NET_META_VLAN;
}
+ if (*ipsec) {
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_hi, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_low, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.handle - 1, data);
+ meta_id <<= NFP_NET_META_IPSEC_FIELD_SIZE;
+ meta_id |= NFP_NET_META_IPSEC << 8 | NFP_NET_META_IPSEC << 4 | NFP_NET_META_IPSEC;
+ }
+
meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
FIELD_PREP(NFDK_META_FIELDS, meta_id);
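
The IPsec branch above prepends three 32-bit metadata words (seq_hi, seq_low, handle - 1) and, with them, three 4-bit type nibbles to meta_id; shifting by NFP_NET_META_IPSEC_FIELD_SIZE (12) makes room for exactly those three nibbles. A stand-alone illustration with a placeholder type value (the real value is the driver's NFP_NET_META_IPSEC):

/* Illustration of the TX metadata type packing above: each prepended
 * 32-bit word adds one 4-bit type nibble, so the three IPsec words
 * add three identical nibbles. The type value 9 is a placeholder.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t meta_ipsec = 9; /* placeholder type code */
	uint32_t meta_id = 0;

	meta_id <<= 12; /* NFP_NET_META_IPSEC_FIELD_SIZE: 3 x 4 bits */
	meta_id |= meta_ipsec << 8 | meta_ipsec << 4 | meta_ipsec;

	printf("0x%x\n", meta_id); /* 0x999 with the placeholder */
	return 0;
}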
@@ -243,6 +262,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
int nr_frags, wr_idx;
dma_addr_t dma_addr;
+ bool ipsec = false;
u64 metadata;
dp = &nn->dp;
@@ -263,7 +283,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb);
+ metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb, &ipsec);
if (unlikely((int)metadata < 0))
goto err_flush;
@@ -361,6 +381,9 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+ if (ipsec)
+ metadata = nfp_nfdk_ipsec_tx(metadata, skb);
+
if (!skb_is_gso(skb)) {
real_len = skb->len;
/* Metadata desc */
@@ -760,6 +783,15 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
return false;
data += sizeof(struct nfp_net_tls_resync_req);
break;
+#ifdef CONFIG_NFP_NET_IPSEC
+ case NFP_NET_META_IPSEC:
+ /* Note: an IPsec packet can have a zero saidx, so add 1
+ * to mark the packet as IPsec within the driver.
+ */
+ meta->ipsec_saidx = get_unaligned_be32(data) + 1;
+ data += 4;
+ break;
+#endif
default:
return true;
}
@@ -1186,6 +1218,13 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (meta.ipsec_saidx != 0 && unlikely(nfp_net_ipsec_rx(&meta, skb))) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+#endif
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
new file mode 100644
index 000000000000..58d8f59eb885
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2023 Corigine, Inc */
+
+#include <net/xfrm.h>
+
+#include "../nfp_net.h"
+#include "nfdk.h"
+
+u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+
+ if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM))
+ flags |= NFDK_DESC_TX_L3_CSUM | NFDK_DESC_TX_L4_CSUM;
+
+ return flags;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
index 0ea51d9f2325..fe55980348e9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
@@ -125,4 +125,12 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
+#ifndef CONFIG_NFP_NET_IPSEC
+static inline u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
+{
+ return flags;
+}
+#else
+u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb);
+#endif
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 70d7484c82af..81b7ca0ad222 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2531,10 +2531,15 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ if (nn->app && nn->app->type->id == NFP_APP_BPF_NIC)
+ netdev->xdp_features |= NETDEV_XDP_ACT_HW_OFFLOAD;
+
/* Finalise the netdev setup */
switch (nn->dp.ops->version) {
case NFP_NFD_VER_NFD3:
netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+ netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
break;
case NFP_NFD_VER_NFDK:
netdev->netdev_ops = &nfp_nfdk_netdev_ops;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index f03dcadff738..669b9dccb6a9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -412,6 +412,7 @@
#define NFP_NET_CFG_MBOX_CMD_IPSEC 3
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
+#define NFP_NET_CFG_MBOX_CMD_DCB_UPDATE 7
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index cc97b3d00414..dfedb52b7e70 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -313,6 +313,10 @@ static const struct nfp_eth_media_link_mode {
.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
.speed = NFP_SPEED_10G,
},
+ [NFP_MEDIA_10GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
[NFP_MEDIA_10GBASE_CX4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
.speed = NFP_SPEED_10G,
@@ -349,6 +353,14 @@ static const struct nfp_eth_media_link_mode {
.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
.speed = NFP_SPEED_25G,
},
+ [NFP_MEDIA_25GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
[NFP_MEDIA_40GBASE_CR4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
.speed = NFP_SPEED_40G,
@@ -2027,16 +2039,16 @@ static int
nfp_net_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
u8 buf[NFP_EEPROM_LEN] = {};
- if (eeprom->len == 0)
- return -EINVAL;
-
if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
return -EOPNOTSUPP;
- eeprom->magic = nn->pdev->vendor | (nn->pdev->device << 16);
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = app->pdev->vendor | (app->pdev->device << 16);
memcpy(bytes, buf + eeprom->offset, eeprom->len);
return 0;
@@ -2046,18 +2058,18 @@ static int
nfp_net_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
u8 buf[NFP_EEPROM_LEN] = {};
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
if (eeprom->len == 0)
return -EINVAL;
- if (eeprom->magic != (nn->pdev->vendor | nn->pdev->device << 16))
+ if (eeprom->magic != (app->pdev->vendor | app->pdev->device << 16))
return -EINVAL;
- if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
- return -EOPNOTSUPP;
-
memcpy(buf + eeprom->offset, bytes, eeprom->len);
if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
return -EOPNOTSUPP;
@@ -2117,6 +2129,9 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_eeprom_len = nfp_net_get_eeprom_len,
+ .get_eeprom = nfp_net_get_eeprom,
+ .set_eeprom = nfp_net_set_eeprom,
.get_module_info = nfp_port_get_module_info,
.get_module_eeprom = nfp_port_get_module_eeprom,
.get_link_ksettings = nfp_net_get_link_ksettings,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index abfe788d558f..cbe4972ba104 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -754,11 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_devlink_unreg;
+ devl_lock(devlink);
err = nfp_devlink_params_register(pf);
if (err)
goto err_shared_buf_unreg;
- devl_lock(devlink);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -791,9 +791,9 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
- devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
+ devl_unlock(devlink);
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
@@ -821,9 +821,10 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
+ nfp_devlink_params_unregister(pf);
+
devl_unlock(devlink);
- nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
nfp_net_pf_free_irqs(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 8f5cab0032d0..781edc451bd4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -140,6 +140,9 @@ enum nfp_ethtool_link_mode_list {
NFP_MEDIA_100GBASE_CR4,
NFP_MEDIA_100GBASE_KP4,
NFP_MEDIA_100GBASE_CR10,
+ NFP_MEDIA_10GBASE_LR,
+ NFP_MEDIA_25GBASE_LR,
+ NFP_MEDIA_25GBASE_ER,
NFP_MEDIA_LINK_MODES_NUMBER
};
diff --git a/drivers/net/ethernet/netronome/nfp/nic/dcb.c b/drivers/net/ethernet/netronome/nfp/nic/dcb.c
new file mode 100644
index 000000000000..bb498ac6bd7d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/dcb.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2023 Corigine, Inc. */
+
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
+#include "../nfp_net_sriov.h"
+
+#include "main.h"
+
+#define NFP_DCB_TRUST_PCP 1
+#define NFP_DCB_TRUST_DSCP 2
+#define NFP_DCB_TRUST_INVALID 0xff
+
+#define NFP_DCB_TSA_VENDOR 1
+#define NFP_DCB_TSA_STRICT 2
+#define NFP_DCB_TSA_ETS 3
+
+#define NFP_DCB_GBL_ENABLE BIT(0)
+#define NFP_DCB_QOS_ENABLE BIT(1)
+#define NFP_DCB_DISABLE 0
+#define NFP_DCB_ALL_QOS_ENABLE (NFP_DCB_GBL_ENABLE | NFP_DCB_QOS_ENABLE)
+
+#define NFP_DCB_UPDATE_MSK_SZ 4
+#define NFP_DCB_TC_RATE_MAX 0xffff
+
+#define NFP_DCB_DATA_OFF_DSCP2IDX 0
+#define NFP_DCB_DATA_OFF_PCP2IDX 64
+#define NFP_DCB_DATA_OFF_TSA 80
+#define NFP_DCB_DATA_OFF_IDX_BW_PCT 88
+#define NFP_DCB_DATA_OFF_RATE 96
+#define NFP_DCB_DATA_OFF_CAP 112
+#define NFP_DCB_DATA_OFF_ENABLE 116
+#define NFP_DCB_DATA_OFF_TRUST 120
+
+#define NFP_DCB_MSG_MSK_ENABLE BIT(31)
+#define NFP_DCB_MSG_MSK_TRUST BIT(30)
+#define NFP_DCB_MSG_MSK_TSA BIT(29)
+#define NFP_DCB_MSG_MSK_DSCP BIT(28)
+#define NFP_DCB_MSG_MSK_PCP BIT(27)
+#define NFP_DCB_MSG_MSK_RATE BIT(26)
+#define NFP_DCB_MSG_MSK_PCT BIT(25)
+
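+/* Layout of one per-vNIC slot in dcbcfg_tbl as implied by the offsets
+ * above (sizes inferred, not authoritative): dscp2idx map at 0,
+ * pcp2idx at 64, tsa at 80, per-index bandwidth percentages at 88,
+ * u16 rates at 96, cap at 112, enable at 116, trust at 120; slots
+ * are NFP_DCB_CFG_STRIDE (256) bytes apart.
+ */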
+static struct nfp_dcb *get_dcb_priv(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb = &((struct nfp_app_nic_private *)nn->app_priv)->dcb;
+
+ return dcb;
+}
+
+static u8 nfp_tsa_ieee2nfp(u8 tsa)
+{
+ switch (tsa) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ return NFP_DCB_TSA_STRICT;
+ case IEEE_8021QAZ_TSA_ETS:
+ return NFP_DCB_TSA_ETS;
+ default:
+ return NFP_DCB_TSA_VENDOR;
+ }
+}
+
+static int nfp_nic_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ ets->prio_tc[i] = dcb->prio2tc[i];
+ ets->tc_tx_bw[i] = dcb->tc_tx_pct[i];
+ ets->tc_tsa[i] = dcb->tc_tsa[i];
+ }
+
+ return 0;
+}
+
+static bool nfp_refresh_tc2idx(struct nfp_net *nn)
+{
+ u8 tc2idx[IEEE_8021QAZ_MAX_TCS];
+ bool change = false;
+ struct nfp_dcb *dcb;
+ int maxstrict = 0;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ tc2idx[i] = i;
+ if (dcb->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT)
+ maxstrict = i;
+ }
+
+ if (maxstrict > 0 && dcb->tc_tsa[0] != IEEE_8021QAZ_TSA_STRICT) {
+ tc2idx[0] = maxstrict;
+ tc2idx[maxstrict] = 0;
+ }
+
+ for (unsigned int j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
+ if (dcb->tc2idx[j] != tc2idx[j]) {
+ change = true;
+ dcb->tc2idx[j] = tc2idx[j];
+ }
+ }
+
+ return change;
+}
+
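
nfp_refresh_tc2idx() above only ever swaps TC 0 with the highest strict-priority TC, so the strictest class lands on hardware index 0. A stand-alone trace of that logic (the TSA codes are local placeholders, not the IEEE_8021QAZ_TSA_* values):

/* Stand-alone trace of the tc2idx refresh above. */
#include <stdio.h>

enum { TSA_ETS, TSA_STRICT };
#define NTC 8

int main(void)
{
	int tc_tsa[NTC] = { TSA_ETS, TSA_STRICT, TSA_ETS, TSA_STRICT,
			    TSA_ETS, TSA_ETS, TSA_ETS, TSA_ETS };
	int tc2idx[NTC], maxstrict = 0;

	for (int i = 0; i < NTC; i++) {
		tc2idx[i] = i;
		if (tc_tsa[i] == TSA_STRICT)
			maxstrict = i;
	}
	/* TC 3 is the highest strict TC and TC 0 is not strict,
	 * so indices 0 and 3 trade places.
	 */
	if (maxstrict > 0 && tc_tsa[0] != TSA_STRICT) {
		tc2idx[0] = maxstrict;
		tc2idx[maxstrict] = 0;
	}
	for (int i = 0; i < NTC; i++)
		printf("%d ", tc2idx[i]); /* 3 1 2 0 4 5 6 7 */
	printf("\n");
	return 0;
}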
+static int nfp_fill_maxrate(struct nfp_net *nn, u64 *max_rate_array)
+{
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 ratembps;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ /* Convert bandwidth from kbps to mbps. */
+ ratembps = max_rate_array[i] / 1024;
+
+ /* Reject input values >= NFP_DCB_TC_RATE_MAX */
+ if (ratembps >= NFP_DCB_TC_RATE_MAX) {
+ nfp_warn(app->cpp, "ratembps(%d) must less than %d.",
+ ratembps, NFP_DCB_TC_RATE_MAX);
+ return -EINVAL;
+ }
+ /* An input value of 0 is mapped to NFP_DCB_TC_RATE_MAX for the firmware. */
+ if (ratembps == 0)
+ ratembps = NFP_DCB_TC_RATE_MAX;
+
+ writew((u16)ratembps, dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_RATE + dcb->tc2idx[i] * 2);
+ /* Rate values from user space must be synced back to the dcb structure. */
+ if (dcb->tc_maxrate != max_rate_array)
+ dcb->tc_maxrate[i] = max_rate_array[i];
+ }
+
+ return 0;
+}
+
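dcbnl expresses ieee_maxrate in units of 1 Kbps, and nfp_fill_maxrate() above narrows that to a u16 in (binary) Mbps, with 0 re-mapped to the NFP_DCB_TC_RATE_MAX sentinel meaning unlimited. A worked example of the mapping:

/* Worked example of the rate mapping in nfp_fill_maxrate() above:
 * Kbps in, u16 (binary) Mbps out, 0 re-mapped to "unlimited".
 */
#include <stdint.h>
#include <stdio.h>

#define NFP_DCB_TC_RATE_MAX 0xffff

int main(void)
{
	uint64_t kbps = 10ull * 1000 * 1000; /* 10 Gbit/s in Kbps */
	uint32_t mbps = kbps / 1024;         /* 9765, fits a u16 */

	if (mbps >= NFP_DCB_TC_RATE_MAX)
		return 1;                    /* would be rejected */
	if (mbps == 0)
		mbps = NFP_DCB_TC_RATE_MAX;  /* 0 means unlimited */

	printf("%u\n", mbps);
	return 0;
}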
+static int update_dscp_maxrate(struct net_device *dev, u32 *update)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+
+ err = nfp_fill_maxrate(nn, dcb->tc_maxrate);
+ if (err)
+ return err;
+
+ *update |= NFP_DCB_MSG_MSK_RATE;
+
+ /* We only refresh dscp in dscp trust mode. */
+ if (dcb->dscp_cnt > 0) {
+ for (unsigned int i = 0; i < NFP_NET_MAX_DSCP; i++) {
+ writeb(dcb->tc2idx[dcb->prio2tc[dcb->dscp2prio[i]]],
+ dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_DSCP2IDX + i);
+ }
+ *update |= NFP_DCB_MSG_MSK_DSCP;
+ }
+
+ return 0;
+}
+
+static void nfp_nic_set_trust(struct nfp_net *nn, u32 *update)
+{
+ struct nfp_dcb *dcb;
+ u8 trust;
+
+ dcb = get_dcb_priv(nn);
+
+ if (dcb->trust_status != NFP_DCB_TRUST_INVALID)
+ return;
+
+ trust = dcb->dscp_cnt > 0 ? NFP_DCB_TRUST_DSCP : NFP_DCB_TRUST_PCP;
+ writeb(trust, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_TRUST);
+
+ dcb->trust_status = trust;
+ *update |= NFP_DCB_MSG_MSK_TRUST;
+}
+
+static void nfp_nic_set_enable(struct nfp_net *nn, u32 enable, u32 *update)
+{
+ struct nfp_dcb *dcb;
+ u32 value = 0;
+
+ dcb = get_dcb_priv(nn);
+
+ value = readl(dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_ENABLE);
+ if (value != enable) {
+ writel(enable, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_ENABLE);
+ *update |= NFP_DCB_MSG_MSK_ENABLE;
+ }
+}
+
+static int dcb_ets_check(struct net_device *dev, struct ieee_ets *ets)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ bool ets_exists = false;
+ int sum = 0;
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ /* For ets mode, check bw percentage sum. */
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ ets_exists = true;
+ sum += ets->tc_tx_bw[i];
+ } else if (ets->tc_tx_bw[i]) {
+ nfp_warn(app->cpp, "ETS BW for strict/vendor TC must be 0.");
+ return -EINVAL;
+ }
+ }
+
+ if (ets_exists && sum != 100) {
+ nfp_warn(app->cpp, "Failed to validate ETS BW: sum must be 100.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void nfp_nic_fill_ets(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ writeb(dcb->tc2idx[dcb->prio2tc[i]],
+ dcb->dcbcfg_tbl + dcb->cfg_offset + NFP_DCB_DATA_OFF_PCP2IDX + i);
+ writeb(dcb->tc_tx_pct[i], dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_IDX_BW_PCT + dcb->tc2idx[i]);
+ writeb(nfp_tsa_ieee2nfp(dcb->tc_tsa[i]), dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_TSA + dcb->tc2idx[i]);
+ }
+}
+
+static void nfp_nic_ets_init(struct nfp_net *nn, u32 *update)
+{
+ struct nfp_dcb *dcb = get_dcb_priv(nn);
+
+ if (dcb->ets_init)
+ return;
+
+ nfp_nic_fill_ets(nn);
+ dcb->ets_init = true;
+ *update |= NFP_DCB_MSG_MSK_TSA | NFP_DCB_MSG_MSK_PCT | NFP_DCB_MSG_MSK_PCP;
+}
+
+static int nfp_nic_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ bool change;
+ int err;
+
+ err = dcb_ets_check(dev, ets);
+ if (err)
+ return err;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ dcb->prio2tc[i] = ets->prio_tc[i];
+ dcb->tc_tx_pct[i] = ets->tc_tx_bw[i];
+ dcb->tc_tsa[i] = ets->tc_tsa[i];
+ }
+
+ change = nfp_refresh_tc2idx(nn);
+ nfp_nic_fill_ets(nn);
+ dcb->ets_init = true;
+ if (change || !dcb->rate_init) {
+ err = update_dscp_maxrate(dev, &update);
+ if (err) {
+ nfp_warn(app->cpp,
+ "nfp dcbnl ieee setets ERROR:%d.",
+ err);
+ return err;
+ }
+
+ dcb->rate_init = true;
+ }
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nfp_nic_set_trust(nn, &update);
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_TSA | NFP_DCB_MSG_MSK_PCT |
+ NFP_DCB_MSG_MSK_PCP);
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+}
+
+static int nfp_nic_dcbnl_ieee_getmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ maxrate->tc_maxrate[i] = dcb->tc_maxrate[i];
+
+ return 0;
+}
+
+static int nfp_nic_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ int err;
+
+ err = nfp_fill_maxrate(nn, maxrate->tc_maxrate);
+ if (err) {
+ nfp_warn(app->cpp,
+ "nfp dcbnl ieee setmaxrate ERROR:%d.",
+ err);
+ return err;
+ }
+
+ dcb = get_dcb_priv(nn);
+
+ dcb->rate_init = true;
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nfp_nic_set_trust(nn, &update);
+ nfp_nic_ets_init(nn, &update);
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_RATE);
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+}
+
+static int nfp_nic_set_trust_status(struct nfp_net *nn, u8 status)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+ if (!dcb->rate_init) {
+ err = nfp_fill_maxrate(nn, dcb->tc_maxrate);
+ if (err)
+ return err;
+
+ update |= NFP_DCB_MSG_MSK_RATE;
+ dcb->rate_init = true;
+ }
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nfp_nic_ets_init(nn, &update);
+ writeb(status, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_TRUST);
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_TRUST);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+ if (err)
+ return err;
+
+ dcb->trust_status = status;
+
+ return 0;
+}
+
+static int nfp_nic_set_dscp2prio(struct nfp_net *nn, u8 dscp, u8 prio)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_dcb *dcb;
+ u8 idx, tc;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ dcb = get_dcb_priv(nn);
+
+ tc = dcb->prio2tc[prio];
+ idx = dcb->tc2idx[tc];
+
+ writeb(idx, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_DSCP2IDX + dscp);
+
+ nn_writel(nn, nn->tlv_caps.mbox_off +
+ NFP_NET_CFG_MBOX_SIMPLE_VAL, NFP_DCB_MSG_MSK_DSCP);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+ if (err)
+ return err;
+
+ dcb->dscp2prio[dscp] = prio;
+
+ return 0;
+}
+
+static int nfp_nic_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct dcb_app old_app;
+ struct nfp_dcb *dcb;
+ bool is_new;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ dcb = get_dcb_priv(nn);
+
+ /* Save the old entry info */
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = dcb->dscp2prio[app->protocol];
+
+ /* Check trust status */
+ if (!dcb->dscp_cnt) {
+ err = nfp_nic_set_trust_status(nn, NFP_DCB_TRUST_DSCP);
+ if (err)
+ return err;
+ }
+
+ /* Write the new mapping if it differs from the old one or we are in the init stage */
+ if (app->priority != old_app.priority || app->priority == 0) {
+ err = nfp_nic_set_dscp2prio(nn, app->protocol, app->priority);
+ if (err)
+ return err;
+ }
+
+ /* Delete the old entry if it exists */
+ is_new = !!dcb_ieee_delapp(dev, &old_app);
+
+ /* Add new entry and update counter */
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ if (is_new)
+ dcb->dscp_cnt++;
+
+ return 0;
+}
+
+static int nfp_nic_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ dcb = get_dcb_priv(nn);
+
+ /* Check if the dcb_app param matches the firmware state */
+ if (app->priority != dcb->dscp2prio[app->protocol])
+ return -ENOENT;
+
+ /* Set fw dscp mapping to 0 */
+ err = nfp_nic_set_dscp2prio(nn, app->protocol, 0);
+ if (err)
+ return err;
+
+ /* Delete app from dcb list */
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ /* Decrease dscp counter */
+ dcb->dscp_cnt--;
+
+ /* If no dscp mapping is configured, trust pcp */
+ if (dcb->dscp_cnt == 0)
+ return nfp_nic_set_trust_status(nn, NFP_DCB_TRUST_PCP);
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops nfp_nic_dcbnl_ops = {
+ /* ieee 802.1Qaz std */
+ .ieee_getets = nfp_nic_dcbnl_ieee_getets,
+ .ieee_setets = nfp_nic_dcbnl_ieee_setets,
+ .ieee_getmaxrate = nfp_nic_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = nfp_nic_dcbnl_ieee_setmaxrate,
+ .ieee_setapp = nfp_nic_dcbnl_ieee_setapp,
+ .ieee_delapp = nfp_nic_dcbnl_ieee_delapp,
+};
+
+int nfp_nic_dcb_init(struct nfp_net *nn)
+{
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+ dcb->cfg_offset = NFP_DCB_CFG_STRIDE * nn->id;
+ dcb->dcbcfg_tbl = nfp_pf_map_rtsym(app->pf, "net.dcbcfg_tbl",
+ "_abi_dcb_cfg",
+ dcb->cfg_offset, &dcb->dcbcfg_tbl_area);
+ if (IS_ERR(dcb->dcbcfg_tbl)) {
+ if (PTR_ERR(dcb->dcbcfg_tbl) != -ENOENT) {
+ err = PTR_ERR(dcb->dcbcfg_tbl);
+ dcb->dcbcfg_tbl = NULL;
+ nfp_err(app->cpp,
+ "Failed to map dcbcfg_tbl area, min_size %u.\n",
+ dcb->cfg_offset);
+ return err;
+ }
+ dcb->dcbcfg_tbl = NULL;
+ }
+
+ if (dcb->dcbcfg_tbl) {
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ dcb->prio2tc[i] = i;
+ dcb->tc2idx[i] = i;
+ dcb->tc_tx_pct[i] = 0;
+ dcb->tc_maxrate[i] = 0;
+ dcb->tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
+ }
+ dcb->trust_status = NFP_DCB_TRUST_INVALID;
+ dcb->rate_init = false;
+ dcb->ets_init = false;
+
+ nn->dp.netdev->dcbnl_ops = &nfp_nic_dcbnl_ops;
+ }
+
+ return 0;
+}
+
+void nfp_nic_dcb_clean(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+ if (dcb->dcbcfg_tbl_area)
+ nfp_cpp_area_release_free(dcb->dcbcfg_tbl_area);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
index aea8579206ee..9dd5afe37f6e 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.c
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -5,6 +5,8 @@
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "main.h"
static int nfp_nic_init(struct nfp_app *app)
{
@@ -28,13 +30,50 @@ static void nfp_nic_sriov_disable(struct nfp_app *app)
{
}
+static int nfp_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn)
+{
+ return nfp_nic_dcb_init(nn);
+}
+
+static void nfp_nic_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
+{
+ nfp_nic_dcb_clean(nn);
+}
+
+static int nfp_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ struct nfp_app_nic_private *app_pri = nn->app_priv;
+ int err;
+
+ err = nfp_app_nic_vnic_alloc(app, nn, id);
+ if (err)
+ return err;
+
+ if (sizeof(*app_pri)) {
+ nn->app_priv = kzalloc(sizeof(*app_pri), GFP_KERNEL);
+ if (!nn->app_priv)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nfp_nic_vnic_free(struct nfp_app *app, struct nfp_net *nn)
+{
+ kfree(nn->app_priv);
+}
+
const struct nfp_app_type app_nic = {
.id = NFP_APP_CORE_NIC,
.name = "nic",
.init = nfp_nic_init,
- .vnic_alloc = nfp_app_nic_vnic_alloc,
-
+ .vnic_alloc = nfp_nic_vnic_alloc,
+ .vnic_free = nfp_nic_vnic_free,
.sriov_enable = nfp_nic_sriov_enable,
.sriov_disable = nfp_nic_sriov_disable,
+
+ .vnic_init = nfp_nic_vnic_init,
+ .vnic_clean = nfp_nic_vnic_clean,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h
new file mode 100644
index 000000000000..094374df42b8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2023 Corigine, Inc. */
+
+#ifndef __NFP_NIC_H__
+#define __NFP_NIC_H__ 1
+
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_DCB
+/* DCB feature definitions */
+#define NFP_NET_MAX_DSCP 4
+#define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS
+#define NFP_NET_MAX_PRIO 8
+#define NFP_DCB_CFG_STRIDE 256
+
+struct nfp_dcb {
+ u8 dscp2prio[NFP_NET_MAX_DSCP];
+ u8 prio2tc[NFP_NET_MAX_PRIO];
+ u8 tc2idx[IEEE_8021QAZ_MAX_TCS];
+ u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
+ u8 tc_tx_pct[IEEE_8021QAZ_MAX_TCS];
+ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
+ u8 dscp_cnt;
+ u8 trust_status;
+ bool rate_init;
+ bool ets_init;
+
+ struct nfp_cpp_area *dcbcfg_tbl_area;
+ u8 __iomem *dcbcfg_tbl;
+ u32 cfg_offset;
+};
+
+int nfp_nic_dcb_init(struct nfp_net *nn);
+void nfp_nic_dcb_clean(struct nfp_net *nn);
+#else
+static inline int nfp_nic_dcb_init(struct nfp_net *nn) { return 0; }
+static inline void nfp_nic_dcb_clean(struct nfp_net *nn) {}
+#endif
+
+struct nfp_app_nic_private {
+#ifdef CONFIG_DCB
+ struct nfp_dcb dcb;
+#endif
+};
+
+#endif
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 62320be4de5a..56e02cba0b8a 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1081,40 +1081,59 @@ static const struct ethtool_ops nixge_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+static int nixge_mdio_read_c22(struct mii_bus *bus, int phy_id, int reg)
{
struct nixge_priv *priv = bus->priv;
u32 status, tmp;
int err;
u16 device;
- if (reg & MII_ADDR_C45) {
- device = (reg >> 16) & 0x1f;
+ device = reg & 0x1f;
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+ tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
- tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
- | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting read command");
+ return err;
+ }
- err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
- !status, 10, 1000);
- if (err) {
- dev_err(priv->dev, "timeout setting address");
- return err;
- }
+ status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
- tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
- NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
- } else {
- device = reg & 0x1f;
+ return status;
+}
- tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
- NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+static int nixge_mdio_read_c45(struct mii_bus *bus, int phy_id, int device,
+ int reg)
+{
+ struct nixge_priv *priv = bus->priv;
+ u32 status, tmp;
+ int err;
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+
+ tmp = NIXGE_MDIO_CLAUSE45 |
+ NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting address");
+ return err;
}
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
@@ -1130,57 +1149,65 @@ static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
return status;
}
-static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+static int nixge_mdio_write_c22(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
{
struct nixge_priv *priv = bus->priv;
u32 status, tmp;
u16 device;
int err;
- if (reg & MII_ADDR_C45) {
- device = (reg >> 16) & 0x1f;
+ device = reg & 0x1f;
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+ tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
- tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
- | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err)
+ dev_err(priv->dev, "timeout setting write command");
- err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
- !status, 10, 1000);
- if (err) {
- dev_err(priv->dev, "timeout setting address");
- return err;
- }
+ return err;
+}
- tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
- | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+static int nixge_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int device, int reg, u16 val)
+{
+ struct nixge_priv *priv = bus->priv;
+ u32 status, tmp;
+ int err;
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
- err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
- !status, 10, 1000);
- if (err)
- dev_err(priv->dev, "timeout setting write command");
- } else {
- device = reg & 0x1f;
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
- tmp = NIXGE_MDIO_CLAUSE22 |
- NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
- NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ tmp = NIXGE_MDIO_CLAUSE45 |
+ NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
- err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
- !status, 10, 1000);
- if (err)
- dev_err(priv->dev, "timeout setting write command");
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting address");
+ return err;
}
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err)
+ dev_err(priv->dev, "timeout setting write command");
+
return err;
}
@@ -1195,8 +1222,10 @@ static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
bus->priv = priv;
bus->name = "nixge_mii_bus";
- bus->read = nixge_mdio_read;
- bus->write = nixge_mdio_write;
+ bus->read = nixge_mdio_read_c22;
+ bus->write = nixge_mdio_write_c22;
+ bus->read_c45 = nixge_mdio_read_c45;
+ bus->write_c45 = nixge_mdio_write_c45;
bus->parent = priv->dev;
priv->mii_bus = bus;
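
Before this conversion, a single callback pair had to decode Clause 45 accesses from a combined reg argument (a flag bit plus the device address in bits 20:16, as the deleted code shows); the dedicated read_c45/write_c45 hooks receive the device address as its own parameter. A stand-alone sketch of the difference (the legacy flag bit value here is an assumption; the devad field matches the deleted decoding):

/* Contrast of the two MDIO callback styles. */
#include <stdio.h>

#define ADDR_C45_FLAG (1u << 30) /* assumed legacy marker bit */

static void old_style(unsigned int reg)
{
	if (reg & ADDR_C45_FLAG)
		printf("C45: devad=%u reg=0x%04x\n",
		       (reg >> 16) & 0x1f, reg & 0xffff);
	else
		printf("C22: reg=0x%02x\n", reg & 0x1f);
}

static void new_style_c45(int devad, int reg)
{
	/* devad arrives as its own parameter; nothing to decode */
	printf("C45: devad=%d reg=0x%04x\n", devad, reg & 0xffff);
}

int main(void)
{
	old_style(ADDR_C45_FLAG | (1u << 16) | 0x0002);
	new_style_c45(1, 0x0002);
	return 0;
}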
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index ce436e97324a..e508f8eb43bf 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -121,7 +121,7 @@ static void ionic_vf_dealloc_locked(struct ionic *ionic)
if (v->stats_pa) {
vfc.stats_pa = 0;
- (void)ionic_set_vf_config(ionic, i, &vfc);
+ ionic_set_vf_config(ionic, i, &vfc);
dma_unmap_single(ionic->dev, v->stats_pa,
sizeof(v->stats), DMA_FROM_DEVICE);
v->stats_pa = 0;
@@ -169,7 +169,7 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
/* ignore failures from older FW, we just won't get stats */
vfc.stats_pa = cpu_to_le64(v->stats_pa);
- (void)ionic_set_vf_config(ionic, i, &vfc);
+ ionic_set_vf_config(ionic, i, &vfc);
}
out:
@@ -352,6 +352,7 @@ err_out_port_reset:
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
+ ionic_dev_teardown(ionic);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
@@ -390,6 +391,7 @@ static void ionic_remove(struct pci_dev *pdev)
ionic_port_reset(ionic);
ionic_reset(ionic);
+ ionic_dev_teardown(ionic);
pci_clear_master(pdev);
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d911f4fd9af6..c06576f43916 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -92,6 +92,7 @@ int ionic_dev_setup(struct ionic *ionic)
unsigned int num_bars = ionic->num_bars;
struct ionic_dev *idev = &ionic->idev;
struct device *dev = ionic->dev;
+ int size;
u32 sig;
/* BAR0: dev_cmd and interrupts */
@@ -133,9 +134,36 @@ int ionic_dev_setup(struct ionic *ionic)
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
+ /* BAR2: optional controller memory mapping */
+ bar++;
+ mutex_init(&idev->cmb_inuse_lock);
+ if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
+ idev->cmb_inuse = NULL;
+ return 0;
+ }
+
+ idev->phy_cmb_pages = bar->bus_addr;
+ idev->cmb_npages = bar->len / PAGE_SIZE;
+ size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
+ idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
+ if (!idev->cmb_inuse)
+ dev_warn(dev, "No memory for CMB, disabling\n");
+
return 0;
}
+void ionic_dev_teardown(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+
+ kfree(idev->cmb_inuse);
+ idev->cmb_inuse = NULL;
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
+
+ mutex_destroy(&idev->cmb_inuse_lock);
+}
+
/* Devcmd Interface */
bool ionic_is_fw_running(struct ionic_dev *idev)
{
@@ -571,6 +599,33 @@ int ionic_db_page_num(struct ionic_lif *lif, int pid)
return (lif->hw_index * lif->dbid_count) + pid;
}
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ int ret;
+
+ mutex_lock(&idev->cmb_inuse_lock);
+ ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
+ mutex_unlock(&idev->cmb_inuse_lock);
+
+ if (ret < 0)
+ return ret;
+
+ *pgid = ret;
+ *pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
+
+ return 0;
+}
+
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+
+ mutex_lock(&idev->cmb_inuse_lock);
+ bitmap_release_region(idev->cmb_inuse, pgid, order);
+ mutex_unlock(&idev->cmb_inuse_lock);
+}
+
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size)
@@ -679,6 +734,18 @@ void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
cur->desc = base + (i * q->desc_size);
}
+void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
+{
+ struct ionic_desc_info *cur;
+ unsigned int i;
+
+ q->cmb_base = base;
+ q->cmb_base_pa = base_pa;
+
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ cur->cmb_desc = base + (i * q->desc_size);
+}
+
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
struct ionic_desc_info *cur;
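
ionic_get_cmb() above parcels the controller-memory BAR into naturally aligned power-of-two page runs via bitmap_find_free_region() under cmb_inuse_lock. The following is a self-contained toy re-implementation of those order-based semantics; a byte-per-page array stands in for the kernel bitmap, and none of the locking is modeled.

#include <stdio.h>

#define NPAGES 64
static unsigned char inuse[NPAGES];	/* one byte per page for clarity */

/* Find a naturally aligned run of 2^order free pages, mark it used,
 * and return its first page id (the region handle), or -1.
 */
static int find_free_region(int npages, int order)
{
	int run = 1 << order, i, j;

	for (i = 0; i + run <= npages; i += run) {	/* natural alignment */
		for (j = 0; j < run; j++)
			if (inuse[i + j])
				break;
		if (j == run) {
			for (j = 0; j < run; j++)
				inuse[i + j] = 1;
			return i;
		}
	}
	return -1;
}

static void release_region(int pgid, int order)
{
	for (int j = 0; j < (1 << order); j++)
		inuse[pgid + j] = 0;
}

int main(void)
{
	int a = find_free_region(NPAGES, 2);	/* 4 pages */
	int b = find_free_region(NPAGES, 3);	/* 8 pages */

	printf("a=%d b=%d\n", a, b);		/* a=0 b=8 */
	release_region(a, 2);
	printf("reuse=%d\n", find_free_region(NPAGES, 2)); /* 0 again */
	return 0;
}

Natural alignment is what lets the returned page id double as the handle the driver stores in cmb_pgid and later passes back to ionic_put_cmb().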
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index bce3ca38669b..0bea208bfba2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -159,6 +159,11 @@ struct ionic_dev {
struct ionic_intr __iomem *intr_ctrl;
u64 __iomem *intr_status;
+ struct mutex cmb_inuse_lock; /* for cmb_inuse */
+ unsigned long *cmb_inuse;
+ dma_addr_t phy_cmb_pages;
+ u32 cmb_npages;
+
u32 port_info_sz;
struct ionic_port_info *port_info;
dma_addr_t port_info_pa;
@@ -203,6 +208,7 @@ struct ionic_desc_info {
struct ionic_rxq_desc *rxq_desc;
struct ionic_admin_cmd *adminq_desc;
};
+ void __iomem *cmb_desc;
union {
void *sg_desc;
struct ionic_txq_sg_desc *txq_sg_desc;
@@ -241,12 +247,14 @@ struct ionic_queue {
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
+ void __iomem *cmb_base;
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_rxq_sg_desc *rxq_sgl;
};
dma_addr_t base_pa;
+ dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
unsigned int desc_size;
unsigned int sg_desc_size;
@@ -309,6 +317,7 @@ static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
void ionic_init_devinfo(struct ionic *ionic);
int ionic_dev_setup(struct ionic *ionic);
+void ionic_dev_teardown(struct ionic *ionic);
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
u8 ionic_dev_cmd_status(struct ionic_dev *idev);
@@ -344,6 +353,9 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
int ionic_db_page_num(struct ionic_lif *lif, int pid);
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size);
@@ -360,6 +372,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid);
void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
+void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 01c22701482d..cf33503468a3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -511,6 +511,87 @@ static int ionic_set_coalesce(struct net_device *netdev,
return 0;
}
+static int ionic_validate_cmb_config(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ int pages_have, pages_required = 0;
+ unsigned long sz;
+
+ if (!lif->ionic->idev.cmb_inuse &&
+ (qparam->cmb_tx || qparam->cmb_rx)) {
+ netdev_info(lif->netdev, "CMB rings are not supported on this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (qparam->cmb_tx) {
+ if (!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_CMB)) {
+ netdev_info(lif->netdev,
+ "CMB rings for tx-push are not supported on this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ sz = sizeof(struct ionic_txq_desc) * qparam->ntxq_descs * qparam->nxqs;
+ pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
+ }
+
+ if (qparam->cmb_rx) {
+ if (!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_CMB)) {
+ netdev_info(lif->netdev,
+ "CMB rings for rx-push are not supported on this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ sz = sizeof(struct ionic_rxq_desc) * qparam->nrxq_descs * qparam->nxqs;
+ pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
+ }
+
+ pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE;
+ if (pages_required > pages_have) {
+ netdev_info(lif->netdev,
+ "Not enough CMB pages for number of queues and size of descriptor rings, need %d have %d",
+ pages_required, pages_have);
+ return -ENOMEM;
+ }
+
+ return pages_required;
+}
+
+static int ionic_cmb_rings_toggle(struct ionic_lif *lif, bool cmb_tx, bool cmb_rx)
+{
+ struct ionic_queue_params qparam;
+ int pages_used;
+
+ if (netif_running(lif->netdev)) {
+ netdev_info(lif->netdev, "Please stop device to toggle CMB for tx/rx-push\n");
+ return -EBUSY;
+ }
+
+ ionic_init_queue_params(lif, &qparam);
+ qparam.cmb_tx = cmb_tx;
+ qparam.cmb_rx = cmb_rx;
+ pages_used = ionic_validate_cmb_config(lif, &qparam);
+ if (pages_used < 0)
+ return pages_used;
+
+ if (cmb_tx)
+ set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+
+ if (cmb_rx)
+ set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+
+ if (cmb_tx || cmb_rx)
+ netdev_info(lif->netdev, "Enabling CMB %s %s rings - %d pages\n",
+ cmb_tx ? "TX" : "", cmb_rx ? "RX" : "", pages_used);
+ else
+ netdev_info(lif->netdev, "Disabling CMB rings\n");
+
+ return 0;
+}
+
static void ionic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -522,6 +603,8 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->tx_pending = lif->ntxq_descs;
ring->rx_max_pending = IONIC_MAX_RX_DESC;
ring->rx_pending = lif->nrxq_descs;
+ kernel_ring->tx_push = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+ kernel_ring->rx_push = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static int ionic_set_ringparam(struct net_device *netdev,
@@ -551,9 +634,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
/* if nothing to do return success */
if (ring->tx_pending == lif->ntxq_descs &&
- ring->rx_pending == lif->nrxq_descs)
+ ring->rx_pending == lif->nrxq_descs &&
+ kernel_ring->tx_push == test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) &&
+ kernel_ring->rx_push == test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
return 0;
+ qparam.ntxq_descs = ring->tx_pending;
+ qparam.nrxq_descs = ring->rx_pending;
+ qparam.cmb_tx = kernel_ring->tx_push;
+ qparam.cmb_rx = kernel_ring->rx_push;
+
+ err = ionic_validate_cmb_config(lif, &qparam);
+ if (err < 0)
+ return err;
+
+ if (kernel_ring->tx_push != test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) ||
+ kernel_ring->rx_push != test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) {
+ err = ionic_cmb_rings_toggle(lif, kernel_ring->tx_push,
+ kernel_ring->rx_push);
+ if (err < 0)
+ return err;
+ }
+
if (ring->tx_pending != lif->ntxq_descs)
netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
lif->ntxq_descs, ring->tx_pending);
@@ -569,9 +671,6 @@ static int ionic_set_ringparam(struct net_device *netdev,
return 0;
}
- qparam.ntxq_descs = ring->tx_pending;
- qparam.nrxq_descs = ring->rx_pending;
-
mutex_lock(&lif->queue_lock);
err = ionic_reconfigure_queues(lif, &qparam);
mutex_unlock(&lif->queue_lock);
@@ -638,7 +737,7 @@ static int ionic_set_channels(struct net_device *netdev,
lif->nxqs, ch->combined_count);
qparam.nxqs = ch->combined_count;
- qparam.intr_split = 0;
+ qparam.intr_split = false;
} else {
max_cnt /= 2;
if (ch->rx_count > max_cnt)
@@ -654,9 +753,13 @@ static int ionic_set_channels(struct net_device *netdev,
lif->nxqs, ch->rx_count);
qparam.nxqs = ch->rx_count;
- qparam.intr_split = 1;
+ qparam.intr_split = true;
}
+ err = ionic_validate_cmb_config(lif, &qparam);
+ if (err < 0)
+ return err;
+
/* if we're not running, just set the values and return */
if (!netif_running(lif->netdev)) {
lif->nxqs = qparam.nxqs;
@@ -965,6 +1068,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
+ .supported_ring_params = ETHTOOL_RING_USE_TX_PUSH |
+ ETHTOOL_RING_USE_RX_PUSH,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
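
ionic_validate_cmb_config() above sizes a request as descriptor bytes across all queues, rounded up to whole pages, and compares that against the pages the CMB BAR provides. A small worked check of that arithmetic; PAGE_SIZE and the 16-byte descriptor size are assumptions for the demo, not the real struct sizes.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long desc_sz = 16;	/* assumed Tx descriptor size */
	unsigned long ndescs = 1024, nqueues = 8;
	unsigned long sz = desc_sz * ndescs * nqueues;
	unsigned long pages = ALIGN_UP(sz, PAGE_SIZE) / PAGE_SIZE;

	/* 16 * 1024 * 8 = 128 KiB -> 32 pages */
	printf("need %lu pages for %lu bytes\n", pages, sz);
	return 0;
}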
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index eac09b2375b8..9a1825edf0d0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -3073,9 +3073,10 @@ union ionic_adminq_comp {
#define IONIC_BARS_MAX 6
#define IONIC_PCI_BAR_DBELL 1
+#define IONIC_PCI_BAR_CMB 2
-/* BAR0 */
#define IONIC_BAR0_SIZE 0x8000
+#define IONIC_BAR2_SIZE 0x800000
#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000
#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 63a78a9ac241..957027e546b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -26,9 +26,12 @@
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
[IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
[IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
- [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
- [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
- * 1 = ... with Tx SG version 1
+ [IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support
+ * 2 = ... with CMB rings
+ */
+ [IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support
+ * 1 = ... with Tx SG version 1
+ * 3 = ... with CMB rings
*/
};
@@ -149,7 +152,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
err = ionic_start_queues(lif);
if (err && err != -EBUSY) {
- netdev_err(lif->netdev,
+ netdev_err(netdev,
"Failed to start queues: %d\n", err);
set_bit(IONIC_LIF_F_BROKEN, lif->state);
netif_carrier_off(lif->netdev);
@@ -397,6 +400,15 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->q_base_pa = 0;
}
+ if (qcq->cmb_q_base) {
+ iounmap(qcq->cmb_q_base);
+ ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
+ qcq->cmb_pgid = 0;
+ qcq->cmb_order = 0;
+ qcq->cmb_q_base = NULL;
+ qcq->cmb_q_base_pa = 0;
+ }
+
if (qcq->cq_base) {
dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
qcq->cq_base = NULL;
@@ -608,6 +620,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_cq_map(&new->cq, cq_base, cq_base_pa);
ionic_cq_bind(&new->cq, &new->q);
} else {
+ /* regular DMA q descriptors */
new->q_size = PAGE_SIZE + (num_descs * desc_size);
new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
GFP_KERNEL);
@@ -620,6 +633,33 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
ionic_q_map(&new->q, q_base, q_base_pa);
+ if (flags & IONIC_QCQ_F_CMB_RINGS) {
+ /* on-chip CMB q descriptors */
+ new->cmb_q_size = num_descs * desc_size;
+ new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
+
+ err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
+ new->cmb_order);
+ if (err) {
+ netdev_err(lif->netdev,
+ "Cannot allocate queue order %d from cmb: err %d\n",
+ new->cmb_order, err);
+ goto err_out_free_q;
+ }
+
+ new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
+ if (!new->cmb_q_base) {
+ netdev_err(lif->netdev, "Cannot map queue from cmb\n");
+ ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
+ err = -ENOMEM;
+ goto err_out_free_q;
+ }
+
+ new->cmb_q_base_pa -= idev->phy_cmb_pages;
+ ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
+ }
+
+ /* cq DMA descriptors */
new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
GFP_KERNEL);
@@ -658,6 +698,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err_out_free_cq:
dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
+ if (new->cmb_q_base) {
+ iounmap(new->cmb_q_base);
+ ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
+ }
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
vfree(new->cq.info);
@@ -739,6 +783,8 @@ static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
qcq->cq.tail_idx = 0;
qcq->cq.done_color = 1;
memset(qcq->q_base, 0, qcq->q_size);
+ if (qcq->cmb_q_base)
+ memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
memset(qcq->cq_base, 0, qcq->cq_size);
memset(qcq->sg_base, 0, qcq->sg_size);
}
@@ -758,6 +804,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
+ .intr_index = cpu_to_le16(qcq->intr.index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
.ring_base = cpu_to_le64(q->base_pa),
@@ -766,17 +813,19 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.features = cpu_to_le64(q->features),
},
};
- unsigned int intr_index;
int err;
- intr_index = qcq->intr.index;
-
- ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
+ if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
+ ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
+ ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
+ }
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
+ dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
@@ -834,6 +883,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
};
int err;
+ if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
+ ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
+ ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
+ }
+
dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
@@ -2010,8 +2064,13 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+
+ if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
+ flags |= IONIC_QCQ_F_CMB_RINGS;
+
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
flags |= IONIC_QCQ_F_INTR;
+
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -2032,6 +2091,9 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
+ if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
+ flags |= IONIC_QCQ_F_CMB_RINGS;
+
num_desc = lif->nrxq_descs;
desc_sz = sizeof(struct ionic_rxq_desc);
comp_sz = sizeof(struct ionic_rxq_comp);
@@ -2507,7 +2569,7 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
- lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
+ ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
up_write(&ionic->vf_op_lock);
@@ -2707,6 +2769,55 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_stats = ionic_get_vf_stats,
};
+static int ionic_cmb_reconfig(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ struct ionic_queue_params start_qparams;
+ int err = 0;
+
+ /* When changing CMB queue parameters, we're using limited
+ * on-device memory and don't have extra memory to use for
+ * duplicate allocations, so we free it all first, then
+ * re-allocate with the new parameters.
+ */
+
+ /* Checkpoint for possible unwind */
+ ionic_init_queue_params(lif, &start_qparams);
+
+ /* Stop and free the queues */
+ ionic_stop_queues_reconfig(lif);
+ ionic_txrx_free(lif);
+
+ /* Set up new qparams */
+ ionic_set_queue_params(lif, qparam);
+
+ if (netif_running(lif->netdev)) {
+ /* Alloc and start the new configuration */
+ err = ionic_txrx_alloc(lif);
+ if (err) {
+ dev_warn(lif->ionic->dev,
+ "CMB reconfig failed, restoring values: %d\n", err);
+
+ /* Back out the changes */
+ ionic_set_queue_params(lif, &start_qparams);
+ err = ionic_txrx_alloc(lif);
+ if (err) {
+ dev_err(lif->ionic->dev,
+ "CMB restore failed: %d\n", err);
+ goto errout;
+ }
+ }
+
+ ionic_start_queues_reconfig(lif);
+ } else {
+ /* This was detached in ionic_stop_queues_reconfig() */
+ netif_device_attach(lif->netdev);
+ }
+
+errout:
+ return err;
+}
+
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
/* only swapping the queues, not the napi, flags, or other stuff */
@@ -2749,6 +2860,11 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
unsigned int flags, i;
int err = 0;
+ /* Are we changing q params while CMB is on? */
+ if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
+ (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
+ return ionic_cmb_reconfig(lif, qparam);
+
/* allocate temporary qcq arrays to hold new queue structs */
if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
@@ -2785,6 +2901,16 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
for (i = 0; i < qparam->nxqs; i++) {
+ /* If missing, short placeholder qcq needed for swap */
+ if (!lif->txqcqs[i]) {
+ flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
+ 4, desc_sz, comp_sz, sg_desc_sz,
+ lif->kern_pid, &lif->txqcqs[i]);
+ if (err)
+ goto err_out;
+ }
+
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -2804,6 +2930,16 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
comp_sz *= 2;
for (i = 0; i < qparam->nxqs; i++) {
+ /* If missing, short placeholder qcq needed for swap */
+ if (!lif->rxqcqs[i]) {
+ flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
+ 4, desc_sz, comp_sz, sg_desc_sz,
+ lif->kern_pid, &lif->rxqcqs[i]);
+ if (err)
+ goto err_out;
+ }
+
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -2853,10 +2989,15 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
}
- /* clear existing interrupt assignments */
+ /* Clear existing interrupt assignments. We check for NULL here
+ * because we walk the whole array for potential qcqs, not
+ * just the ones that were set up in this pass.
+ */
for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
- ionic_qcq_intr_free(lif, lif->txqcqs[i]);
- ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
+ if (lif->txqcqs[i])
+ ionic_qcq_intr_free(lif, lif->txqcqs[i]);
+ if (lif->rxqcqs[i])
+ ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
}
/* re-assign the interrupts */
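
ionic_cmb_reconfig() above cannot double-buffer the limited on-chip memory, so it frees the rings first, applies the new parameters, and unwinds to a saved checkpoint if the reallocation fails. A sketch of that checkpoint/rollback shape; the params struct and alloc_rings() here are hypothetical stand-ins for the driver's queue state and ionic_txrx_alloc().

#include <stdio.h>

struct params { int ntx, nrx; };
static struct params live = { 1024, 1024 };

static int alloc_rings(const struct params *p)
{
	/* pretend allocations above 4096 descriptors fail */
	return (p->ntx > 4096 || p->nrx > 4096) ? -1 : 0;
}

static int reconfig(struct params next)
{
	struct params checkpoint = live;	/* save for unwind */

	/* free first: no memory for duplicate rings */
	live = next;
	if (alloc_rings(&live) == 0)
		return 0;

	/* back out the changes and restore the old rings */
	live = checkpoint;
	if (alloc_rings(&live) != 0)
		return -2;	/* restore failed: device is broken */
	return -1;
}

int main(void)
{
	printf("grow ok:  %d\n", reconfig((struct params){2048, 2048}));
	printf("too big:  %d\n", reconfig((struct params){8192, 1024}));
	printf("still at: %d/%d\n", live.ntx, live.nrx);
	return 0;
}

The -2 branch corresponds to the "CMB restore failed" dev_err() above: once the old configuration cannot be rebuilt either, there is nothing left to fall back to.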
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 734519895614..c9c4c46d5a16 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -59,6 +59,7 @@ struct ionic_rx_stats {
#define IONIC_QCQ_F_TX_STATS BIT(3)
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
+#define IONIC_QCQ_F_CMB_RINGS BIT(6)
struct ionic_qcq {
void *q_base;
@@ -70,6 +71,11 @@ struct ionic_qcq {
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
+ void __iomem *cmb_q_base;
+ phys_addr_t cmb_q_base_pa;
+ u32 cmb_q_size;
+ u32 cmb_pgid;
+ u32 cmb_order;
struct dim dim;
struct ionic_queue q;
struct ionic_cq cq;
@@ -142,6 +148,8 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_BROKEN,
IONIC_LIF_F_TX_DIM_INTR,
IONIC_LIF_F_RX_DIM_INTR,
+ IONIC_LIF_F_CMB_TX_RINGS,
+ IONIC_LIF_F_CMB_RX_RINGS,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
@@ -245,8 +253,10 @@ struct ionic_queue_params {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
- unsigned int intr_split;
u64 rxq_features;
+ bool intr_split;
+ bool cmb_tx;
+ bool cmb_rx;
};
static inline void ionic_init_queue_params(struct ionic_lif *lif,
@@ -255,8 +265,34 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->nxqs = lif->nxqs;
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
- qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->rxq_features = lif->rxq_features;
+ qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+ qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+}
+
+static inline void ionic_set_queue_params(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ lif->nxqs = qparam->nxqs;
+ lif->ntxq_descs = qparam->ntxq_descs;
+ lif->nrxq_descs = qparam->nrxq_descs;
+ lif->rxq_features = qparam->rxq_features;
+
+ if (qparam->intr_split)
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+
+ if (qparam->cmb_tx)
+ set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+
+ if (qparam->cmb_rx)
+ set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 08c42b039d92..1dc79cecc5cc 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -388,7 +388,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
break;
/* force a check of FW status and break out if FW reset */
- (void)ionic_heartbeat_check(lif->ionic);
+ ionic_heartbeat_check(lif->ionic);
if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
!lif->ionic->idev.fw_status_ready) ||
test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
@@ -676,7 +676,7 @@ int ionic_port_init(struct ionic *ionic)
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP);
- (void)ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
mutex_unlock(&ionic->dev_cmd_lock);
if (err) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 887046838b3b..eac2f0e3576e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -268,7 +268,7 @@ static u64 ionic_hwstamp_read(struct ionic *ionic,
u32 tick_high_before, tick_high, tick_low;
/* read and discard low part to defeat hw staging of high part */
- (void)ioread32(&ionic->idev.hwstamp_regs->tick_low);
+ ioread32(&ionic->idev.hwstamp_regs->tick_low);
tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index b7363376dfc8..1ee2f285cb42 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -604,14 +604,14 @@ loop_out:
* they can clear room for some new filters
*/
list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
- (void)ionic_lif_filter_del(lif, &sync_item->f.cmd);
+ ionic_lif_filter_del(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
- (void)ionic_lif_filter_add(lif, &sync_item->f.cmd);
+ ionic_lif_filter_add(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index f761780f0162..26798fc635db 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -402,6 +402,14 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
+static inline void ionic_write_cmb_desc(struct ionic_queue *q,
+ void __iomem *cmb_desc,
+ void *desc)
+{
+ if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
+ memcpy_toio(cmb_desc, desc, q->desc_size);
+}
+
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
@@ -480,6 +488,8 @@ void ionic_rx_fill(struct ionic_queue *q)
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->nbufs = nfrags;
+ ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
ionic_rxq_post(q, false, ionic_rx_clean, NULL);
}
@@ -943,7 +953,8 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
return 0;
}
-static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
+static void ionic_tx_tso_post(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -951,6 +962,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
+ struct ionic_txq_desc *desc = desc_info->desc;
u8 flags = 0;
u64 cmd;
@@ -966,6 +978,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss);
+ ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
if (start) {
skb_tx_timestamp(skb);
if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
@@ -1084,7 +1098,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(q, desc, skb,
+ ionic_tx_tso_post(q, desc_info, skb,
desc_addr, desc_nsge, desc_len,
hdrlen, mss, outer_csum, vlan_tci, has_vlan,
start, done);
@@ -1133,6 +1147,8 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
else
@@ -1170,6 +1186,8 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = 0;
desc->csum_offset = 0;
+ ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
stats->csum_none++;
}
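
ionic_write_cmb_desc() builds each descriptor in host DMA memory exactly as before and, only for CMB-backed queues, mirrors it into the on-chip slot. A user-space sketch of the conditional mirror; plain memcpy() stands in for memcpy_toio(), and the flag and queue types are simplified.

#include <stdio.h>
#include <string.h>

#define QCQ_F_CMB_RINGS 0x40

struct queue {
	unsigned int flags;
	size_t desc_size;
	unsigned char *cmb_base;	/* would be __iomem in the driver */
};

/* Mirror the host-built descriptor into controller memory when the
 * queue is CMB-backed; a no-op otherwise.
 */
static void write_cmb_desc(struct queue *q, void *cmb_desc, void *desc)
{
	if (q->flags & QCQ_F_CMB_RINGS)
		memcpy(cmb_desc, desc, q->desc_size);	/* memcpy_toio() in-kernel */
}

int main(void)
{
	unsigned char host_desc[16] = "descriptor";
	unsigned char cmb_slot[16] = { 0 };
	struct queue q = { QCQ_F_CMB_RINGS, sizeof(host_desc), cmb_slot };

	write_cmb_desc(&q, cmb_slot, host_desc);
	printf("cmb copy: %s\n", cmb_slot);
	return 0;
}

Keeping the host copy authoritative means the completion/cleanup paths never have to read back over the PCIe bus; the CMB slot is write-only from the CPU's point of view.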
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 922c47797af6..be5cc8b79bd5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -198,7 +198,6 @@ static const struct devlink_ops qed_dl_ops = {
struct devlink *qed_devlink_register(struct qed_dev *cdev)
{
- union devlink_param_value value;
struct qed_devlink *qdevlink;
struct devlink *dl;
int rc;
@@ -216,11 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
if (rc)
goto err_unregister;
- value.vbool = false;
- devlink_param_driverinit_value_set(dl,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
- value);
-
cdev->iwarp_cmt = false;
qed_fw_reporters_create(dl);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 0848b5529d48..2bf18748581d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -831,7 +831,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
* @p_hwfn: HW device data.
* @p_ptt: PTT window for writing the registers.
* @vf: VF info data.
- * @enable: The actual permision for this VF.
+ * @enable: The actual permission for this VF.
*
* In E4, queue zone permission table size is 320x9. There
* are 320 VF queues for single engine device (256 for dual
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 953f304b8588..4a3c3b5fb4a1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -892,6 +892,9 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->hw_features = hw_features;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
/* MTU range: 46 - 9600 */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
@@ -970,8 +973,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
- mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
- sizeof(*edev->coal_entry), GFP_KERNEL);
+ if (!edev->coal_entry) {
+ mem = kcalloc(QEDE_MAX_RSS_CNT(edev),
+ sizeof(*edev->coal_entry), GFP_KERNEL);
+ } else {
+ mem = krealloc(edev->coal_entry,
+ QEDE_QUEUE_CNT(edev) * sizeof(*edev->coal_entry),
+ GFP_KERNEL);
+ }
+
if (!mem) {
DP_ERR(edev, "coalesce entry allocation failed\n");
kfree(edev->coal_entry);
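
The qede hunk above stops resizing a NULL pointer with the runtime queue count: the first allocation is a zeroed kcalloc() sized for the maximum RSS count, and only later resizes go through krealloc(). A user-space sketch of that grow pattern; MAX_CNT and cur_cnt are illustrative values, not the driver's.

#include <stdio.h>
#include <stdlib.h>

#define MAX_CNT 64

int main(void)
{
	size_t esz = sizeof(int);
	int *coal = NULL, cur_cnt = 8;
	void *mem;

	mem = coal ? realloc(coal, cur_cnt * esz)	/* keep old values */
		   : calloc(MAX_CNT, esz);		/* first: zeroed, max-sized */
	if (!mem) {
		free(coal);
		return 1;
	}
	coal = mem;
	printf("entry[0] starts zeroed: %d\n", coal[0]);
	free(coal);
	return 0;
}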
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 27b1663c476e..39d24e07f306 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -12,6 +12,7 @@
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
+#include "rmnet_map.h"
/* Local Definitions and Declarations */
@@ -39,6 +40,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev)
if (port->nr_rmnet_devs)
return -EINVAL;
+ rmnet_map_tx_aggregate_exit(port);
+
netdev_rx_handler_unregister(real_dev);
kfree(port);
@@ -79,6 +82,8 @@ static int rmnet_register_real_device(struct net_device *real_dev,
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
+ rmnet_map_tx_aggregate_init(port);
+
netdev_dbg(real_dev, "registered with rmnet\n");
return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 3d3cba56c516..ed112d51ac5a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -6,6 +6,7 @@
*/
#include <linux/skbuff.h>
+#include <linux/time.h>
#include <net/gro_cells.h>
#ifndef _RMNET_CONFIG_H_
@@ -19,6 +20,12 @@ struct rmnet_endpoint {
struct hlist_node hlnode;
};
+struct rmnet_egress_agg_params {
+ u32 bytes;
+ u32 count;
+ u64 time_nsec;
+};
+
/* One instance of this structure is instantiated for each real_dev associated
* with rmnet.
*/
@@ -30,6 +37,19 @@ struct rmnet_port {
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
struct net_device *rmnet_dev;
+
+ /* Egress aggregation information */
+ struct rmnet_egress_agg_params egress_agg_params;
+ /* Protect aggregation related elements */
+ spinlock_t agg_lock;
+ struct sk_buff *skbagg_head;
+ struct sk_buff *skbagg_tail;
+ int agg_state;
+ u8 agg_count;
+ struct timespec64 agg_time;
+ struct timespec64 agg_last;
+ struct hrtimer hrtimer;
+ struct work_struct agg_wq;
};
extern struct rtnl_link_ops rmnet_link_ops;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index a313242a762e..9f3479500f85 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -164,8 +164,18 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
map_header->mux_id = mux_id;
- skb->protocol = htons(ETH_P_MAP);
+ if (READ_ONCE(port->egress_agg_params.count) > 1) {
+ unsigned int len;
+
+ len = rmnet_map_tx_aggregate(skb, port, orig_dev);
+ if (likely(len)) {
+ rmnet_vnd_tx_fixup_len(len, orig_dev);
+ return -EINPROGRESS;
+ }
+ return -ENOMEM;
+ }
+ skb->protocol = htons(ETH_P_MAP);
return 0;
}
@@ -235,6 +245,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
struct rmnet_port *port;
struct rmnet_priv *priv;
u8 mux_id;
+ int err;
sk_pacing_shift_update(skb->sk, 8);
@@ -247,8 +258,11 @@ void rmnet_egress_handler(struct sk_buff *skb)
if (!port)
goto drop;
- if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+ err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
+ if (err == -ENOMEM)
goto drop;
+ else if (err == -EINPROGRESS)
+ return;
rmnet_vnd_tx_fixup(skb, orig_dev);
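
With TX aggregation in place, rmnet_map_egress_handler() has a three-way contract: 0 means transmit the skb now, -EINPROGRESS means the aggregator took ownership (stats already fixed up via rmnet_vnd_tx_fixup_len()), and -ENOMEM means drop. A compact sketch of that dispatch; the two boolean inputs are stand-ins for the aggregation-enabled check and the skb_copy_expand() outcome.

#include <errno.h>
#include <stdio.h>

/* Stand-in for rmnet_map_egress_handler(): 0 = transmit skb now,
 * -EINPROGRESS = skb queued for aggregation (caller must not touch it),
 * -ENOMEM = drop.
 */
static int egress_handler(int aggregating, int alloc_ok)
{
	if (!aggregating)
		return 0;
	return alloc_ok ? -EINPROGRESS : -ENOMEM;
}

static const char *dispatch(int err)
{
	if (err == -ENOMEM)
		return "drop";
	if (err == -EINPROGRESS)
		return "done (owned by aggregator)";
	return "transmit";
}

int main(void)
{
	printf("%s\n", dispatch(egress_handler(0, 1)));
	printf("%s\n", dispatch(egress_handler(1, 1)));
	printf("%s\n", dispatch(egress_handler(1, 0)));
	return 0;
}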
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 2b033060fc20..b70284095568 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -53,5 +53,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);
+unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
+ struct net_device *orig_dev);
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
+ u32 count, u32 time);
#endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index ba194698cc14..a5e3d1a88305 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -12,6 +12,7 @@
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
+#include "rmnet_vnd.h"
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
@@ -518,3 +519,193 @@ int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
return 0;
}
+
+#define RMNET_AGG_BYPASS_TIME_NSEC 10000000L
+
+static void reset_aggr_params(struct rmnet_port *port)
+{
+ port->skbagg_head = NULL;
+ port->agg_count = 0;
+ port->agg_state = 0;
+ memset(&port->agg_time, 0, sizeof(struct timespec64));
+}
+
+static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb)
+{
+ if (skb_needs_linearize(skb, port->dev->features)) {
+ if (unlikely(__skb_linearize(skb))) {
+ struct rmnet_priv *priv;
+
+ priv = netdev_priv(port->rmnet_dev);
+ this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
+
+ dev_queue_xmit(skb);
+}
+
+static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ struct rmnet_port *port;
+
+ port = container_of(work, struct rmnet_port, agg_wq);
+
+ spin_lock_bh(&port->agg_lock);
+ if (likely(port->agg_state == -EINPROGRESS)) {
+ /* Buffer may have already been shipped out */
+ if (likely(port->skbagg_head)) {
+ skb = port->skbagg_head;
+ reset_aggr_params(port);
+ }
+ port->agg_state = 0;
+ }
+
+ spin_unlock_bh(&port->agg_lock);
+ if (skb)
+ rmnet_send_skb(port, skb);
+}
+
+static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
+{
+ struct rmnet_port *port;
+
+ port = container_of(t, struct rmnet_port, hrtimer);
+
+ schedule_work(&port->agg_wq);
+
+ return HRTIMER_NORESTART;
+}
+
+unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
+ struct net_device *orig_dev)
+{
+ struct timespec64 diff, last;
+ unsigned int len = skb->len;
+ struct sk_buff *agg_skb;
+ int size;
+
+ spin_lock_bh(&port->agg_lock);
+ memcpy(&last, &port->agg_last, sizeof(struct timespec64));
+ ktime_get_real_ts64(&port->agg_last);
+
+ if (!port->skbagg_head) {
+ /* Check to see if we should agg first. If the traffic is very
+ * sparse, don't aggregate.
+ */
+new_packet:
+ diff = timespec64_sub(port->agg_last, last);
+ size = port->egress_agg_params.bytes - skb->len;
+
+ if (size < 0) {
+ /* dropped */
+ spin_unlock_bh(&port->agg_lock);
+ return 0;
+ }
+
+ if (diff.tv_sec > 0 || diff.tv_nsec > RMNET_AGG_BYPASS_TIME_NSEC ||
+ size == 0)
+ goto no_aggr;
+
+ port->skbagg_head = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
+ if (!port->skbagg_head)
+ goto no_aggr;
+
+ dev_kfree_skb_any(skb);
+ port->skbagg_head->protocol = htons(ETH_P_MAP);
+ port->agg_count = 1;
+ ktime_get_real_ts64(&port->agg_time);
+ skb_frag_list_init(port->skbagg_head);
+ goto schedule;
+ }
+ diff = timespec64_sub(port->agg_last, port->agg_time);
+ size = port->egress_agg_params.bytes - port->skbagg_head->len;
+
+ if (skb->len > size) {
+ agg_skb = port->skbagg_head;
+ reset_aggr_params(port);
+ spin_unlock_bh(&port->agg_lock);
+ hrtimer_cancel(&port->hrtimer);
+ rmnet_send_skb(port, agg_skb);
+ spin_lock_bh(&port->agg_lock);
+ goto new_packet;
+ }
+
+ if (skb_has_frag_list(port->skbagg_head))
+ port->skbagg_tail->next = skb;
+ else
+ skb_shinfo(port->skbagg_head)->frag_list = skb;
+
+ port->skbagg_head->len += skb->len;
+ port->skbagg_head->data_len += skb->len;
+ port->skbagg_head->truesize += skb->truesize;
+ port->skbagg_tail = skb;
+ port->agg_count++;
+
+ if (diff.tv_sec > 0 || diff.tv_nsec > port->egress_agg_params.time_nsec ||
+ port->agg_count >= port->egress_agg_params.count ||
+ port->skbagg_head->len == port->egress_agg_params.bytes) {
+ agg_skb = port->skbagg_head;
+ reset_aggr_params(port);
+ spin_unlock_bh(&port->agg_lock);
+ hrtimer_cancel(&port->hrtimer);
+ rmnet_send_skb(port, agg_skb);
+ return len;
+ }
+
+schedule:
+ if (!hrtimer_active(&port->hrtimer) && port->agg_state != -EINPROGRESS) {
+ port->agg_state = -EINPROGRESS;
+ hrtimer_start(&port->hrtimer,
+ ns_to_ktime(port->egress_agg_params.time_nsec),
+ HRTIMER_MODE_REL);
+ }
+ spin_unlock_bh(&port->agg_lock);
+
+ return len;
+
+no_aggr:
+ spin_unlock_bh(&port->agg_lock);
+ skb->protocol = htons(ETH_P_MAP);
+ dev_queue_xmit(skb);
+
+ return len;
+}
+
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
+ u32 count, u32 time)
+{
+ spin_lock_bh(&port->agg_lock);
+ port->egress_agg_params.bytes = size;
+ WRITE_ONCE(port->egress_agg_params.count, count);
+ port->egress_agg_params.time_nsec = time * NSEC_PER_USEC;
+ spin_unlock_bh(&port->agg_lock);
+}
+
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
+{
+ hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ spin_lock_init(&port->agg_lock);
+ rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
+ INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
+}
+
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
+{
+ hrtimer_cancel(&port->hrtimer);
+ cancel_work_sync(&port->agg_wq);
+
+ spin_lock_bh(&port->agg_lock);
+ if (port->agg_state == -EINPROGRESS) {
+ if (port->skbagg_head) {
+ dev_kfree_skb_any(port->skbagg_head);
+ reset_aggr_params(port);
+ }
+
+ port->agg_state = 0;
+ }
+ spin_unlock_bh(&port->agg_lock);
+}
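
rmnet_map_tx_aggregate() above ships the pending super-frame when any limit trips: the aggregate has aged past time_nsec, the frame count reached count, or the byte total hit bytes exactly. Below is a standalone simulation of just that threshold logic; the parameter values and packet trace are made up, and none of the driver's locking or hrtimer handling is modeled.

#include <stdio.h>

struct agg_params { long bytes, count, time_nsec; };
struct agg_state { long len, count, age_nsec; };

/* Returns 1 when the pending aggregate must be shipped; mirrors the
 * flush condition in rmnet_map_tx_aggregate().
 */
static int must_flush(const struct agg_params *p, const struct agg_state *s)
{
	return s->age_nsec > p->time_nsec ||
	       s->count >= p->count ||
	       s->len == p->bytes;
}

int main(void)
{
	struct agg_params p = { 4096, 8, 800000 };	/* illustrative */
	struct agg_state s = { 0, 0, 0 };
	long pkt = 512;

	for (int i = 0; i < 12; i++) {
		s.len += pkt;
		s.count++;
		s.age_nsec += 50000;	/* 50 us between packets */
		if (must_flush(&p, &s)) {
			printf("flush after %ld pkts, %ld bytes\n", s.count, s.len);
			s = (struct agg_state){ 0, 0, 0 };
		}
	}
	return 0;
}

Note that rmnet_map_tx_aggregate_init() above defaults to 4096 bytes, 1 frame and 800 us, and the egress handler only enters the aggregation path when count > 1, so aggregation stays effectively off until ethtool raises the frame limit.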
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 3f5e6572d20e..046b5f7d8e7c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -29,7 +29,7 @@ void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_end(&pcpu_ptr->syncp);
}
-void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_pcpu_stats *pcpu_ptr;
@@ -38,10 +38,15 @@ void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&pcpu_ptr->syncp);
pcpu_ptr->stats.tx_pkts++;
- pcpu_ptr->stats.tx_bytes += skb->len;
+ pcpu_ptr->stats.tx_bytes += len;
u64_stats_update_end(&pcpu_ptr->syncp);
}
+void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+ rmnet_vnd_tx_fixup_len(skb->len, dev);
+}
+
/* Network Device Operations */
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
@@ -210,7 +215,52 @@ static void rmnet_get_ethtool_stats(struct net_device *dev,
memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}
+static int rmnet_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_port *port;
+
+ port = rmnet_get_port_rtnl(priv->real_dev);
+
+ memset(kernel_coal, 0, sizeof(*kernel_coal));
+ kernel_coal->tx_aggr_max_bytes = port->egress_agg_params.bytes;
+ kernel_coal->tx_aggr_max_frames = port->egress_agg_params.count;
+ kernel_coal->tx_aggr_time_usecs = div_u64(port->egress_agg_params.time_nsec,
+ NSEC_PER_USEC);
+
+ return 0;
+}
+
+static int rmnet_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_port *port;
+
+ port = rmnet_get_port_rtnl(priv->real_dev);
+
+ if (kernel_coal->tx_aggr_max_frames < 1 || kernel_coal->tx_aggr_max_frames > 64)
+ return -EINVAL;
+
+ if (kernel_coal->tx_aggr_max_bytes > 32768)
+ return -EINVAL;
+
+ rmnet_map_update_ul_agg_config(port, kernel_coal->tx_aggr_max_bytes,
+ kernel_coal->tx_aggr_max_frames,
+ kernel_coal->tx_aggr_time_usecs);
+
+ return 0;
+}
+
static const struct ethtool_ops rmnet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_AGGR,
+ .get_coalesce = rmnet_get_coalesce,
+ .set_coalesce = rmnet_set_coalesce,
.get_ethtool_stats = rmnet_get_ethtool_stats,
.get_strings = rmnet_get_strings,
.get_sset_count = rmnet_get_sset_count,
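
rmnet_set_coalesce() above accepts tx_aggr_max_frames only in [1, 64] and tx_aggr_max_bytes up to 32768 before handing the values to rmnet_map_update_ul_agg_config(). A tiny standalone checker using the same constants:

#include <errno.h>
#include <stdio.h>

static int validate_tx_aggr(unsigned int max_frames, unsigned int max_bytes)
{
	if (max_frames < 1 || max_frames > 64)
		return -EINVAL;
	if (max_bytes > 32768)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_tx_aggr(8, 4096));	/* 0 */
	printf("%d\n", validate_tx_aggr(0, 4096));	/* -EINVAL */
	printf("%d\n", validate_tx_aggr(8, 65536));	/* -EINVAL */
	return 0;
}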
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index dc3a4443ef0a..c2b2baf86894 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -16,6 +16,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_setup(struct net_device *dev);
int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index dadd61bccfe7..45147a1016be 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -576,6 +576,7 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_RESET_PENDING,
+ RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -3928,7 +3929,7 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT);
}
static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
@@ -4522,6 +4523,7 @@ static void rtl_task(struct work_struct *work)
{
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
+ int ret;
rtnl_lock();
@@ -4529,7 +4531,27 @@ static void rtl_task(struct work_struct *work)
!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
goto out_unlock;
+ if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
+ /* if chip isn't accessible, reset bus to revive it */
+ if (RTL_R32(tp, TxConfig) == ~0) {
+ ret = pci_reset_bus(tp->pci_dev);
+ if (ret < 0) {
+ netdev_err(tp->dev, "Can't reset secondary PCI bus, detach NIC\n");
+ netif_device_detach(tp->dev);
+ goto out_unlock;
+ }
+ }
+
+ /* ASPM compatibility issues are a typical reason for tx timeouts */
+ ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_L0S);
+ if (!ret)
+ netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n");
+ goto reset;
+ }
+
if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
+reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
}
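
The r8169 change turns a Tx timeout into a graded recovery: if TxConfig reads back as all-ones the chip has dropped off the bus, so the secondary bus is reset (detaching the netdev if even that fails); otherwise ASPM L0s/L1 are disabled once, and every surviving path falls through to the normal reset work. A sketch of that ladder; the boolean probes stand in for RTL_R32(TxConfig) == ~0, pci_reset_bus() and pci_disable_link_state().

#include <stdio.h>

enum action { DETACH, RESET };

/* Returns what the timeout task would do next, given the outcome of
 * each recovery probe.
 */
static enum action tx_timeout_task(int chip_gone, int bus_reset_ok,
				   int aspm_disable_ok)
{
	if (chip_gone) {		/* register reads as 0xffffffff */
		if (!bus_reset_ok)
			return DETACH;	/* give up, detach the netdev */
	}
	if (aspm_disable_ok)
		printf("ASPM disabled on Tx timeout\n");
	return RESET;			/* fall through to rtl_reset_work() */
}

int main(void)
{
	printf("%d\n", tx_timeout_task(0, 0, 1));	/* 1: plain reset */
	printf("%d\n", tx_timeout_task(1, 1, 1));	/* 1: reset after bus reset */
	printf("%d\n", tx_timeout_task(1, 0, 0));	/* 0: detach */
	return 0;
}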
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 2370c7797a0a..853394e5bb8b 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -16,7 +16,6 @@
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
@@ -124,13 +123,6 @@ static void rswitch_fwd_init(struct rswitch_private *priv)
iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}
-/* gPTP timer (gPTP) */
-static void rswitch_get_timestamp(struct rswitch_private *priv,
- struct timespec64 *ts)
-{
- priv->ptp_priv->info.gettime64(&priv->ptp_priv->info, ts);
-}
-
/* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
enum rswitch_gwca_mode mode)
@@ -241,7 +233,7 @@ static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
- struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];
+ struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
return true;
@@ -281,36 +273,43 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
{
int i;
- if (gq->gptp) {
+ if (!gq->dir_tx) {
dma_free_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_ts_desc) *
- (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
- gq->ts_ring = NULL;
- } else {
- dma_free_coherent(ndev->dev.parent,
- sizeof(struct rswitch_ext_desc) *
- (gq->ring_size + 1), gq->ring, gq->ring_dma);
- gq->ring = NULL;
- }
+ (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
+ gq->rx_ring = NULL;
- if (!gq->dir_tx) {
for (i = 0; i < gq->ring_size; i++)
dev_kfree_skb(gq->skbs[i]);
+ } else {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_desc) *
+ (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
+ gq->tx_ring = NULL;
}
kfree(gq->skbs);
gq->skbs = NULL;
}
+static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
+ gq->ts_ring, gq->ring_dma);
+ gq->ts_ring = NULL;
+}
+
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq,
- bool dir_tx, bool gptp, int ring_size)
+ bool dir_tx, int ring_size)
{
int i, bit;
gq->dir_tx = dir_tx;
- gq->gptp = gptp;
gq->ring_size = ring_size;
gq->ndev = ndev;
@@ -318,18 +317,19 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
if (!gq->skbs)
return -ENOMEM;
- if (!dir_tx)
+ if (!dir_tx) {
rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
- if (gptp)
- gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
+ gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_ts_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
- else
- gq->ring = dma_alloc_coherent(ndev->dev.parent,
- sizeof(struct rswitch_ext_desc) *
- (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
- if (!gq->ts_ring && !gq->ring)
+ } else {
+ gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+ }
+
+ if (!gq->rx_ring && !gq->tx_ring)
goto out;
i = gq->index / 32;
@@ -347,6 +347,17 @@ out:
return -ENOMEM;
}
+static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+
+ gq->ring_size = TS_RING_SIZE;
+ gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct rswitch_ts_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+ return !gq->ts_ring ? -ENOMEM : 0;
+}
+
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
desc->dptrl = cpu_to_le32(lower_32_bits(addr));
@@ -362,14 +373,14 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq)
{
- int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+ int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
struct rswitch_ext_desc *desc;
struct rswitch_desc *linkfix;
dma_addr_t dma_addr;
int i;
- memset(gq->ring, 0, tx_ring_size);
- for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
+ memset(gq->tx_ring, 0, ring_size);
+ for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
gq->skbs[i]->data, PKT_BUF_SZ,
@@ -387,7 +398,7 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
desc->desc.die_dt = DT_LINKFIX;
- linkfix = &priv->linkfix_table[gq->index];
+ linkfix = &priv->gwca.linkfix_table[gq->index];
linkfix->die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(linkfix, gq->ring_dma);
@@ -398,7 +409,7 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
err:
if (!gq->dir_tx) {
- for (i--, desc = gq->ring; i >= 0; i--, desc++) {
+ for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
dma_addr = rswitch_desc_get_dptr(&desc->desc);
dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
DMA_FROM_DEVICE);
@@ -408,9 +419,23 @@ err:
return -ENOMEM;
}
-static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
- struct rswitch_gwca_queue *gq,
- int start_index, int num)
+static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
+ int start_index, int num)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+ struct rswitch_ts_desc *desc;
+ int i, index;
+
+ for (i = 0; i < num; i++) {
+ index = (i + start_index) % gq->ring_size;
+ desc = &gq->ts_ring[index];
+ desc->desc.die_dt = DT_FEMPTY_ND | DIE;
+ }
+}
+
+static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
+ struct rswitch_gwca_queue *gq,
+ int start_index, int num)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_ext_ts_desc *desc;
@@ -419,7 +444,7 @@ static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
- desc = &gq->ts_ring[index];
+ desc = &gq->rx_ring[index];
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
gq->skbs[index]->data, PKT_BUF_SZ,
@@ -443,7 +468,7 @@ err:
if (!gq->dir_tx) {
for (i--; i >= 0; i--) {
index = (i + start_index) % gq->ring_size;
- desc = &gq->ts_ring[index];
+ desc = &gq->rx_ring[index];
dma_addr = rswitch_desc_get_dptr(&desc->desc);
dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
DMA_FROM_DEVICE);
@@ -453,25 +478,25 @@ err:
return -ENOMEM;
}
-static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
- struct rswitch_private *priv,
- struct rswitch_gwca_queue *gq)
+static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
+ struct rswitch_private *priv,
+ struct rswitch_gwca_queue *gq)
{
- int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+ int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
struct rswitch_ext_ts_desc *desc;
struct rswitch_desc *linkfix;
int err;
- memset(gq->ts_ring, 0, tx_ts_ring_size);
- err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
+ memset(gq->rx_ring, 0, ring_size);
+ err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
if (err < 0)
return err;
- desc = &gq->ts_ring[gq->ring_size]; /* Last */
+ desc = &gq->rx_ring[gq->ring_size]; /* Last */
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
desc->desc.die_dt = DT_LINKFIX;
- linkfix = &priv->linkfix_table[gq->index];
+ linkfix = &priv->gwca.linkfix_table[gq->index];
linkfix->die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(linkfix, gq->ring_dma);
@@ -481,28 +506,31 @@ static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
return 0;
}
-static int rswitch_gwca_desc_alloc(struct rswitch_private *priv)
+static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
int i, num_queues = priv->gwca.num_queues;
+ struct rswitch_gwca *gwca = &priv->gwca;
struct device *dev = &priv->pdev->dev;
- priv->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
- priv->linkfix_table = dma_alloc_coherent(dev, priv->linkfix_table_size,
- &priv->linkfix_table_dma, GFP_KERNEL);
- if (!priv->linkfix_table)
+ gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
+ gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
+ &gwca->linkfix_table_dma, GFP_KERNEL);
+ if (!gwca->linkfix_table)
return -ENOMEM;
for (i = 0; i < num_queues; i++)
- priv->linkfix_table[i].die_dt = DT_EOS;
+ gwca->linkfix_table[i].die_dt = DT_EOS;
return 0;
}
-static void rswitch_gwca_desc_free(struct rswitch_private *priv)
+static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
- if (priv->linkfix_table)
- dma_free_coherent(&priv->pdev->dev, priv->linkfix_table_size,
- priv->linkfix_table, priv->linkfix_table_dma);
- priv->linkfix_table = NULL;
+ struct rswitch_gwca *gwca = &priv->gwca;
+
+ if (gwca->linkfix_table)
+ dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
+ gwca->linkfix_table, gwca->linkfix_table_dma);
+ gwca->linkfix_table = NULL;
}
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
@@ -537,8 +565,7 @@ static int rswitch_txdmac_alloc(struct net_device *ndev)
if (!rdev->tx_queue)
return -EBUSY;
- err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, false,
- TX_RING_SIZE);
+ err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
if (err < 0) {
rswitch_gwca_put(priv, rdev->tx_queue);
return err;
@@ -572,8 +599,7 @@ static int rswitch_rxdmac_alloc(struct net_device *ndev)
if (!rdev->rx_queue)
return -EBUSY;
- err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, true,
- RX_RING_SIZE);
+ err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
if (err < 0) {
rswitch_gwca_put(priv, rdev->rx_queue);
return err;
@@ -595,7 +621,7 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
- return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
+ return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
@@ -618,8 +644,11 @@ static int rswitch_gwca_hw_init(struct rswitch_private *priv)
iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
iowrite32(0, priv->addr + GWTTFC);
- iowrite32(lower_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC1);
- iowrite32(upper_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC0);
+ iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
+ iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
+ iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
+ iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
+ iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
rswitch_gwca_set_rate_limit(priv, priv->gwca.speed);
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
@@ -676,7 +705,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
boguscnt = min_t(int, gq->ring_size, *quota);
limit = boguscnt;
- desc = &gq->ts_ring[gq->cur];
+ desc = &gq->rx_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
if (--boguscnt < 0)
break;
@@ -704,14 +733,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
rdev->ndev->stats.rx_bytes += pkt_len;
gq->cur = rswitch_next_queue_index(gq, true, 1);
- desc = &gq->ts_ring[gq->cur];
+ desc = &gq->rx_ring[gq->cur];
}
num = rswitch_get_num_cur_queues(gq);
ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
if (ret < 0)
goto err;
- ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
+ ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
if (ret < 0)
goto err;
gq->dirty = rswitch_next_queue_index(gq, false, num);
@@ -738,7 +767,7 @@ static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
for (; rswitch_get_num_cur_queues(gq) > 0;
gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
- desc = &gq->ring[gq->dirty];
+ desc = &gq->tx_ring[gq->dirty];
if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
break;
@@ -746,15 +775,6 @@ static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
skb = gq->skbs[gq->dirty];
if (skb) {
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- struct skb_shared_hwtstamps shhwtstamps;
- struct timespec64 ts;
-
- rswitch_get_timestamp(rdev->priv, &ts);
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
- skb_tstamp_tx(skb, &shhwtstamps);
- }
dma_addr = rswitch_desc_get_dptr(&desc->desc);
dma_unmap_single(ndev->dev.parent, dma_addr,
size, DMA_TO_DEVICE);
@@ -880,6 +900,73 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
return 0;
}
+static void rswitch_ts(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+ struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct rswitch_ts_desc *desc;
+ struct timespec64 ts;
+ u32 tag, port;
+ int num;
+
+ desc = &gq->ts_ring[gq->cur];
+ while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
+ dma_rmb();
+
+ port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
+ tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+
+ list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
+ if (!(ts_info->port == port && ts_info->tag == tag))
+ continue;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(ts_info->skb, &shhwtstamps);
+ dev_consume_skb_irq(ts_info->skb);
+ list_del(&ts_info->list);
+ kfree(ts_info);
+ break;
+ }
+
+ gq->cur = rswitch_next_queue_index(gq, true, 1);
+ desc = &gq->ts_ring[gq->cur];
+ }
+
+ num = rswitch_get_num_cur_queues(gq);
+ rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
+ gq->dirty = rswitch_next_queue_index(gq, false, num);
+}
+
+static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
+{
+ struct rswitch_private *priv = dev_id;
+
+ if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
+ iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
+ rswitch_ts(priv);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
+{
+ int irq;
+
+ irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
+ 0, GWCA_TS_IRQ_NAME, priv);
+}
+
/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
enum rswitch_etha_mode mode)
@@ -1024,34 +1111,18 @@ static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
return ret;
}
-static int rswitch_etha_mii_read(struct mii_bus *bus, int addr, int regnum)
+static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
+ int regad)
{
struct rswitch_etha *etha = bus->priv;
- int mode, devad, regad;
-
- mode = regnum & MII_ADDR_C45;
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- regad = regnum & MII_REGADDR_C45_MASK;
-
- /* Not support Clause 22 access method */
- if (!mode)
- return -EOPNOTSUPP;
return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}
-static int rswitch_etha_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
+ int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
- int mode, devad, regad;
-
- mode = regnum & MII_ADDR_C45;
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- regad = regnum & MII_REGADDR_C45_MASK;
-
- /* Not support Clause 22 access method */
- if (!mode)
- return -EOPNOTSUPP;
return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}
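
Before this conversion, a single combined regnum encoded both the MMD and the register address, and each driver decoded it by hand, exactly as the deleted lines above did. A minimal sketch of that legacy decode, using the constants from pre-6.3 <linux/mdio.h>; with dedicated read_c45/write_c45 bus ops the MDIO core passes devad and regad separately, so none of this is needed anymore:

/* Legacy Clause 45 regnum decode, as the deleted helpers above did it.
 * Constants as found in pre-6.3 <linux/mdio.h>.
 */
#define MII_ADDR_C45		(1 << 30)
#define MII_DEVADDR_C45_SHIFT	16
#define MII_REGADDR_C45_MASK	GENMASK(15, 0)

static int legacy_c45_decode(int regnum, int *devad, int *regad)
{
	/* Clause 22 accesses lack the C45 marker bit */
	if (!(regnum & MII_ADDR_C45))
		return -EOPNOTSUPP;

	*devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
	*regad = regnum & MII_REGADDR_C45_MASK;
	return 0;
}
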
@@ -1087,33 +1158,25 @@ out:
return port;
}
-/* Call of_node_put(mdio) after done */
-static struct device_node *rswitch_get_mdio_node(struct rswitch_device *rdev)
-{
- struct device_node *port, *mdio;
-
- port = rswitch_get_port_node(rdev);
- if (!port)
- return NULL;
-
- mdio = of_get_child_by_name(port, "mdio");
- of_node_put(port);
-
- return mdio;
-}
-
static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
- struct device_node *port;
+ u32 max_speed;
int err;
- port = rswitch_get_port_node(rdev);
- if (!port)
+ if (!rdev->np_port)
return 0; /* ignored */
- err = of_get_phy_mode(port, &rdev->etha->phy_interface);
- of_node_put(port);
+ err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
+ if (err)
+ return err;
+ err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
+ if (!err) {
+ rdev->etha->speed = max_speed;
+ return 0;
+ }
+
+ /* if no "max-speed" property, let's use default speed */
switch (rdev->etha->phy_interface) {
case PHY_INTERFACE_MODE_MII:
rdev->etha->speed = SPEED_100;
@@ -1125,11 +1188,10 @@ static int rswitch_etha_get_params(struct rswitch_device *rdev)
rdev->etha->speed = SPEED_2500;
break;
default:
- err = -EINVAL;
- break;
+ return -EINVAL;
}
- return err;
+ return 0;
}
static int rswitch_mii_register(struct rswitch_device *rdev)
@@ -1145,11 +1207,11 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->name = "rswitch_mii";
sprintf(mii_bus->id, "etha%d", rdev->etha->index);
mii_bus->priv = rdev->etha;
- mii_bus->read = rswitch_etha_mii_read;
- mii_bus->write = rswitch_etha_mii_write;
+ mii_bus->read_c45 = rswitch_etha_mii_read_c45;
+ mii_bus->write_c45 = rswitch_etha_mii_write_c45;
mii_bus->parent = &rdev->priv->pdev->dev;
- mdio_np = rswitch_get_mdio_node(rdev);
+ mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
err = of_mdiobus_register(mii_bus, mdio_np);
if (err < 0) {
mdiobus_free(mii_bus);
@@ -1173,114 +1235,107 @@ static void rswitch_mii_unregister(struct rswitch_device *rdev)
}
}
-static void rswitch_mac_config(struct phylink_config *config,
- unsigned int mode,
- const struct phylink_link_state *state)
+static void rswitch_adjust_link(struct net_device *ndev)
{
-}
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
-static void rswitch_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
+ /* Current hardware cannot change the speed at runtime */
+ if (phydev->link != rdev->etha->link) {
+ phy_print_status(phydev);
+ if (phydev->link)
+ phy_power_on(rdev->serdes);
+ else
+ phy_power_off(rdev->serdes);
+
+ rdev->etha->link = phydev->link;
+ }
}
-static void rswitch_mac_link_up(struct phylink_config *config,
- struct phy_device *phydev, unsigned int mode,
- phy_interface_t interface, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
+ struct phy_device *phydev)
{
- /* Current hardware cannot change speed at runtime */
-}
+ /* Current hardware cannot change the speed at runtime */
+ switch (rdev->etha->speed) {
+ case SPEED_2500:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ break;
+ case SPEED_1000:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ break;
+ case SPEED_100:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ break;
+ default:
+ break;
+ }
-static const struct phylink_mac_ops rswitch_phylink_ops = {
- .mac_config = rswitch_mac_config,
- .mac_link_down = rswitch_mac_link_down,
- .mac_link_up = rswitch_mac_link_up,
-};
+ phy_set_max_speed(phydev, rdev->etha->speed);
+}
-static int rswitch_phylink_init(struct rswitch_device *rdev)
+static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
- struct device_node *port;
- struct phylink *phylink;
- int err;
+ struct phy_device *phydev;
+ struct device_node *phy;
+ int err = -ENOENT;
- port = rswitch_get_port_node(rdev);
- if (!port)
+ if (!rdev->np_port)
return -ENODEV;
- rdev->phylink_config.dev = &rdev->ndev->dev;
- rdev->phylink_config.type = PHYLINK_NETDEV;
- __set_bit(PHY_INTERFACE_MODE_SGMII, rdev->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_USXGMII, rdev->phylink_config.supported_interfaces);
- rdev->phylink_config.mac_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
+ phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
+ if (!phy)
+ return -ENODEV;
- phylink = phylink_create(&rdev->phylink_config, &port->fwnode,
- rdev->etha->phy_interface, &rswitch_phylink_ops);
- if (IS_ERR(phylink)) {
- err = PTR_ERR(phylink);
+ /* Set phydev->host_interfaces before calling of_phy_connect() so that
+ * the PHY is configured with the host's supported interfaces.
+ */
+ phydev = of_phy_find_device(phy);
+ if (!phydev)
goto out;
- }
+ __set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
- rdev->phylink = phylink;
- err = phylink_of_phy_connect(rdev->phylink, port, rdev->etha->phy_interface);
+ phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
+ rdev->etha->phy_interface);
+ if (!phydev)
+ goto out;
+
+ phy_set_max_speed(phydev, SPEED_2500);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ rswitch_phy_remove_link_mode(rdev, phydev);
+
+ phy_attached_info(phydev);
+
+ err = 0;
out:
- of_node_put(port);
+ of_node_put(phy);
return err;
}
-static void rswitch_phylink_deinit(struct rswitch_device *rdev)
+static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
- rtnl_lock();
- phylink_disconnect_phy(rdev->phylink);
- rtnl_unlock();
- phylink_destroy(rdev->phylink);
+ if (rdev->ndev->phydev) {
+ phy_disconnect(rdev->ndev->phydev);
+ rdev->ndev->phydev = NULL;
+ }
}
static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
int err;
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
+ err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
rdev->etha->phy_interface);
if (err < 0)
return err;
- return phy_set_speed(serdes, rdev->etha->speed);
-}
-
-static int rswitch_serdes_init(struct rswitch_device *rdev)
-{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
-
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- return phy_init(serdes);
-}
-
-static int rswitch_serdes_deinit(struct rswitch_device *rdev)
-{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
-
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- return phy_exit(serdes);
+ return phy_set_speed(rdev->serdes, rdev->etha->speed);
}
static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
@@ -1298,9 +1353,15 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
if (err < 0)
return err;
- err = rswitch_phylink_init(rdev);
+ err = rswitch_phy_device_init(rdev);
if (err < 0)
- goto err_phylink_init;
+ goto err_phy_device_init;
+
+ rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
+ if (IS_ERR(rdev->serdes)) {
+ err = PTR_ERR(rdev->serdes);
+ goto err_serdes_phy_get;
+ }
err = rswitch_serdes_set_params(rdev);
if (err < 0)
@@ -1309,9 +1370,10 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
return 0;
err_serdes_set_params:
- rswitch_phylink_deinit(rdev);
+err_serdes_phy_get:
+ rswitch_phy_device_deinit(rdev);
-err_phylink_init:
+err_phy_device_init:
rswitch_mii_unregister(rdev);
return err;
@@ -1319,7 +1381,7 @@ err_phylink_init:
static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
- rswitch_phylink_deinit(rdev);
+ rswitch_phy_device_deinit(rdev);
rswitch_mii_unregister(rdev);
}
@@ -1334,7 +1396,7 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
}
rswitch_for_each_enabled_port(priv, i) {
- err = rswitch_serdes_init(priv->rdev[i]);
+ err = phy_init(priv->rdev[i]->serdes);
if (err)
goto err_serdes;
}
@@ -1343,7 +1405,7 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
err_serdes:
rswitch_for_each_enabled_port_continue_reverse(priv, i)
- rswitch_serdes_deinit(priv->rdev[i]);
+ phy_exit(priv->rdev[i]->serdes);
i = RSWITCH_NUM_PORTS;
err_init_one:
@@ -1358,7 +1420,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
int i;
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- rswitch_serdes_deinit(priv->rdev[i]);
+ phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
}
@@ -1367,7 +1429,7 @@ static int rswitch_open(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- phylink_start(rdev->phylink);
+ phy_start(ndev->phydev);
napi_enable(&rdev->napi);
netif_start_queue(ndev);
@@ -1375,19 +1437,32 @@ static int rswitch_open(struct net_device *ndev)
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+
return 0;
};
static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_ts_info *ts_info, *ts_info2;
netif_tx_stop_all_queues(ndev);
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+
+ list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
+ if (ts_info->port != rdev->port)
+ continue;
+ dev_kfree_skb_irq(ts_info->skb);
+ list_del(&ts_info->list);
+ kfree(ts_info);
+ }
+
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
- phylink_stop(rdev->phylink);
+ phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
return 0;
@@ -1416,17 +1491,31 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
}
gq->skbs[gq->cur] = skb;
- desc = &gq->ring[gq->cur];
+ desc = &gq->tx_ring[gq->cur];
rswitch_desc_set_dptr(&desc->desc, dma_addr);
desc->desc.info_ds = cpu_to_le16(skb->len);
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct rswitch_gwca_ts_info *ts_info;
+
+ ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
+ if (!ts_info) {
+ dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
rdev->ts_tag++;
desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
+
+ ts_info->skb = skb_get(skb);
+ ts_info->port = rdev->port;
+ ts_info->tag = rdev->ts_tag;
+ list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
+
+ skb_tx_timestamp(skb);
}
- skb_tx_timestamp(skb);
dma_wmb();
@@ -1515,8 +1604,6 @@ static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
- struct rswitch_device *rdev = netdev_priv(ndev);
-
if (!netif_running(ndev))
return -EINVAL;
@@ -1526,7 +1613,7 @@ static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd
case SIOCSHWTSTAMP:
return rswitch_hwstamp_set(ndev, req);
default:
- return phylink_mii_ioctl(rdev->phylink, req, cmd);
+ return phy_mii_ioctl(ndev->phydev, req, cmd);
}
}
@@ -1581,7 +1668,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
struct platform_device *pdev = priv->pdev;
struct rswitch_device *rdev;
- struct device_node *port;
struct net_device *ndev;
int err;
@@ -1610,10 +1696,10 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
netif_napi_add(ndev, &rdev->napi, rswitch_poll);
- port = rswitch_get_port_node(rdev);
- rdev->disabled = !port;
- err = of_get_ethdev_address(port, ndev);
- of_node_put(port);
+ rdev->np_port = rswitch_get_port_node(rdev);
+ rdev->disabled = !rdev->np_port;
+ err = of_get_ethdev_address(rdev->np_port, ndev);
+ of_node_put(rdev->np_port);
if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1679,10 +1765,17 @@ static int rswitch_init(struct rswitch_private *priv)
if (err < 0)
return err;
- err = rswitch_gwca_desc_alloc(priv);
+ err = rswitch_gwca_linkfix_alloc(priv);
if (err < 0)
return -ENOMEM;
+ err = rswitch_gwca_ts_queue_alloc(priv);
+ if (err < 0)
+ goto err_ts_queue_alloc;
+
+ rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
+ INIT_LIST_HEAD(&priv->gwca.ts_info_list);
+
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
err = rswitch_device_alloc(priv, i);
if (err < 0) {
@@ -1703,6 +1796,10 @@ static int rswitch_init(struct rswitch_private *priv)
if (err < 0)
goto err_gwca_request_irq;
+ err = rswitch_gwca_ts_request_irqs(priv);
+ if (err < 0)
+ goto err_gwca_ts_request_irq;
+
err = rswitch_gwca_hw_init(priv);
if (err < 0)
goto err_gwca_hw_init;
@@ -1733,6 +1830,7 @@ err_ether_port_init_all:
rswitch_gwca_hw_deinit(priv);
err_gwca_hw_init:
+err_gwca_ts_request_irq:
err_gwca_request_irq:
rcar_gen4_ptp_unregister(priv->ptp_priv);
@@ -1741,7 +1839,10 @@ err_ptp_register:
rswitch_device_free(priv, i);
err_device_alloc:
- rswitch_gwca_desc_free(priv);
+ rswitch_gwca_ts_queue_free(priv);
+
+err_ts_queue_alloc:
+ rswitch_gwca_linkfix_free(priv);
return err;
}
@@ -1814,13 +1915,14 @@ static void rswitch_deinit(struct rswitch_private *priv)
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
struct rswitch_device *rdev = priv->rdev[i];
- rswitch_serdes_deinit(rdev);
+ phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(rdev);
unregister_netdev(rdev->ndev);
rswitch_device_free(priv, i);
}
- rswitch_gwca_desc_free(priv);
+ rswitch_gwca_ts_queue_free(priv);
+ rswitch_gwca_linkfix_free(priv);
rswitch_clock_disable(priv);
}
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 49efb0f31c77..27d3d38c055f 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -27,6 +27,7 @@
#define TX_RING_SIZE 1024
#define RX_RING_SIZE 1024
+#define TS_RING_SIZE (TX_RING_SIZE * RSWITCH_NUM_PORTS)
#define PKT_BUF_SZ 1584
#define RSWITCH_ALIGN 128
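
The TS_RING_SIZE added above sizes the timestamp ring so that every TX descriptor on every port can have a timestamp completion outstanding at once. Assuming RSWITCH_NUM_PORTS is 3 (the R-Car S4 configuration this driver targets), that works out to:

/* TS_RING_SIZE = TX_RING_SIZE * RSWITCH_NUM_PORTS
 *              = 1024 * 3
 *              = 3072 timestamp descriptors
 */
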
@@ -49,6 +50,10 @@
#define AGENT_INDEX_GWCA 3
#define GWRO RSWITCH_GWCA0_OFFSET
+#define GWCA_TS_IRQ_RESOURCE_NAME "gwca0_rxts0"
+#define GWCA_TS_IRQ_NAME "rswitch: gwca0_rxts0"
+#define GWCA_TS_IRQ_BIT BIT(0)
+
#define FWRO 0
#define TPRO RSWITCH_TOP_OFFSET
#define CARO RSWITCH_COMA_OFFSET
@@ -831,7 +836,7 @@ enum DIE_DT {
DT_FSINGLE = 0x80,
DT_FSTART = 0x90,
DT_FMID = 0xa0,
- DT_FEND = 0xb8,
+ DT_FEND = 0xb0,
/* Chain control */
DT_LEMPTY = 0xc0,
@@ -843,7 +848,7 @@ enum DIE_DT {
DT_FEMPTY = 0x40,
DT_FEMPTY_IS = 0x10,
DT_FEMPTY_IC = 0x20,
- DT_FEMPTY_ND = 0x38,
+ DT_FEMPTY_ND = 0x30,
DT_FEMPTY_START = 0x50,
DT_FEMPTY_MID = 0x60,
DT_FEMPTY_END = 0x70,
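
The two value changes above follow from the layout of the die_dt field ("descriptor interrupt enable and type", per the struct comment below): the descriptor type occupies the upper nibble, and the interrupt-enable flag is a separate low bit. A sketch, assuming the driver's DT_MASK (0xf0) and DIE (0x08) definitions:

	u8 die_dt = DT_FEND | DIE;	/* 0xb0 | 0x08 == the old, wrong 0xb8 */
	u8 type   = die_dt & DT_MASK;	/* == DT_FEND (0xb0), the actual type */
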
@@ -865,6 +870,12 @@ enum DIE_DT {
/* For reception */
#define INFO1_SPN(port) ((u64)(port) << 36ULL)
+/* For timestamp descriptor in dptrl (Bytes 4 to 7) */
+#define TS_DESC_TSUN(dptrl) ((dptrl) & GENMASK(7, 0))
+#define TS_DESC_SPN(dptrl) (((dptrl) & GENMASK(10, 8)) >> 8)
+#define TS_DESC_DPN(dptrl) (((dptrl) & GENMASK(17, 16)) >> 16)
+#define TS_DESC_TN(dptrl) ((dptrl) & BIT(24))
+
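
The driver's rswitch_ts() handler (in the rswitch.c hunk earlier) matches each completed timestamp descriptor against the pending ts_info list by tag (TSUN) and port (DPN), both packed into dptrl. A worked decode of a hypothetical descriptor word:

/* dptrl = 0x01030245 (hypothetical)
 *   TS_DESC_TSUN(dptrl) = 0x45	(bits 7:0,   matched against ts_info->tag)
 *   TS_DESC_SPN(dptrl)  = 2	(bits 10:8)
 *   TS_DESC_DPN(dptrl)  = 3	(bits 17:16, matched against ts_info->port)
 *   TS_DESC_TN(dptrl)   = set	(bit 24)
 */
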
struct rswitch_desc {
__le16 info_ds; /* Descriptor size */
u8 die_dt; /* Descriptor interrupt enable and type */
@@ -911,27 +922,43 @@ struct rswitch_etha {
* name, this driver calls "queue".
*/
struct rswitch_gwca_queue {
- int index;
- bool dir_tx;
- bool gptp;
union {
- struct rswitch_ext_desc *ring;
- struct rswitch_ext_ts_desc *ts_ring;
+ struct rswitch_ext_desc *tx_ring;
+ struct rswitch_ext_ts_desc *rx_ring;
+ struct rswitch_ts_desc *ts_ring;
};
+
+ /* Common */
dma_addr_t ring_dma;
int ring_size;
int cur;
int dirty;
- struct sk_buff **skbs;
+ /* For [rt]_ring */
+ int index;
+ bool dir_tx;
+ struct sk_buff **skbs;
struct net_device *ndev; /* queue to ndev for irq */
};
+struct rswitch_gwca_ts_info {
+ struct sk_buff *skb;
+ struct list_head list;
+
+ int port;
+ u8 tag;
+};
+
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
int index;
+ struct rswitch_desc *linkfix_table;
+ dma_addr_t linkfix_table_dma;
+ u32 linkfix_table_size;
struct rswitch_gwca_queue *queues;
int num_queues;
+ struct rswitch_gwca_queue ts_queue;
+ struct list_head ts_info_list;
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
@@ -943,8 +970,6 @@ struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
struct napi_struct napi;
- struct phylink *phylink;
- struct phylink_config phylink_config;
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
@@ -953,6 +978,8 @@ struct rswitch_device {
int port;
struct rswitch_etha *etha;
+ struct device_node *np_port;
+ struct phy *serdes;
};
struct rswitch_mfwd_mac_table_entry {
@@ -969,9 +996,6 @@ struct rswitch_private {
struct platform_device *pdev;
void __iomem *addr;
struct rcar_gen4_ptp_private *ptp_priv;
- struct rswitch_desc *linkfix_table;
- dma_addr_t linkfix_table_dma;
- u32 linkfix_table_size;
struct rswitch_device *rdev[RSWITCH_NUM_PORTS];
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 71a499113308..ed17163d7811 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3044,23 +3044,46 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
return 0;
}
-static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+static int sh_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
{
int res;
pm_runtime_get_sync(bus->parent);
- res = mdiobb_read(bus, phy, reg);
+ res = mdiobb_read_c22(bus, phy, reg);
pm_runtime_put(bus->parent);
return res;
}
-static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+static int sh_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val)
{
int res;
pm_runtime_get_sync(bus->parent);
- res = mdiobb_write(bus, phy, reg, val);
+ res = mdiobb_write_c22(bus, phy, reg, val);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_read_c45(bus, phy, devad, reg);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad,
+ int reg, u16 val)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_write_c45(bus, phy, devad, reg, val);
pm_runtime_put(bus->parent);
return res;
@@ -3091,8 +3114,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
return -ENOMEM;
/* Wrap accessors with Runtime PM-aware ops */
- mdp->mii_bus->read = sh_mdiobb_read;
- mdp->mii_bus->write = sh_mdiobb_write;
+ mdp->mii_bus->read = sh_mdiobb_read_c22;
+ mdp->mii_bus->write = sh_mdiobb_write_c22;
+ mdp->mii_bus->read_c45 = sh_mdiobb_read_c45;
+ mdp->mii_bus->write_c45 = sh_mdiobb_write_c45;
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
index fceb6d637235..0227223c06fa 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -50,12 +50,12 @@ static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
}
static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
- int phyreg, u16 phydata)
+ int devad, int phyreg, u16 phydata)
{
u32 reg;
/* set mdio address register */
- reg = ((phyreg >> 16) & 0x1f) << 21;
+ reg = (devad & 0x1f) << 21;
reg |= (phyaddr << 16) | (phyreg & 0xffff);
writel(reg, sp->ioaddr + sp->hw->mii.addr);
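
For clarity, a worked example of the address-register layout composed above, with hypothetical values devad = 1, phyaddr = 5, phyreg = 0x8000:

/* reg  = (1 & 0x1f) << 21;	-> 0x00200000 (MMD in bits 25:21)
 * reg |= (5 << 16) | 0x8000;	-> 0x00258000
 */
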
@@ -76,8 +76,8 @@ static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
sxgbe_mdio_ctrl_data(sp, cmd, phydata);
}
-static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
- int phyreg, u16 phydata)
+static int sxgbe_mdio_access_c22(struct sxgbe_priv_data *sp, u32 cmd,
+ int phyaddr, int phyreg, u16 phydata)
{
const struct mii_regs *mii = &sp->hw->mii;
int rc;
@@ -86,33 +86,46 @@ static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
if (rc < 0)
return rc;
- if (phyreg & MII_ADDR_C45) {
- sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
- } else {
- /* Ports 0-3 only support C22. */
- if (phyaddr >= 4)
- return -ENODEV;
+ /* Ports 0-3 only support C22. */
+ if (phyaddr >= 4)
+ return -ENODEV;
- sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
- }
+ sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
+
+ return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+}
+
+static int sxgbe_mdio_access_c45(struct sxgbe_priv_data *sp, u32 cmd,
+ int phyaddr, int devad, int phyreg,
+ u16 phydata)
+{
+ const struct mii_regs *mii = &sp->hw->mii;
+ int rc;
+
+ rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+ if (rc < 0)
+ return rc;
+
+ sxgbe_mdio_c45(sp, cmd, phyaddr, devad, phyreg, phydata);
return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
}
/**
- * sxgbe_mdio_read
+ * sxgbe_mdio_read_c22
* @bus: points to the mii_bus structure
* @phyaddr: address of phy port
* @phyreg: address of register within the phy
- * Description: this function used for C45 and C22 MDIO Read
+ * Description: this function is used for C22 MDIO read
*/
-static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+static int sxgbe_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
{
struct net_device *ndev = bus->priv;
struct sxgbe_priv_data *priv = netdev_priv(ndev);
int rc;
- rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
+ rc = sxgbe_mdio_access_c22(priv, SXGBE_SMA_READ_CMD, phyaddr,
+ phyreg, 0);
if (rc < 0)
return rc;
@@ -120,21 +133,63 @@ static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
}
/**
- * sxgbe_mdio_write
+ * sxgbe_mdio_read_c45
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @devad: device (MMD) address
+ * @phyreg: address of register within the phy
+ * Description: this function is used for C45 MDIO read
+ */
+static int sxgbe_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad,
+ int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ int rc;
+
+ rc = sxgbe_mdio_access_c45(priv, SXGBE_SMA_READ_CMD, phyaddr,
+ devad, phyreg, 0);
+ if (rc < 0)
+ return rc;
+
+ return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
+}
+
+/**
+ * sxgbe_mdio_write_c22
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @phyreg: address of phy registers
+ * @phydata: data to be written into phy register
+ * Description: this function is used for C22 MDIO write
+ */
+static int sxgbe_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ return sxgbe_mdio_access_c22(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
+ phydata);
+}
+
+/**
+ * sxgbe_mdio_write_c45
* @bus: points to the mii_bus structure
* @phyaddr: address of phy port
* @phyreg: address of phy registers
+ * @devad: device (MMD) address
* @phydata: data to be written into phy register
- * Description: this function is used for C45 and C22 MDIO write
+ * Description: this function is used for C45 MDIO write
*/
-static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
- u16 phydata)
+static int sxgbe_mdio_write_c45(struct mii_bus *bus, int phyaddr, int devad,
+ int phyreg, u16 phydata)
{
struct net_device *ndev = bus->priv;
struct sxgbe_priv_data *priv = netdev_priv(ndev);
- return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
- phydata);
+ return sxgbe_mdio_access_c45(priv, SXGBE_SMA_WRITE_CMD, phyaddr,
+ devad, phyreg, phydata);
}
int sxgbe_mdio_register(struct net_device *ndev)
@@ -161,8 +216,10 @@ int sxgbe_mdio_register(struct net_device *ndev)
/* assign mii bus fields */
mdio_bus->name = "sxgbe";
- mdio_bus->read = &sxgbe_mdio_read;
- mdio_bus->write = &sxgbe_mdio_write;
+ mdio_bus->read = sxgbe_mdio_read_c22;
+ mdio_bus->write = sxgbe_mdio_write_c22;
+ mdio_bus->read_c45 = sxgbe_mdio_read_c45;
+ mdio_bus->write_c45 = sxgbe_mdio_write_c45;
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
mdio_bus->name, priv->plat->bus_id);
mdio_bus->priv = ndev;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 0950e6b0508f..4af36ba8906b 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -22,6 +22,7 @@ config SFC
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
select CRC32
+ select NET_DEVLINK
help
This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC9100-family controllers.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 712a48d00069..55b9c73cd8ef 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -6,7 +6,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
mcdi.o mcdi_port.o mcdi_port_common.o \
mcdi_functions.o mcdi_filters.o mcdi_mon.o \
ef100.o ef100_nic.o ef100_netdev.o \
- ef100_ethtool.o ef100_rx.o ef100_tx.o
+ ef100_ethtool.o ef100_rx.o ef100_tx.o \
+ efx_devlink.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index ddcc325ed570..d916877b5a9a 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -24,6 +24,7 @@
#include "rx_common.h"
#include "ef100_sriov.h"
#include "tc_bindings.h"
+#include "efx_devlink.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -332,9 +333,11 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)
efx_ef100_pci_sriov_disable(efx, true);
#endif
+ efx_fini_devlink_lock(efx);
ef100_unregister_netdev(efx);
#ifdef CONFIG_SFC_SRIOV
+ ef100_pf_unset_devlink_port(efx);
efx_fini_tc(efx);
#endif
@@ -345,6 +348,8 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)
kfree(efx->phy_data);
efx->phy_data = NULL;
+ efx_fini_devlink_and_unlock(efx);
+
free_netdev(efx->net_dev);
efx->net_dev = NULL;
efx->state = STATE_PROBED;
@@ -354,6 +359,7 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
{
struct efx_nic *efx = &probe_data->efx;
struct efx_probe_data **probe_ptr;
+ struct ef100_nic_data *nic_data;
struct net_device *net_dev;
int rc;
@@ -405,6 +411,20 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
/* Don't fail init if RSS setup doesn't work. */
efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
+ nic_data = efx->nic_data;
+ rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF,
+ efx->type->is_vf);
+ if (rc)
+ return rc;
+ /* Assign MAC address */
+ eth_hw_addr_set(net_dev, net_dev->perm_addr);
+ ether_addr_copy(nic_data->port_id, net_dev->perm_addr);
+
+ /* devlink creation, registration and lock */
+ rc = efx_probe_devlink_and_lock(efx);
+ if (rc)
+ pci_info(efx->pci_dev, "devlink registration failed");
+
rc = ef100_register_netdev(efx);
if (rc)
goto fail;
@@ -413,6 +433,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
rc = ef100_probe_netdev_pf(efx);
if (rc)
goto fail;
+#ifdef CONFIG_SFC_SRIOV
+ ef100_pf_set_devlink_port(efx);
+#endif
}
efx->netdev_notifier.notifier_call = ef100_netdev_event;
@@ -423,6 +446,13 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
goto fail;
}
+ efx_probe_devlink_unlock(efx);
+ return rc;
fail:
+#ifdef CONFIG_SFC_SRIOV
+ /* remove devlink port if it exists */
+ ef100_pf_unset_devlink_port(efx);
+#endif
+ efx_probe_devlink_unlock(efx);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index ad686c671ab8..4dc643b0d2db 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -130,23 +130,34 @@ static void ef100_mcdi_reboot_detected(struct efx_nic *efx)
/* MCDI calls
*/
-static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address,
+ int client_handle, bool empty_ok)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN);
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+ MCDI_SET_DWORD(inbuf, GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE,
+ client_handle);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLIENT_MAC_ADDRESSES, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
- if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
- return -EIO;
- ether_addr_copy(mac_address,
- MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
+ if (outlen >= MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1)) {
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS));
+ } else if (empty_ok) {
+ pci_warn(efx->pci_dev,
+ "No MAC address provisioned for client ID %#x.\n",
+ client_handle);
+ eth_zero_addr(mac_address);
+ } else {
+ return -ENOENT;
+ }
return 0;
}
@@ -388,14 +399,14 @@ static int ef100_filter_table_up(struct efx_nic *efx)
* filter insertion will need to take the lock for read.
*/
up_write(&efx->filter_sem);
-#ifdef CONFIG_SFC_SRIOV
- rc = efx_tc_insert_rep_filters(efx);
+ if (IS_ENABLED(CONFIG_SFC_SRIOV))
+ rc = efx_tc_insert_rep_filters(efx);
+
/* Rep filter failure is nonfatal */
if (rc)
netif_warn(efx, drv, efx->net_dev,
"Failed to insert representor filters, rc %d\n",
rc);
-#endif
return 0;
fail_vlan0:
@@ -408,9 +419,8 @@ fail_unspec:
static void ef100_filter_table_down(struct efx_nic *efx)
{
-#ifdef CONFIG_SFC_SRIOV
- efx_tc_remove_rep_filters(efx);
-#endif
+ if (IS_ENABLED(CONFIG_SFC_SRIOV))
+ efx_tc_remove_rep_filters(efx);
down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
@@ -726,7 +736,6 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G;
}
-#ifdef CONFIG_SFC_SRIOV
static int efx_ef100_get_base_mport(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
@@ -736,7 +745,7 @@ static int efx_ef100_get_base_mport(struct efx_nic *efx)
/* Construct mport selector for "physical network port" */
efx_mae_mport_wire(efx, &selector);
/* Look up actual mport ID */
- rc = efx_mae_lookup_mport(efx, selector, &id);
+ rc = efx_mae_fw_lookup_mport(efx, selector, &id);
if (rc)
return rc;
/* The ID should always fit in 16 bits, because that's how wide the
@@ -747,9 +756,21 @@ static int efx_ef100_get_base_mport(struct efx_nic *efx)
id);
nic_data->base_mport = id;
nic_data->have_mport = true;
+
+ /* Construct mport selector for "calling PF" */
+ efx_mae_mport_uplink(efx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_fw_lookup_mport(efx, selector, &id);
+ if (rc)
+ return rc;
+ if (id >> 16)
+ netif_warn(efx, probe, efx->net_dev, "Bad own m-port id %#x\n",
+ id);
+ nic_data->own_mport = id;
+ nic_data->have_own_mport = true;
+
return 0;
}
-#endif
static int compare_versions(const char *a, const char *b)
{
@@ -1098,23 +1119,42 @@ fail:
return rc;
}
+/* MCDI commands normally operate on the device that issues them. This
+ * function allows an MCDI command to be issued on behalf of another device,
+ * mainly a PF setting things for its VFs.
+ */
+int efx_ef100_lookup_client_id(struct efx_nic *efx, efx_qword_t pciefn, u32 *id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLIENT_HANDLE_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_CLIENT_HANDLE_IN_LEN);
+ u64 pciefn_flat = le64_to_cpu(pciefn.u64[0]);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, GET_CLIENT_HANDLE_IN_TYPE,
+ MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC);
+ MCDI_SET_QWORD(inbuf, GET_CLIENT_HANDLE_IN_FUNC,
+ pciefn_flat);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLIENT_HANDLE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, GET_CLIENT_HANDLE_OUT_HANDLE);
+ return 0;
+}
+
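
A minimal usage sketch, mirroring how the devlink port ops added later in this series use the helper: describe the target PCIe function in an efx_qword_t, exchange it for a client handle, then pass that handle to the client-aware MAC MCDIs (vf_idx and mac are placeholders):

	efx_qword_t pciefn;
	u32 client_id;
	int rc;

	/* Target: VF vf_idx on the calling PF */
	EFX_POPULATE_QWORD_3(pciefn,
			     PCIE_FUNCTION_PF, PCIE_FUNCTION_PF_NULL,
			     PCIE_FUNCTION_VF, vf_idx,
			     PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
	rc = efx_ef100_lookup_client_id(efx, pciefn, &client_id);
	if (!rc)
		rc = ef100_get_mac_address(efx, mac, client_id, true);
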
int ef100_probe_netdev_pf(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
int rc;
- rc = ef100_get_mac_address(efx, net_dev->perm_addr);
- if (rc)
- goto fail;
- /* Assign MAC address */
- eth_hw_addr_set(net_dev, net_dev->perm_addr);
- memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
-
- if (!nic_data->grp_mae)
+ if (!IS_ENABLED(CONFIG_SFC_SRIOV) || !nic_data->grp_mae)
return 0;
-#ifdef CONFIG_SFC_SRIOV
rc = efx_init_struct_tc(efx);
if (rc)
return rc;
@@ -1126,6 +1166,14 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
rc);
}
+ rc = efx_init_mae(efx);
+ if (rc)
+ netif_warn(efx, probe, net_dev,
+ "Failed to init MAE rc %d; representors will not function\n",
+ rc);
+ else
+ efx_ef100_init_reps(efx);
+
rc = efx_init_tc(efx);
if (rc) {
/* Either we don't have an MAE at all (i.e. legacy v-switching),
@@ -1141,10 +1189,6 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
net_dev->features |= NETIF_F_HW_TC;
efx->fixed_features |= NETIF_F_HW_TC;
}
-#endif
- return 0;
-
-fail:
return rc;
}
@@ -1157,6 +1201,11 @@ void ef100_remove(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
+ if (IS_ENABLED(CONFIG_SFC_SRIOV) && efx->mae) {
+ efx_ef100_fini_reps(efx);
+ efx_fini_mae(efx);
+ }
+
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
if (nic_data)
@@ -1249,9 +1298,8 @@ const struct efx_nic_type ef100_pf_nic_type = {
.update_stats = ef100_update_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
-#ifdef CONFIG_SFC_SRIOV
- .sriov_configure = efx_ef100_sriov_configure,
-#endif
+ .sriov_configure = IS_ENABLED(CONFIG_SFC_SRIOV) ?
+ efx_ef100_sriov_configure : NULL,
/* Per-type bar/size configuration not used on ef100. Location of
* registers is defined by extended capabilities.
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
index 0295933145fa..f1ed481c1260 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.h
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -74,6 +74,10 @@ struct ef100_nic_data {
u64 stats[EF100_STAT_COUNT];
u32 base_mport;
bool have_mport; /* base_mport was populated successfully */
+ u32 own_mport;
+ u32 local_mae_intf; /* interface_idx that corresponds to us, in mport enumerate */
+ bool have_own_mport; /* own_mport was populated successfully */
+ bool have_local_intf; /* local_mae_intf was populated successfully */
bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs;
@@ -88,4 +92,7 @@ int efx_ef100_init_datapath_caps(struct efx_nic *efx);
int ef100_phy_probe(struct efx_nic *efx);
int ef100_filter_table_probe(struct efx_nic *efx);
+int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address,
+ int client_handle, bool empty_ok);
+int efx_ef100_lookup_client_id(struct efx_nic *efx, efx_qword_t pciefn, u32 *id);
#endif /* EFX_EF100_NIC_H */
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index 81ab22c74635..0b3083ef0ead 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -9,12 +9,14 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <linux/rhashtable.h>
#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#include "tc_bindings.h"
+#include "efx_devlink.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
@@ -242,14 +244,11 @@ fail1:
static int efx_ef100_configure_rep(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;
- u32 selector;
int rc;
efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
- /* Construct mport selector for corresponding VF */
- efx_mae_mport_vf(efx, efv->idx, &selector);
/* Look up actual mport ID */
- rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
+ rc = efx_mae_lookup_mport(efx, efv->idx, &efv->mport);
if (rc)
return rc;
pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
@@ -299,6 +298,7 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
i, rc);
goto fail1;
}
+ ef100_rep_set_devlink_port(efv);
rc = register_netdev(efv->net_dev);
if (rc) {
pci_err(efx->pci_dev,
@@ -310,6 +310,7 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
efv->net_dev->name);
return 0;
fail2:
+ ef100_rep_unset_devlink_port(efv);
efx_ef100_deconfigure_rep(efv);
fail1:
efx_ef100_rep_destroy_netdev(efv);
@@ -325,6 +326,7 @@ void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
return;
netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
unregister_netdev(rep_dev);
+ ef100_rep_unset_devlink_port(efv);
efx_ef100_deconfigure_rep(efv);
efx_ef100_rep_destroy_netdev(efv);
}
@@ -341,6 +343,53 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx)
efx_ef100_vfrep_destroy(efx, efv);
}
+static bool ef100_mport_is_pcie_vnic(struct mae_mport_desc *mport_desc)
+{
+ return mport_desc->mport_type == MAE_MPORT_DESC_MPORT_TYPE_VNIC &&
+ mport_desc->vnic_client_type == MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION;
+}
+
+bool ef100_mport_on_local_intf(struct efx_nic *efx,
+ struct mae_mport_desc *mport_desc)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ bool pcie_func;
+
+ pcie_func = ef100_mport_is_pcie_vnic(mport_desc);
+
+ return nic_data->have_local_intf && pcie_func &&
+ mport_desc->interface_idx == nic_data->local_mae_intf;
+}
+
+bool ef100_mport_is_vf(struct mae_mport_desc *mport_desc)
+{
+ bool pcie_func;
+
+ pcie_func = ef100_mport_is_pcie_vnic(mport_desc);
+ return pcie_func && (mport_desc->vf_idx != MAE_MPORT_DESC_VF_IDX_NULL);
+}
+
+void efx_ef100_init_reps(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ nic_data->have_local_intf = false;
+ rc = efx_mae_enumerate_mports(efx);
+ if (rc)
+ pci_warn(efx->pci_dev,
+ "Could not enumerate mports (rc=%d), are we admin?",
+ rc);
+}
+
+void efx_ef100_fini_reps(struct efx_nic *efx)
+{
+ struct efx_mae *mae = efx->mae;
+
+ rhashtable_free_and_destroy(&mae->mports_ht, efx_mae_remove_mport,
+ NULL);
+}
+
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
index c21bc716f847..a042525a2240 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.h
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -22,6 +22,8 @@ struct efx_rep_sw_stats {
atomic64_t rx_dropped, tx_errors;
};
+struct devlink_port;
+
/**
* struct efx_rep - Private data for an Efx representor
*
@@ -39,6 +41,7 @@ struct efx_rep_sw_stats {
* @rx_lock: protects @rx_list
* @napi: NAPI control structure
* @stats: software traffic counters for netdev stats
+ * @dl_port: devlink port associated to this netdev representor
*/
struct efx_rep {
struct efx_nic *parent;
@@ -54,6 +57,7 @@ struct efx_rep {
spinlock_t rx_lock;
struct napi_struct napi;
struct efx_rep_sw_stats stats;
+ struct devlink_port *dl_port;
};
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
@@ -67,4 +71,10 @@ void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
*/
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
extern const struct net_device_ops efx_ef100_rep_netdev_ops;
+void efx_ef100_init_reps(struct efx_nic *efx);
+void efx_ef100_fini_reps(struct efx_nic *efx);
+struct mae_mport_desc;
+bool ef100_mport_on_local_intf(struct efx_nic *efx,
+ struct mae_mport_desc *mport_desc);
+bool ef100_mport_is_vf(struct mae_mport_desc *mport_desc);
#endif /* EF100_REP_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3a86f1213a05..02c2adeb0a12 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1028,6 +1028,10 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
rc = efx_register_netdev(efx);
if (!rc)
return 0;
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
new file mode 100644
index 000000000000..381b805659d3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef100_nic.h"
+#include "efx_devlink.h"
+#include <linux/rtc.h>
+#include "mcdi.h"
+#include "mcdi_functions.h"
+#include "mcdi_pcol.h"
+#ifdef CONFIG_SFC_SRIOV
+#include "mae.h"
+#include "ef100_rep.h"
+#endif
+
+struct efx_devlink {
+ struct efx_nic *efx;
+};
+
+#ifdef CONFIG_SFC_SRIOV
+static void efx_devlink_del_port(struct devlink_port *dl_port)
+{
+ if (!dl_port)
+ return;
+ devl_port_unregister(dl_port);
+}
+
+static int efx_devlink_add_port(struct efx_nic *efx,
+ struct mae_mport_desc *mport)
+{
+ bool external = false;
+
+ if (!ef100_mport_on_local_intf(efx, mport))
+ external = true;
+
+ switch (mport->mport_type) {
+ case MAE_MPORT_DESC_MPORT_TYPE_VNIC:
+ if (mport->vf_idx != MAE_MPORT_DESC_VF_IDX_NULL)
+ devlink_port_attrs_pci_vf_set(&mport->dl_port, 0, mport->pf_idx,
+ mport->vf_idx,
+ external);
+ else
+ devlink_port_attrs_pci_pf_set(&mport->dl_port, 0, mport->pf_idx,
+ external);
+ break;
+ default:
+ /* MAE_MPORT_DESC_MPORT_ALIAS and UNDEFINED */
+ return 0;
+ }
+
+ mport->dl_port.index = mport->mport_id;
+
+ return devl_port_register(efx->devlink, &mport->dl_port, mport->mport_id);
+}
+
+static int efx_devlink_port_addr_get(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink = devlink_priv(port->devlink);
+ struct mae_mport_desc *mport_desc;
+ efx_qword_t pciefn;
+ u32 client_id;
+ int rc = 0;
+
+ mport_desc = container_of(port, struct mae_mport_desc, dl_port);
+
+ if (!ef100_mport_on_local_intf(devlink->efx, mport_desc)) {
+ rc = -EINVAL;
+ NL_SET_ERR_MSG_FMT(extack,
+ "Port not on local interface (mport: %u)",
+ mport_desc->mport_id);
+ goto out;
+ }
+
+ if (ef100_mport_is_vf(mport_desc))
+ EFX_POPULATE_QWORD_3(pciefn,
+ PCIE_FUNCTION_PF, PCIE_FUNCTION_PF_NULL,
+ PCIE_FUNCTION_VF, mport_desc->vf_idx,
+ PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
+ else
+ EFX_POPULATE_QWORD_3(pciefn,
+ PCIE_FUNCTION_PF, mport_desc->pf_idx,
+ PCIE_FUNCTION_VF, PCIE_FUNCTION_VF_NULL,
+ PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
+
+ rc = efx_ef100_lookup_client_id(devlink->efx, pciefn, &client_id);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "No internal client_ID for port (mport: %u)",
+ mport_desc->mport_id);
+ goto out;
+ }
+
+ rc = ef100_get_mac_address(devlink->efx, hw_addr, client_id, true);
+ if (rc != 0)
+ NL_SET_ERR_MSG_FMT(extack,
+ "No available MAC for port (mport: %u)",
+ mport_desc->mport_id);
+out:
+ *hw_addr_len = ETH_ALEN;
+ return rc;
+}
+
+static int efx_devlink_port_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(1));
+ struct efx_devlink *devlink = devlink_priv(port->devlink);
+ struct mae_mport_desc *mport_desc;
+ efx_qword_t pciefn;
+ u32 client_id;
+ int rc;
+
+ mport_desc = container_of(port, struct mae_mport_desc, dl_port);
+
+ if (!ef100_mport_is_vf(mport_desc)) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "port mac change not allowed (mport: %u)",
+ mport_desc->mport_id);
+ return -EPERM;
+ }
+
+ EFX_POPULATE_QWORD_3(pciefn,
+ PCIE_FUNCTION_PF, PCIE_FUNCTION_PF_NULL,
+ PCIE_FUNCTION_VF, mport_desc->vf_idx,
+ PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
+
+ rc = efx_ef100_lookup_client_id(devlink->efx, pciefn, &client_id);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "No internal client_ID for port (mport: %u)",
+ mport_desc->mport_id);
+ return rc;
+ }
+
+ MCDI_SET_DWORD(inbuf, SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE,
+ client_id);
+
+ ether_addr_copy(MCDI_PTR(inbuf, SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS),
+ hw_addr);
+
+ rc = efx_mcdi_rpc(devlink->efx, MC_CMD_SET_CLIENT_MAC_ADDRESSES, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ if (rc)
+ NL_SET_ERR_MSG_FMT(extack,
+ "sfc MC_CMD_SET_CLIENT_MAC_ADDRESSES mcdi error (mport: %u)",
+ mport_desc->mport_id);
+
+ return rc;
+}
+
+#endif
+
+static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int partition_type,
+ const char *version_name)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ u16 version[4];
+ int rc;
+
+ rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL,
+ 0);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n",
+ version_name);
+ return rc;
+ }
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", version[0],
+ version[1], version[2], version[3]);
+ devlink_info_version_stored_put(req, version_name, buf);
+
+ return 0;
+}
+
+static int efx_devlink_info_stored_versions(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ int rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_BUNDLE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
+ if (rc)
+ return rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_MC_FIRMWARE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
+ if (rc)
+ return rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
+ if (rc)
+ return rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_ROM,
+ EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
+ if (rc)
+ return rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
+ EFX_DEVLINK_INFO_VERSION_FW_UEFI);
+ return rc;
+}
+
+#define EFX_VER_FLAG(_f) \
+ (MC_CMD_GET_VERSION_V5_OUT_ ## _f ## _PRESENT_LBN)
+
+static void efx_devlink_info_running_v2(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int flags, efx_dword_t *outbuf)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+ struct rtc_time build_date;
+ unsigned int build_id;
+ size_t offset;
+ __maybe_unused u64 tstamp;
+
+ if (flags & BIT(EFX_VER_FLAG(BOARD_EXT_INFO))) {
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%s",
+ MCDI_PTR(outbuf, GET_VERSION_V2_OUT_BOARD_NAME));
+ devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ buf);
+
+ /* Favour full board version if present (in V5 or later) */
+ if (~flags & BIT(EFX_VER_FLAG(BOARD_VERSION))) {
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u",
+ MCDI_DWORD(outbuf,
+ GET_VERSION_V2_OUT_BOARD_REVISION));
+ devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ }
+
+ ver.str = MCDI_PTR(outbuf, GET_VERSION_V2_OUT_BOARD_SERIAL);
+ if (ver.str[0])
+ devlink_info_board_serial_number_put(req, ver.str);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(FPGA_EXT_INFO))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V2_OUT_FPGA_VERSION);
+ offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u_%c%u",
+ le32_to_cpu(ver.dwords[0]),
+ 'A' + le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]));
+
+ ver.str = MCDI_PTR(outbuf, GET_VERSION_V2_OUT_FPGA_EXTRA);
+ if (ver.str[0])
+ snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset,
+ " (%s)", ver.str);
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_FPGA_REV,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(CMC_EXT_INFO))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V2_OUT_CMCFW_VERSION);
+ offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]),
+ le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+#ifdef CONFIG_RTC_LIB
+ tstamp = MCDI_QWORD(outbuf,
+ GET_VERSION_V2_OUT_CMCFW_BUILD_DATE);
+ if (tstamp) {
+ rtc_time64_to_tm(tstamp, &build_date);
+ snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset,
+ " (%ptRd)", &build_date);
+ }
+#endif
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_CMC,
+ buf);
+ }
+
+ ver.words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_V2_OUT_VERSION);
+ offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le16_to_cpu(ver.words[0]), le16_to_cpu(ver.words[1]),
+ le16_to_cpu(ver.words[2]), le16_to_cpu(ver.words[3]));
+ if (flags & BIT(EFX_VER_FLAG(MCFW_EXT_INFO))) {
+ build_id = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_MCFW_BUILD_ID);
+ snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset,
+ " (%x) %s", build_id,
+ MCDI_PTR(outbuf, GET_VERSION_V2_OUT_MCFW_BUILD_NAME));
+ }
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
+ buf);
+
+ if (flags & BIT(EFX_VER_FLAG(SUCFW_EXT_INFO))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V2_OUT_SUCFW_VERSION);
+#ifdef CONFIG_RTC_LIB
+ tstamp = MCDI_QWORD(outbuf,
+ GET_VERSION_V2_OUT_SUCFW_BUILD_DATE);
+ rtc_time64_to_tm(tstamp, &build_date);
+#else
+ memset(&build_date, 0, sizeof(build_date));
+#endif
+ build_id = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_SUCFW_CHIP_ID);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN,
+ "%u.%u.%u.%u type %x (%ptRd)",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]), le32_to_cpu(ver.dwords[3]),
+ build_id, &build_date);
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC,
+ buf);
+ }
+}
+
+static void efx_devlink_info_running_v3(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int flags, efx_dword_t *outbuf)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+
+ if (flags & BIT(EFX_VER_FLAG(DATAPATH_HW_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V3_OUT_DATAPATH_HW_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_DATAPATH_HW,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(DATAPATH_FW_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V3_OUT_DATAPATH_FW_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_DATAPATH_FW,
+ buf);
+ }
+}
+
+static void efx_devlink_info_running_v4(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int flags, efx_dword_t *outbuf)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+
+ if (flags & BIT(EFX_VER_FLAG(SOC_BOOT_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SOC_BOOT_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_SOC_BOOT,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(SOC_UBOOT_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SOC_UBOOT_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_SOC_UBOOT,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(SOC_MAIN_ROOTFS_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_SOC_MAIN,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(SOC_RECOVERY_BUILDROOT_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_SOC_RECOVERY,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(SUCFW_VERSION)) &&
+ ~flags & BIT(EFX_VER_FLAG(SUCFW_EXT_INFO))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SUCFW_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC,
+ buf);
+ }
+}
+
+static void efx_devlink_info_running_v5(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int flags, efx_dword_t *outbuf)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+
+ if (flags & BIT(EFX_VER_FLAG(BOARD_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V5_OUT_BOARD_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(BUNDLE_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V5_OUT_BUNDLE_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ buf);
+ }
+}
+
+static int efx_devlink_info_running_versions(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_V5_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_VERSION_EXT_IN_LEN);
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+ size_t outlength;
+ unsigned int flags;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc || outlength < MC_CMD_GET_VERSION_OUT_LEN) {
+ netif_err(efx, drv, efx->net_dev,
+ "mcdi MC_CMD_GET_VERSION failed\n");
+ return rc ? rc : -EIO;
+ }
+
+ /* Handle previous output */
+ if (outlength < MC_CMD_GET_VERSION_V2_OUT_LEN) {
+ ver.words = (__le16 *)MCDI_PTR(outbuf,
+ GET_VERSION_EXT_OUT_VERSION);
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le16_to_cpu(ver.words[0]),
+ le16_to_cpu(ver.words[1]),
+ le16_to_cpu(ver.words[2]),
+ le16_to_cpu(ver.words[3]));
+
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
+ buf);
+ return 0;
+ }
+
+ /* Handle V2 additions */
+ flags = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_FLAGS);
+ efx_devlink_info_running_v2(efx, req, flags, outbuf);
+
+ if (outlength < MC_CMD_GET_VERSION_V3_OUT_LEN)
+ return 0;
+
+ /* Handle V3 additions */
+ efx_devlink_info_running_v3(efx, req, flags, outbuf);
+
+ if (outlength < MC_CMD_GET_VERSION_V4_OUT_LEN)
+ return 0;
+
+ /* Handle V4 additions */
+ efx_devlink_info_running_v4(efx, req, flags, outbuf);
+
+ if (outlength < MC_CMD_GET_VERSION_V5_OUT_LEN)
+ return 0;
+
+ /* Handle V5 additions */
+ efx_devlink_info_running_v5(efx, req, flags, outbuf);
+
+ return 0;
+}
+
+#define EFX_MAX_SERIALNUM_LEN (ETH_ALEN * 2 + 1)
+
+static int efx_devlink_info_board_cfg(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ char sn[EFX_MAX_SERIALNUM_LEN];
+ u8 mac_address[ETH_ALEN];
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, (u8 *)mac_address, NULL, NULL);
+ if (!rc) {
+ snprintf(sn, EFX_MAX_SERIALNUM_LEN, "%pm", mac_address);
+ devlink_info_serial_number_put(req, sn);
+ }
+ return rc;
+}
+
+static int efx_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink_private = devlink_priv(devlink);
+ struct efx_nic *efx = devlink_private->efx;
+ int rc;
+
+ /* Several different MCDI commands are used. We report the first
+ * error through extack and return at that point. Specific error
+ * information is reported via system messages.
+ */
+ rc = efx_devlink_info_board_cfg(efx, req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Getting board info failed");
+ return rc;
+ }
+ rc = efx_devlink_info_stored_versions(efx, req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed");
+ return rc;
+ }
+ rc = efx_devlink_info_running_versions(efx, req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops sfc_devlink_ops = {
+ .info_get = efx_devlink_info_get,
+#ifdef CONFIG_SFC_SRIOV
+ .port_function_hw_addr_get = efx_devlink_port_addr_get,
+ .port_function_hw_addr_set = efx_devlink_port_addr_set,
+#endif
+};
+
+#ifdef CONFIG_SFC_SRIOV
+static struct devlink_port *ef100_set_devlink_port(struct efx_nic *efx, u32 idx)
+{
+ struct mae_mport_desc *mport;
+ u32 id;
+ int rc;
+
+ if (efx_mae_lookup_mport(efx, idx, &id)) {
+ /* This should not happen. */
+ if (idx == MAE_MPORT_DESC_VF_IDX_NULL)
+ pci_warn_once(efx->pci_dev, "No mport ID found for PF.\n");
+ else
+ pci_warn_once(efx->pci_dev, "No mport ID found for VF %u.\n",
+ idx);
+ return NULL;
+ }
+
+ mport = efx_mae_get_mport(efx, id);
+ if (!mport) {
+ /* This should not happen. */
+ if (idx == MAE_MPORT_DESC_VF_IDX_NULL)
+ pci_warn_once(efx->pci_dev, "No mport found for PF.\n");
+ else
+ pci_warn_once(efx->pci_dev, "No mport found for VF %u.\n",
+ idx);
+ return NULL;
+ }
+
+ rc = efx_devlink_add_port(efx, mport);
+ if (rc) {
+ if (idx == MAE_MPORT_DESC_VF_IDX_NULL)
+ pci_warn(efx->pci_dev,
+ "devlink port creation for PF failed.\n");
+ else
+ pci_warn(efx->pci_dev,
+ "devlink_port creation for VF %u failed.\n",
+ idx);
+ return NULL;
+ }
+
+ return &mport->dl_port;
+}
+
+void ef100_rep_set_devlink_port(struct efx_rep *efv)
+{
+ efv->dl_port = ef100_set_devlink_port(efv->parent, efv->idx);
+}
+
+void ef100_pf_set_devlink_port(struct efx_nic *efx)
+{
+ efx->dl_port = ef100_set_devlink_port(efx, MAE_MPORT_DESC_VF_IDX_NULL);
+}
+
+void ef100_rep_unset_devlink_port(struct efx_rep *efv)
+{
+ efx_devlink_del_port(efv->dl_port);
+}
+
+void ef100_pf_unset_devlink_port(struct efx_nic *efx)
+{
+ efx_devlink_del_port(efx->dl_port);
+}
+#endif
+
+void efx_fini_devlink_lock(struct efx_nic *efx)
+{
+ if (efx->devlink)
+ devl_lock(efx->devlink);
+}
+
+void efx_fini_devlink_and_unlock(struct efx_nic *efx)
+{
+ if (efx->devlink) {
+ devl_unregister(efx->devlink);
+ devl_unlock(efx->devlink);
+ devlink_free(efx->devlink);
+ efx->devlink = NULL;
+ }
+}
+
+int efx_probe_devlink_and_lock(struct efx_nic *efx)
+{
+ struct efx_devlink *devlink_private;
+
+ if (efx->type->is_vf)
+ return 0;
+
+ efx->devlink = devlink_alloc(&sfc_devlink_ops,
+ sizeof(struct efx_devlink),
+ &efx->pci_dev->dev);
+ if (!efx->devlink)
+ return -ENOMEM;
+
+ devl_lock(efx->devlink);
+ devlink_private = devlink_priv(efx->devlink);
+ devlink_private->efx = efx;
+
+ devl_register(efx->devlink);
+
+ return 0;
+}
+
+void efx_probe_devlink_unlock(struct efx_nic *efx)
+{
+ if (!efx->devlink)
+ return;
+
+ devl_unlock(efx->devlink);
+}
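The helpers above keep devlink registration under the devlink instance lock. A minimal sketch of the intended probe-time call order, assuming a hypothetical caller (only the efx_*devlink* names are from this patch; everything else is illustrative):

    static int example_probe(struct efx_nic *efx)
    {
            int rc;

            /* allocates efx->devlink, takes devl_lock(), devl_register()s */
            rc = efx_probe_devlink_and_lock(efx);
            if (rc)
                    return rc;

            /* ... create devlink ports, register the netdev, etc. ... */

            efx_probe_devlink_unlock(efx);  /* drops the instance lock */
            return 0;
    }

Teardown mirrors this: efx_fini_devlink_lock() retakes the lock, callers remove their ports, and efx_fini_devlink_and_unlock() unregisters and frees the devlink instance.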
diff --git a/drivers/net/ethernet/sfc/efx_devlink.h b/drivers/net/ethernet/sfc/efx_devlink.h
new file mode 100644
index 000000000000..e5fd5e1dcc27
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_devlink.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_DEVLINK_H
+#define _EFX_DEVLINK_H
+
+#include "net_driver.h"
+#include <net/devlink.h>
+
+/* Custom devlink-info version object names for details that do not map to the
+ * generic standardized names.
+ */
+#define EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC "fw.mgmt.suc"
+#define EFX_DEVLINK_INFO_VERSION_FW_MGMT_CMC "fw.mgmt.cmc"
+#define EFX_DEVLINK_INFO_VERSION_FPGA_REV "fpga.rev"
+#define EFX_DEVLINK_INFO_VERSION_DATAPATH_HW "fpga.app"
+#define EFX_DEVLINK_INFO_VERSION_DATAPATH_FW DEVLINK_INFO_VERSION_GENERIC_FW_APP
+#define EFX_DEVLINK_INFO_VERSION_SOC_BOOT "coproc.boot"
+#define EFX_DEVLINK_INFO_VERSION_SOC_UBOOT "coproc.uboot"
+#define EFX_DEVLINK_INFO_VERSION_SOC_MAIN "coproc.main"
+#define EFX_DEVLINK_INFO_VERSION_SOC_RECOVERY "coproc.recovery"
+#define EFX_DEVLINK_INFO_VERSION_FW_EXPROM "fw.exprom"
+#define EFX_DEVLINK_INFO_VERSION_FW_UEFI "fw.uefi"
+
+#define EFX_MAX_VERSION_INFO_LEN 64
+
+int efx_probe_devlink_and_lock(struct efx_nic *efx);
+void efx_probe_devlink_unlock(struct efx_nic *efx);
+void efx_fini_devlink_lock(struct efx_nic *efx);
+void efx_fini_devlink_and_unlock(struct efx_nic *efx);
+
+#ifdef CONFIG_SFC_SRIOV
+struct efx_rep;
+
+void ef100_pf_set_devlink_port(struct efx_nic *efx);
+void ef100_rep_set_devlink_port(struct efx_rep *efv);
+void ef100_pf_unset_devlink_port(struct efx_nic *efx);
+void ef100_rep_unset_devlink_port(struct efx_rep *efv);
+#endif
+#endif /* _EFX_DEVLINK_H */
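With the custom names above alongside the generic ones, `devlink dev info` on such a device would be expected to report entries along these lines (a sketch of the report shape with illustrative values, not captured output):

    running:
      fw.mgmt       1.2.3.4
      fw.mgmt.suc   5.6.7.8
      fw.mgmt.cmc   1.0.0.2
      fpga.rev      2.0.1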
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 583baf69981c..2d32abe5f478 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -9,8 +9,11 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <linux/rhashtable.h>
+#include "ef100_nic.h"
#include "mae.h"
#include "mcdi.h"
+#include "mcdi_pcol.h"
#include "mcdi_pcol_mae.h"
int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
@@ -94,7 +97,7 @@ void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32
}
/* id is really only 24 bits wide */
-int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
+int efx_mae_fw_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN);
@@ -485,11 +488,193 @@ int efx_mae_free_counter(struct efx_nic *efx, struct efx_tc_counter *cnt)
return 0;
}
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 vf_idx, u32 *id)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct efx_mae *mae = efx->mae;
+ struct rhashtable_iter walk;
+ struct mae_mport_desc *m;
+ int rc = -ENOENT;
+
+ rhashtable_walk_enter(&mae->mports_ht, &walk);
+ rhashtable_walk_start(&walk);
+ while ((m = rhashtable_walk_next(&walk)) != NULL) {
+ if (m->mport_type == MAE_MPORT_DESC_MPORT_TYPE_VNIC &&
+ m->interface_idx == nic_data->local_mae_intf &&
+ m->pf_idx == 0 &&
+ m->vf_idx == vf_idx) {
+ *id = m->mport_id;
+ rc = 0;
+ break;
+ }
+ }
+ rhashtable_walk_stop(&walk);
+ rhashtable_walk_exit(&walk);
+ return rc;
+}
+
static bool efx_mae_asl_id(u32 id)
{
return !!(id & BIT(31));
}
+/* mport handling */
+static const struct rhashtable_params efx_mae_mports_ht_params = {
+ .key_len = sizeof(u32),
+ .key_offset = offsetof(struct mae_mport_desc, mport_id),
+ .head_offset = offsetof(struct mae_mport_desc, linkage),
+};
+
+struct mae_mport_desc *efx_mae_get_mport(struct efx_nic *efx, u32 mport_id)
+{
+ return rhashtable_lookup_fast(&efx->mae->mports_ht, &mport_id,
+ efx_mae_mports_ht_params);
+}
+
+static int efx_mae_add_mport(struct efx_nic *efx, struct mae_mport_desc *desc)
+{
+ struct efx_mae *mae = efx->mae;
+ int rc;
+
+ rc = rhashtable_insert_fast(&mae->mports_ht, &desc->linkage,
+ efx_mae_mports_ht_params);
+
+ if (rc) {
+ pci_err(efx->pci_dev, "Failed to insert MPORT %08x, rc %d\n",
+ desc->mport_id, rc);
+ kfree(desc);
+ return rc;
+ }
+
+ return rc;
+}
+
+void efx_mae_remove_mport(void *desc, void *arg)
+{
+ struct mae_mport_desc *mport = desc;
+
+ synchronize_rcu();
+ kfree(mport);
+}
+
+static int efx_mae_process_mport(struct efx_nic *efx,
+ struct mae_mport_desc *desc)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct mae_mport_desc *mport;
+
+ mport = efx_mae_get_mport(efx, desc->mport_id);
+ if (!IS_ERR_OR_NULL(mport)) {
+ netif_err(efx, drv, efx->net_dev,
+ "mport with id %u does exist!!!\n", desc->mport_id);
+ return -EEXIST;
+ }
+
+ if (nic_data->have_own_mport &&
+ desc->mport_id == nic_data->own_mport) {
+ WARN_ON(desc->mport_type != MAE_MPORT_DESC_MPORT_TYPE_VNIC);
+ WARN_ON(desc->vnic_client_type !=
+ MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION);
+ nic_data->local_mae_intf = desc->interface_idx;
+ nic_data->have_local_intf = true;
+ pci_dbg(efx->pci_dev, "MAE interface_idx is %u\n",
+ nic_data->local_mae_intf);
+ }
+
+ return efx_mae_add_mport(efx, desc);
+}
+
+#define MCDI_MPORT_JOURNAL_LEN \
+ ALIGN(MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX_MCDI2, 4)
+
+int efx_mae_enumerate_mports(struct efx_nic *efx)
+{
+ efx_dword_t *outbuf = kzalloc(MCDI_MPORT_JOURNAL_LEN, GFP_KERNEL);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN);
+ MCDI_DECLARE_STRUCT_PTR(desc);
+ size_t outlen, stride, count;
+ int rc = 0, i;
+
+ if (!outbuf)
+ return -ENOMEM;
+ do {
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_READ_JOURNAL, inbuf,
+ sizeof(inbuf), outbuf,
+ MCDI_MPORT_JOURNAL_LEN, &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST) {
+ rc = -EIO;
+ goto fail;
+ }
+ count = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT);
+ if (!count)
+ continue; /* not break; we want to look at MORE flag */
+ stride = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC);
+ if (stride < MAE_MPORT_DESC_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+ if (outlen < MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LEN(count * stride)) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct mae_mport_desc *d;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ desc = (efx_dword_t *)
+ _MCDI_PTR(outbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST +
+ i * stride);
+ d->mport_id = MCDI_STRUCT_DWORD(desc, MAE_MPORT_DESC_MPORT_ID);
+ d->flags = MCDI_STRUCT_DWORD(desc, MAE_MPORT_DESC_FLAGS);
+ d->caller_flags = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_CALLER_FLAGS);
+ d->mport_type = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_MPORT_TYPE);
+ switch (d->mport_type) {
+ case MAE_MPORT_DESC_MPORT_TYPE_NET_PORT:
+ d->port_idx = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_NET_PORT_IDX);
+ break;
+ case MAE_MPORT_DESC_MPORT_TYPE_ALIAS:
+ d->alias_mport_id = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID);
+ break;
+ case MAE_MPORT_DESC_MPORT_TYPE_VNIC:
+ d->vnic_client_type = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_VNIC_CLIENT_TYPE);
+ d->interface_idx = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE);
+ d->pf_idx = MCDI_STRUCT_WORD(desc,
+ MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX);
+ d->vf_idx = MCDI_STRUCT_WORD(desc,
+ MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX);
+ break;
+ default:
+ /* Unknown mport_type, just accept it */
+ break;
+ }
+ rc = efx_mae_process_mport(efx, d);
+ /* Any failure will be due to memory allocation failure,
+ * so there is no point in trying subsequent entries.
+ */
+ if (rc)
+ goto fail;
+ }
+ } while (MCDI_FIELD(outbuf, MAE_MPORT_READ_JOURNAL_OUT, MORE) &&
+ !WARN_ON(!count));
+fail:
+ kfree(outbuf);
+ return rc;
+}
+
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
@@ -805,3 +990,34 @@ int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
return -EIO;
return 0;
}
+
+int efx_init_mae(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct efx_mae *mae;
+ int rc;
+
+ if (!nic_data->have_mport)
+ return -EINVAL;
+
+ mae = kmalloc(sizeof(*mae), GFP_KERNEL);
+ if (!mae)
+ return -ENOMEM;
+
+ rc = rhashtable_init(&mae->mports_ht, &efx_mae_mports_ht_params);
+ if (rc < 0) {
+ kfree(mae);
+ return rc;
+ }
+ efx->mae = mae;
+ mae->efx = efx;
+ return 0;
+}
+
+void efx_fini_mae(struct efx_nic *efx)
+{
+ struct efx_mae *mae = efx->mae;
+
+ kfree(mae);
+ efx->mae = NULL;
+}
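Taken together, the new MAE code gives probe a three-step pattern; a hedged sketch (the efx_* calls are from this patch, the wrapper function is illustrative, and error handling is trimmed):

    static int example_mae_setup(struct efx_nic *efx)
    {
            u32 mport_id;
            int rc;

            rc = efx_init_mae(efx);         /* allocate efx->mae, init mports_ht */
            if (rc)
                    return rc;

            rc = efx_mae_enumerate_mports(efx); /* fill the cache from the MC journal */
            if (rc)
                    goto fail;

            /* cached lookup for VF 0; returns -ENOENT if not present */
            rc = efx_mae_lookup_mport(efx, 0, &mport_id);
            if (rc)
                    goto fail;
            return 0;

    fail:
            efx_fini_mae(efx);
            return rc;
    }

Note that a full teardown is also expected to free the cached descriptors, e.g. via rhashtable_free_and_destroy() with efx_mae_remove_mport() as the free callback, before efx_fini_mae() releases the struct itself.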
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 72343e90e222..bec293a06733 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -13,6 +13,7 @@
#define EF100_MAE_H
/* MCDI interface for the ef100 Match-Action Engine */
+#include <net/devlink.h>
#include "net_driver.h"
#include "tc.h"
#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
@@ -27,6 +28,40 @@ void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+struct mae_mport_desc {
+ u32 mport_id;
+ u32 flags;
+ u32 caller_flags; /* enum mae_mport_desc_caller_flags */
+ u32 mport_type; /* MAE_MPORT_DESC_MPORT_TYPE_* */
+ union {
+ u32 port_idx; /* for mport_type == NET_PORT */
+ u32 alias_mport_id; /* for mport_type == ALIAS */
+ struct { /* for mport_type == VNIC */
+ u32 vnic_client_type; /* MAE_MPORT_DESC_VNIC_CLIENT_TYPE_* */
+ u32 interface_idx;
+ u16 pf_idx;
+ u16 vf_idx;
+ };
+ };
+ struct rhash_head linkage;
+ struct devlink_port dl_port;
+};
+
+int efx_mae_enumerate_mports(struct efx_nic *efx);
+struct mae_mport_desc *efx_mae_get_mport(struct efx_nic *efx, u32 mport_id);
+void efx_mae_put_mport(struct efx_nic *efx, struct mae_mport_desc *desc);
+
+/**
+ * struct efx_mae - MAE information
+ *
+ * @efx: The associated NIC
+ * @mports_ht: m-port descriptions from MC_CMD_MAE_MPORT_READ_JOURNAL
+ */
+struct efx_mae {
+ struct efx_nic *efx;
+ struct rhashtable mports_ht;
+};
+
int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
void efx_mae_counters_grant_credits(struct work_struct *work);
@@ -60,4 +95,9 @@ int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id);
int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
+int efx_init_mae(struct efx_nic *efx);
+void efx_fini_mae(struct efx_nic *efx);
+void efx_mae_remove_mport(void *desc, void *arg);
+int efx_mae_fw_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 vf, u32 *id);
#endif /* EF100_MAE_H */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index af338208eae9..a7f2c31071e8 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -2175,6 +2175,78 @@ int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
return 0;
}
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
+ efx_dword_t *outbuf;
+ size_t outlen;
+ u32 flags;
+ int rc;
+
+ outbuf = kzalloc(MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2, GFP_KERNEL);
+ if (!outbuf)
+ return -ENOMEM;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_NVRAM_METADATA, inbuf,
+ sizeof(inbuf), outbuf,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2,
+ &outlen);
+ if (rc)
+ goto out_free;
+ if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
+ rc = -EIO;
+ goto out_free;
+ }
+
+ flags = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS);
+
+ if (desc && descsize > 0) {
+ if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN)) {
+ if (descsize <=
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)) {
+ rc = -E2BIG;
+ goto out_free;
+ }
+
+ strncpy(desc,
+ MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION),
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen));
+ desc[MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)] = '\0';
+ } else {
+ desc[0] = '\0';
+ }
+ }
+
+ if (subtype) {
+ if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
+ *subtype = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_SUBTYPE);
+ else
+ *subtype = 0;
+ }
+
+ if (version) {
+ if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN)) {
+ version[0] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_W);
+ version[1] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_X);
+ version[2] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Y);
+ version[3] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Z);
+ } else {
+ version[0] = 0;
+ version[1] = 0;
+ version[2] = 0;
+ version[3] = 0;
+ }
+ }
+
+out_free:
+ kfree(outbuf);
+ return rc;
+}
+
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
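A hedged usage sketch for the new metadata helper (the caller and buffer size are illustrative; only efx_mcdi_nvram_metadata() and its signature are from this patch):

    static int example_nvram_version(struct efx_nic *efx, unsigned int type)
    {
            char desc[64];
            u16 version[4];
            u32 subtype;
            int rc;

            rc = efx_mcdi_nvram_metadata(efx, type, &subtype, version,
                                         desc, sizeof(desc));
            if (rc)
                    return rc;      /* -E2BIG if desc[] is too small */

            pci_info(efx->pci_dev, "type %u: %u.%u.%u.%u (%s)\n", type,
                     version[0], version[1], version[2], version[3], desc);
            return 0;
    }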
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 7e35fec9da35..b139b76febff 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -229,6 +229,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_STRUCT_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_STRUCT_PTR(_buf, _field)))
/* Write a 16-bit field defined in the protocol as being big-endian. */
#define MCDI_STRUCT_SET_WORD_BE(_buf, _field, _value) do { \
BUILD_BUG_ON(_field ## _LEN != 2); \
@@ -241,6 +244,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+#define MCDI_STRUCT_DWORD(_buf, _field) \
+ EFX_DWORD_FIELD(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0)
/* Write a 32-bit field defined in the protocol as being big-endian. */
#define MCDI_STRUCT_SET_DWORD_BE(_buf, _field, _value) do { \
BUILD_BUG_ON(_field ## _LEN != 4); \
@@ -378,6 +383,9 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize);
int efx_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_handle_assertion(struct efx_nic *efx);
int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 3b49e216768b..fcd51d3992fa 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -845,6 +845,8 @@ enum efx_xdp_tx_queues_mode {
EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */
};
+struct efx_mae;
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -881,6 +883,7 @@ enum efx_xdp_tx_queues_mode {
* @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
+ * @mae: Details of the Match Action Engine
* @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
* @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
* @xdp_txq_queues_mode: XDP TX queues sharing strategy.
@@ -994,6 +997,8 @@ enum efx_xdp_tx_queues_mode {
* xdp_rxq_info structures?
* @netdev_notifier: Netdevice notifier.
* @tc: state for TC offload (EF100).
+ * @devlink: reference to devlink structure owned by this device
+ * @dl_port: devlink port associated with the PF
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem
@@ -1043,6 +1048,7 @@ struct efx_nic {
struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
+ struct efx_mae *mae;
unsigned int xdp_tx_queue_count;
struct efx_tx_queue **xdp_tx_queues;
@@ -1179,6 +1185,8 @@ struct efx_nic {
struct notifier_block netdev_notifier;
struct efx_tc_state *tc;
+ struct devlink *devlink;
+ struct devlink_port *dl_port;
unsigned int mem_bar;
u32 reg_base;
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
index 60e5b7c8ccf9..ef52ec71d197 100644
--- a/drivers/net/ethernet/sfc/siena/efx.c
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -1007,6 +1007,10 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
rc = efx_register_netdev(efx);
if (!rc)
return 0;
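The xdp_features assignments here and in the drivers below advertise which XDP actions the driver actually implements, letting the core reject unsupported attach modes up front. A minimal sketch for a driver with native XDP but no ndo_xdp_xmit (flag names are from this series' uAPI; the combination is illustrative):

    ndev->xdp_features = NETDEV_XDP_ACT_BASIC |    /* XDP_PASS/DROP/ABORTED/TX */
                         NETDEV_XDP_ACT_REDIRECT;  /* XDP_REDIRECT out of the driver */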
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 9b46579b5a10..2d7347b71c41 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -2104,6 +2104,9 @@ static int netsec_probe(struct platform_device *pdev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
ndev->hw_features = ndev->features;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
priv->rx_cksum_offload_flag = true;
ret = netsec_register_mdio(priv, phy_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 80efdeeb0b59..18acf7dd74e5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -159,15 +159,13 @@ disable:
return err;
}
-static int dwc_qos_remove(struct platform_device *pdev)
+static void dwc_qos_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
clk_disable_unprepare(priv->plat->pclk);
clk_disable_unprepare(priv->plat->stmmac_clk);
-
- return 0;
}
#define SDMEMCOMPPADCTRL 0x8800
@@ -384,7 +382,7 @@ error:
return err;
}
-static int tegra_eqos_remove(struct platform_device *pdev)
+static void tegra_eqos_remove(struct platform_device *pdev)
{
struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
@@ -394,15 +392,13 @@ static int tegra_eqos_remove(struct platform_device *pdev)
clk_disable_unprepare(eqos->clk_rx);
clk_disable_unprepare(eqos->clk_slave);
clk_disable_unprepare(eqos->clk_master);
-
- return 0;
}
struct dwc_eth_dwmac_data {
int (*probe)(struct platform_device *pdev,
struct plat_stmmacenet_data *data,
struct stmmac_resources *res);
- int (*remove)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
};
static const struct dwc_eth_dwmac_data dwc_qos_data = {
@@ -473,21 +469,16 @@ static int dwc_eth_dwmac_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
const struct dwc_eth_dwmac_data *data;
- int err;
data = device_get_match_data(&pdev->dev);
- err = stmmac_dvr_remove(&pdev->dev);
- if (err < 0)
- dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+ stmmac_dvr_remove(&pdev->dev);
- err = data->remove(pdev);
- if (err < 0)
- dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
+ data->remove(pdev);
stmmac_remove_config_dt(pdev, priv->plat);
- return err;
+ return 0;
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index bd52fb7cf486..ac8580f501e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -31,6 +31,12 @@
#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
+#define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0)
+#define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1)
+#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
+#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
+#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
+
struct imx_dwmac_ops {
u32 addr_width;
bool mac_rgmii_txclk_auto_adj;
@@ -90,6 +96,35 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
return ret;
}
+static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct imx_priv_data *dwmac = plat_dat->bsp_priv;
+ int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = MX93_GPR_ENET_QOS_INTF_SEL_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII;
+ break;
+ default:
+ dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
+ plat_dat->interface);
+ return -EINVAL;
+ }
+
+ val |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
+ MX93_GPR_ENET_QOS_INTF_MODE_MASK, val);
+}
+
static int imx_dwmac_clks_config(void *priv, bool enabled)
{
struct imx_priv_data *dwmac = priv;
@@ -188,7 +223,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
dwmac->clk_mem = NULL;
- if (of_machine_is_compatible("fsl,imx8dxl")) {
+
+ if (of_machine_is_compatible("fsl,imx8dxl") ||
+ of_machine_is_compatible("fsl,imx93")) {
dwmac->clk_mem = devm_clk_get(dev, "mem");
if (IS_ERR(dwmac->clk_mem)) {
dev_err(dev, "failed to get mem clock\n");
@@ -196,10 +233,11 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
}
- if (of_machine_is_compatible("fsl,imx8mp")) {
- /* Binding doc describes the property:
- is required by i.MX8MP.
- is optional for i.MX8DXL.
+ if (of_machine_is_compatible("fsl,imx8mp") ||
+ of_machine_is_compatible("fsl,imx93")) {
+ /* Binding doc describes the property:
+ * is required by i.MX8MP, i.MX93.
+ * is optional for i.MX8DXL.
*/
dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
if (IS_ERR(dwmac->intf_regmap))
@@ -296,9 +334,16 @@ static struct imx_dwmac_ops imx8dxl_dwmac_data = {
.set_intf_mode = imx8dxl_set_intf_mode,
};
+static struct imx_dwmac_ops imx93_dwmac_data = {
+ .addr_width = 32,
+ .mac_rgmii_txclk_auto_adj = true,
+ .set_intf_mode = imx93_set_intf_mode,
+};
+
static const struct of_device_id imx_dwmac_match[] = {
{ .compatible = "nxp,imx8mp-dwmac-eqos", .data = &imx8mp_dwmac_data },
{ .compatible = "nxp,imx8dxl-dwmac-eqos", .data = &imx8dxl_dwmac_data },
+ { .compatible = "nxp,imx93-dwmac-eqos", .data = &imx93_dwmac_data },
{ }
};
MODULE_DEVICE_TABLE(of, imx_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6656d76b6766..4b8fd11563e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1915,11 +1915,12 @@ err_remove_config_dt:
static int rk_gmac_remove(struct platform_device *pdev)
{
struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
- int ret = stmmac_dvr_remove(&pdev->dev);
+
+ stmmac_dvr_remove(&pdev->dev);
rk_gmac_powerdown(bsp_priv);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 710d7435733e..be3b1ebc06ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -371,11 +371,12 @@ err_remove_config_dt:
static int sti_dwmac_remove(struct platform_device *pdev)
{
struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
- int ret = stmmac_dvr_remove(&pdev->dev);
+
+ stmmac_dvr_remove(&pdev->dev);
clk_disable_unprepare(dwmac->clk);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 2b38a499a404..0616b3a04ff3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -421,9 +421,10 @@ static int stm32_dwmac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- int ret = stmmac_dvr_remove(&pdev->dev);
struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
+ stmmac_dvr_remove(&pdev->dev);
+
stm32_dwmac_clk_disable(priv->plat->bsp_priv);
if (dwmac->irq_pwr_wakeup >= 0) {
@@ -431,7 +432,7 @@ static int stm32_dwmac_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, false);
}
- return ret;
+ return 0;
}
static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 592b4067f9b8..16a7421715cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -567,6 +567,7 @@ struct tc_cbs_qopt_offload;
struct flow_cls_offload;
struct tc_taprio_qopt_offload;
struct tc_etf_qopt_offload;
+struct tc_query_caps_base;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
@@ -580,6 +581,8 @@ struct stmmac_tc_ops {
struct tc_taprio_qopt_offload *qopt);
int (*setup_etf)(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt);
+ int (*query_caps)(struct stmmac_priv *priv,
+ struct tc_query_caps_base *base);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -594,6 +597,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_taprio, __args)
#define stmmac_tc_setup_etf(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_etf, __args)
+#define stmmac_tc_query_caps(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, query_caps, __args)
struct stmmac_counters;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index bdbf86cb102a..3d15e1e92e18 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -345,7 +345,7 @@ int stmmac_xdp_open(struct net_device *dev);
void stmmac_xdp_release(struct net_device *dev);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
-int stmmac_dvr_remove(struct device *dev);
+void stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1a5b8dab5e9b..e4902a7bb61e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5992,6 +5992,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
struct stmmac_priv *priv = netdev_priv(ndev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return stmmac_tc_query_caps(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
@@ -7151,6 +7153,9 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_NDO_XMIT;
ret = stmmac_tc_init(priv, priv);
if (!ret) {
@@ -7347,7 +7352,7 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
* changes the link status, releases the DMA descriptor rings.
*/
-int stmmac_dvr_remove(struct device *dev)
+void stmmac_dvr_remove(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -7383,8 +7388,6 @@ int stmmac_dvr_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
-
- return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 5f177ea80725..21aaa2730ac8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -45,8 +45,8 @@
#define MII_XGMAC_PA_SHIFT 16
#define MII_XGMAC_DA_SHIFT 21
-static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
- int phyreg, u32 *hw_addr)
+static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
+ int devad, int phyreg, u32 *hw_addr)
{
u32 tmp;
@@ -56,19 +56,14 @@ static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
*hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff);
- *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT;
- return 0;
+ *hw_addr |= devad << MII_XGMAC_DA_SHIFT;
}
-static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
- int phyreg, u32 *hw_addr)
+static void stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
{
u32 tmp;
- /* HW does not support C22 addr >= 4 */
- if (phyaddr > MII_XGMAC_MAX_C22ADDR)
- return -ENODEV;
-
/* Set port as Clause 22 */
tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
tmp &= ~MII_XGMAC_C22P_MASK;
@@ -76,16 +71,14 @@ static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
*hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f);
- return 0;
}
-static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
+ u32 value)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp, addr, value = MII_XGMAC_BUSY;
+ u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -99,20 +92,6 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
goto err_disable_clks;
}
- if (phyreg & MII_ADDR_C45) {
- phyreg &= ~MII_ADDR_C45;
-
- ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
- if (ret)
- goto err_disable_clks;
- } else {
- ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
- if (ret)
- goto err_disable_clks;
-
- value |= MII_XGMAC_SADDR;
- }
-
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
value |= MII_XGMAC_READ;
@@ -144,14 +123,44 @@ err_disable_clks:
return ret;
}
-static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
- int phyreg, u16 phydata)
+static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
+ int phyreg)
{
struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_priv *priv;
+ u32 addr;
+
+ priv = netdev_priv(ndev);
+
+ /* HW does not support C22 addr >= 4 */
+ if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ return -ENODEV;
+
+ stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+
+ return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY);
+}
+
+static int stmmac_xgmac2_mdio_read_c45(struct mii_bus *bus, int phyaddr,
+ int devad, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv;
+ u32 addr;
+
+ priv = netdev_priv(ndev);
+
+ stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
+
+ return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY);
+}
+
+static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
+ u32 value, u16 phydata)
+{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 addr, tmp, value = MII_XGMAC_BUSY;
+ u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -165,20 +174,6 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
goto err_disable_clks;
}
- if (phyreg & MII_ADDR_C45) {
- phyreg &= ~MII_ADDR_C45;
-
- ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
- if (ret)
- goto err_disable_clks;
- } else {
- ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
- if (ret)
- goto err_disable_clks;
-
- value |= MII_XGMAC_SADDR;
- }
-
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
value |= phydata;
@@ -205,8 +200,63 @@ err_disable_clks:
return ret;
}
+static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv;
+ u32 addr;
+
+ priv = netdev_priv(ndev);
+
+ /* HW does not support C22 addr >= 4 */
+ if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ return -ENODEV;
+
+ stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+
+ return stmmac_xgmac2_mdio_write(priv, addr,
+ MII_XGMAC_BUSY | MII_XGMAC_SADDR, phydata);
+}
+
+static int stmmac_xgmac2_mdio_write_c45(struct mii_bus *bus, int phyaddr,
+ int devad, int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv;
+ u32 addr;
+
+ priv = netdev_priv(ndev);
+
+ stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
+
+ return stmmac_xgmac2_mdio_write(priv, addr, MII_XGMAC_BUSY,
+ phydata);
+}
+
+static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
+{
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 v;
+
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
+ return -EBUSY;
+
+ writel(data, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ return readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+}
+
/**
- * stmmac_mdio_read
+ * stmmac_mdio_read_c22
* @bus: points to the mii_bus structure
* @phyaddr: MII addr
* @phyreg: MII reg
@@ -215,15 +265,12 @@ err_disable_clks:
* accessing the PHY registers.
* Fortunately, it seems this has no drawback for the 7109 MAC.
*/
-static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
u32 value = MII_BUSY;
int data = 0;
- u32 v;
data = pm_runtime_resume_and_get(priv->device);
if (data < 0)
@@ -236,60 +283,94 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
& priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4) {
value |= MII_GMAC4_READ;
- if (phyreg & MII_ADDR_C45) {
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
- priv->hw->mii.reg_shift) &
- priv->hw->mii.reg_mask;
-
- data |= (phyreg & MII_REGADDR_C45_MASK) <<
- MII_GMAC4_REG_ADDR_SHIFT;
- }
}
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000)) {
- data = -EBUSY;
- goto err_disable_clks;
- }
+ data = stmmac_mdio_read(priv, data, value);
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
+ pm_runtime_put(priv->device);
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000)) {
- data = -EBUSY;
- goto err_disable_clks;
+ return data;
+}
+
+/**
+ * stmmac_mdio_read_c45
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr
+ * @devad: device address to read
+ * @phyreg: MII reg
+ * Description: it reads data from the MII register from within the phy device.
+ * For the 7111 GMAC, we must set the bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad,
+ int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 value = MII_BUSY;
+ int data = 0;
+
+ data = pm_runtime_get_sync(priv->device);
+ if (data < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return data;
}
- /* Read the data from the MII data register */
- data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= MII_GMAC4_READ;
+ value |= MII_GMAC4_C45E;
+ value &= ~priv->hw->mii.reg_mask;
+ value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+
+ data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+
+ data = stmmac_mdio_read(priv, data, value);
-err_disable_clks:
pm_runtime_put(priv->device);
return data;
}
+static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
+{
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 v;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(data, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ return readl_poll_timeout(priv->ioaddr + mii_address, v,
+ !(v & MII_BUSY), 100, 10000);
+}
+
/**
- * stmmac_mdio_write
+ * stmmac_mdio_write_c22
* @bus: points to the mii_bus structure
* @phyaddr: MII addr
* @phyreg: MII reg
* @phydata: phy data
* Description: it writes the data into the MII register from within the device.
*/
-static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
- u16 phydata)
+static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
int ret, data = phydata;
u32 value = MII_BUSY;
- u32 v;
ret = pm_runtime_resume_and_get(priv->device);
if (ret < 0)
@@ -301,38 +382,57 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4) {
+ if (priv->plat->has_gmac4)
value |= MII_GMAC4_WRITE;
- if (phyreg & MII_ADDR_C45) {
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
- priv->hw->mii.reg_shift) &
- priv->hw->mii.reg_mask;
-
- data |= (phyreg & MII_REGADDR_C45_MASK) <<
- MII_GMAC4_REG_ADDR_SHIFT;
- }
- } else {
+ else
value |= MII_WRITE;
- }
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000)) {
- ret = -EBUSY;
- goto err_disable_clks;
+ ret = stmmac_mdio_write(priv, data, value);
+
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+/**
+ * stmmac_mdio_write_c45
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr
+ * @devad: device address to write
+ * @phyreg: MII reg
+ * @phydata: phy data
+ * Description: it writes the data into the MII register from within the device.
+ */
+static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
+ int devad, int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret, data = phydata;
+ u32 value = MII_BUSY;
+
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
}
- /* Set the MII address register to write */
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- /* Wait until any existing MII operation is complete */
- ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000);
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+
+ value |= MII_GMAC4_WRITE;
+ value |= MII_GMAC4_C45E;
+ value &= ~priv->hw->mii.reg_mask;
+ value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+
+ data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+
+ ret = stmmac_mdio_write(priv, data, value);
-err_disable_clks:
pm_runtime_put(priv->device);
return ret;
@@ -453,12 +553,11 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->name = "stmmac";
- if (priv->plat->has_gmac4)
- new_bus->probe_capabilities = MDIOBUS_C22_C45;
-
if (priv->plat->has_xgmac) {
- new_bus->read = &stmmac_xgmac2_mdio_read;
- new_bus->write = &stmmac_xgmac2_mdio_write;
+ new_bus->read = &stmmac_xgmac2_mdio_read_c22;
+ new_bus->write = &stmmac_xgmac2_mdio_write_c22;
+ new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45;
+ new_bus->write_c45 = &stmmac_xgmac2_mdio_write_c45;
/* Right now only C22 phys are supported */
max_addr = MII_XGMAC_MAX_C22ADDR + 1;
@@ -468,8 +567,13 @@ int stmmac_mdio_register(struct net_device *ndev)
dev_err(dev, "Unsupported phy_addr (max=%d)\n",
MII_XGMAC_MAX_C22ADDR);
} else {
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
+ new_bus->read = &stmmac_mdio_read_c22;
+ new_bus->write = &stmmac_mdio_write_c22;
+ if (priv->plat->has_gmac4) {
+ new_bus->read_c45 = &stmmac_mdio_read_c45;
+ new_bus->write_c45 = &stmmac_mdio_write_c45;
+ }
+
max_addr = PHY_MAX_ADDR;
}
@@ -490,7 +594,7 @@ int stmmac_mdio_register(struct net_device *ndev)
/* Looks like we need a dummy read for XGMAC only and C45 PHYs */
if (priv->plat->has_xgmac)
- stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+ stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0);
/* If fixed-link is set, skip PHY scanning */
if (!fwnode)
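For context on the C22/C45 split: the device address that used to be packed into phyreg via MII_ADDR_C45 now reaches the bus through the dedicated read_c45/write_c45 callbacks. From a consumer's point of view, a sketch using the core mdio API (not part of this hunk):

    /* read the PMA/PMD (MMD 1) control register via the new C45 op */
    val = mdiobus_c45_read(bus, phyaddr, MDIO_MMD_PMAPMD, MDIO_CTRL1);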
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 0046a4ee6e64..067a40fe0a23 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -711,14 +711,15 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct plat_stmmacenet_data *plat = priv->plat;
- int ret = stmmac_dvr_remove(&pdev->dev);
+
+ stmmac_dvr_remove(&pdev->dev);
if (plat->exit)
plat->exit(pdev, plat->bsp_priv);
stmmac_remove_config_dt(pdev, plat);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 2cfb18cef1d4..9d55226479b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1107,6 +1107,25 @@ static int tc_setup_etf(struct stmmac_priv *priv,
return 0;
}
+static int tc_query_caps(struct stmmac_priv *priv,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!priv->dma_cap.estsel)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
@@ -1114,4 +1133,5 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.setup_cls = tc_setup_cls,
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
};
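The query_caps hook lets the taprio qdisc ask about hardware capabilities before attempting an offload. A sketch of the caller's side (simplified; in the tree this dispatch is wrapped by a qdisc offload helper rather than open-coded):

    struct tc_taprio_caps caps = {};
    struct tc_query_caps_base base = {
            .type = TC_SETUP_QDISC_TAPRIO,
            .caps = &caps,
    };
    int err;

    err = dev->netdev_ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
    /* on success, caps.gate_mask_per_txq tells taprio how gates map to queues */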
diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.c b/drivers/net/ethernet/sunplus/spl2sw_mdio.c
index 733ae1704269..c8ef17e34f3c 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_mdio.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.c
@@ -61,9 +61,6 @@ static int spl2sw_mii_read(struct mii_bus *bus, int addr, int regnum)
{
struct spl2sw_common *comm = bus->priv;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
return spl2sw_mdio_access(comm, SPL2SW_MDIO_READ_CMD, addr, regnum, 0);
}
@@ -72,9 +69,6 @@ static int spl2sw_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
struct spl2sw_common *comm = bus->priv;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = spl2sw_mdio_access(comm, SPL2SW_MDIO_WRITE_CMD, addr, regnum, val);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 6cda4b7c10cb..4e3861c47708 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1426,6 +1426,70 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
};
+static void am65_cpsw_disable_phy(struct phy *phy)
+{
+ phy_power_off(phy);
+ phy_exit(phy);
+}
+
+static int am65_cpsw_enable_phy(struct phy *phy)
+{
+ int ret;
+
+ ret = phy_init(phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(phy);
+ if (ret < 0) {
+ phy_exit(phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ struct phy *phy;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ phy = port->slave.serdes_phy;
+ if (phy)
+ am65_cpsw_disable_phy(phy);
+ }
+}
+
+static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
+ struct am65_cpsw_port *port)
+{
+ const char *name = "serdes-phy";
+ struct phy *phy;
+ int ret;
+
+ phy = devm_of_phy_get(dev, port_np, name);
+ if (PTR_ERR(phy) == -ENODEV)
+ return 0;
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ /* Serdes PHY exists. Store it. */
+ port->slave.serdes_phy = phy;
+
+ ret = am65_cpsw_enable_phy(phy);
+ if (ret < 0)
+ goto err_phy;
+
+ return 0;
+
+err_phy:
+ devm_phy_put(dev, phy);
+ return ret;
+}
+
static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -1883,11 +1947,6 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
int ret = PTR_ERR(cpts);
of_node_put(node);
- if (ret == -EOPNOTSUPP) {
- dev_info(dev, "cpts disabled\n");
- return 0;
- }
-
dev_err(dev, "cpts create err %d\n", ret);
return ret;
}
@@ -1969,6 +2028,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
goto of_node_put;
}
+ /* Initialize the Serdes PHY for the port */
+ ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
+ if (ret)
+ return ret;
+
port->slave.mac_only =
of_property_read_bool(port_np, "ti,mac-only");
@@ -2694,11 +2758,19 @@ static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
};
+static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
{ .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
+ { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2852,6 +2924,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
err_free_phylink:
am65_cpsw_nuss_phylink_cleanup(common);
+ am65_cpts_release(common->cpts);
err_of_clear:
of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
@@ -2880,6 +2953,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_nuss_phylink_cleanup(common);
+ am65_cpts_release(common->cpts);
+ am65_cpsw_disable_serdes_phy(common);
of_platform_device_destroy(common->mdio_dev, NULL);
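In the serdes init helper above, devm_of_phy_get() returns -ENODEV when the port node carries no "serdes-phy" phandle; that case is treated as "no serdes on this port" and succeeds, while any other error propagates. A condensed sketch of this optional-resource idiom, under the same assumptions:

struct phy *phy = devm_of_phy_get(dev, port_np, "serdes-phy");

if (PTR_ERR(phy) == -ENODEV)
	phy = NULL;		/* optional PHY simply absent */
else if (IS_ERR(phy))
	return PTR_ERR(phy);	/* genuine lookup failure */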
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index e5f1c44788c1..cad04662739c 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -32,6 +32,7 @@ struct am65_cpsw_slave_data {
struct device_node *phy_node;
phy_interface_t phy_if;
struct phy *ifphy;
+ struct phy *serdes_phy;
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index e162771893af..8dc2c3085dcf 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -585,6 +585,26 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
return am65_cpsw_set_taprio(ndev, type_data);
}
+static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct tc_query_caps_base *base = type_data;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
struct netlink_ext_ack *extack,
struct flow_cls_offload *cls,
@@ -765,6 +785,8 @@ int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
+ case TC_QUERY_CAPS:
+ return am65_cpsw_tc_query_caps(ndev, type_data);
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
case TC_SETUP_BLOCK:
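On the stack side, the taprio qdisc probes these capabilities before programming an offload. A rough sketch of what the query looks like from the caller's end (simplified; the real code goes through a qdisc offload helper, and an -EOPNOTSUPP return simply leaves the caps zeroed):

struct tc_taprio_caps caps = {};
struct tc_query_caps_base base = {
	.type = TC_SETUP_QDISC_TAPRIO,
	.caps = &caps,
};

if (dev->netdev_ops->ndo_setup_tc)
	dev->netdev_ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
/* caps.gate_mask_per_txq now reflects the driver's answer */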
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 9535396b28cd..16ee9c29cb35 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -176,6 +176,10 @@ struct am65_cpts {
u32 genf_enable;
u32 hw_ts_enable;
struct sk_buff_head txq;
+ bool pps_enabled;
+ bool pps_present;
+ u32 pps_hw_ts_idx;
+ u32 pps_genf_idx;
/* context save/restore */
u64 sr_cpts_ns;
u64 sr_ktime_ns;
@@ -319,8 +323,15 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
case AM65_CPTS_EV_HW:
pevent.index = am65_cpts_event_get_port(event) - 1;
pevent.timestamp = event->timestamp;
- pevent.type = PTP_CLOCK_EXTTS;
- dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
+ if (cpts->pps_enabled && pevent.index == cpts->pps_hw_ts_idx) {
+ pevent.type = PTP_CLOCK_PPSUSR;
+ pevent.pps_times.ts_real = ns_to_timespec64(pevent.timestamp);
+ } else {
+ pevent.type = PTP_CLOCK_EXTTS;
+ }
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_HW:%s p:%d t:%llu\n",
+ pevent.type == PTP_CLOCK_EXTTS ?
+ "extts" : "pps",
pevent.index, event->timestamp);
ptp_clock_event(cpts->ptp_clock, &pevent);
@@ -394,10 +405,13 @@ static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u32 pps_ctrl_val = 0, pps_ppm_hi = 0, pps_ppm_low = 0;
s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+ int pps_index = cpts->pps_genf_idx;
+ u64 adj_period, pps_adj_period;
+ u32 ctrl_val, ppm_hi, ppm_low;
+ unsigned long flags;
int neg_adj = 0;
- u64 adj_period;
- u32 val;
if (ppb < 0) {
neg_adj = 1;
@@ -417,17 +431,53 @@ static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
mutex_lock(&cpts->ptp_clk_lock);
- val = am65_cpts_read32(cpts, control);
+ ctrl_val = am65_cpts_read32(cpts, control);
if (neg_adj)
- val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
+ ctrl_val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
else
- val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
- am65_cpts_write32(cpts, val, control);
+ ctrl_val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
+
+ ppm_hi = upper_32_bits(adj_period) & 0x3FF;
+ ppm_low = lower_32_bits(adj_period);
+
+ if (cpts->pps_enabled) {
+ pps_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control);
+ if (neg_adj)
+ pps_ctrl_val &= ~BIT(1);
+ else
+ pps_ctrl_val |= BIT(1);
+
+ /* GenF PPM corrects using the cpts refclk tick, which is
+ * (cpts->ts_add_val + 1) ns, so the GenF length PPM adj period
+ * needs to be corrected accordingly.
+ */
+ pps_adj_period = adj_period * (cpts->ts_add_val + 1);
+ pps_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF;
+ pps_ppm_low = lower_32_bits(pps_adj_period);
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+
+ /* All writes below must be done extremely fast:
+ * - a delay between the PPM dir and PPM value changes can cause an
+ * error due to the old PPM correction being applied in the wrong
+ * direction
+ * - a delay between the CPTS-clock PPM cfg and the GenF PPM cfg can
+ * cause an error due to the CPTS-clock PPM running with the new cfg
+ * while the GenF PPM cfg briefly still uses the old one
+ */
+
+ am65_cpts_write32(cpts, ctrl_val, control);
+ am65_cpts_write32(cpts, ppm_hi, ts_ppm_hi);
+ am65_cpts_write32(cpts, ppm_low, ts_ppm_low);
+
+ if (cpts->pps_enabled) {
+ am65_cpts_write32(cpts, pps_ctrl_val, genf[pps_index].control);
+ am65_cpts_write32(cpts, pps_ppm_hi, genf[pps_index].ppm_hi);
+ am65_cpts_write32(cpts, pps_ppm_low, genf[pps_index].ppm_low);
+ }
- val = upper_32_bits(adj_period) & 0x3FF;
- am65_cpts_write32(cpts, val, ts_ppm_hi);
- val = lower_32_bits(adj_period);
- am65_cpts_write32(cpts, val, ts_ppm_low);
+ /* All GenF/EstF can be updated here the same way */
+ spin_unlock_irqrestore(&cpts->lock, flags);
mutex_unlock(&cpts->ptp_clk_lock);
@@ -507,7 +557,13 @@ static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
{
- if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
+ if (index >= cpts->ptp_info.n_ext_ts)
+ return -ENXIO;
+
+ if (cpts->pps_present && index == cpts->pps_hw_ts_idx)
+ return -EINVAL;
+
+ if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
return 0;
mutex_lock(&cpts->ptp_clk_lock);
@@ -591,6 +647,12 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
static int am65_cpts_perout_enable(struct am65_cpts *cpts,
struct ptp_perout_request *req, int on)
{
+ if (req->index >= cpts->ptp_info.n_per_out)
+ return -ENXIO;
+
+ if (cpts->pps_present && req->index == cpts->pps_genf_idx)
+ return -EINVAL;
+
if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
return 0;
@@ -604,6 +666,48 @@ static int am65_cpts_perout_enable(struct am65_cpts *cpts,
return 0;
}
+static int am65_cpts_pps_enable(struct am65_cpts *cpts, int on)
+{
+ int ret = 0;
+ struct timespec64 ts;
+ struct ptp_clock_request rq;
+ u64 ns;
+
+ if (!cpts->pps_present)
+ return -EINVAL;
+
+ if (cpts->pps_enabled == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+
+ if (on) {
+ am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
+
+ ns = am65_cpts_gettime(cpts, NULL);
+ ts = ns_to_timespec64(ns);
+ rq.perout.period.sec = 1;
+ rq.perout.period.nsec = 0;
+ rq.perout.start.sec = ts.tv_sec + 2;
+ rq.perout.start.nsec = 0;
+ rq.perout.index = cpts->pps_genf_idx;
+
+ am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
+ cpts->pps_enabled = true;
+ } else {
+ rq.perout.index = cpts->pps_genf_idx;
+ am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
+ am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
+ cpts->pps_enabled = false;
+ }
+
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: pps: %s\n",
+ __func__, on ? "enabled" : "disabled");
+ return ret;
+}
+
static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -614,6 +718,8 @@ static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
return am65_cpts_extts_enable(cpts, rq->extts.index, on);
case PTP_CLK_REQ_PEROUT:
return am65_cpts_perout_enable(cpts, &rq->perout, on);
+ case PTP_CLK_REQ_PPS:
+ return am65_cpts_pps_enable(cpts, on);
default:
break;
}
@@ -926,17 +1032,33 @@ static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
cpts->genf_num = prop[0];
+ if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) {
+ cpts->pps_present = true;
+
+ if (prop[0] > 7) {
+ dev_err(cpts->dev, "invalid HWx_TS_PUSH index: %u provided\n", prop[0]);
+ cpts->pps_present = false;
+ }
+ if (prop[1] > 1) {
+ dev_err(cpts->dev, "invalid GENFy index: %u provided\n", prop[1]);
+ cpts->pps_present = false;
+ }
+ if (cpts->pps_present) {
+ cpts->pps_hw_ts_idx = prop[0];
+ cpts->pps_genf_idx = prop[1];
+ }
+ }
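+ /* Illustrative DT usage (values are placeholders):
+ * ti,pps = <0 0>;
+ * cell 0 selects the HWx_TS_PUSH input (0..7), cell 1 the GENFy
+ * output (0..1) used for the 1 Hz pulse.
+ */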
+
return cpts_of_mux_clk_setup(cpts, node);
}
-static void am65_cpts_release(void *data)
+void am65_cpts_release(struct am65_cpts *cpts)
{
- struct am65_cpts *cpts = data;
-
ptp_clock_unregister(cpts->ptp_clock);
am65_cpts_disable(cpts);
clk_disable_unprepare(cpts->refclk);
}
+EXPORT_SYMBOL_GPL(am65_cpts_release);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node)
@@ -993,6 +1115,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
if (cpts->genf_num)
cpts->ptp_info.n_per_out = cpts->genf_num;
+ if (cpts->pps_present)
+ cpts->ptp_info.pps = 1;
am65_cpts_set_add_val(cpts);
@@ -1014,26 +1138,22 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
}
cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
- ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
- if (ret) {
- dev_err(dev, "failed to add ptpclk reset action %d", ret);
- return ERR_PTR(ret);
- }
-
ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
am65_cpts_interrupt,
IRQF_ONESHOT, dev_name(dev), cpts);
if (ret < 0) {
dev_err(cpts->dev, "error attaching irq %d\n", ret);
- return ERR_PTR(ret);
+ goto reset_ptpclk;
}
- dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+ dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u pps:%d\n",
am65_cpts_read32(cpts, idver),
- cpts->refclk_freq, cpts->ts_add_val);
+ cpts->refclk_freq, cpts->ts_add_val, cpts->pps_present);
return cpts;
+reset_ptpclk:
+ am65_cpts_release(cpts);
refclk_disable:
clk_disable_unprepare(cpts->refclk);
return ERR_PTR(ret);
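With pps_present set, ptp_info.pps is advertised and userspace can enable the signal through the PHC character device; the PTP_CLOCK_PPSUSR events raised in the FIFO handler then surface via the kernel PPS subsystem. A minimal userspace sketch (the device path is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	int fd = open("/dev/ptp0", O_RDWR);	/* placeholder PHC node */

	if (fd < 0 || ioctl(fd, PTP_ENABLE_PPS, 1))
		perror("PTP_ENABLE_PPS");
	return 0;
}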
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
index bd08f4b2edd2..6e14df0be113 100644
--- a/drivers/net/ethernet/ti/am65-cpts.h
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -18,6 +18,7 @@ struct am65_cpts_estf_cfg {
};
#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
+void am65_cpts_release(struct am65_cpts *cpts);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
@@ -31,6 +32,10 @@ void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
void am65_cpts_suspend(struct am65_cpts *cpts);
void am65_cpts_resume(struct am65_cpts *cpts);
#else
+static inline void am65_cpts_release(struct am65_cpts *cpts)
+{
+}
+
static inline struct am65_cpts *am65_cpts_create(struct device *dev,
void __iomem *regs,
struct device_node *node)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 13c9c2d6b79b..37f0b62ec5d6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1458,6 +1458,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -1635,6 +1637,8 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw->slaves[0].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 83596ec0c7cb..35128dd45ffc 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1405,6 +1405,10 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
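The NETDEV_XDP_ACT_* bits advertised in these cpsw changes cover basic XDP (PASS/DROP/TX), XDP_REDIRECT, and serving as an ndo_xdp_xmit target. Assigning ndev->xdp_features before registration is enough; a driver that changes capabilities at runtime would instead use the helper, sketched here:

#include <net/xdp.h>

/* updates dev->xdp_features and notifies userspace listeners */
xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
			    NETDEV_XDP_ACT_REDIRECT |
			    NETDEV_XDP_ACT_NDO_XMIT);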
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 758295c898ac..e966dd47e2db 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -20,6 +20,7 @@
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "cpsw.h"
#include "cpts.h"
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 946b9753ccfb..23169e36a3d4 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -225,7 +225,7 @@ static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
return test_bit(MDIO_PIN, &reg);
}
-static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
{
int ret;
@@ -233,7 +233,7 @@ static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
if (ret < 0)
return ret;
- ret = mdiobb_read(bus, phy, reg);
+ ret = mdiobb_read_c22(bus, phy, reg);
pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
@@ -241,8 +241,8 @@ static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
return ret;
}
-static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
- u16 val)
+static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg,
+ u16 val)
{
int ret;
@@ -250,7 +250,41 @@ static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
if (ret < 0)
return ret;
- ret = mdiobb_write(bus, phy, reg, val);
+ ret = mdiobb_write_c22(bus, phy, reg, val);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad,
+ int reg)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_read_c45(bus, phy, devad, reg);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad,
+ int reg, u16 val)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_write_c45(bus, phy, devad, reg, val);
pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
@@ -573,8 +607,10 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->name = dev_name(dev);
if (data->manual_mode) {
- data->bus->read = davinci_mdiobb_read;
- data->bus->write = davinci_mdiobb_write;
+ data->bus->read = davinci_mdiobb_read_c22;
+ data->bus->write = davinci_mdiobb_write_c22;
+ data->bus->read_c45 = davinci_mdiobb_read_c45;
+ data->bus->write_c45 = davinci_mdiobb_write_c45;
data->bus->reset = davinci_mdiobb_reset;
dev_info(dev, "Configuring MDIO in manual mode\n");
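With both accessor pairs registered, phylib reaches Clause 45 MMD registers through the dedicated helpers rather than by encoding MII_ADDR_C45 into regnum. A hedged usage fragment (the address and register choices are placeholders):

#include <linux/mdio.h>

/* read then write back a C45 register: PMA/PMD MMD, CTRL1 */
int val = mdiobus_c45_read(bus, phy_addr, MDIO_MMD_PMAPMD, MDIO_CTRL1);

if (val >= 0)
	mdiobus_c45_write(bus, phy_addr, MDIO_MMD_PMAPMD, MDIO_CTRL1, val);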
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 86310588c6c1..c9d88673d306 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
config LIBWX
tristate
+ select PAGE_POOL
help
Common library for Wangxun(R) Ethernet drivers.
@@ -25,6 +26,7 @@ config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
select LIBWX
+ select PHYLIB
help
This driver supports Wangxun(R) GbE PCI Express family of
adapters.
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 1ed5e23af944..42ccd6e4052e 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
new file mode 100644
index 000000000000..93cb6f2294e7
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include "wx_type.h"
+#include "wx_ethtool.h"
+
+void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ strscpy(info->driver, wx->driver_name, sizeof(info->driver));
+ strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
+}
+EXPORT_SYMBOL(wx_get_drvinfo);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
new file mode 100644
index 000000000000..e85538c69454
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_ETHTOOL_H_
+#define _WX_ETHTOOL_H_
+
+void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
+#endif /* _WX_ETHTOOL_H_ */
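A consumer driver hooks the shared helper into its ethtool_ops; a minimal sketch (the ops name is illustrative, not from this patch):

#include "wx_ethtool.h"

static const struct ethtool_ops ngbe_ethtool_ops = {
	.get_drvinfo = wx_get_drvinfo,
};

/* at probe time: netdev->ethtool_ops = &ngbe_ethtool_ops; */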
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index c57dc3238b3f..7db57f934a91 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -2,59 +2,100 @@
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "wx_type.h"
+#include "wx_lib.h"
#include "wx_hw.h"
-static void wx_intr_disable(struct wx_hw *wxhw, u64 qmask)
+static void wx_intr_disable(struct wx *wx, u64 qmask)
{
u32 mask;
- mask = (qmask & 0xFFFFFFFF);
+ mask = (qmask & U32_MAX);
if (mask)
- wr32(wxhw, WX_PX_IMS(0), mask);
+ wr32(wx, WX_PX_IMS(0), mask);
- if (wxhw->mac.type == wx_mac_sp) {
+ if (wx->mac.type == wx_mac_sp) {
mask = (qmask >> 32);
if (mask)
- wr32(wxhw, WX_PX_IMS(1), mask);
+ wr32(wx, WX_PX_IMS(1), mask);
}
}
+void wx_intr_enable(struct wx *wx, u64 qmask)
+{
+ u32 mask;
+
+ mask = (qmask & U32_MAX);
+ if (mask)
+ wr32(wx, WX_PX_IMC(0), mask);
+ if (wx->mac.type == wx_mac_sp) {
+ mask = (qmask >> 32);
+ if (mask)
+ wr32(wx, WX_PX_IMC(1), mask);
+ }
+}
+EXPORT_SYMBOL(wx_intr_enable);
+
+/**
+ * wx_irq_disable - Mask off interrupt generation on the NIC
+ * @wx: board private structure
+ **/
+void wx_irq_disable(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ wr32(wx, WX_PX_MISC_IEN, 0);
+ wx_intr_disable(wx, WX_INTR_ALL);
+
+ if (pdev->msix_enabled) {
+ int vector;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++)
+ synchronize_irq(wx->msix_entries[vector].vector);
+
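+ /* after the loop, vector == num_q_vectors: flush the one extra
+ * non-queue vector as well (presumably the misc interrupt)
+ */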
+ synchronize_irq(wx->msix_entries[vector].vector);
+ } else {
+ synchronize_irq(pdev->irq);
+ }
+}
+EXPORT_SYMBOL(wx_irq_disable);
+
/* cmd_addr is used for some special commands:
* 1. as the sector address when implementing the erase sector command
* 2. as the flash address when implementing the read/write flash commands
*/
-static int wx_fmgr_cmd_op(struct wx_hw *wxhw, u32 cmd, u32 cmd_addr)
+static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr)
{
u32 cmd_val = 0, val = 0;
cmd_val = WX_SPI_CMD_CMD(cmd) |
WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) |
cmd_addr;
- wr32(wxhw, WX_SPI_CMD, cmd_val);
+ wr32(wx, WX_SPI_CMD, cmd_val);
return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000,
- false, wxhw, WX_SPI_STATUS);
+ false, wx, WX_SPI_STATUS);
}
-static int wx_flash_read_dword(struct wx_hw *wxhw, u32 addr, u32 *data)
+static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data)
{
int ret = 0;
- ret = wx_fmgr_cmd_op(wxhw, WX_SPI_CMD_READ_DWORD, addr);
+ ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr);
if (ret < 0)
return ret;
- *data = rd32(wxhw, WX_SPI_DATA);
+ *data = rd32(wx, WX_SPI_DATA);
return ret;
}
-int wx_check_flash_load(struct wx_hw *hw, u32 check_bit)
+int wx_check_flash_load(struct wx *hw, u32 check_bit)
{
u32 reg = 0;
int err = 0;
@@ -73,29 +114,25 @@ int wx_check_flash_load(struct wx_hw *hw, u32 check_bit)
}
EXPORT_SYMBOL(wx_check_flash_load);
-void wx_control_hw(struct wx_hw *wxhw, bool drv)
+void wx_control_hw(struct wx *wx, bool drv)
{
- if (drv) {
- /* Let firmware know the driver has taken over */
- wr32m(wxhw, WX_CFG_PORT_CTL,
- WX_CFG_PORT_CTL_DRV_LOAD, WX_CFG_PORT_CTL_DRV_LOAD);
- } else {
- /* Let firmware take over control of hw */
- wr32m(wxhw, WX_CFG_PORT_CTL,
- WX_CFG_PORT_CTL_DRV_LOAD, 0);
- }
+ /* True : Let firmware know the driver has taken over
+ * False : Let firmware take over control of hw
+ */
+ wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD,
+ drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0);
}
EXPORT_SYMBOL(wx_control_hw);
/**
* wx_mng_present - returns 0 when management capability is present
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*/
-int wx_mng_present(struct wx_hw *wxhw)
+int wx_mng_present(struct wx *wx)
{
u32 fwsm;
- fwsm = rd32(wxhw, WX_MIS_ST);
+ fwsm = rd32(wx, WX_MIS_ST);
if (fwsm & WX_MIS_ST_MNG_INIT_DN)
return 0;
else
@@ -108,40 +145,40 @@ static DEFINE_MUTEX(wx_sw_sync_lock);
/**
* wx_release_sw_sync - Release SW semaphore
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
* Releases the SW semaphore for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-static void wx_release_sw_sync(struct wx_hw *wxhw, u32 mask)
+static void wx_release_sw_sync(struct wx *wx, u32 mask)
{
mutex_lock(&wx_sw_sync_lock);
- wr32m(wxhw, WX_MNG_SWFW_SYNC, mask, 0);
+ wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0);
mutex_unlock(&wx_sw_sync_lock);
}
/**
* wx_acquire_sw_sync - Acquire SW semaphore
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
*
* Acquires the SW semaphore for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask)
+static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
{
u32 sem = 0;
int ret = 0;
mutex_lock(&wx_sw_sync_lock);
ret = read_poll_timeout(rd32, sem, !(sem & mask),
- 5000, 2000000, false, wxhw, WX_MNG_SWFW_SYNC);
+ 5000, 2000000, false, wx, WX_MNG_SWFW_SYNC);
if (!ret) {
sem |= mask;
- wr32(wxhw, WX_MNG_SWFW_SYNC, sem);
+ wr32(wx, WX_MNG_SWFW_SYNC, sem);
} else {
- wx_err(wxhw, "SW Semaphore not granted: 0x%x.\n", sem);
+ wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem);
}
mutex_unlock(&wx_sw_sync_lock);
@@ -150,7 +187,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask)
/**
* wx_host_interface_command - Issue command to manageability block
- * @wxhw: pointer to the HW structure
+ * @wx: pointer to the HW structure
* @buffer: contains the command to write and where the return status will
* be placed
* @length: length of buffer, must be multiple of 4 bytes
@@ -162,7 +199,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask)
* So we will leave this up to the caller to read back the data
* in these cases.
**/
-int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data)
{
u32 hdr_size = sizeof(struct wx_hic_hdr);
@@ -172,17 +209,17 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
u16 buf_len;
if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
- wx_err(wxhw, "Buffer length failure buffersize=%d.\n", length);
+ wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
return -EINVAL;
}
- status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB);
+ status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
if (status != 0)
return status;
/* Calculate length in DWORDs. We must be DWORD aligned */
if ((length % (sizeof(u32))) != 0) {
- wx_err(wxhw, "Buffer length failure, not aligned to dword");
+ wx_err(wx, "Buffer length failure, not aligned to dword");
status = -EINVAL;
goto rel_out;
}
@@ -193,38 +230,38 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
* into the ram area.
*/
for (i = 0; i < dword_len; i++) {
- wr32a(wxhw, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+ wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
/* write flush */
- buf[i] = rd32a(wxhw, WX_MNG_MBOX, i);
+ buf[i] = rd32a(wx, WX_MNG_MBOX, i);
}
/* Setting this bit tells the ARC that a new command is pending. */
- wr32m(wxhw, WX_MNG_MBOX_CTL,
+ wr32m(wx, WX_MNG_MBOX_CTL,
WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY);
status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
- timeout * 1000, false, wxhw, WX_MNG_MBOX_CTL);
+ timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
/* Check command completion */
if (status) {
- wx_dbg(wxhw, "Command has failed with no status valid.\n");
+ wx_dbg(wx, "Command has failed with no status valid.\n");
- buf[0] = rd32(wxhw, WX_MNG_MBOX);
+ buf[0] = rd32(wx, WX_MNG_MBOX);
if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
status = -EINVAL;
goto rel_out;
}
if ((buf[0] & 0xff0000) >> 16 == 0x80) {
- wx_dbg(wxhw, "It's unknown cmd.\n");
+ wx_dbg(wx, "It's unknown cmd.\n");
status = -EINVAL;
goto rel_out;
}
- wx_dbg(wxhw, "write value:\n");
+ wx_dbg(wx, "write value:\n");
for (i = 0; i < dword_len; i++)
- wx_dbg(wxhw, "%x ", buffer[i]);
- wx_dbg(wxhw, "read value:\n");
+ wx_dbg(wx, "%x ", buffer[i]);
+ wx_dbg(wx, "read value:\n");
for (i = 0; i < dword_len; i++)
- wx_dbg(wxhw, "%x ", buf[i]);
+ wx_dbg(wx, "%x ", buf[i]);
}
if (!return_data)
@@ -235,7 +272,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
- buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi);
+ buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
le32_to_cpus(&buffer[bi]);
}
@@ -245,7 +282,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
goto rel_out;
if (length < buf_len + hdr_size) {
- wx_err(wxhw, "Buffer not large enough for reply message.\n");
+ wx_err(wx, "Buffer not large enough for reply message.\n");
status = -EFAULT;
goto rel_out;
}
@@ -255,12 +292,12 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
/* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
- buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi);
+ buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
le32_to_cpus(&buffer[bi]);
}
rel_out:
- wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB);
+ wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
return status;
}
EXPORT_SYMBOL(wx_host_interface_command);
@@ -268,13 +305,13 @@ EXPORT_SYMBOL(wx_host_interface_command);
/**
* wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
* assuming that the semaphore is already obtained.
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data)
+static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
{
struct wx_hic_read_shadow_ram buffer;
int status;
@@ -289,33 +326,33 @@ static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data)
/* one word */
buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
- status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer),
+ status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
WX_HI_COMMAND_TIMEOUT, false);
if (status != 0)
return status;
- *data = (u16)rd32a(wxhw, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
return status;
}
/**
* wx_read_ee_hostif - Read EEPROM word using a host interface cmd
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data)
+int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data)
{
int status = 0;
- status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
if (status == 0) {
- status = wx_read_ee_hostif_data(wxhw, offset, data);
- wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ status = wx_read_ee_hostif_data(wx, offset, data);
+ wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
}
return status;
@@ -324,14 +361,14 @@ EXPORT_SYMBOL(wx_read_ee_hostif);
/**
* wx_read_ee_hostif_buffer- Read EEPROM word(s) using hostif
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @words: number of words
* @data: word(s) read from the EEPROM
*
* Reads a 16 bit word(s) from the EEPROM using the hostif.
**/
-int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
+int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data)
{
struct wx_hic_read_shadow_ram buffer;
@@ -342,7 +379,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
u32 i;
/* Take semaphore for the entire operation. */
- status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
if (status != 0)
return status;
@@ -361,20 +398,20 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
- status = wx_host_interface_command(wxhw, (u32 *)&buffer,
+ status = wx_host_interface_command(wx, (u32 *)&buffer,
sizeof(buffer),
WX_HI_COMMAND_TIMEOUT,
false);
if (status != 0) {
- wx_err(wxhw, "Host interface command failed\n");
+ wx_err(wx, "Host interface command failed\n");
goto out;
}
for (i = 0; i < words_to_read; i++) {
u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
- value = rd32(wxhw, reg);
+ value = rd32(wx, reg);
data[current_word] = (u16)(value & 0xffff);
current_word++;
i++;
@@ -388,7 +425,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
}
out:
- wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);
@@ -416,12 +453,12 @@ static u8 wx_calculate_checksum(u8 *buffer, u32 length)
/**
* wx_reset_hostif - send reset cmd to fw
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Sends reset cmd to firmware through the manageability
* block.
**/
-int wx_reset_hostif(struct wx_hw *wxhw)
+int wx_reset_hostif(struct wx *wx)
{
struct wx_hic_reset reset_cmd;
int ret_val = 0;
@@ -430,15 +467,15 @@ int wx_reset_hostif(struct wx_hw *wxhw)
reset_cmd.hdr.cmd = FW_RESET_CMD;
reset_cmd.hdr.buf_len = FW_RESET_LEN;
reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- reset_cmd.lan_id = wxhw->bus.func;
- reset_cmd.reset_type = (u16)wxhw->reset_type;
+ reset_cmd.lan_id = wx->bus.func;
+ reset_cmd.reset_type = (u16)wx->reset_type;
reset_cmd.hdr.checksum = 0;
reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd,
(FW_CEM_HDR_LEN +
reset_cmd.hdr.buf_len));
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = wx_host_interface_command(wxhw, (u32 *)&reset_cmd,
+ ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd,
sizeof(reset_cmd),
WX_HI_COMMAND_TIMEOUT,
true);
@@ -460,14 +497,14 @@ EXPORT_SYMBOL(wx_reset_hostif);
/**
* wx_init_eeprom_params - Initialize EEPROM params
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Initializes the EEPROM parameters wx_eeprom_info within the
* wx_hw struct in order to set up EEPROM access.
**/
-void wx_init_eeprom_params(struct wx_hw *wxhw)
+void wx_init_eeprom_params(struct wx *wx)
{
- struct wx_eeprom_info *eeprom = &wxhw->eeprom;
+ struct wx_eeprom_info *eeprom = &wx->eeprom;
u16 eeprom_size;
u16 data = 0x80;
@@ -475,21 +512,21 @@ void wx_init_eeprom_params(struct wx_hw *wxhw)
eeprom->semaphore_delay = 10;
eeprom->type = wx_eeprom_none;
- if (!(rd32(wxhw, WX_SPI_STATUS) &
+ if (!(rd32(wx, WX_SPI_STATUS) &
WX_SPI_STATUS_FLASH_BYPASS)) {
eeprom->type = wx_flash;
eeprom_size = 4096;
eeprom->word_size = eeprom_size >> 1;
- wx_dbg(wxhw, "Eeprom params: type = %d, size = %d\n",
+ wx_dbg(wx, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
}
}
- if (wxhw->mac.type == wx_mac_sp) {
- if (wx_read_ee_hostif(wxhw, WX_SW_REGION_PTR, &data)) {
- wx_err(wxhw, "NVM Read Error\n");
+ if (wx->mac.type == wx_mac_sp) {
+ if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
+ wx_err(wx, "NVM Read Error\n");
return;
}
data = data >> 1;
@@ -501,22 +538,22 @@ EXPORT_SYMBOL(wx_init_eeprom_params);
/**
* wx_get_mac_addr - Generic get MAC address
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @mac_addr: Adapter MAC address
*
* Reads the adapter's MAC address from first Receive Address Register (RAR0)
* A reset of the adapter must be performed prior to calling this function
* in order for the MAC address to have been loaded from the EEPROM into RAR0
**/
-void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr)
+void wx_get_mac_addr(struct wx *wx, u8 *mac_addr)
{
u32 rar_high;
u32 rar_low;
u16 i;
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, 0);
- rar_high = rd32(wxhw, WX_PSR_MAC_SWC_AD_H);
- rar_low = rd32(wxhw, WX_PSR_MAC_SWC_AD_L);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, 0);
+ rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H);
+ rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L);
for (i = 0; i < 2; i++)
mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);
@@ -528,7 +565,7 @@ EXPORT_SYMBOL(wx_get_mac_addr);
/**
* wx_set_rar - Set Rx address register
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @index: Receive address register to write
* @addr: Address to put into receive address register
* @pools: VMDq "set" or "pool" index
@@ -536,25 +573,25 @@ EXPORT_SYMBOL(wx_get_mac_addr);
*
* Puts an ethernet address into a receive address register.
**/
-int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools,
- u32 enable_addr)
+static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
+ u32 enable_addr)
{
- u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_entries = wx->mac.num_rar_entries;
u32 rar_low, rar_high;
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
- wx_err(wxhw, "RAR index %d is out of range.\n", index);
+ wx_err(wx, "RAR index %d is out of range.\n", index);
return -EINVAL;
}
/* select the MAC address */
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, index);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, index);
/* setup VMDq pool mapping */
- wr32(wxhw, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- if (wxhw->mac.type == wx_mac_sp)
- wr32(wxhw, WX_PSR_MAC_SWC_VM_H, pools >> 32);
+ wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
+ if (wx->mac.type == wx_mac_sp)
+ wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
/* HW expects these in little endian so we reverse the byte
* order from network order (big endian) to little endian
@@ -572,31 +609,30 @@ int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools,
if (enable_addr != 0)
rar_high |= WX_PSR_MAC_SWC_AD_H_AV;
- wr32(wxhw, WX_PSR_MAC_SWC_AD_L, rar_low);
- wr32m(wxhw, WX_PSR_MAC_SWC_AD_H,
- (WX_PSR_MAC_SWC_AD_H_AD(~0) |
- WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
+ wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low);
+ wr32m(wx, WX_PSR_MAC_SWC_AD_H,
+ (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
+ WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
WX_PSR_MAC_SWC_AD_H_AV),
rar_high);
return 0;
}
-EXPORT_SYMBOL(wx_set_rar);
/**
* wx_clear_rar - Remove Rx address register
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @index: Receive address register to write
*
* Clears an ethernet address from a receive address register.
**/
-int wx_clear_rar(struct wx_hw *wxhw, u32 index)
+static int wx_clear_rar(struct wx *wx, u32 index)
{
- u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_entries = wx->mac.num_rar_entries;
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
- wx_err(wxhw, "RAR index %d is out of range.\n", index);
+ wx_err(wx, "RAR index %d is out of range.\n", index);
return -EINVAL;
}
@@ -604,78 +640,77 @@ int wx_clear_rar(struct wx_hw *wxhw, u32 index)
* so save everything except the lower 16 bits that hold part
* of the address and the address valid bit.
*/
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, index);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, index);
- wr32(wxhw, WX_PSR_MAC_SWC_VM_L, 0);
- wr32(wxhw, WX_PSR_MAC_SWC_VM_H, 0);
+ wr32(wx, WX_PSR_MAC_SWC_VM_L, 0);
+ wr32(wx, WX_PSR_MAC_SWC_VM_H, 0);
- wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0);
- wr32m(wxhw, WX_PSR_MAC_SWC_AD_H,
- (WX_PSR_MAC_SWC_AD_H_AD(~0) |
- WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
+ wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
+ wr32m(wx, WX_PSR_MAC_SWC_AD_H,
+ (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
+ WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
WX_PSR_MAC_SWC_AD_H_AV),
0);
return 0;
}
-EXPORT_SYMBOL(wx_clear_rar);
/**
* wx_clear_vmdq - Disassociate a VMDq pool index from a rx address
- * @wxhw: pointer to hardware struct
+ * @wx: pointer to hardware struct
* @rar: receive address register index to disassociate
* @vmdq: VMDq pool index to remove from the rar
**/
-static int wx_clear_vmdq(struct wx_hw *wxhw, u32 rar, u32 __maybe_unused vmdq)
+static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)
{
- u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_entries = wx->mac.num_rar_entries;
u32 mpsar_lo, mpsar_hi;
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
- wx_err(wxhw, "RAR index %d is out of range.\n", rar);
+ wx_err(wx, "RAR index %d is out of range.\n", rar);
return -EINVAL;
}
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, rar);
- mpsar_lo = rd32(wxhw, WX_PSR_MAC_SWC_VM_L);
- mpsar_hi = rd32(wxhw, WX_PSR_MAC_SWC_VM_H);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, rar);
+ mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L);
+ mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H);
if (!mpsar_lo && !mpsar_hi)
return 0;
/* was that the last pool using this rar? */
if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- wx_clear_rar(wxhw, rar);
+ wx_clear_rar(wx, rar);
return 0;
}
/**
* wx_init_uta_tables - Initialize the Unicast Table Array
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
**/
-static void wx_init_uta_tables(struct wx_hw *wxhw)
+static void wx_init_uta_tables(struct wx *wx)
{
int i;
- wx_dbg(wxhw, " Clearing UTA\n");
+ wx_dbg(wx, " Clearing UTA\n");
for (i = 0; i < 128; i++)
- wr32(wxhw, WX_PSR_UC_TBL(i), 0);
+ wr32(wx, WX_PSR_UC_TBL(i), 0);
}
/**
* wx_init_rx_addrs - Initializes receive address filters.
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Places the MAC address in receive address register 0 and clears the rest
* of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
**/
-void wx_init_rx_addrs(struct wx_hw *wxhw)
+void wx_init_rx_addrs(struct wx *wx)
{
- u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_entries = wx->mac.num_rar_entries;
u32 psrctl;
int i;
@@ -683,97 +718,829 @@ void wx_init_rx_addrs(struct wx_hw *wxhw)
* to the permanent address.
* Otherwise, use the permanent address from the eeprom.
*/
- if (!is_valid_ether_addr(wxhw->mac.addr)) {
+ if (!is_valid_ether_addr(wx->mac.addr)) {
/* Get the MAC address from the RAR0 for later reference */
- wx_get_mac_addr(wxhw, wxhw->mac.addr);
- wx_dbg(wxhw, "Keeping Current RAR0 Addr = %pM\n", wxhw->mac.addr);
+ wx_get_mac_addr(wx, wx->mac.addr);
+ wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr);
} else {
/* Setup the receive address. */
- wx_dbg(wxhw, "Overriding MAC Address in RAR[0]\n");
- wx_dbg(wxhw, "New MAC Addr = %pM\n", wxhw->mac.addr);
+ wx_dbg(wx, "Overriding MAC Address in RAR[0]\n");
+ wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr);
- wx_set_rar(wxhw, 0, wxhw->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
+ wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- if (wxhw->mac.type == wx_mac_sp) {
+ if (wx->mac.type == wx_mac_sp) {
/* clear VMDq pool/queue selection for RAR 0 */
- wx_clear_vmdq(wxhw, 0, WX_CLEAR_VMDQ_ALL);
+ wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
}
}
/* Zero out the other receive addresses. */
- wx_dbg(wxhw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+ wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1);
for (i = 1; i < rar_entries; i++) {
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, i);
- wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0);
- wr32(wxhw, WX_PSR_MAC_SWC_AD_H, 0);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, i);
+ wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
+ wr32(wx, WX_PSR_MAC_SWC_AD_H, 0);
}
/* Clear the MTA */
- wxhw->addr_ctrl.mta_in_use = 0;
- psrctl = rd32(wxhw, WX_PSR_CTL);
+ wx->addr_ctrl.mta_in_use = 0;
+ psrctl = rd32(wx, WX_PSR_CTL);
psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
- psrctl |= wxhw->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
- wr32(wxhw, WX_PSR_CTL, psrctl);
- wx_dbg(wxhw, " Clearing MTA\n");
- for (i = 0; i < wxhw->mac.mcft_size; i++)
- wr32(wxhw, WX_PSR_MC_TBL(i), 0);
+ psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
+ wr32(wx, WX_PSR_CTL, psrctl);
+ wx_dbg(wx, " Clearing MTA\n");
+ for (i = 0; i < wx->mac.mcft_size; i++)
+ wr32(wx, WX_PSR_MC_TBL(i), 0);
- wx_init_uta_tables(wxhw);
+ wx_init_uta_tables(wx);
}
EXPORT_SYMBOL(wx_init_rx_addrs);
-void wx_disable_rx(struct wx_hw *wxhw)
+static void wx_sync_mac_table(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ wx_set_rar(wx, i,
+ wx->mac_table[i].addr,
+ wx->mac_table[i].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+ } else {
+ wx_clear_rar(wx, i);
+ }
+ wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+/* this function overwrites the first RAR entry */
+void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
+{
+ memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
+ wx->mac_table[0].pools = 1ULL;
+ wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
+ wx_set_rar(wx, 0, wx->mac_table[0].addr,
+ wx->mac_table[0].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+}
+EXPORT_SYMBOL(wx_mac_set_default_filter);
+
+void wx_flush_sw_mac_table(struct wx *wx)
+{
+ u32 i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE))
+ continue;
+
+ wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
+ wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
+ memset(wx->mac_table[i].addr, 0, ETH_ALEN);
+ wx->mac_table[i].pools = 0;
+ }
+ wx_sync_mac_table(wx);
+}
+EXPORT_SYMBOL(wx_flush_sw_mac_table);
+
+static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+{
+ u32 i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
+ if (wx->mac_table[i].pools != (1ULL << pool)) {
+ memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
+ wx->mac_table[i].pools |= (1ULL << pool);
+ wx_sync_mac_table(wx);
+ return i;
+ }
+ }
+ }
+
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
+ continue;
+ wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
+ WX_MAC_STATE_IN_USE);
+ memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
+ wx->mac_table[i].pools |= (1ULL << pool);
+ wx_sync_mac_table(wx);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+{
+ u32 i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* search table for addr, if found, set to 0 and sync */
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (!ether_addr_equal(addr, wx->mac_table[i].addr))
+ continue;
+
+ wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
+ wx->mac_table[i].pools &= ~(1ULL << pool);
+ if (!wx->mac_table[i].pools) {
+ wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
+ memset(wx->mac_table[i].addr, 0, ETH_ALEN);
+ }
+ wx_sync_mac_table(wx);
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static int wx_available_rars(struct wx *wx)
+{
+ u32 i, count = 0;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state == 0)
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * wx_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ * @pool: index for mac table
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > wx_available_rars(wx))
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, netdev) {
+ wx_del_mac_filter(wx, ha->addr, pool);
+ wx_add_mac_filter(wx, ha->addr, pool);
+ count++;
+ }
+ }
+ return count;
+}
+
+/**
+ * wx_mta_vector - Determines bit-vector in multicast table to set
+ * @wx: pointer to private structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits from a multicast address that determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits of
+ * incoming rx multicast addresses to determine the bit-vector to check in
+ * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (wx->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ wx_err(wx, "MC filter type param set incorrectly\n");
+ break;
+ }
+
+ /* vector can only be 12 bits wide or the boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
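+/* Worked example (illustrative, not from the patch): with
+ * mc_filter_type == 0 and a multicast address ending in ...:4c:5d,
+ * mc_addr[4] = 0x4c and mc_addr[5] = 0x5d, so
+ * vector = (0x4c >> 4) | (0x5d << 4) = 0x004 | 0x5d0 = 0x5d4;
+ * wx_set_mta() below then sets bit 0x14 of MTA register 0x2e.
+ */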
+
+/**
+ * wx_set_mta - Set bit-vector in multicast table
+ * @wx: pointer to private structure
+ * @mc_addr: Multicast address
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+static void wx_set_mta(struct wx *wx, u8 *mc_addr)
+{
+ u32 vector, vector_bit, vector_reg;
+
+ wx->addr_ctrl.mta_in_use++;
+
+ vector = wx_mta_vector(wx, mc_addr);
+ wx_dbg(wx, " bit-vector = 0x%03X\n", vector);
+
+ /* The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[vector_value]. So we figure out what register the bit is
+ * in, read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the vector value and
+ * the bit within that register are determined by the lower 5 bits of
+ * the value.
+ */
+ vector_reg = (vector >> 5) & 0x7F;
+ vector_bit = vector & 0x1F;
+ wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ * wx_update_mc_addr_list - Updates MAC list of multicast addresses
+ * @wx: pointer to private structure
+ * @netdev: pointer to net device structure
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
+{
+ struct netdev_hw_addr *ha;
+ u32 i, psrctl;
+
+ /* Set the new number of MC addresses that we are being requested to
+ * use.
+ */
+ wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
+ wx->addr_ctrl.mta_in_use = 0;
+
+ /* Clear mta_shadow */
+ wx_dbg(wx, " Clearing MTA\n");
+ memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));
+
+ /* Update mta_shadow */
+ netdev_for_each_mc_addr(ha, netdev) {
+ wx_dbg(wx, " Adding the multicast addresses:\n");
+ wx_set_mta(wx, ha->addr);
+ }
+
+ /* Enable mta */
+ for (i = 0; i < wx->mac.mcft_size; i++)
+ wr32a(wx, WX_PSR_MC_TBL(0), i,
+ wx->mac.mta_shadow[i]);
+
+ if (wx->addr_ctrl.mta_in_use > 0) {
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
+ psrctl |= WX_PSR_CTL_MFE |
+ (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
+ wr32(wx, WX_PSR_CTL, psrctl);
+ }
+
+ wx_dbg(wx, "Update mc addr list Complete\n");
+}
+
+/**
+ * wx_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int wx_write_mc_addr_list(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return 0;
+
+ wx_update_mc_addr_list(wx, netdev);
+
+ return netdev_mc_count(netdev);
+}
+
+/**
+ * wx_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int wx_set_mac(struct net_device *netdev, void *p)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, addr);
+ if (retval)
+ return retval;
+
+ wx_del_mac_filter(wx, wx->mac.addr, 0);
+ eth_hw_addr_set(netdev, addr->sa_data);
+ memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
+
+ wx_mac_set_default_filter(wx, wx->mac.addr);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_mac);
+
+void wx_disable_rx(struct wx *wx)
{
u32 pfdtxgswc;
u32 rxctrl;
- rxctrl = rd32(wxhw, WX_RDB_PB_CTL);
+ rxctrl = rd32(wx, WX_RDB_PB_CTL);
if (rxctrl & WX_RDB_PB_CTL_RXEN) {
- pfdtxgswc = rd32(wxhw, WX_PSR_CTL);
+ pfdtxgswc = rd32(wx, WX_PSR_CTL);
if (pfdtxgswc & WX_PSR_CTL_SW_EN) {
pfdtxgswc &= ~WX_PSR_CTL_SW_EN;
- wr32(wxhw, WX_PSR_CTL, pfdtxgswc);
- wxhw->mac.set_lben = true;
+ wr32(wx, WX_PSR_CTL, pfdtxgswc);
+ wx->mac.set_lben = true;
} else {
- wxhw->mac.set_lben = false;
+ wx->mac.set_lben = false;
}
rxctrl &= ~WX_RDB_PB_CTL_RXEN;
- wr32(wxhw, WX_RDB_PB_CTL, rxctrl);
+ wr32(wx, WX_RDB_PB_CTL, rxctrl);
- if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
- ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
+ if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable mac receiver */
- wr32m(wxhw, WX_MAC_RX_CFG,
+ wr32m(wx, WX_MAC_RX_CFG,
WX_MAC_RX_CFG_RE, 0);
}
}
}
EXPORT_SYMBOL(wx_disable_rx);
+static void wx_enable_rx(struct wx *wx)
+{
+ u32 psrctl;
+
+ /* enable mac receiver */
+ wr32m(wx, WX_MAC_RX_CFG,
+ WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
+
+ wr32m(wx, WX_RDB_PB_CTL,
+ WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);
+
+ if (wx->mac.set_lben) {
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl |= WX_PSR_CTL_SW_EN;
+ wr32(wx, WX_PSR_CTL, psrctl);
+ wx->mac.set_lben = false;
+ }
+}
+
+/**
+ * wx_set_rxpba - Initialize Rx packet buffer
+ * @wx: pointer to private structure
+ **/
+static void wx_set_rxpba(struct wx *wx)
+{
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
+ wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = wx->mac.tx_pb_size;
+ txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
+ wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
+ wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
+}
+
+static void wx_configure_port(struct wx *wx)
+{
+ u32 value, i;
+
+ value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_D_VLAN |
+ WX_CFG_PORT_CTL_QINQ,
+ value);
+
+ wr32(wx, WX_CFG_TAG_TPID(0),
+ ETH_P_8021Q | ETH_P_8021AD << 16);
+ wx->tpid[0] = ETH_P_8021Q;
+ wx->tpid[1] = ETH_P_8021AD;
+ for (i = 1; i < 4; i++)
+ wr32(wx, WX_CFG_TAG_TPID(i),
+ ETH_P_8021Q | ETH_P_8021Q << 16);
+ for (i = 2; i < 8; i++)
+ wx->tpid[i] = ETH_P_8021Q;
+}
+
+/**
+ * wx_disable_sec_rx_path - Stops the receive data path
+ * @wx: pointer to private structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+static int wx_disable_sec_rx_path(struct wx *wx)
+{
+ u32 secrx;
+
+ wr32m(wx, WX_RSC_CTL,
+ WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);
+
+ return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
+ 1000, 40000, false, wx, WX_RSC_ST);
+}
+
+/**
+ * wx_enable_sec_rx_path - Enables the receive data path
+ * @wx: pointer to private structure
+ *
+ * Enables the receive data path.
+ **/
+static void wx_enable_sec_rx_path(struct wx *wx)
+{
+ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
+ WX_WRITE_FLUSH(wx);
+}
+
+void wx_set_rx_mode(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u32 fctrl, vmolr, vlnctrl;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = rd32(wx, WX_PSR_CTL);
+ fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
+ vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_MPE |
+ WX_PSR_VM_L2CTL_ROPE |
+ WX_PSR_VM_L2CTL_ROMPE);
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);
+
+ /* set all bits that we expect to always be set */
+ fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
+ vmolr |= WX_PSR_VM_L2CTL_BAM |
+ WX_PSR_VM_L2CTL_AUPE |
+ WX_PSR_VM_L2CTL_VACC;
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+
+ wx->addr_ctrl.user_set_promisc = false;
+ if (netdev->flags & IFF_PROMISC) {
+ wx->addr_ctrl.user_set_promisc = true;
+ fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
+ /* the PF does not want packets routed to the VF, so clear UPE */
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ fctrl |= WX_PSR_CTL_MPE;
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ }
+
+ if (netdev->features & NETIF_F_RXALL) {
+ vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ /* receive bad packets */
+ wr32m(wx, WX_RSC_CTL,
+ WX_RSC_CTL_SAVE_MAC_ERR,
+ WX_RSC_CTL_SAVE_MAC_ERR);
+ } else {
+ vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
+ }
+
+ /* Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = wx_write_uc_addr_list(netdev, 0);
+ if (count < 0) {
+ vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
+ vmolr |= WX_PSR_VM_L2CTL_UPE;
+ }
+
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = wx_write_mc_addr_list(netdev);
+ if (count < 0) {
+ vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ }
+
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ wr32(wx, WX_PSR_CTL, fctrl);
+ wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
+}
+EXPORT_SYMBOL(wx_set_rx_mode);
+
+static void wx_set_rx_buffer_len(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ u32 mhadd, max_frame;
+
+ max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+ mhadd = rd32(wx, WX_PSR_MAX_SZ);
+ if (max_frame != mhadd)
+ wr32(wx, WX_PSR_MAX_SZ, max_frame);
+}
+
+/* Disable the specified rx queue */
+void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u32 rxdctl;
+ int ret;
+
+ /* write value back with RRCFG.EN bit cleared */
+ wr32m(wx, WX_PX_RR_CFG(reg_idx),
+ WX_PX_RR_CFG_RR_EN, 0);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
+ 10, 100, true, wx, WX_PX_RR_CFG(reg_idx));
+
+ if (ret == -ETIMEDOUT) {
+ /* Just for information */
+ wx_err(wx,
+ "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
+ reg_idx);
+ }
+}
+EXPORT_SYMBOL(wx_disable_rx_queue);
+
+static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u32 rxdctl;
+ int ret;
+
+ ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
+ 1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));
+
+ if (ret == -ETIMEDOUT) {
+ /* Just for information */
+ wx_err(wx,
+ "RRCFG.EN on Rx queue %d not set within the polling period\n",
+ reg_idx);
+ }
+}
+
+static void wx_configure_srrctl(struct wx *wx,
+ struct wx_ring *rx_ring)
+{
+ u16 reg_idx = rx_ring->reg_idx;
+ u32 srrctl;
+
+ srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
+ WX_PX_RR_CFG_RR_BUF_SZ |
+ WX_PX_RR_CFG_SPLIT_MODE);
+ /* configure header buffer length, needed for RSC */
+ srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;
+
+ /* configure the packet buffer length */
+ srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;
+
+ wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
+}
+
+static void wx_configure_tx_ring(struct wx *wx,
+ struct wx_ring *ring)
+{
+ u32 txdctl = WX_PX_TR_CFG_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+ u64 tdba = ring->dma;
+ int ret;
+
+ /* disable queue to avoid issues while updating state */
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
+ WX_WRITE_FLUSH(wx);
+
+ wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_PX_TR_RP(reg_idx), 0);
+ wr32(wx, WX_PX_TR_WP(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);
+
+ if (ring->count < WX_MAX_TXD)
+ txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
+ txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;
+
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct wx_tx_buffer) * ring->count);
+
+ /* enable queue */
+ wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);
+
+ /* poll to verify queue is enabled */
+ ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
+ 1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
+ if (ret == -ETIMEDOUT)
+ wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
+}
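
Reviewer note: the TR_SIZE field written above encodes the ring length in
units of 128 descriptors (a count of WX_MAX_TXD is encoded as 0, hence the
guard). A minimal userspace sketch of the encoding, assuming a shift of 1
(hypothetical here; the real constant lives in wx_type.h):

    #include <stdio.h>

    #define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* assumed for this sketch */

    int main(void)
    {
        unsigned int count = 512; /* example ring size below WX_MAX_TXD */
        unsigned int field = (count / 128) << WX_PX_TR_CFG_TR_SIZE_SHIFT;

        /* 512 descriptors -> 4 units of 128 -> 0x8 after the shift */
        printf("TR_SIZE bits: 0x%x\n", field);
        return 0;
    }
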
+
+static void wx_configure_rx_ring(struct wx *wx,
+ struct wx_ring *ring)
+{
+ u16 reg_idx = ring->reg_idx;
+ union wx_rx_desc *rx_desc;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ wx_disable_rx_queue(wx, ring);
+
+ wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));
+
+ if (ring->count == WX_MAX_RXD)
+ rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
+ else
+ rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;
+
+ rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
+ wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_PX_RR_RP(reg_idx), 0);
+ wr32(wx, WX_PX_RR_WP(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);
+
+ wx_configure_srrctl(wx, ring);
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct wx_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = WX_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
+ /* enable receive descriptor ring */
+ wr32m(wx, WX_PX_RR_CFG(reg_idx),
+ WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);
+
+ wx_enable_rx_queue(wx, ring);
+ wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
+}
+
+/**
+ * wx_configure_tx - Configure Transmit Unit after Reset
+ * @wx: pointer to private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void wx_configure_tx(struct wx *wx)
+{
+ u32 i;
+
+ /* TDM_CTL.TE must be before Tx queues are enabled */
+ wr32m(wx, WX_TDM_CTL,
+ WX_TDM_CTL_TE, WX_TDM_CTL_TE);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_configure_tx_ring(wx, wx->tx_ring[i]);
+
+ wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);
+
+ if (wx->mac.type == wx_mac_em)
+ wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);
+
+ /* enable mac transmitter */
+ wr32m(wx, WX_MAC_TX_CFG,
+ WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
+}
+
+/**
+ * wx_configure_rx - Configure Receive Unit after Reset
+ * @wx: pointer to private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void wx_configure_rx(struct wx *wx)
+{
+ u32 psrtype, i;
+ int ret;
+
+ wx_disable_rx(wx);
+
+ psrtype = WX_RDB_PL_CFG_L4HDR |
+ WX_RDB_PL_CFG_L3HDR |
+ WX_RDB_PL_CFG_L2HDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR;
+ wr32(wx, WX_RDB_PL_CFG(0), psrtype);
+
+ /* enable hw crc stripping */
+ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
+
+ if (wx->mac.type == wx_mac_sp) {
+ u32 psrctl;
+
+ /* RSC Setup */
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
+ psrctl |= WX_PSR_CTL_RSC_DIS;
+ wr32(wx, WX_PSR_CTL, psrctl);
+ }
+
+ /* set_rx_buffer_len must be called before ring initialization */
+ wx_set_rx_buffer_len(wx);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_configure_rx_ring(wx, wx->rx_ring[i]);
+
+ /* Disable the security engine before enabling receives, so no traffic passes while the Rx path is reconfigured */
+ ret = wx_disable_sec_rx_path(wx);
+ if (ret < 0)
+ wx_err(wx, "The register status is abnormal, please check device.");
+
+ wx_enable_rx(wx);
+ wx_enable_sec_rx_path(wx);
+}
+
+static void wx_configure_isb(struct wx *wx)
+{
+ /* set ISB Address */
+ wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
+}
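
Like the ring base registers earlier in this file, the ISB base is a 64-bit
DMA handle split across a low and a high 32-bit register, with the high half
written only when the platform can produce addresses above 4 GiB. The split
in isolation (the address below is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define upper_32_bits(n) ((uint32_t)((n) >> 32))

    int main(void)
    {
        uint64_t isb_dma = 0x0000000123456000ULL; /* example DMA address */

        printf("ADDR_L: 0x%08x\n", (uint32_t)(isb_dma & 0xffffffffULL));
        printf("ADDR_H: 0x%08x\n", upper_32_bits(isb_dma));
        return 0;
    }
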
+
+void wx_configure(struct wx *wx)
+{
+ wx_set_rxpba(wx);
+ wx_configure_port(wx);
+
+ wx_set_rx_mode(wx->netdev);
+
+ wx_enable_sec_rx_path(wx);
+
+ wx_configure_tx(wx);
+ wx_configure_rx(wx);
+ wx_configure_isb(wx);
+}
+EXPORT_SYMBOL(wx_configure);
+
/**
* wx_disable_pcie_master - Disable PCI-express master access
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Disables PCI-Express master access and verifies there are no pending
* requests.
**/
-int wx_disable_pcie_master(struct wx_hw *wxhw)
+int wx_disable_pcie_master(struct wx *wx)
{
int status = 0;
u32 val;
/* Always set this bit to ensure any future transactions are blocked */
- pci_clear_master(wxhw->pdev);
+ pci_clear_master(wx->pdev);
/* Exit if master requests are blocked */
- if (!(rd32(wxhw, WX_PX_TRANSACTION_PENDING)))
+ if (!(rd32(wx, WX_PX_TRANSACTION_PENDING)))
return 0;
/* Poll for master request bit to clear */
status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT,
- false, wxhw, WX_PX_TRANSACTION_PENDING);
+ false, wx, WX_PX_TRANSACTION_PENDING);
if (status < 0)
- wx_err(wxhw, "PCIe transaction pending bit did not clear.\n");
+ wx_err(wx, "PCIe transaction pending bit did not clear.\n");
return status;
}
@@ -781,106 +1548,106 @@ EXPORT_SYMBOL(wx_disable_pcie_master);
/**
* wx_stop_adapter - Generic stop Tx/Rx units
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Sets the adapter_stopped flag within wx_hw struct. Clears interrupts,
* disables transmit and receive units. The adapter_stopped flag is used by
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
-int wx_stop_adapter(struct wx_hw *wxhw)
+int wx_stop_adapter(struct wx *wx)
{
u16 i;
/* Set the adapter_stopped flag so other driver functions stop touching
* the hardware
*/
- wxhw->adapter_stopped = true;
+ wx->adapter_stopped = true;
/* Disable the receive unit */
- wx_disable_rx(wxhw);
+ wx_disable_rx(wx);
/* Set interrupt mask to stop interrupts from being generated */
- wx_intr_disable(wxhw, WX_INTR_ALL);
+ wx_intr_disable(wx, WX_INTR_ALL);
/* Clear any pending interrupts, flush previous writes */
- wr32(wxhw, WX_PX_MISC_IC, 0xffffffff);
- wr32(wxhw, WX_BME_CTL, 0x3);
+ wr32(wx, WX_PX_MISC_IC, 0xffffffff);
+ wr32(wx, WX_BME_CTL, 0x3);
/* Disable the transmit unit. Each queue must be disabled. */
- for (i = 0; i < wxhw->mac.max_tx_queues; i++) {
- wr32m(wxhw, WX_PX_TR_CFG(i),
+ for (i = 0; i < wx->mac.max_tx_queues; i++) {
+ wr32m(wx, WX_PX_TR_CFG(i),
WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE,
WX_PX_TR_CFG_SWFLSH);
}
/* Disable the receive unit by stopping each queue */
- for (i = 0; i < wxhw->mac.max_rx_queues; i++) {
- wr32m(wxhw, WX_PX_RR_CFG(i),
+ for (i = 0; i < wx->mac.max_rx_queues; i++) {
+ wr32m(wx, WX_PX_RR_CFG(i),
WX_PX_RR_CFG_RR_EN, 0);
}
/* flush all queues disables */
- WX_WRITE_FLUSH(wxhw);
+ WX_WRITE_FLUSH(wx);
/* Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests
*/
- return wx_disable_pcie_master(wxhw);
+ return wx_disable_pcie_master(wx);
}
EXPORT_SYMBOL(wx_stop_adapter);
-void wx_reset_misc(struct wx_hw *wxhw)
+void wx_reset_misc(struct wx *wx)
{
int i;
/* receive packets that size > 2048 */
- wr32m(wxhw, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
/* clear counters on read */
- wr32m(wxhw, WX_MMC_CONTROL,
+ wr32m(wx, WX_MMC_CONTROL,
WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD);
- wr32m(wxhw, WX_MAC_RX_FLOW_CTRL,
+ wr32m(wx, WX_MAC_RX_FLOW_CTRL,
WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
- wr32(wxhw, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
- wr32m(wxhw, WX_MIS_RST_ST,
+ wr32m(wx, WX_MIS_RST_ST,
WX_MIS_RST_ST_RST_INIT, 0x1E00);
/* errata 4: initialize mng flex tbl and wakeup flex tbl */
- wr32(wxhw, WX_PSR_MNG_FLEX_SEL, 0);
+ wr32(wx, WX_PSR_MNG_FLEX_SEL, 0);
for (i = 0; i < 16; i++) {
- wr32(wxhw, WX_PSR_MNG_FLEX_DW_L(i), 0);
- wr32(wxhw, WX_PSR_MNG_FLEX_DW_H(i), 0);
- wr32(wxhw, WX_PSR_MNG_FLEX_MSK(i), 0);
+ wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0);
+ wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0);
+ wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0);
}
- wr32(wxhw, WX_PSR_LAN_FLEX_SEL, 0);
+ wr32(wx, WX_PSR_LAN_FLEX_SEL, 0);
for (i = 0; i < 16; i++) {
- wr32(wxhw, WX_PSR_LAN_FLEX_DW_L(i), 0);
- wr32(wxhw, WX_PSR_LAN_FLEX_DW_H(i), 0);
- wr32(wxhw, WX_PSR_LAN_FLEX_MSK(i), 0);
+ wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0);
+ wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0);
+ wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0);
}
/* set pause frame dst mac addr */
- wr32(wxhw, WX_RDB_PFCMACDAL, 0xC2000001);
- wr32(wxhw, WX_RDB_PFCMACDAH, 0x0180);
+ wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001);
+ wr32(wx, WX_RDB_PFCMACDAH, 0x0180);
}
EXPORT_SYMBOL(wx_reset_misc);
/**
* wx_get_pcie_msix_counts - Gets MSI-X vector count
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @msix_count: number of MSI interrupts that can be obtained
* @max_msix_count: number of MSI interrupts that mac need
*
* Read PCIe configuration space, and get the MSI-X vector count from
* the capabilities table.
**/
-int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count)
+int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
{
- struct pci_dev *pdev = wxhw->pdev;
+ struct pci_dev *pdev = wx->pdev;
struct device *dev = &pdev->dev;
int pos;
@@ -904,31 +1671,39 @@ int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_co
}
EXPORT_SYMBOL(wx_get_pcie_msix_counts);
-int wx_sw_init(struct wx_hw *wxhw)
+int wx_sw_init(struct wx *wx)
{
- struct pci_dev *pdev = wxhw->pdev;
+ struct pci_dev *pdev = wx->pdev;
u32 ssid = 0;
int err = 0;
- wxhw->vendor_id = pdev->vendor;
- wxhw->device_id = pdev->device;
- wxhw->revision_id = pdev->revision;
- wxhw->oem_svid = pdev->subsystem_vendor;
- wxhw->oem_ssid = pdev->subsystem_device;
- wxhw->bus.device = PCI_SLOT(pdev->devfn);
- wxhw->bus.func = PCI_FUNC(pdev->devfn);
-
- if (wxhw->oem_svid == PCI_VENDOR_ID_WANGXUN) {
- wxhw->subsystem_vendor_id = pdev->subsystem_vendor;
- wxhw->subsystem_device_id = pdev->subsystem_device;
+ wx->vendor_id = pdev->vendor;
+ wx->device_id = pdev->device;
+ wx->revision_id = pdev->revision;
+ wx->oem_svid = pdev->subsystem_vendor;
+ wx->oem_ssid = pdev->subsystem_device;
+ wx->bus.device = PCI_SLOT(pdev->devfn);
+ wx->bus.func = PCI_FUNC(pdev->devfn);
+
+ if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
+ wx->subsystem_vendor_id = pdev->subsystem_vendor;
+ wx->subsystem_device_id = pdev->subsystem_device;
} else {
- err = wx_flash_read_dword(wxhw, 0xfffdc, &ssid);
+ err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
if (!err)
- wxhw->subsystem_device_id = swab16((u16)ssid);
+ wx->subsystem_device_id = swab16((u16)ssid);
return err;
}
+ wx->mac_table = kcalloc(wx->mac.num_rar_entries,
+ sizeof(struct wx_mac_addr),
+ GFP_KERNEL);
+ if (!wx->mac_table) {
+ wx_err(wx, "mac_table allocation failed\n");
+ return -ENOMEM;
+ }
+
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index a0652f5e9939..44dfd6ea442a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -4,25 +4,31 @@
#ifndef _WX_HW_H_
#define _WX_HW_H_
-int wx_check_flash_load(struct wx_hw *hw, u32 check_bit);
-void wx_control_hw(struct wx_hw *wxhw, bool drv);
-int wx_mng_present(struct wx_hw *wxhw);
-int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
+void wx_intr_enable(struct wx *wx, u64 qmask);
+void wx_irq_disable(struct wx *wx);
+int wx_check_flash_load(struct wx *wx, u32 check_bit);
+void wx_control_hw(struct wx *wx, bool drv);
+int wx_mng_present(struct wx *wx);
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data);
-int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data);
-int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
+int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
+int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
-int wx_reset_hostif(struct wx_hw *wxhw);
-void wx_init_eeprom_params(struct wx_hw *wxhw);
-void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr);
-int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, u32 enable_addr);
-int wx_clear_rar(struct wx_hw *wxhw, u32 index);
-void wx_init_rx_addrs(struct wx_hw *wxhw);
-void wx_disable_rx(struct wx_hw *wxhw);
-int wx_disable_pcie_master(struct wx_hw *wxhw);
-int wx_stop_adapter(struct wx_hw *wxhw);
-void wx_reset_misc(struct wx_hw *wxhw);
-int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count);
-int wx_sw_init(struct wx_hw *wxhw);
+int wx_reset_hostif(struct wx *wx);
+void wx_init_eeprom_params(struct wx *wx);
+void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
+void wx_init_rx_addrs(struct wx *wx);
+void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
+void wx_flush_sw_mac_table(struct wx *wx);
+int wx_set_mac(struct net_device *netdev, void *p);
+void wx_disable_rx(struct wx *wx);
+void wx_set_rx_mode(struct net_device *netdev);
+void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
+void wx_configure(struct wx *wx);
+int wx_disable_pcie_master(struct wx *wx);
+int wx_stop_adapter(struct wx *wx);
+void wx_reset_misc(struct wx *wx);
+int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
+int wx_sw_init(struct wx *wx);
#endif /* _WX_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
new file mode 100644
index 000000000000..eb89a274083e
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -0,0 +1,2004 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <net/page_pool.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_lib.h"
+#include "wx_hw.h"
+
+/* wx_test_staterr - tests bits in Rx descriptor status and error fields */
+static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
+static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
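
For review: the pagecnt_bias scheme above (inherited from the Intel drivers)
avoids atomic refcount traffic in the hot path. The driver front-loads
USHRT_MAX page references and pays them out by decrementing a plain counter,
so the page is exclusively the driver's again exactly when page_count()
minus the bias is at most 1. A userspace model of the bookkeeping
(illustrative only; fake_page stands in for struct page):

    #include <stdio.h>
    #include <limits.h>

    struct fake_page { unsigned int refcount; }; /* stand-in for struct page */

    int main(void)
    {
        struct fake_page page = { .refcount = 1 };
        unsigned int pagecnt_bias;

        /* front-load references, as wx_alloc_mapped_page() does */
        page.refcount += USHRT_MAX - 1;
        pagecnt_bias = USHRT_MAX;

        /* handing a buffer to the stack costs one bias tick, no atomics */
        pagecnt_bias--;
        printf("reusable: %d\n", page.refcount - pagecnt_bias <= 1); /* 1 */

        /* an outside holder taking a real reference blocks reuse */
        page.refcount++;
        printf("reusable: %d\n", page.refcount - pagecnt_bias <= 1); /* 0 */
        return 0;
    }
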
+
+/**
+ * wx_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void wx_reuse_rx_page(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *old_buff)
+{
+ u16 nta = rx_ring->next_to_alloc;
+ struct wx_rx_buffer *new_buff;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->page = old_buff->page;
+ new_buff->page_dma = old_buff->page_dma;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static void wx_dma_sync_frag(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer)
+{
+ struct sk_buff *skb = rx_buffer->skb;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ WX_CB(skb)->dma,
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ DMA_FROM_DEVICE);
+
+ /* If the page was released, just unmap it. */
+ if (unlikely(WX_CB(skb)->page_released))
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+}
+
+static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff **skb,
+ int *rx_buffer_pgcnt)
+{
+ struct wx_rx_buffer *rx_buffer;
+ unsigned int size;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+
+#if (PAGE_SIZE < 8192)
+ *rx_buffer_pgcnt = page_count(rx_buffer->page);
+#else
+ *rx_buffer_pgcnt = 0;
+#endif
+
+ prefetchw(rx_buffer->page);
+ *skb = rx_buffer->skb;
+
+ /* Delay unmapping of the first packet. It carries the header
+ * information, HW may still access the header after the writeback.
+ * Only unmap it when EOP is reached
+ */
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) {
+ if (!*skb)
+ goto skip_sync;
+ } else {
+ if (*skb)
+ wx_dma_sync_frag(rx_ring, rx_buffer);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+skip_sync:
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+static void wx_put_rx_buffer(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer,
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
+{
+ if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+ /* hand second half of page back to the ring */
+ wx_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
+ /* the page has been released from the ring */
+ WX_CB(skb)->page_released = true;
+ else
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+ rx_buffer->skb = NULL;
+}
+
+static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer,
+ union wx_rx_desc *rx_desc)
+{
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = WX_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
+ struct sk_buff *skb = rx_buffer->skb;
+
+ if (!skb) {
+ void *page_addr = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+
+ if (size <= WX_RXBUFFER_256) {
+ memcpy(__skb_put(skb, size), page_addr,
+ ALIGN(size, sizeof(long)));
+ rx_buffer->pagecnt_bias++;
+
+ return skb;
+ }
+
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
+ WX_CB(skb)->dma = rx_buffer->dma;
+
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset,
+ size, truesize);
+ goto out;
+
+ } else {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+ }
+
+out:
+#if (PAGE_SIZE < 8192)
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+#endif
+
+ return skb;
+}
+
+static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ page = page_pool_dev_alloc_pages(rx_ring->page_pool);
+ WARN_ON(!page);
+ dma = page_pool_get_dma_addr(page);
+
+ bi->page_dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
+
+ return true;
+}
+
+/**
+ * wx_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union wx_rx_desc *rx_desc;
+ struct wx_rx_buffer *bi;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
+ rx_desc = WX_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ do {
+ if (!wx_alloc_mapped_page(rx_ring, bi))
+ break;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ WX_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(bi->page_dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = WX_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.upper.status_error = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+}
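
The "i -= rx_ring->count" dance above is the classic negative-index idiom:
i runs from next_to_use - count up toward zero, so the wrap check is a cheap
"!i" rather than a compare against count, and adding count back at the end
recovers the real ring index. Traced in userspace with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned short count = 8, next_to_use = 6, todo = 5; /* examples */
        unsigned short i = next_to_use;

        i -= count;                     /* i is now "negative" mod 2^16 */
        while (todo--) {
            i++;
            if (!i)                     /* crossed the end of the ring */
                i -= count;             /* -count stands for index 0 */
        }
        i += count;                     /* back to a real index */
        printf("next_to_use: %u -> %u\n", next_to_use, i); /* 6 -> 3 */
        return 0;
    }
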
+
+u16 wx_desc_unused(struct wx_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
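
wx_desc_unused() deliberately sacrifices one slot: it reports
count - in_flight - 1, so an empty ring yields count - 1 and
next_to_clean == next_to_use never needs to disambiguate "full" from
"empty". Two sample evaluations (illustrative):

    #include <stdio.h>

    /* mirrors the formula in wx_desc_unused() */
    static unsigned short desc_unused(unsigned short count,
                                      unsigned short ntc, unsigned short ntu)
    {
        return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
    }

    int main(void)
    {
        printf("%u\n", desc_unused(512, 0, 0));    /* empty ring: 511 */
        printf("%u\n", desc_unused(512, 10, 500)); /* 490 in flight: 21 */
        return 0;
    }
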
+
+/**
+ * wx_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool wx_is_non_eop(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(WX_RX_DESC(rx_ring, ntc));
+
+ /* if we are the last buffer then there is nothing else to do */
+ if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)))
+ return false;
+
+ rx_ring->rx_buffer_info[ntc].skb = skb;
+
+ return true;
+}
+
+static void wx_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
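
One subtlety in wx_pull_tail(): the copy length handed to
skb_copy_to_linear_data() is rounded up to sizeof(long) so the memcpy runs
in word-sized chunks, while skb->tail only advances by the exact pull_len,
so the few over-copied bytes are simply ignored. The rounding in isolation
(illustrative):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long pull_len = 42; /* e.g. an Ethernet + IP header */

        /* on 64-bit, 42 rounds up to 48 bytes actually copied */
        printf("bytes copied: %lu\n", ALIGN(pull_len, sizeof(long)));
        return 0;
    }
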
+
+/**
+ * wx_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right. These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool wx_cleanup_headers(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rx_ring->netdev;
+
+ /* verify that the packet does not have any known errors */
+ if (!netdev ||
+ unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) &&
+ !(netdev->features & NETIF_F_RXALL))) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+
+ /* place header in linear portion of buffer */
+ if (!skb_headlen(skb))
+ wx_pull_tail(skb);
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+/**
+ * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
+ struct wx_ring *rx_ring,
+ int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = wx_desc_unused(rx_ring);
+
+ do {
+ struct wx_rx_buffer *rx_buffer;
+ union wx_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int rx_buffer_pgcnt;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= WX_RX_BUFFER_WRITE) {
+ wx_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt);
+
+ /* retrieve a buffer from the ring */
+ skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
+ cleaned_count++;
+
+ /* place incomplete frames back on ring for completion */
+ if (wx_is_non_eop(rx_ring, rx_desc, skb))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (wx_cleanup_headers(rx_ring, rx_desc, skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_rx_packets++;
+ } while (likely(total_rx_packets < budget));
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
+
+ return total_rx_packets;
+}
+
+static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
+/**
+ * wx_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ **/
+static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
+ struct wx_ring *tx_ring, int napi_budget)
+{
+ unsigned int budget = q_vector->wx->tx_work_limit;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int i = tx_ring->next_to_clean;
+ struct wx_tx_buffer *tx_buffer;
+ union wx_tx_desc *tx_desc;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = WX_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ union wx_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* free the skb */
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ netdev_tx_completed_queue(wx_txring_txq(tx_ring),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ netif_running(tx_ring->netdev))
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ }
+
+ return !!budget;
+}
+
+/**
+ * wx_poll - NAPI polling RX/TX cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ **/
+static int wx_poll(struct napi_struct *napi, int budget)
+{
+ struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi);
+ int per_ring_budget, work_done = 0;
+ struct wx *wx = q_vector->wx;
+ bool clean_complete = true;
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->tx) {
+ if (!wx_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
+
+ /* Exit if we are called by netpoll */
+ if (budget <= 0)
+ return budget;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling
+ */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget / q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
+
+ wx_for_each_ring(ring, q_vector->rx) {
+ int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+ work_done += cleaned;
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
+ }
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (netif_running(wx->netdev))
+ wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
+ }
+
+ return min(work_done, budget - 1);
+}
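
Two NAPI contracts are encoded near the end of wx_poll(): the Rx budget is
divided across the vector's rings (floored at 1 so every ring can make
progress), and the completion path returns at most budget - 1, since handing
back the full budget tells the core that work remains. The division, worked
out with example numbers:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        int budget = 64, rx_rings = 3; /* example vector with 3 Rx rings */
        int per_ring = rx_rings > 1 ? max(budget / rx_rings, 1) : budget;

        printf("per-ring budget: %d\n", per_ring); /* 64 / 3 -> 21 */
        return 0;
    }
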
+
+static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
+{
+ if (likely(wx_desc_unused(tx_ring) >= size))
+ return 0;
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ /* For the next check */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (likely(wx_desc_unused(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ return 0;
+}
+
+static void wx_tx_map(struct wx_ring *tx_ring,
+ struct wx_tx_buffer *first)
+{
+ struct sk_buff *skb = first->skb;
+ struct wx_tx_buffer *tx_buffer;
+ u16 i = tx_ring->next_to_use;
+ unsigned int data_len, size;
+ union wx_tx_desc *tx_desc;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u32 cmd_type;
+
+ cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS;
+ tx_desc = WX_TX_DESC(tx_ring, i);
+
+ tx_desc->read.olinfo_status = cpu_to_le32(skb->len << WX_TXD_PAYLEN_SHIFT);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > WX_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ dma += WX_MAX_DATA_PER_TXD;
+ size -= WX_MAX_DATA_PER_TXD;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ size = skb_frag_size(frag);
+
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= size | WX_TXD_EOP | WX_TXD_RS;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
+
+ skb_tx_timestamp(skb);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ wx_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
+ writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
+ tx_ring->next_to_use = i;
+}
+
+static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
+ struct wx_ring *tx_ring)
+{
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ struct wx_tx_buffer *first;
+ unsigned short f;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
+ frags[f]));
+
+ if (wx_maybe_stop_tx(tx_ring, count + 3))
+ return NETDEV_TX_BUSY;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ wx_tx_map(tx_ring, first);
+
+ return NETDEV_TX_OK;
+}
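
The "count + 3" reservation follows the comment in the function body:
TXD_USE_COUNT() is a round-up division of each buffer by
WX_MAX_DATA_PER_TXD, plus a two-descriptor gap and one context descriptor.
Worked out below with an assumed 16 KiB per-descriptor limit (hypothetical
here; the real constant is defined in wx_type.h):

    #include <stdio.h>

    #define WX_MAX_DATA_PER_TXD 16384 /* assumed for this sketch */
    #define TXD_USE_COUNT(s) \
        (((s) + WX_MAX_DATA_PER_TXD - 1) / WX_MAX_DATA_PER_TXD)

    int main(void)
    {
        /* example skb: 256-byte linear part plus one 32 KiB page frag */
        unsigned int count = TXD_USE_COUNT(256) + TXD_USE_COUNT(32768);

        /* +2 gap descriptors, +1 context descriptor, as in the code */
        printf("descriptors reserved: %u\n", count + 3); /* 3 + 3 = 6 */
        return 0;
    }
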
+
+netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ unsigned int r_idx = skb->queue_mapping;
+ struct wx *wx = netdev_priv(netdev);
+ struct wx_ring *tx_ring;
+
+ if (!netif_carrier_ok(netdev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* The minimum packet size for olinfo paylen is 17, so pad the skb
+ * to meet this minimum size requirement.
+ */
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
+
+ if (r_idx >= wx->num_tx_queues)
+ r_idx = r_idx % wx->num_tx_queues;
+ tx_ring = wx->tx_ring[r_idx];
+
+ return wx_xmit_frame_ring(skb, tx_ring);
+}
+EXPORT_SYMBOL(wx_xmit_frame);
+
+void wx_napi_enable_all(struct wx *wx)
+{
+ struct wx_q_vector *q_vector;
+ int q_idx;
+
+ for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
+ q_vector = wx->q_vector[q_idx];
+ napi_enable(&q_vector->napi);
+ }
+}
+EXPORT_SYMBOL(wx_napi_enable_all);
+
+void wx_napi_disable_all(struct wx *wx)
+{
+ struct wx_q_vector *q_vector;
+ int q_idx;
+
+ for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
+ q_vector = wx->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+EXPORT_SYMBOL(wx_napi_disable_all);
+
+/**
+ * wx_set_rss_queues: Allocate queues for RSS
+ * @wx: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static void wx_set_rss_queues(struct wx *wx)
+{
+ wx->num_rx_queues = wx->mac.max_rx_queues;
+ wx->num_tx_queues = wx->mac.max_tx_queues;
+}
+
+static void wx_set_num_queues(struct wx *wx)
+{
+ /* Start with base case */
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+ wx->queues_per_pool = 1;
+
+ wx_set_rss_queues(wx);
+}
+
+/**
+ * wx_acquire_msix_vectors - acquire MSI-X vectors
+ * @wx: board private structure
+ *
+ * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
+ * return a negative error code if unable to acquire MSI-X vectors for any
+ * reason.
+ */
+static int wx_acquire_msix_vectors(struct wx *wx)
+{
+ struct irq_affinity affd = {0, };
+ int nvecs, i;
+
+ nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
+
+ wx->msix_entries = kcalloc(nvecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entries)
+ return -ENOMEM;
+
+ nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
+ nvecs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &affd);
+ if (nvecs < 0) {
+ wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
+ kfree(wx->msix_entries);
+ wx->msix_entries = NULL;
+ return nvecs;
+ }
+
+ for (i = 0; i < nvecs; i++) {
+ wx->msix_entries[i].entry = i;
+ wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
+ }
+
+ /* one for msix_other */
+ nvecs -= 1;
+ wx->num_q_vectors = nvecs;
+ wx->num_rx_queues = nvecs;
+ wx->num_tx_queues = nvecs;
+
+ return 0;
+}
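
The resulting MSI-X layout: min(online CPUs, hardware maximum) vectors are
requested, and the last one is peeled off for the misc interrupt, leaving
nvecs - 1 queue vectors and as many Rx/Tx queues. Illustrative sizing (the
CPU and hardware counts below are made-up example values):

    #include <stdio.h>

    int main(void)
    {
        int online_cpus = 8, hw_max_msix = 64; /* assumed platform values */
        int nvecs = online_cpus < hw_max_msix ? online_cpus : hw_max_msix;

        nvecs -= 1; /* one vector reserved for msix_other */
        printf("queue vectors: %d, misc vector index: %d\n", nvecs, nvecs);
        return 0;
    }
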
+
+/**
+ * wx_set_interrupt_capability - set MSI-X or MSI if supported
+ * @wx: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int wx_set_interrupt_capability(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ int nvecs, ret;
+
+ /* We will try to get MSI-X interrupts first */
+ ret = wx_acquire_msix_vectors(wx);
+ if (ret == 0 || (ret == -ENOMEM))
+ return ret;
+
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+ wx->num_q_vectors = 1;
+
+ /* minimum of one vector for the queues, plus one for misc */
+ nvecs = 1;
+ nvecs = pci_alloc_irq_vectors(pdev, nvecs,
+ nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (nvecs == 1) {
+ if (pdev->msi_enabled)
+ wx_err(wx, "Fallback to MSI.\n");
+ else
+ wx_err(wx, "Fallback to LEGACY.\n");
+ } else {
+ wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
+ return nvecs;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ return 0;
+}
+
+/**
+ * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @wx: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS.
+ *
+ **/
+static void wx_cache_ring_rss(struct wx *wx)
+{
+ u16 i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx->rx_ring[i]->reg_idx = i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx->tx_ring[i]->reg_idx = i;
+}
+
+static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * wx_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @wx: board private structure to initialize
+ * @v_count: q_vectors allocated on wx, used for ring interleaving
+ * @v_idx: index of vector in wx struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int wx_alloc_q_vector(struct wx *wx,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct wx_q_vector *q_vector;
+ int ring_count, default_itr;
+ struct wx_ring *ring;
+
+ /* note this will allocate space for the ring structure as well! */
+ ring_count = txr_count + rxr_count;
+
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(wx->netdev, &q_vector->napi,
+ wx_poll);
+
+ /* tie q_vector and wx together */
+ wx->q_vector[v_idx] = q_vector;
+ q_vector->wx = wx;
+ q_vector->v_idx = v_idx;
+ if (cpu_online(v_idx))
+ q_vector->numa_node = cpu_to_node(v_idx);
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ if (wx->mac.type == wx_mac_sp)
+ default_itr = WX_12K_ITR;
+ else
+ default_itr = WX_7K_ITR;
+ /* initialize ITR */
+ if (txr_count && !rxr_count)
+ /* tx only vector */
+ q_vector->itr = wx->tx_itr_setting ?
+ default_itr : wx->tx_itr_setting;
+ else
+ /* rx or rx/tx vector */
+ q_vector->itr = wx->rx_itr_setting ?
+ default_itr : wx->rx_itr_setting;
+
+ while (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &wx->pdev->dev;
+ ring->netdev = wx->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ wx_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = wx->tx_ring_count;
+
+ ring->queue_index = txr_idx;
+
+ /* assign ring to wx */
+ wx->tx_ring[txr_idx] = ring;
+
+ /* update count and index */
+ txr_count--;
+ txr_idx += v_count;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ while (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &wx->pdev->dev;
+ ring->netdev = wx->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ wx_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
+ ring->count = wx->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to wx */
+ wx->rx_ring[rxr_idx] = ring;
+
+ /* update count and index */
+ rxr_count--;
+ rxr_idx += v_count;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ return 0;
+}
+
+/**
+ * wx_free_q_vector - Free memory allocated for specific interrupt vector
+ * @wx: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void wx_free_q_vector(struct wx *wx, int v_idx)
+{
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx->tx_ring[ring->queue_index] = NULL;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx->rx_ring[ring->queue_index] = NULL;
+
+ wx->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * wx_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @wx: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int wx_alloc_q_vectors(struct wx *wx)
+{
+ unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ unsigned int rxr_remaining = wx->num_rx_queues;
+ unsigned int txr_remaining = wx->num_tx_queues;
+ unsigned int q_vectors = wx->num_q_vectors;
+ int rqpv, tqpv;
+ int err;
+
+ for (; v_idx < q_vectors; v_idx++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = wx_alloc_q_vector(wx, q_vectors, v_idx,
+ tqpv, txr_idx,
+ rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ wx->num_tx_queues = 0;
+ wx->num_rx_queues = 0;
+ wx->num_q_vectors = 0;
+
+ while (v_idx--)
+ wx_free_q_vector(wx, v_idx);
+
+ return -ENOMEM;
+}
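
The per-iteration DIV_ROUND_UP() yields an even spread when rings outnumber
vectors: the remaining rings are divided by the remaining vectors, rounding
up, so earlier vectors absorb the remainder and nothing is left unassigned.
Tracing 10 Rx rings over 4 vectors (illustrative):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int remaining = 10, q_vectors = 4, v_idx; /* example */

        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
            unsigned int rqpv = DIV_ROUND_UP(remaining, q_vectors - v_idx);

            printf("vector %u gets %u rings\n", v_idx, rqpv);
            remaining -= rqpv;
        }
        /* prints 3, 3, 2, 2 -- all rings placed, spread within one */
        return 0;
    }
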
+
+/**
+ * wx_free_q_vectors - Free memory allocated for interrupt vectors
+ * @wx: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void wx_free_q_vectors(struct wx *wx)
+{
+ int v_idx = wx->num_q_vectors;
+
+ wx->num_tx_queues = 0;
+ wx->num_rx_queues = 0;
+ wx->num_q_vectors = 0;
+
+ while (v_idx--)
+ wx_free_q_vector(wx, v_idx);
+}
+
+void wx_reset_interrupt_capability(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return;
+
+ pci_free_irq_vectors(wx->pdev);
+ if (pdev->msix_enabled) {
+ kfree(wx->msix_entries);
+ wx->msix_entries = NULL;
+ }
+}
+EXPORT_SYMBOL(wx_reset_interrupt_capability);
+
+/**
+ * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @wx: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void wx_clear_interrupt_scheme(struct wx *wx)
+{
+ wx_free_q_vectors(wx);
+ wx_reset_interrupt_capability(wx);
+}
+EXPORT_SYMBOL(wx_clear_interrupt_scheme);
+
+int wx_init_interrupt_scheme(struct wx *wx)
+{
+ int ret;
+
+ /* Number of supported queues */
+ wx_set_num_queues(wx);
+
+ /* Set interrupt mode */
+ ret = wx_set_interrupt_capability(wx);
+ if (ret) {
+ wx_err(wx, "Allocate irq vectors for failed.\n");
+ return ret;
+ }
+
+ /* Allocate memory for queues */
+ ret = wx_alloc_q_vectors(wx);
+ if (ret) {
+ wx_err(wx, "Unable to allocate memory for queue vectors.\n");
+ wx_reset_interrupt_capability(wx);
+ return ret;
+ }
+
+ wx_cache_ring_rss(wx);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_init_interrupt_scheme);
+
+irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector = data;
+
+ /* EIAM disabled interrupts (on this vector) for us */
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule_irqoff(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(wx_msix_clean_rings);
+
+void wx_free_irq(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ int vector;
+
+ if (!(pdev->msix_enabled)) {
+ free_irq(pdev->irq, wx);
+ return;
+ }
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ /* free only the irqs that were actually requested */
+ if (!q_vector->rx.ring && !q_vector->tx.ring)
+ continue;
+
+ free_irq(entry->vector, q_vector);
+ }
+
+ free_irq(wx->msix_entries[vector].vector, wx);
+}
+EXPORT_SYMBOL(wx_free_irq);
+
+/**
+ * wx_setup_isb_resources - allocate interrupt status resources
+ * @wx: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int wx_setup_isb_resources(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ wx->isb_mem = dma_alloc_coherent(&pdev->dev,
+ sizeof(u32) * 4,
+ &wx->isb_dma,
+ GFP_KERNEL);
+ if (!wx->isb_mem) {
+ wx_err(wx, "Alloc isb_mem failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_setup_isb_resources);
+
+/**
+ * wx_free_isb_resources - free interrupt status resources
+ * @wx: board private structure
+ **/
+void wx_free_isb_resources(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ dma_free_coherent(&pdev->dev, sizeof(u32) * 4,
+ wx->isb_mem, wx->isb_dma);
+ wx->isb_mem = NULL;
+}
+EXPORT_SYMBOL(wx_free_isb_resources);
+
+u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx)
+{
+ u32 cur_tag = 0;
+
+ cur_tag = wx->isb_mem[WX_ISB_HEADER];
+ wx->isb_tag[idx] = cur_tag;
+
+ return (__force u32)cpu_to_le32(wx->isb_mem[idx]);
+}
+EXPORT_SYMBOL(wx_misc_isb);
+
+/**
+ * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @wx: pointer to wx struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ **/
+static void wx_set_ivar(struct wx *wx, s8 direction,
+ u16 queue, u16 msix_vector)
+{
+ u32 ivar, index;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ index = 0;
+ ivar = rd32(wx, WX_PX_MISC_IVAR);
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_PX_MISC_IVAR, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_PX_IVAR(queue >> 1), ivar);
+ }
+}
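
wx_set_ivar()'s index arithmetic packs four 8-bit allocation entries into
each 32-bit IVAR register: two queues, each with an Rx byte (direction 0)
and a Tx byte (direction 1). queue >> 1 selects the register and
16 * (queue & 1) + 8 * direction the byte lane. Dumping the mapping for the
first four queues (illustrative):

    #include <stdio.h>

    int main(void)
    {
        int queue, dir;

        for (queue = 0; queue < 4; queue++)
            for (dir = 0; dir < 2; dir++) {
                int index = 16 * (queue & 1) + 8 * dir;

                printf("queue %d %s -> WX_PX_IVAR(%d) bits %d-%d\n",
                       queue, dir ? "tx" : "rx", queue >> 1,
                       index, index + 7);
            }
        return 0;
    }
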
+
+/**
+ * wx_write_eitr - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void wx_write_eitr(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg;
+
+ if (wx->mac.type == wx_mac_sp)
+ itr_reg = q_vector->itr & WX_SP_MAX_EITR;
+ else
+ itr_reg = q_vector->itr & WX_EM_MAX_EITR;
+
+ itr_reg |= WX_PX_ITR_CNT_WDIS;
+
+ wr32(wx, WX_PX_ITR(v_idx), itr_reg);
+}
+
+/**
+ * wx_configure_vectors - Configure vectors for hardware
+ * @wx: board private structure
+ *
+ * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY
+ * interrupts.
+ **/
+void wx_configure_vectors(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ u32 eitrsel = 0;
+ u16 v_idx;
+
+ if (pdev->msix_enabled) {
+ /* Populate MSIX to EITR Select */
+ wr32(wx, WX_PX_ITRSEL, eitrsel);
+ /* use EIAM to auto-mask when MSI-X interrupt is asserted
+ * this saves a register write for every interrupt
+ */
+ wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL);
+ } else {
+ /* legacy and MSI interrupts: leave GPIE clear so the EIAM
+ * auto-mask model is not used.
+ */
+ wr32(wx, WX_PX_GPIE, 0);
+ }
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx_set_ivar(wx, 0, ring->reg_idx, v_idx);
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx_set_ivar(wx, 1, ring->reg_idx, v_idx);
+
+ wx_write_eitr(q_vector);
+ }
+
+ wx_set_ivar(wx, -1, 0, v_idx);
+ if (pdev->msix_enabled)
+ wr32(wx, WX_PX_ITR(v_idx), 1950);
+}
+EXPORT_SYMBOL(wx_configure_vectors);
+
+/**
+ * wx_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void wx_clean_rx_ring(struct wx_ring *rx_ring)
+{
+ struct wx_rx_buffer *rx_buffer;
+ u16 i = rx_ring->next_to_clean;
+
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+
+ /* Free all the Rx ring sk_buffs */
+ while (i != rx_ring->next_to_alloc) {
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+
+ if (WX_CB(skb)->page_released)
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+
+ dev_kfree_skb(skb);
+ }
+
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ WX_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+
+ i++;
+ rx_buffer++;
+ if (i == rx_ring->count) {
+ i = 0;
+ rx_buffer = rx_ring->rx_buffer_info;
+ }
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * wx_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @wx: board private structure
+ **/
+void wx_clean_all_rx_rings(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_clean_rx_ring(wx->rx_ring[i]);
+}
+EXPORT_SYMBOL(wx_clean_all_rx_rings);
+
+/**
+ * wx_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void wx_free_rx_resources(struct wx_ring *rx_ring)
+{
+ wx_clean_rx_ring(rx_ring);
+ kvfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+
+ if (rx_ring->page_pool) {
+ page_pool_destroy(rx_ring->page_pool);
+ rx_ring->page_pool = NULL;
+ }
+}
+
+/**
+ * wx_free_all_rx_resources - Free Rx Resources for All Queues
+ * @wx: pointer to hardware structure
+ *
+ * Free all receive software resources
+ **/
+static void wx_free_all_rx_resources(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_free_rx_resources(wx->rx_ring[i]);
+}
+
+/**
+ * wx_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void wx_clean_tx_ring(struct wx_ring *tx_ring)
+{
+ struct wx_tx_buffer *tx_buffer;
+ u16 i = tx_ring->next_to_clean;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+
+ while (i != tx_ring->next_to_use) {
+ union wx_tx_desc *eop_desc, *tx_desc;
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = WX_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ netdev_tx_reset_queue(wx_txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
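Both cleanup loops above rely on the usual ring invariant: slots from next_to_clean up to (but excluding) next_to_use are owned by software, and indices wrap at ring->count. The free-slot count that wx_desc_unused() (declared in wx_lib.h below) provides conventionally keeps one slot empty so that "empty" and "full" are distinguishable; a sketch under that assumption, since its body is not part of this hunk:

static u16 example_desc_unused(struct wx_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	/* one slot stays empty: ntu == ntc means an empty ring */
	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}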
+
+/**
+ * wx_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @wx: board private structure
+ **/
+void wx_clean_all_tx_rings(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_clean_tx_ring(wx->tx_ring[i]);
+}
+EXPORT_SYMBOL(wx_clean_all_tx_rings);
+
+/**
+ * wx_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void wx_free_tx_resources(struct wx_ring *tx_ring)
+{
+ wx_clean_tx_ring(tx_ring);
+ kvfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * wx_free_all_tx_resources - Free Tx Resources for All Queues
+ * @wx: pointer to hardware structure
+ *
+ * Free all transmit software resources
+ **/
+static void wx_free_all_tx_resources(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_free_tx_resources(wx->tx_ring[i]);
+}
+
+void wx_free_resources(struct wx *wx)
+{
+ wx_free_isb_resources(wx);
+ wx_free_all_rx_resources(wx);
+ wx_free_all_tx_resources(wx);
+}
+EXPORT_SYMBOL(wx_free_resources);
+
+static int wx_alloc_page_pool(struct wx_ring *rx_ring)
+{
+ int ret = 0;
+
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = rx_ring->size,
+ .nid = dev_to_node(rx_ring->dev),
+ .dev = rx_ring->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = PAGE_SIZE,
+ };
+
+ rx_ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_ring->page_pool)) {
+ ret = PTR_ERR(rx_ring->page_pool);
+ rx_ring->page_pool = NULL;
+ }
+
+ return ret;
+}
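Because the pool is created with PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, DMA mapping and device-side sync are owned by the page_pool core, which is why the teardown paths above return buffers with page_pool_put_full_page(). A minimal alloc/free pairing with the stock page_pool API:

/* Sketch: pages drawn from the pool must be returned through it,
 * mirroring what wx_clean_rx_ring() does on teardown.
 */
static struct page *example_rx_page_get(struct wx_ring *rx_ring)
{
	return page_pool_dev_alloc_pages(rx_ring->page_pool);
}

static void example_rx_page_put(struct wx_ring *rx_ring, struct page *page)
{
	page_pool_put_full_page(rx_ring->page_pool, page, false);
}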
+
+/**
+ * wx_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int wx_setup_rx_resources(struct wx_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = NUMA_NO_NODE;
+ int size, ret;
+
+ size = sizeof(struct wx_rx_buffer) * rx_ring->count;
+
+ if (rx_ring->q_vector)
+ numa_node = rx_ring->q_vector->numa_node;
+
+ rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ set_dev_node(dev, numa_node);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!rx_ring->desc) {
+ set_dev_node(dev, orig_node);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ }
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ ret = wx_alloc_page_pool(rx_ring);
+ if (ret < 0) {
+ dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ kvfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * wx_setup_all_rx_resources - allocate all queues Rx resources
+ * @wx: pointer to hardware structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_all_rx_resources(struct wx *wx)
+{
+ int i, err = 0;
+
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ err = wx_setup_rx_resources(wx->rx_ring[i]);
+ if (!err)
+ continue;
+
+ wx_err(wx, "Allocation for Rx Queue %u failed\n", i);
+ goto err_setup_rx;
+ }
+
+ return 0;
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ wx_free_rx_resources(wx->rx_ring[i]);
+ return err;
+}
+
+/**
+ * wx_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_tx_resources(struct wx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = NUMA_NO_NODE;
+ int size;
+
+ size = sizeof(struct wx_tx_buffer) * tx_ring->count;
+
+ if (tx_ring->q_vector)
+ numa_node = tx_ring->q_vector->numa_node;
+
+ tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ set_dev_node(dev, numa_node);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ set_dev_node(dev, orig_node);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ }
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+
+err:
+ kvfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * wx_setup_all_tx_resources - allocate all queues Tx resources
+ * @wx: pointer to private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_all_tx_resources(struct wx *wx)
+{
+ int i, err = 0;
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ err = wx_setup_tx_resources(wx->tx_ring[i]);
+ if (!err)
+ continue;
+
+ wx_err(wx, "Allocation for Tx Queue %u failed\n", i);
+ goto err_setup_tx;
+ }
+
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ wx_free_tx_resources(wx->tx_ring[i]);
+ return err;
+}
+
+int wx_setup_resources(struct wx *wx)
+{
+ int err;
+
+ /* allocate transmit descriptors */
+ err = wx_setup_all_tx_resources(wx);
+ if (err)
+ return err;
+
+ /* allocate receive descriptors */
+ err = wx_setup_all_rx_resources(wx);
+ if (err)
+ goto err_free_tx;
+
+ err = wx_setup_isb_resources(wx);
+ if (err)
+ goto err_free_rx;
+
+ return 0;
+
+err_free_rx:
+ wx_free_all_rx_resources(wx);
+err_free_tx:
+ wx_free_all_tx_resources(wx);
+
+ return err;
+}
+EXPORT_SYMBOL(wx_setup_resources);
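The intended pairing with wx_free_resources() can be seen in the ngbe open path later in this diff; condensed here as a sketch (error unwinding abbreviated, ngbe_* names taken from ngbe_main.c below):

static int example_open_path(struct wx *wx)
{
	int err;

	err = wx_setup_resources(wx);
	if (err)
		return err;

	wx_configure(wx);		/* program rings and filters */

	err = ngbe_request_irq(wx);	/* driver-specific IRQ setup */
	if (err)
		wx_free_resources(wx);	/* unwind in reverse order */

	return err;
}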
+
+/**
+ * wx_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: storage space for 64bit statistics
+ */
+void wx_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ }
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp,
+ start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(wx_get_stats64);
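The retry loops above are the reader half of the u64_stats protocol; the datapath owns the writer half. A minimal sketch of the corresponding writer (the helper name is hypothetical, as the Tx/Rx clean routines are not part of this hunk):

/* Sketch: readers in wx_get_stats64() retry while this is open. */
static void example_update_ring_stats(struct wx_ring *ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets += pkts;
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);
}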
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
new file mode 100644
index 000000000000..50ee41f1fa10
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+ */
+
+#ifndef _WX_LIB_H_
+#define _WX_LIB_H_
+
+void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count);
+u16 wx_desc_unused(struct wx_ring *ring);
+netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev);
+void wx_napi_enable_all(struct wx *wx);
+void wx_napi_disable_all(struct wx *wx);
+void wx_reset_interrupt_capability(struct wx *wx);
+void wx_clear_interrupt_scheme(struct wx *wx);
+int wx_init_interrupt_scheme(struct wx *wx);
+irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data);
+void wx_free_irq(struct wx *wx);
+int wx_setup_isb_resources(struct wx *wx);
+void wx_free_isb_resources(struct wx *wx);
+u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
+void wx_configure_vectors(struct wx *wx);
+void wx_clean_all_rx_rings(struct wx *wx);
+void wx_clean_all_tx_rings(struct wx *wx);
+void wx_free_resources(struct wx *wx);
+int wx_setup_resources(struct wx *wx);
+void wx_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+
+#endif /* _WX_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1cbeef8230bf..77d8d7f1707e 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -4,6 +4,9 @@
#ifndef _WX_TYPE_H_
#define _WX_TYPE_H_
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+
/* Vendor ID */
#ifndef PCI_VENDOR_ID_WANGXUN
#define PCI_VENDOR_ID_WANGXUN 0x8088
@@ -36,12 +39,11 @@
#define WX_SPI_CMD 0x10104
#define WX_SPI_CMD_READ_DWORD 0x1
#define WX_SPI_CLK_DIV 0x3
-#define WX_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28)
-#define WX_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25)
-#define WX_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF))
+#define WX_SPI_CMD_CMD(_v) FIELD_PREP(GENMASK(30, 28), _v)
+#define WX_SPI_CMD_CLK(_v) FIELD_PREP(GENMASK(27, 25), _v)
+#define WX_SPI_CMD_ADDR(_v) FIELD_PREP(GENMASK(23, 0), _v)
#define WX_SPI_DATA 0x10108
#define WX_SPI_DATA_BYPASS BIT(31)
-#define WX_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16)
#define WX_SPI_DATA_OP_DONE BIT(0)
#define WX_SPI_STATUS 0x1010C
#define WX_SPI_STATUS_OPDONE BIT(0)
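The conversion above from open-coded mask-and-shift macros to FIELD_PREP()/GENMASK() is behavior-preserving: GENMASK(h, l) builds the mask and FIELD_PREP() shifts the value into place. A compile-time illustration (hypothetical, for demonstration only; needs <linux/bitfield.h> and <linux/build_bug.h>):

static inline void example_field_prep_equiv(void)
{
	/* FIELD_PREP(GENMASK(30, 28), v) matches ((v & 0x7) << 28) */
	BUILD_BUG_ON(FIELD_PREP(GENMASK(30, 28), 0x5) != (0x5UL << 28));
	BUILD_BUG_ON(FIELD_PREP(GENMASK(23, 0), 0x123456) != 0x123456);
}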
@@ -64,21 +66,50 @@
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
+#define WX_CFG_PORT_CTL_QINQ BIT(2)
+#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan */
+#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
+
+/* GPIO Registers */
+#define WX_GPIO_DR 0x14800
+#define WX_GPIO_DR_0 BIT(0) /* SDP0 Data Value */
+#define WX_GPIO_DR_1 BIT(1) /* SDP1 Data Value */
+#define WX_GPIO_DDR 0x14804
+#define WX_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */
+#define WX_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */
+#define WX_GPIO_CTL 0x14808
+#define WX_GPIO_INTEN 0x14830
+#define WX_GPIO_INTEN_0 BIT(0)
+#define WX_GPIO_INTEN_1 BIT(1)
+#define WX_GPIO_INTMASK 0x14834
+#define WX_GPIO_INTTYPE_LEVEL 0x14838
+#define WX_GPIO_POLARITY 0x1483C
+#define WX_GPIO_EOI 0x1484C
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL 0x18000
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
+#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
/***************************** RDB registers *********************************/
/* receive packet buffer */
#define WX_RDB_PB_CTL 0x19000
#define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */
#define WX_RDB_PB_CTL_DISABLED BIT(0)
+#define WX_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4))
+#define WX_RDB_PB_SZ_SHIFT 10
/* statistic */
#define WX_RDB_PFCMACDAL 0x19210
#define WX_RDB_PFCMACDAH 0x19214
+/* ring assignment */
+#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4))
+#define WX_RDB_PL_CFG_L4HDR BIT(1)
+#define WX_RDB_PL_CFG_L3HDR BIT(2)
+#define WX_RDB_PL_CFG_L2HDR BIT(3)
+#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
+#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
/******************************* PSR Registers *******************************/
/* psr control */
@@ -96,10 +127,24 @@
#define WX_PSR_CTL_MO_SHIFT 5
#define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT)
#define WX_PSR_CTL_TPE BIT(4)
+#define WX_PSR_MAX_SZ 0x15020
+#define WX_PSR_VLAN_CTL 0x15088
+#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */
+#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+/* VM L2 control */
+#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
+#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */
+#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept unmatched vlan */
+#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */
+#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */
+#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */
+#define WX_PSR_VM_L2CTL_BAM BIT(11) /* accept broadcast packets */
+#define WX_PSR_VM_L2CTL_MPE BIT(12) /* multicast promiscuous */
+
/* Management */
#define WX_PSR_MNG_FLEX_SEL 0x1582C
#define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16))
@@ -113,14 +158,35 @@
/* mac switcher */
#define WX_PSR_MAC_SWC_AD_L 0x16200
#define WX_PSR_MAC_SWC_AD_H 0x16204
-#define WX_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF))
-#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30)
+#define WX_PSR_MAC_SWC_AD_H_AD(v) FIELD_PREP(U16_MAX, v)
+#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) FIELD_PREP(BIT(30), v)
#define WX_PSR_MAC_SWC_AD_H_AV BIT(31)
#define WX_PSR_MAC_SWC_VM_L 0x16208
#define WX_PSR_MAC_SWC_VM_H 0x1620C
#define WX_PSR_MAC_SWC_IDX 0x16210
#define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU
+/********************************* RSEC **************************************/
+/* general rsec */
+#define WX_RSC_CTL 0x17000
+#define WX_RSC_CTL_SAVE_MAC_ERR BIT(6)
+#define WX_RSC_CTL_CRC_STRIP BIT(2)
+#define WX_RSC_CTL_RX_DIS BIT(1)
+#define WX_RSC_ST 0x17004
+#define WX_RSC_ST_RSEC_RDY BIT(0)
+
+/****************************** TDB ******************************************/
+#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
+#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+
+/****************************** TSEC *****************************************/
+/* Security Control Registers */
+#define WX_TSC_CTL 0x1D000
+#define WX_TSC_CTL_TX_DIS BIT(1)
+#define WX_TSC_CTL_TSEC_DIS BIT(0)
+#define WX_TSC_BUF_AE 0x1D00C
+#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
+
/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC 0x1E008
#define WX_MNG_SWFW_SYNC_SW_MB BIT(2)
@@ -133,11 +199,15 @@
/************************************* ETH MAC *****************************/
#define WX_MAC_TX_CFG 0x11000
#define WX_MAC_TX_CFG_TE BIT(0)
+#define WX_MAC_TX_CFG_SPEED_MASK GENMASK(30, 29)
+#define WX_MAC_TX_CFG_SPEED_10G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 0)
+#define WX_MAC_TX_CFG_SPEED_1G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3)
#define WX_MAC_RX_CFG 0x11004
#define WX_MAC_RX_CFG_RE BIT(0)
#define WX_MAC_RX_CFG_JE BIT(8)
#define WX_MAC_PKT_FLT 0x11008
#define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */
+#define WX_MAC_WDG_TIMEOUT 0x1100C
#define WX_MAC_RX_FLOW_CTRL 0x11090
#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
#define WX_MMC_CONTROL 0x11800
@@ -147,10 +217,34 @@
/* Interrupt Registers */
#define WX_BME_CTL 0x12020
#define WX_PX_MISC_IC 0x100
+#define WX_PX_MISC_ICS 0x104
+#define WX_PX_MISC_IEN 0x108
+#define WX_PX_INTA 0x110
+#define WX_PX_GPIE 0x118
+#define WX_PX_GPIE_MODEL BIT(0)
+#define WX_PX_IC 0x120
#define WX_PX_IMS(_i) (0x140 + (_i) * 4)
+#define WX_PX_IMC(_i) (0x150 + (_i) * 4)
+#define WX_PX_ISB_ADDR_L 0x160
+#define WX_PX_ISB_ADDR_H 0x164
#define WX_PX_TRANSACTION_PENDING 0x168
+#define WX_PX_ITRSEL 0x180
+#define WX_PX_ITR(_i) (0x200 + (_i) * 4)
+#define WX_PX_ITR_CNT_WDIS BIT(31)
+#define WX_PX_MISC_IVAR 0x4FC
+#define WX_PX_IVAR(_i) (0x500 + (_i) * 4)
+
+#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+#define WX_7K_ITR 595
+#define WX_12K_ITR 336
+#define WX_SP_MAX_EITR 0x00000FF8U
+#define WX_EM_MAX_EITR 0x00007FFCU
/* transmit DMA Registers */
+#define WX_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40))
+#define WX_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40))
+#define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40))
+#define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40))
#define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40))
/* Transmit Config masks */
#define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */
@@ -160,8 +254,22 @@
#define WX_PX_TR_CFG_THRE_SHIFT 8
/* Receive DMA Registers */
+#define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40))
+#define WX_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40))
+#define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40))
+#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40))
#define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
+#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
+#define WX_PX_RR_CFG_RR_THER_SHIFT 16
+#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
+#define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8)
+#define WX_PX_RR_CFG_BHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6)
+ * + at bit 8 offset (<< 12)
+ * = (<< 6)
+ */
+#define WX_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */
+#define WX_PX_RR_CFG_RR_SIZE_SHIFT 1
#define WX_PX_RR_CFG_RR_EN BIT(0)
/* Number of 80 microseconds we wait for PCI Express master disable */
@@ -185,6 +293,50 @@
#define WX_SW_REGION_PTR 0x1C
+#define WX_MAC_STATE_DEFAULT 0x1
+#define WX_MAC_STATE_MODIFIED 0x2
+#define WX_MAC_STATE_IN_USE 0x4
+
+#define WX_MAX_RXD 8192
+#define WX_MAX_TXD 8192
+
+/* Supported Rx Buffer Sizes */
+#define WX_RXBUFFER_256 256 /* Used for skb receive header */
+#define WX_RXBUFFER_2K 2048
+#define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#if MAX_SKB_FRAGS < 8
+#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024)
+#else
+#define WX_RX_BUFSZ WX_RXBUFFER_2K
+#endif
+
+#define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define WX_MAX_DATA_PER_TXD BIT(14)
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
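To make the sizing concrete: WX_MAX_DATA_PER_TXD is BIT(14) = 16 KiB, so TXD_USE_COUNT() rounds a buffer length up to the number of data descriptors it occupies, and DESC_NEEDED is the worst-case stop-queue threshold (one descriptor per fragment plus a few slots of headroom). A compile-time illustration (hypothetical):

static inline void example_txd_sizing(void)
{
	BUILD_BUG_ON(TXD_USE_COUNT(16384) != 1);	/* fits one descriptor */
	BUILD_BUG_ON(TXD_USE_COUNT(16385) != 2);	/* one byte over needs two */
}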
+/* Ether Types */
+#define WX_ETH_P_CNM 0x22E7
+
+#define WX_CFG_PORT_ST 0x14404
+
+/******************* Receive Descriptor bit definitions **********************/
+#define WX_RXD_STAT_DD BIT(0) /* Done */
+#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */
+
+#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
+
+/*********************** Transmit Descriptor Config Masks ****************/
+#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */
+#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */
+#define WX_TXD_PAYLEN_SHIFT 13 /* Desc PAYLEN shift */
+#define WX_TXD_EOP BIT(24) /* End of Packet */
+#define WX_TXD_IFCS BIT(25) /* Insert FCS */
+#define WX_TXD_RS BIT(27) /* Report Status */
+
/* Host Interface Command Structures */
struct wx_hic_hdr {
u8 cmd;
@@ -249,14 +401,23 @@ enum wx_mac_type {
wx_mac_em
};
+enum em_mac_type {
+ em_mac_type_unknown = 0,
+ em_mac_type_mdi,
+ em_mac_type_rgmii
+};
+
struct wx_mac_info {
enum wx_mac_type type;
bool set_lben;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
+ u32 mta_shadow[128];
s32 mc_filter_type;
u32 mcft_size;
u32 num_rar_entries;
+ u32 rx_pb_size;
+ u32 tx_pb_size;
u32 max_tx_queues;
u32 max_rx_queues;
@@ -284,19 +445,183 @@ struct wx_addr_filter_info {
bool user_set_promisc;
};
+struct wx_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 state; /* bitmask */
+ u64 pools;
+};
+
enum wx_reset_type {
WX_LAN_RESET = 0,
WX_SW_RESET,
WX_GLOBAL_RESET
};
-struct wx_hw {
+struct wx_cb {
+ dma_addr_t dma;
+ u16 append_cnt; /* number of skb's appended */
+ bool page_released;
+ bool dma_released;
+};
+
+#define WX_CB(skb) ((struct wx_cb *)(skb)->cb)
+
+/* Transmit Descriptor */
+union wx_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor */
+union wx_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define WX_RX_DESC(R, i) \
+ (&(((union wx_rx_desc *)((R)->desc))[i]))
+#define WX_TX_DESC(R, i) \
+ (&(((union wx_tx_desc *)((R)->desc))[i]))
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct wx_tx_buffer {
+ union wx_tx_desc *next_to_watch;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct wx_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ dma_addr_t page_dma;
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
+};
+
+struct wx_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+/* iterator for handling rings in ring container */
+#define wx_for_each_ring(posm, headm) \
+ for (posm = (headm).ring; posm; posm = posm->next)
+
+struct wx_ring_container {
+ struct wx_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct wx_ring {
+ struct wx_ring *next; /* pointer to next ring in q_vector */
+ struct wx_q_vector *q_vector; /* backpointer to host q_vector */
+ struct net_device *netdev; /* netdev ring belongs to */
+ struct device *dev; /* device for DMA mapping */
+ struct page_pool *page_pool;
+ void *desc; /* descriptor ring memory */
+ union {
+ struct wx_tx_buffer *tx_buffer_info;
+ struct wx_rx_buffer *rx_buffer_info;
+ };
+ u8 __iomem *tail;
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+
+ u16 count; /* amount of descriptors */
+
+ u8 queue_index; /* needed for multiqueue queue management */
+ u8 reg_idx; /* holds the special value that gets
+ * the hardware register offset
+ * associated with this ring, which is
+ * different for DCB and RSS modes
+ */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
+
+ struct wx_queue_stats stats;
+ struct u64_stats_sync syncp;
+} ____cacheline_internodealigned_in_smp;
+
+struct wx_q_vector {
+ struct wx *wx;
+ int cpu; /* CPU for DCA */
+ int numa_node;
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring
+ */
+ u16 itr; /* Interrupt throttle rate written to EITR */
+ struct wx_ring_container rx, tx;
+ struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
+ char name[IFNAMSIZ + 17];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct wx_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+enum wx_isb_idx {
+ WX_ISB_HEADER,
+ WX_ISB_MISC,
+ WX_ISB_VEC0,
+ WX_ISB_VEC1,
+ WX_ISB_MAX
+};
+
+struct wx {
u8 __iomem *hw_addr;
struct pci_dev *pdev;
+ struct net_device *netdev;
struct wx_bus_info bus;
struct wx_mac_info mac;
+ enum em_mac_type mac_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
+ struct wx_mac_addr *mac_table;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -304,11 +629,63 @@ struct wx_hw {
u8 revision_id;
u16 oem_ssid;
u16 oem_svid;
+ u16 msg_enable;
bool adapter_stopped;
+ u16 tpid[8];
+ char eeprom_id[32];
+ char *driver_name;
enum wx_reset_type reset_type;
+
+ /* PHY stuff */
+ unsigned int link;
+ int speed;
+ int duplex;
+ struct phy_device *phydev;
+
+ bool wol_enabled;
+ bool ncsi_enabled;
+ bool gpio_ctrl;
+
+ /* Tx fast path data */
+ int num_tx_queues;
+ u16 tx_itr_setting;
+ u16 tx_work_limit;
+
+ /* Rx fast path data */
+ int num_rx_queues;
+ u16 rx_itr_setting;
+ u16 rx_work_limit;
+
+ int num_q_vectors; /* current number of q_vectors for device */
+ int max_q_vectors; /* upper limit of q_vectors for device */
+
+ u32 tx_ring_count;
+ u32 rx_ring_count;
+
+ struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
+ struct wx_ring *rx_ring[64];
+ struct wx_q_vector *q_vector[64];
+
+ unsigned int queues_per_pool;
+ struct msix_entry *msix_entries;
+
+ /* misc interrupt status block */
+ dma_addr_t isb_dma;
+ u32 *isb_mem;
+ u32 isb_tag[WX_ISB_MAX];
+
+#define WX_MAX_RETA_ENTRIES 128
+ u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
+
+#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+ u32 *rss_key;
+ u32 wol;
+
+ u16 bd_number;
};
#define WX_INTR_ALL (~0ULL)
+#define WX_INTR_Q(i) BIT(i)
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@@ -319,23 +696,23 @@ struct wx_hw {
wr32((a), (reg) + ((off) << 2), (val))
static inline u32
-rd32m(struct wx_hw *wxhw, u32 reg, u32 mask)
+rd32m(struct wx *wx, u32 reg, u32 mask)
{
u32 val;
- val = rd32(wxhw, reg);
+ val = rd32(wx, reg);
return val & mask;
}
static inline void
-wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field)
+wr32m(struct wx *wx, u32 reg, u32 mask, u32 field)
{
u32 val;
- val = rd32(wxhw, reg);
+ val = rd32(wx, reg);
val = ((val & ~mask) | (field & mask));
- wr32(wxhw, reg, val);
+ wr32(wx, reg, val);
}
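wr32m() is the read-modify-write companion to wr32()/rd32m(): it clears only the bits in mask and installs field without disturbing the rest of the register. A short usage sketch with definitions from earlier in this header (illustrative only):

static inline void example_enable_jumbo(struct wx *wx)
{
	/* set only the JE bit, leaving the rest of WX_MAC_RX_CFG intact */
	wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
}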
/* On some domestic CPU platforms, sometimes IO is not synchronized with
@@ -343,10 +720,10 @@ wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field)
*/
#define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR)
-#define wx_err(wxhw, fmt, arg...) \
- dev_err(&(wxhw)->pdev->dev, fmt, ##arg)
+#define wx_err(wx, fmt, arg...) \
+ dev_err(&(wx)->pdev->dev, fmt, ##arg)
-#define wx_dbg(wxhw, fmt, arg...) \
- dev_dbg(&(wxhw)->pdev->dev, fmt, ##arg)
+#define wx_dbg(wx, fmt, arg...) \
+ dev_dbg(&(wx)->pdev->dev, fmt, ##arg)
#endif /* _WX_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile
index 391c2cbc1bb4..61a13d98abe7 100644
--- a/drivers/net/ethernet/wangxun/ngbe/Makefile
+++ b/drivers/net/ethernet/wangxun/ngbe/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_NGBE) += ngbe.o
-ngbe-objs := ngbe_main.o ngbe_hw.o
+ngbe-objs := ngbe_main.o ngbe_hw.o ngbe_mdio.o ngbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
deleted file mode 100644
index af147ca8605c..000000000000
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
-
-#ifndef _NGBE_H_
-#define _NGBE_H_
-
-#include "ngbe_type.h"
-
-#define NGBE_MAX_FDIR_INDICES 7
-
-#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
-#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
-
-#define NGBE_ETH_LENGTH_OF_ADDRESS 6
-#define NGBE_MAX_MSIX_VECTORS 0x09
-#define NGBE_RAR_ENTRIES 32
-
-/* TX/RX descriptor defines */
-#define NGBE_DEFAULT_TXD 512 /* default ring size */
-#define NGBE_DEFAULT_TX_WORK 256
-#define NGBE_MAX_TXD 8192
-#define NGBE_MIN_TXD 128
-
-#define NGBE_DEFAULT_RXD 512 /* default ring size */
-#define NGBE_DEFAULT_RX_WORK 256
-#define NGBE_MAX_RXD 8192
-#define NGBE_MIN_RXD 128
-
-#define NGBE_MAC_STATE_DEFAULT 0x1
-#define NGBE_MAC_STATE_MODIFIED 0x2
-#define NGBE_MAC_STATE_IN_USE 0x4
-
-struct ngbe_mac_addr {
- u8 addr[ETH_ALEN];
- u16 state; /* bitmask */
- u64 pools;
-};
-
-/* board specific private data structure */
-struct ngbe_adapter {
- u8 __iomem *io_addr; /* Mainly for iounmap use */
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- /* structs defined in ngbe_hw.h */
- struct ngbe_hw hw;
- struct ngbe_mac_addr *mac_table;
- u16 msg_enable;
-
- /* Tx fast path data */
- int num_tx_queues;
- u16 tx_itr_setting;
- u16 tx_work_limit;
-
- /* Rx fast path data */
- int num_rx_queues;
- u16 rx_itr_setting;
- u16 rx_work_limit;
-
- int num_q_vectors; /* current number of q_vectors for device */
- int max_q_vectors; /* upper limit of q_vectors for device */
-
- u32 tx_ring_count;
- u32 rx_ring_count;
-
-#define NGBE_MAX_RETA_ENTRIES 128
- u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES];
-
-#define NGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
- u32 *rss_key;
- u32 wol;
-
- u16 bd_number;
-};
-
-extern char ngbe_driver_name[];
-
-#endif /* _NGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
new file mode 100644
index 000000000000..5b25834baf38
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#include "../libwx/wx_ethtool.h"
+#include "ngbe_ethtool.h"
+
+static const struct ethtool_ops ngbe_ethtool_ops = {
+ .get_drvinfo = wx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+};
+
+void ngbe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ngbe_ethtool_ops;
+}
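These ops delegate link handling to phylib, which presumes netdev->phydev is attached before the device is opened (ngbe does this via ngbe_phy_connect() in ngbe_open()). The registration order used by ngbe_probe() later in this diff, condensed as a sketch (ngbe_netdev_ops lives in ngbe_main.c):

static int example_register(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;

	/* install ops before register_netdev() so they are live as
	 * soon as the device becomes visible
	 */
	ngbe_set_ethtool_ops(netdev);
	netdev->netdev_ops = &ngbe_netdev_ops;
	return register_netdev(netdev);
}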
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h
new file mode 100644
index 000000000000..487074e0eeec
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_ETHTOOL_H_
+#define _NGBE_ETHTOOL_H_
+
+void ngbe_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _NGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
index 0e3923b3737e..6562a2de9527 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
@@ -9,12 +9,10 @@
#include "../libwx/wx_hw.h"
#include "ngbe_type.h"
#include "ngbe_hw.h"
-#include "ngbe.h"
-int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw)
+int ngbe_eeprom_chksum_hostif(struct wx *wx)
{
struct wx_hic_read_shadow_ram buffer;
- struct wx_hw *wxhw = &hw->wxhw;
int status;
int tmp;
@@ -27,61 +25,73 @@ int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw)
/* one word */
buffer.length = 0;
- status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer),
+ status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
WX_HI_COMMAND_TIMEOUT, false);
if (status < 0)
return status;
- tmp = rd32a(wxhw, WX_MNG_MBOX, 1);
+ tmp = rd32a(wx, WX_MNG_MBOX, 1);
if (tmp == NGBE_FW_CMD_ST_PASS)
return 0;
return -EIO;
}
-static int ngbe_reset_misc(struct ngbe_hw *hw)
+static int ngbe_reset_misc(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
-
- wx_reset_misc(wxhw);
- if (hw->mac_type == ngbe_mac_type_rgmii)
- wr32(wxhw, NGBE_MDIO_CLAUSE_SELECT, 0xF);
- if (hw->gpio_ctrl) {
+ wx_reset_misc(wx);
+ if (wx->gpio_ctrl) {
/* gpio0 is used to power on/off control */
- wr32(wxhw, NGBE_GPIO_DDR, 0x1);
- wr32(wxhw, NGBE_GPIO_DR, NGBE_GPIO_DR_0);
+ wr32(wx, NGBE_GPIO_DDR, 0x1);
+ ngbe_sfp_modules_txrx_powerctl(wx, false);
}
return 0;
}
+void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi)
+{
+ /* gpio0 is used for power on/off control; 0 is on */
+ wr32(wx, NGBE_GPIO_DR, swi ? 0 : NGBE_GPIO_DR_0);
+}
+
/**
* ngbe_reset_hw - Perform hardware reset
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
**/
-int ngbe_reset_hw(struct ngbe_hw *hw)
+int ngbe_reset_hw(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
- int status = 0;
- u32 reset = 0;
+ u32 val = 0;
+ int ret = 0;
- /* Call adapter stop to disable tx/rx and clear interrupts */
- status = wx_stop_adapter(wxhw);
- if (status != 0)
- return status;
- reset = WX_MIS_RST_LAN_RST(wxhw->bus.func);
- wr32(wxhw, WX_MIS_RST, reset | rd32(wxhw, WX_MIS_RST));
- ngbe_reset_misc(hw);
+ /* Call wx stop to disable tx/rx and clear interrupts */
+ ret = wx_stop_adapter(wx);
+ if (ret != 0)
+ return ret;
+
+ if (wx->mac_type != em_mac_type_mdi) {
+ val = WX_MIS_RST_LAN_RST(wx->bus.func);
+ wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST));
+
+ ret = read_poll_timeout(rd32, val,
+ !(val & (BIT(9) << wx->bus.func)), 1000,
+ 100000, false, wx, 0x10028);
+ if (ret) {
+ wx_err(wx, "Lan reset exceed s maximum times.\n");
+ return ret;
+ }
+ }
+ ngbe_reset_misc(wx);
/* Store the permanent mac address */
- wx_get_mac_addr(wxhw, wxhw->mac.perm_addr);
+ wx_get_mac_addr(wx, wx->mac.perm_addr);
/* reset num_rar_entries to 128 */
- wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES;
- wx_init_rx_addrs(wxhw);
- pci_set_master(wxhw->pdev);
+ wx->mac.num_rar_entries = NGBE_RAR_ENTRIES;
+ wx_init_rx_addrs(wx);
+ pci_set_master(wx->pdev);
return 0;
}
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
index 42476a3fe57c..a4693e006816 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
@@ -7,6 +7,7 @@
#ifndef _NGBE_HW_H_
#define _NGBE_HW_H_
-int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw);
-int ngbe_reset_hw(struct ngbe_hw *hw);
+int ngbe_eeprom_chksum_hostif(struct wx *wx);
+void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi);
+int ngbe_reset_hw(struct wx *wx);
#endif /* _NGBE_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index f0b24366da18..5b564d348c09 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -9,12 +9,16 @@
#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
+#include <linux/phy.h>
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_lib.h"
#include "ngbe_type.h"
+#include "ngbe_mdio.h"
#include "ngbe_hw.h"
-#include "ngbe.h"
+#include "ngbe_ethtool.h"
+
char ngbe_driver_name[] = "ngbe";
/* ngbe_pci_tbl - PCI Device ID Table
@@ -39,70 +43,27 @@ static const struct pci_device_id ngbe_pci_tbl[] = {
{ .device = 0 }
};
-static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, u8 *addr)
-{
- struct ngbe_hw *hw = &adapter->hw;
-
- memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
- adapter->mac_table[0].pools = 1ULL;
- adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT |
- NGBE_MAC_STATE_IN_USE);
- wx_set_rar(&hw->wxhw, 0, adapter->mac_table[0].addr,
- adapter->mac_table[0].pools,
- WX_PSR_MAC_SWC_AD_H_AV);
-}
-
/**
* ngbe_init_type_code - Initialize the shared code
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
**/
-static void ngbe_init_type_code(struct ngbe_hw *hw)
+static void ngbe_init_type_code(struct wx *wx)
{
int wol_mask = 0, ncsi_mask = 0;
- struct wx_hw *wxhw = &hw->wxhw;
- u16 type_mask = 0;
+ u16 type_mask = 0, val;
- wxhw->mac.type = wx_mac_em;
- type_mask = (u16)(wxhw->subsystem_device_id & NGBE_OEM_MASK);
- ncsi_mask = wxhw->subsystem_device_id & NGBE_NCSI_MASK;
- wol_mask = wxhw->subsystem_device_id & NGBE_WOL_MASK;
-
- switch (type_mask) {
- case NGBE_SUBID_M88E1512_SFP:
- case NGBE_SUBID_LY_M88E1512_SFP:
- hw->phy.type = ngbe_phy_m88e1512_sfi;
- break;
- case NGBE_SUBID_M88E1512_RJ45:
- hw->phy.type = ngbe_phy_m88e1512;
- break;
- case NGBE_SUBID_M88E1512_MIX:
- hw->phy.type = ngbe_phy_m88e1512_unknown;
- break;
- case NGBE_SUBID_YT8521S_SFP:
- case NGBE_SUBID_YT8521S_SFP_GPIO:
- case NGBE_SUBID_LY_YT8521S_SFP:
- hw->phy.type = ngbe_phy_yt8521s_sfi;
- break;
- case NGBE_SUBID_INTERNAL_YT8521S_SFP:
- case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO:
- hw->phy.type = ngbe_phy_internal_yt8521s_sfi;
- break;
- case NGBE_SUBID_RGMII_FPGA:
- case NGBE_SUBID_OCP_CARD:
- fallthrough;
- default:
- hw->phy.type = ngbe_phy_internal;
- break;
- }
+ wx->mac.type = wx_mac_em;
+ type_mask = (u16)(wx->subsystem_device_id & NGBE_OEM_MASK);
+ ncsi_mask = wx->subsystem_device_id & NGBE_NCSI_MASK;
+ wol_mask = wx->subsystem_device_id & NGBE_WOL_MASK;
- if (hw->phy.type == ngbe_phy_internal ||
- hw->phy.type == ngbe_phy_internal_yt8521s_sfi)
- hw->mac_type = ngbe_mac_type_mdi;
- else
- hw->mac_type = ngbe_mac_type_rgmii;
+ val = rd32(wx, WX_CFG_PORT_ST);
+ wx->mac_type = (val & BIT(7)) >> 7 ?
+ em_mac_type_rgmii :
+ em_mac_type_mdi;
- hw->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
- hw->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK ||
+ wx->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
+ wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK ||
type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0;
switch (type_mask) {
@@ -110,31 +71,31 @@ static void ngbe_init_type_code(struct ngbe_hw *hw)
case NGBE_SUBID_LY_M88E1512_SFP:
case NGBE_SUBID_YT8521S_SFP_GPIO:
case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO:
- hw->gpio_ctrl = 1;
+ wx->gpio_ctrl = 1;
break;
default:
- hw->gpio_ctrl = 0;
+ wx->gpio_ctrl = 0;
break;
}
}
/**
- * ngbe_init_rss_key - Initialize adapter RSS key
- * @adapter: device handle
+ * ngbe_init_rss_key - Initialize wx RSS key
+ * @wx: device handle
*
* Allocates and initializes the RSS key if it is not allocated.
**/
-static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter)
+static inline int ngbe_init_rss_key(struct wx *wx)
{
u32 *rss_key;
- if (!adapter->rss_key) {
- rss_key = kzalloc(NGBE_RSS_KEY_SIZE, GFP_KERNEL);
+ if (!wx->rss_key) {
+ rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
if (unlikely(!rss_key))
return -ENOMEM;
- netdev_rss_key_fill(rss_key, NGBE_RSS_KEY_SIZE);
- adapter->rss_key = rss_key;
+ netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
+ wx->rss_key = rss_key;
}
return 0;
@@ -142,72 +103,263 @@ static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter)
/**
* ngbe_sw_init - Initialize general software structures
- * @adapter: board private structure to initialize
+ * @wx: board private structure to initialize
**/
-static int ngbe_sw_init(struct ngbe_adapter *adapter)
+static int ngbe_sw_init(struct wx *wx)
{
- struct pci_dev *pdev = adapter->pdev;
- struct ngbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ struct pci_dev *pdev = wx->pdev;
u16 msix_count = 0;
int err = 0;
- wxhw->hw_addr = adapter->io_addr;
- wxhw->pdev = pdev;
+ wx->mac.num_rar_entries = NGBE_RAR_ENTRIES;
+ wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
+ wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
+ wx->mac.mcft_size = NGBE_MC_TBL_SIZE;
+ wx->mac.rx_pb_size = NGBE_RX_PB_SIZE;
+ wx->mac.tx_pb_size = NGBE_TDB_PB_SZ;
/* PCI config space info */
- err = wx_sw_init(wxhw);
+ err = wx_sw_init(wx);
if (err < 0) {
- netif_err(adapter, probe, adapter->netdev,
- "Read of internal subsystem device id failed\n");
+ wx_err(wx, "read of internal subsystem device id failed\n");
return err;
}
/* mac type, phy type , oem type */
- ngbe_init_type_code(hw);
+ ngbe_init_type_code(wx);
- wxhw->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
- wxhw->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
- wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES;
/* Set common capability flags and settings */
- adapter->max_q_vectors = NGBE_MAX_MSIX_VECTORS;
-
- err = wx_get_pcie_msix_counts(wxhw, &msix_count, NGBE_MAX_MSIX_VECTORS);
+ wx->max_q_vectors = NGBE_MAX_MSIX_VECTORS;
+ err = wx_get_pcie_msix_counts(wx, &msix_count, NGBE_MAX_MSIX_VECTORS);
if (err)
dev_err(&pdev->dev, "Do not support MSI-X\n");
- wxhw->mac.max_msix_vectors = msix_count;
+ wx->mac.max_msix_vectors = msix_count;
- adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries,
- sizeof(struct ngbe_mac_addr),
- GFP_KERNEL);
- if (!adapter->mac_table) {
- dev_err(&pdev->dev, "mac_table allocation failed: %d\n", err);
- return -ENOMEM;
- }
-
- if (ngbe_init_rss_key(adapter))
+ if (ngbe_init_rss_key(wx))
return -ENOMEM;
/* enable itr by default in dynamic mode */
- adapter->rx_itr_setting = 1;
- adapter->tx_itr_setting = 1;
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
/* set default ring sizes */
- adapter->tx_ring_count = NGBE_DEFAULT_TXD;
- adapter->rx_ring_count = NGBE_DEFAULT_RXD;
+ wx->tx_ring_count = NGBE_DEFAULT_TXD;
+ wx->rx_ring_count = NGBE_DEFAULT_RXD;
/* set default work limits */
- adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK;
- adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK;
+ wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
+ wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;
return 0;
}
-static void ngbe_down(struct ngbe_adapter *adapter)
+/**
+ * ngbe_irq_enable - Enable default interrupt generation settings
+ * @wx: board private structure
+ * @queues: enable all queues interrupts
+ **/
+static void ngbe_irq_enable(struct wx *wx, bool queues)
{
- netif_carrier_off(adapter->netdev);
- netif_tx_disable(adapter->netdev);
-};
+ u32 mask;
+
+ /* enable misc interrupt */
+ mask = NGBE_PX_MISC_IEN_MASK;
+
+ wr32(wx, WX_GPIO_DDR, WX_GPIO_DDR_0);
+ wr32(wx, WX_GPIO_INTEN, WX_GPIO_INTEN_0 | WX_GPIO_INTEN_1);
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, 0x0);
+ wr32(wx, WX_GPIO_POLARITY, wx->gpio_ctrl ? 0 : 0x3);
+
+ wr32(wx, WX_PX_MISC_IEN, mask);
+
+ /* mask interrupt */
+ if (queues)
+ wx_intr_enable(wx, NGBE_INTR_ALL);
+ else
+ wx_intr_enable(wx, NGBE_INTR_MISC(wx));
+}
+
+/**
+ * ngbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * re-enable the interrupt that we masked before the EICR read.
+ */
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+
+ /* re-enable the original interrupt state, no lsc, no queues */
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ngbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * ngbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ngbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ err = request_irq(wx->msix_entries[vector].vector,
+ ngbe_msix_other, 0, netdev->name, wx);
+
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * ngbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ngbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = ngbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(pdev->irq, ngbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(pdev->irq, ngbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static void ngbe_disable_device(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ u32 i;
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ wx_disable_rx_queue(wx, wx->rx_ring[i]);
+ /* disable receives */
+ wx_disable_rx(wx);
+ wx_napi_disable_all(wx);
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ if (wx->gpio_ctrl)
+ ngbe_sfp_modules_txrx_powerctl(wx, false);
+ wx_irq_disable(wx);
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ u8 reg_idx = wx->tx_ring[i]->reg_idx;
+
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
+ }
+}
+
+static void ngbe_down(struct wx *wx)
+{
+ phy_stop(wx->phydev);
+ ngbe_disable_device(wx);
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
+}
+
+static void ngbe_up(struct wx *wx)
+{
+ wx_configure_vectors(wx);
+
+ /* make sure to complete pre-operations */
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+
+ /* clear any pending interrupts, may auto mask */
+ rd32(wx, WX_PX_IC);
+ rd32(wx, WX_PX_MISC_IC);
+ ngbe_irq_enable(wx, true);
+ if (wx->gpio_ctrl)
+ ngbe_sfp_modules_txrx_powerctl(wx, true);
+
+ phy_start(wx->phydev);
+}
/**
* ngbe_open - Called when a network interface is made active
@@ -220,13 +372,43 @@ static void ngbe_down(struct ngbe_adapter *adapter)
**/
static int ngbe_open(struct net_device *netdev)
{
- struct ngbe_adapter *adapter = netdev_priv(netdev);
- struct ngbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ struct wx *wx = netdev_priv(netdev);
+ int err;
+
+ wx_control_hw(wx, true);
+
+ err = wx_setup_resources(wx);
+ if (err)
+ return err;
+
+ wx_configure(wx);
+
+ err = ngbe_request_irq(wx);
+ if (err)
+ goto err_free_resources;
+
+ err = ngbe_phy_connect(wx);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_dis_phy;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_dis_phy;
- wx_control_hw(wxhw, true);
+ ngbe_up(wx);
return 0;
+err_dis_phy:
+ phy_disconnect(wx->phydev);
+err_free_irq:
+ wx_free_irq(wx);
+err_free_resources:
+ wx_free_resources(wx);
+ return err;
}
/**
@@ -242,66 +424,40 @@ static int ngbe_open(struct net_device *netdev)
**/
static int ngbe_close(struct net_device *netdev)
{
- struct ngbe_adapter *adapter = netdev_priv(netdev);
-
- ngbe_down(adapter);
- wx_control_hw(&adapter->hw.wxhw, false);
-
- return 0;
-}
-
-static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- return NETDEV_TX_OK;
-}
-
-/**
- * ngbe_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int ngbe_set_mac(struct net_device *netdev, void *p)
-{
- struct ngbe_adapter *adapter = netdev_priv(netdev);
- struct wx_hw *wxhw = &adapter->hw.wxhw;
- struct sockaddr *addr = p;
+ struct wx *wx = netdev_priv(netdev);
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
- memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len);
-
- ngbe_mac_set_default_filter(adapter, wxhw->mac.addr);
+ ngbe_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
+ phy_disconnect(wx->phydev);
+ wx_control_hw(wx, false);
return 0;
}
static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
- struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct wx *wx = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+ netdev = wx->netdev;
netif_device_detach(netdev);
rtnl_lock();
if (netif_running(netdev))
- ngbe_down(adapter);
+ ngbe_down(wx);
rtnl_unlock();
- wx_control_hw(&adapter->hw.wxhw, false);
+ wx_control_hw(wx, false);
pci_disable_device(pdev);
}
static void ngbe_shutdown(struct pci_dev *pdev)
{
- struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct wx *wx = pci_get_drvdata(pdev);
bool wake;
- wake = !!adapter->wol;
+ wake = !!wx->wol;
ngbe_dev_shutdown(pdev, &wake);
@@ -314,9 +470,11 @@ static void ngbe_shutdown(struct pci_dev *pdev)
static const struct net_device_ops ngbe_netdev_ops = {
.ndo_open = ngbe_open,
.ndo_stop = ngbe_close,
- .ndo_start_xmit = ngbe_xmit_frame,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = ngbe_set_mac,
+ .ndo_set_mac_address = wx_set_mac,
+ .ndo_get_stats64 = wx_get_stats64,
};
/**
@@ -326,18 +484,16 @@ static const struct net_device_ops ngbe_netdev_ops = {
*
* Returns 0 on success, negative on failure
*
- * ngbe_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
+ * ngbe_probe initializes a wx device identified by a pci_dev structure.
+ * The OS initialization, configuring of the wx private structure,
* and a hardware reset occur.
**/
static int ngbe_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
- struct ngbe_adapter *adapter = NULL;
- struct ngbe_hw *hw = NULL;
- struct wx_hw *wxhw = NULL;
struct net_device *netdev;
u32 e2rom_cksum_cap = 0;
+ struct wx *wx = NULL;
static int func_nums;
u16 e2rom_ver = 0;
u32 etrack_id = 0;
@@ -368,7 +524,7 @@ static int ngbe_probe(struct pci_dev *pdev,
pci_set_master(pdev);
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
- sizeof(struct ngbe_adapter),
+ sizeof(struct wx),
NGBE_MAX_TX_QUEUES,
NGBE_MAX_RX_QUEUES);
if (!netdev) {
@@ -378,63 +534,74 @@ static int ngbe_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- hw = &adapter->hw;
- wxhw = &hw->wxhw;
- adapter->msg_enable = BIT(3) - 1;
-
- adapter->io_addr = devm_ioremap(&pdev->dev,
- pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!adapter->io_addr) {
+ wx = netdev_priv(netdev);
+ wx->netdev = netdev;
+ wx->pdev = pdev;
+ wx->msg_enable = BIT(3) - 1;
+
+ wx->hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!wx->hw_addr) {
err = -EIO;
goto err_pci_release_regions;
}
+ wx->driver_name = ngbe_driver_name;
+ ngbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &ngbe_netdev_ops;
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_SG;
- adapter->bd_number = func_nums;
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features |
+ NETIF_F_RXALL;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
+ wx->bd_number = func_nums;
/* setup the private structure */
- err = ngbe_sw_init(adapter);
+ err = ngbe_sw_init(wx);
if (err)
goto err_free_mac_table;
/* check if flash load is done after hw power up */
- err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PERST);
+ err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST);
if (err)
goto err_free_mac_table;
- err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PWRRST);
+ err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PWRRST);
if (err)
goto err_free_mac_table;
- err = wx_mng_present(wxhw);
+ err = wx_mng_present(wx);
if (err) {
dev_err(&pdev->dev, "Management capability is not present\n");
goto err_free_mac_table;
}
- err = ngbe_reset_hw(hw);
+ err = ngbe_reset_hw(wx);
if (err) {
dev_err(&pdev->dev, "HW Init failed: %d\n", err);
goto err_free_mac_table;
}
- if (wxhw->bus.func == 0) {
- wr32(wxhw, NGBE_CALSUM_CAP_STATUS, 0x0);
- wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, 0x0);
+ if (wx->bus.func == 0) {
+ wr32(wx, NGBE_CALSUM_CAP_STATUS, 0x0);
+ wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, 0x0);
} else {
- e2rom_cksum_cap = rd32(wxhw, NGBE_CALSUM_CAP_STATUS);
- saved_ver = rd32(wxhw, NGBE_EEPROM_VERSION_STORE_REG);
+ e2rom_cksum_cap = rd32(wx, NGBE_CALSUM_CAP_STATUS);
+ saved_ver = rd32(wx, NGBE_EEPROM_VERSION_STORE_REG);
}
- wx_init_eeprom_params(wxhw);
- if (wxhw->bus.func == 0 || e2rom_cksum_cap == 0) {
+ wx_init_eeprom_params(wx);
+ if (wx->bus.func == 0 || e2rom_cksum_cap == 0) {
/* make sure the EEPROM is ready */
- err = ngbe_eeprom_chksum_hostif(hw);
+ err = ngbe_eeprom_chksum_hostif(wx);
if (err) {
dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
@@ -442,14 +609,14 @@ static int ngbe_probe(struct pci_dev *pdev,
}
}
- adapter->wol = 0;
- if (hw->wol_enabled)
- adapter->wol = NGBE_PSR_WKUP_CTL_MAG;
+ wx->wol = 0;
+ if (wx->wol_enabled)
+ wx->wol = NGBE_PSR_WKUP_CTL_MAG;
- hw->wol_enabled = !!(adapter->wol);
- wr32(wxhw, NGBE_PSR_WKUP_CTL, adapter->wol);
+ wx->wol_enabled = !!(wx->wol);
+ wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);
- device_set_wakeup_enable(&pdev->dev, adapter->wol);
+ device_set_wakeup_enable(&pdev->dev, wx->wol);
/* Save off EEPROM version number and Option Rom version which
* together make a unique identify for the eeprom
@@ -457,37 +624,50 @@ static int ngbe_probe(struct pci_dev *pdev,
if (saved_ver) {
etrack_id = saved_ver;
} else {
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H,
&e2rom_ver);
etrack_id = e2rom_ver << 16;
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L,
&e2rom_ver);
etrack_id |= e2rom_ver;
- wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id);
+ wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, etrack_id);
}
+ snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
+ "0x%08x", etrack_id);
+
+ eth_hw_addr_set(netdev, wx->mac.perm_addr);
+ wx_mac_set_default_filter(wx, wx->mac.perm_addr);
- eth_hw_addr_set(netdev, wxhw->mac.perm_addr);
- ngbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr);
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_mac_table;
+
+ /* phy Interface Configuration */
+ err = ngbe_mdio_init(wx);
+ if (err)
+ goto err_clear_interrupt_scheme;
err = register_netdev(netdev);
if (err)
goto err_register;
- pci_set_drvdata(pdev, adapter);
+ pci_set_drvdata(pdev, wx);
- netif_info(adapter, probe, netdev,
+ netif_info(wx, probe, netdev,
"PHY: %s, PBA No: Wang Xun GbE Family Controller\n",
- hw->phy.type == ngbe_phy_internal ? "Internal" : "External");
- netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr);
+ wx->mac_type == em_mac_type_mdi ? "Internal" : "External");
+ netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr);
return 0;
err_register:
- wx_control_hw(wxhw, false);
+ wx_control_hw(wx, false);
+err_clear_interrupt_scheme:
+ wx_clear_interrupt_scheme(wx);
err_free_mac_table:
- kfree(adapter->mac_table);
+ kfree(wx->mac_table);
err_pci_release_regions:
pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
@@ -508,15 +688,16 @@ err_pci_disable_dev:
**/
static void ngbe_remove(struct pci_dev *pdev)
{
- struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct wx *wx = pci_get_drvdata(pdev);
struct net_device *netdev;
- netdev = adapter->netdev;
+ netdev = wx->netdev;
unregister_netdev(netdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
- kfree(adapter->mac_table);
+ kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
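The error labels in ngbe_probe() above unwind strictly in reverse order of setup (a register_netdev() failure releases hardware control, then the interrupt scheme, then the MAC table), and ngbe_remove() mirrors the same teardown. A minimal, self-contained sketch of the idiom, with illustrative names rather than the driver's exact helpers:

struct example_dev { int dummy; };

static int example_setup_irq_scheme(struct example_dev *dev) { return 0; }
static void example_clear_irq_scheme(struct example_dev *dev) { }
static int example_setup_mdio(struct example_dev *dev) { return 0; }

static int example_probe(struct example_dev *dev)
{
	int err;

	err = example_setup_irq_scheme(dev);
	if (err)
		return err;			/* nothing to undo yet */

	err = example_setup_mdio(dev);
	if (err)
		goto err_clear_irq_scheme;	/* undo only what succeeded */

	return 0;

err_clear_irq_scheme:
	example_clear_irq_scheme(dev);		/* teardown in reverse order */
	return err;
}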
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
new file mode 100644
index 000000000000..c9ddbbc3fa4f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "ngbe_type.h"
+#include "ngbe_mdio.h"
+
+static int ngbe_phy_read_reg_internal(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct wx *wx = bus->priv;
+
+ if (phy_addr != 0)
+ return 0xffff;
+ return (u16)rd32(wx, NGBE_PHY_CONFIG(regnum));
+}
+
+static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+
+ if (phy_addr == 0)
+ wr32(wx, NGBE_PHY_CONFIG(regnum), value);
+ return 0;
+}
+
+static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ u32 command, val, device_type = 0;
+ struct wx *wx = bus->priv;
+ int ret;
+
+ wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
+ /* setup and write the address cycle command */
+ command = NGBE_MSCA_RA(regnum) |
+ NGBE_MSCA_PA(phy_addr) |
+ NGBE_MSCA_DA(device_type);
+ wr32(wx, NGBE_MSCA, command);
+ command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) |
+ NGBE_MSCC_BUSY |
+ NGBE_MDIO_CLK(6);
+ wr32(wx, NGBE_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
+ 100000, false, wx, NGBE_MSCC);
+ if (ret) {
+ wx_err(wx, "Mdio read c22 command did not complete.\n");
+ return ret;
+ }
+
+ return (u16)rd32(wx, NGBE_MSCC);
+}
+
+static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ u32 command, val, device_type = 0;
+ struct wx *wx = bus->priv;
+ int ret;
+
+ wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
+ /* setup and write the address cycle command */
+ command = NGBE_MSCA_RA(regnum) |
+ NGBE_MSCA_PA(phy_addr) |
+ NGBE_MSCA_DA(device_type);
+ wr32(wx, NGBE_MSCA, command);
+ command = value |
+ NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) |
+ NGBE_MSCC_BUSY |
+ NGBE_MDIO_CLK(6);
+ wr32(wx, NGBE_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
+ 100000, false, wx, NGBE_MSCC);
+ if (ret)
+ wx_err(wx, "Mdio write c22 command did not complete.\n");
+
+ return ret;
+}
+
+static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
+{
+ struct wx *wx = bus->priv;
+ u32 val, command;
+ int ret;
+
+ wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
+ /* setup and write the address cycle command */
+ command = NGBE_MSCA_RA(regnum) |
+ NGBE_MSCA_PA(phy_addr) |
+ NGBE_MSCA_DA(devnum);
+ wr32(wx, NGBE_MSCA, command);
+ command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) |
+ NGBE_MSCC_BUSY |
+ NGBE_MDIO_CLK(6);
+ wr32(wx, NGBE_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
+ 100000, false, wx, NGBE_MSCC);
+ if (ret) {
+ wx_err(wx, "Mdio read c45 command did not complete.\n");
+ return ret;
+ }
+
+ return (u16)rd32(wx, NGBE_MSCC);
+}
+
+static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+ u32 val, command;
+ int ret;
+
+ wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
+ /* setup and write the address cycle command */
+ command = NGBE_MSCA_RA(regnum) |
+ NGBE_MSCA_PA(phy_addr) |
+ NGBE_MSCA_DA(devnum);
+ wr32(wx, NGBE_MSCA, command);
+ command = value |
+ NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) |
+ NGBE_MSCC_BUSY |
+ NGBE_MDIO_CLK(6);
+ wr32(wx, NGBE_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
+ 100000, false, wx, NGBE_MSCC);
+ if (ret)
+ wx_err(wx, "Mdio write c45 command did not complete.\n");
+
+ return ret;
+}
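All four MDI accessors above repeat one hardware sequence: select the clause encoding, write the address/device cycle to NGBE_MSCA, kick NGBE_MSCC with the command plus the BUSY bit, then poll until BUSY clears. A hypothetical consolidation of that shared skeleton (a sketch, not part of the patch; wr32/rd32 and the NGBE_* defines are the ones used above):

static int ngbe_mdio_transact(struct wx *wx, u32 clause, u32 addr_cycle, u32 cmd)
{
	u32 val;

	wr32(wx, NGBE_MDIO_CLAUSE_SELECT, clause);	/* 0xF = C22, 0x0 = C45 */
	wr32(wx, NGBE_MSCA, addr_cycle);		/* RA | PA | DA fields */
	wr32(wx, NGBE_MSCC, cmd | NGBE_MSCC_BUSY | NGBE_MDIO_CLK(6));

	/* the hardware clears BUSY when the MDIO bus cycle finishes */
	return read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY),
				 1000, 100000, false, wx, NGBE_MSCC);
}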
+
+static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct wx *wx = bus->priv;
+ u16 phy_data;
+
+ if (wx->mac_type == em_mac_type_mdi)
+ phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum);
+ else
+ phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum);
+
+ return phy_data;
+}
+
+static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr,
+ int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+ int ret;
+
+ if (wx->mac_type == em_mac_type_mdi)
+ ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value);
+ else
+ ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value);
+
+ return ret;
+}
+
+static void ngbe_handle_link_change(struct net_device *dev)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct phy_device *phydev;
+ u32 lan_speed, reg;
+
+ phydev = wx->phydev;
+ if (!(wx->link != phydev->link ||
+ wx->speed != phydev->speed ||
+ wx->duplex != phydev->duplex))
+ return;
+
+ wx->link = phydev->link;
+ wx->speed = phydev->speed;
+ wx->duplex = phydev->duplex;
+ switch (phydev->speed) {
+ case SPEED_10:
+ lan_speed = 0;
+ break;
+ case SPEED_100:
+ lan_speed = 1;
+ break;
+ case SPEED_1000:
+ default:
+ lan_speed = 2;
+ break;
+ }
+ wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed);
+
+ if (phydev->link) {
+ reg = rd32(wx, WX_MAC_TX_CFG);
+ reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
+ reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
+ wr32(wx, WX_MAC_TX_CFG, reg);
+ /* Reconfigure MAC Rx */
+ reg = rd32(wx, WX_MAC_RX_CFG);
+ wr32(wx, WX_MAC_RX_CFG, reg);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+ }
+ phy_print_status(phydev);
+}
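For reference, the two low bits of NGBE_CFG_LAN_SPEED written above encode the negotiated rate (0 = 10 Mb/s, 1 = 100 Mb/s, 2 = 1 Gb/s); wr32m() applies the 0x3 mask so only that field changes, and the MAC Tx/Rx blocks are reprogrammed only once the PHY reports link up.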
+
+int ngbe_phy_connect(struct wx *wx)
+{
+ int ret;
+
+ ret = phy_connect_direct(wx->netdev,
+ wx->phydev,
+ ngbe_handle_link_change,
+ PHY_INTERFACE_MODE_RGMII_ID);
+ if (ret) {
+ wx_err(wx, "PHY connect failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ngbe_phy_fixup(struct wx *wx)
+{
+ struct phy_device *phydev = wx->phydev;
+ struct ethtool_eee eee;
+
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ if (wx->mac_type != em_mac_type_mdi)
+ return;
+ /* disable EEE, the internal PHY does not support EEE */
+ memset(&eee, 0, sizeof(eee));
+ phy_ethtool_set_eee(phydev, &eee);
+}
+
+int ngbe_mdio_init(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "ngbe_mii_bus";
+ mii_bus->read = ngbe_phy_read_reg_c22;
+ mii_bus->write = ngbe_phy_write_reg_c22;
+ mii_bus->phy_mask = GENMASK(31, 4);
+ mii_bus->parent = &pdev->dev;
+ mii_bus->priv = wx;
+
+ if (wx->mac_type == em_mac_type_rgmii) {
+ mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45;
+ mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45;
+ }
+
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x",
+ (pdev->bus->number << 8) | pdev->devfn);
+ ret = devm_mdiobus_register(&pdev->dev, mii_bus);
+ if (ret)
+ return ret;
+
+ wx->phydev = phy_find_first(mii_bus);
+ if (!wx->phydev)
+ return -ENODEV;
+
+ phy_attached_info(wx->phydev);
+ ngbe_phy_fixup(wx);
+
+ wx->link = 0;
+ wx->speed = 0;
+ wx->duplex = 0;
+
+ return 0;
+}
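ngbe_mdio_init() is called from ngbe_probe() in the hunk further up; ngbe_phy_connect() is exported for the open path, whose caller is outside this excerpt, so the sketch below is an assumption about intended use rather than code from the patch:

/* assumed open-path usage; ngbe_open() itself is not in these hunks */
static int ngbe_open_sketch(struct wx *wx)
{
	int err;

	err = ngbe_phy_connect(wx);	/* attaches ngbe_handle_link_change */
	if (err)
		return err;

	phy_start(wx->phydev);		/* kick the PHY state machine */
	return 0;
}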
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
new file mode 100644
index 000000000000..0a6400dd89c4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+ */
+
+#ifndef _NGBE_MDIO_H_
+#define _NGBE_MDIO_H_
+
+int ngbe_phy_connect(struct wx *wx);
+int ngbe_mdio_init(struct wx *wx);
+#endif /* _NGBE_MDIO_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index 39f6c03f1a54..a2351349785e 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -49,7 +49,6 @@
#define NGBE_SPI_ILDR_STATUS 0x10120
#define NGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */
#define NGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */
-#define NGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i) BIT((_i) + 9) /* lan soft reset done */
/* Checksum and EEPROM pointers */
#define NGBE_CALSUM_COMMAND 0xE9
@@ -60,6 +59,25 @@
#define NGBE_EEPROM_VERSION_L 0x1D
#define NGBE_EEPROM_VERSION_H 0x1E
+/* mdio access */
+#define NGBE_MSCA 0x11200
+#define NGBE_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
+#define NGBE_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v)
+#define NGBE_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v)
+#define NGBE_MSCC 0x11204
+#define NGBE_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v)
+
+enum NGBE_MSCA_CMD_value {
+ NGBE_MSCA_CMD_RSV = 0,
+ NGBE_MSCA_CMD_WRITE,
+ NGBE_MSCA_CMD_POST_READ,
+ NGBE_MSCA_CMD_READ,
+};
+
+#define NGBE_MSCC_SADDR BIT(18)
+#define NGBE_MSCC_BUSY BIT(22)
+#define NGBE_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v)
+
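A worked example for the fields above (illustrative): a Clause-22 read of register 1 (BMSR) on PHY address 0 composes the NGBE_MSCA word as NGBE_MSCA_RA(1) | NGBE_MSCA_PA(0) | NGBE_MSCA_DA(0) == 0x00000001, and the companion NGBE_MSCC word as NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | NGBE_MSCC_BUSY | NGBE_MDIO_CLK(6) == 0x30000 | 0x400000 | 0x300000 == 0x00730000.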
/* Media-dependent registers. */
#define NGBE_MDIO_CLAUSE_SELECT 0x11220
@@ -72,6 +90,24 @@
#define NGBE_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */
#define NGBE_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */
+/* Extended Interrupt Enable Set */
+#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
+#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
+#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
+#define NGBE_PX_MISC_IEN_GPIO BIT(26)
+#define NGBE_PX_MISC_IEN_MASK ( \
+ NGBE_PX_MISC_IEN_DEV_RST | \
+ NGBE_PX_MISC_IEN_ETH_LK | \
+ NGBE_PX_MISC_IEN_INT_ERR | \
+ NGBE_PX_MISC_IEN_GPIO)
+
+#define NGBE_INTR_ALL 0x1FF
+#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+
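Illustrative arithmetic for the two defines above: with wx->num_q_vectors == 8, NGBE_INTR_MISC() == BIT(8) == 0x100 selects the miscellaneous vector's bit, and NGBE_INTR_ALL == 0x1FF covers the eight queue bits below it plus that misc bit.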
+#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
+#define NGBE_CFG_LAN_SPEED 0x14440
+#define NGBE_CFG_PORT_ST 0x14404
+
/* Wake up registers */
#define NGBE_PSR_WKUP_CTL 0x15B80
/* Wake Up Filter Control Bit */
@@ -90,50 +126,30 @@
#define NGBE_FW_CMD_ST_PASS 0x80658383
#define NGBE_FW_CMD_ST_FAIL 0x70657376
-enum ngbe_phy_type {
- ngbe_phy_unknown = 0,
- ngbe_phy_none,
- ngbe_phy_internal,
- ngbe_phy_m88e1512,
- ngbe_phy_m88e1512_sfi,
- ngbe_phy_m88e1512_unknown,
- ngbe_phy_yt8521s,
- ngbe_phy_yt8521s_sfi,
- ngbe_phy_internal_yt8521s_sfi,
- ngbe_phy_generic
-};
+#define NGBE_MAX_FDIR_INDICES 7
-enum ngbe_media_type {
- ngbe_media_type_unknown = 0,
- ngbe_media_type_fiber,
- ngbe_media_type_copper,
- ngbe_media_type_backplane,
-};
-
-enum ngbe_mac_type {
- ngbe_mac_type_unknown = 0,
- ngbe_mac_type_mdi,
- ngbe_mac_type_rgmii
-};
+#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
-struct ngbe_phy_info {
- enum ngbe_phy_type type;
- enum ngbe_media_type media_type;
+#define NGBE_ETH_LENGTH_OF_ADDRESS 6
+#define NGBE_MAX_MSIX_VECTORS 0x09
+#define NGBE_RAR_ENTRIES 32
+#define NGBE_RX_PB_SIZE 42
+#define NGBE_MC_TBL_SIZE 128
+#define NGBE_TDB_PB_SZ (20 * 1024) /* 20KB Packet Buffer */
+#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
- u32 addr;
- u32 id;
+/* TX/RX descriptor defines */
+#define NGBE_DEFAULT_TXD 512 /* default ring size */
+#define NGBE_DEFAULT_TX_WORK 256
+#define NGBE_MAX_TXD 8192
+#define NGBE_MIN_TXD 128
- bool reset_if_overtemp;
+#define NGBE_DEFAULT_RXD 512 /* default ring size */
+#define NGBE_DEFAULT_RX_WORK 256
+#define NGBE_MAX_RXD 8192
+#define NGBE_MIN_RXD 128
-};
-
-struct ngbe_hw {
- struct wx_hw wxhw;
- struct ngbe_phy_info phy;
- enum ngbe_mac_type mac_type;
+extern char ngbe_driver_name[];
- bool wol_enabled;
- bool ncsi_enabled;
- bool gpio_ctrl;
-};
#endif /* _NGBE_TYPE_H_ */
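The net effect of the ngbe_type.h hunks is that the driver-private ngbe_hw and ngbe_adapter wrappers disappear and every accessor takes the shared struct wx directly. Schematically, using two lines from the probe hunk earlier in this diff:

	/* old */ wr32(wxhw, NGBE_PSR_WKUP_CTL, adapter->wol);	/* wxhw == &adapter->hw.wxhw */
	/* new */ wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);		/* one flat private struct */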
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 78484c58b78b..6db14a2cb2d0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_TXGBE) += txgbe.o
txgbe-objs := txgbe_main.o \
- txgbe_hw.o
+ txgbe_hw.o \
+ txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
deleted file mode 100644
index 19e61377bd00..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
-
-#ifndef _TXGBE_H_
-#define _TXGBE_H_
-
-#define TXGBE_MAX_FDIR_INDICES 63
-
-#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
-#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
-
-#define TXGBE_SP_MAX_TX_QUEUES 128
-#define TXGBE_SP_MAX_RX_QUEUES 128
-#define TXGBE_SP_RAR_ENTRIES 128
-#define TXGBE_SP_MC_TBL_SIZE 128
-
-struct txgbe_mac_addr {
- u8 addr[ETH_ALEN];
- u16 state; /* bitmask */
- u64 pools;
-};
-
-#define TXGBE_MAC_STATE_DEFAULT 0x1
-#define TXGBE_MAC_STATE_MODIFIED 0x2
-#define TXGBE_MAC_STATE_IN_USE 0x4
-
-/* board specific private data structure */
-struct txgbe_adapter {
- u8 __iomem *io_addr; /* Mainly for iounmap use */
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- /* structs defined in txgbe_type.h */
- struct txgbe_hw hw;
- u16 msg_enable;
- struct txgbe_mac_addr *mac_table;
- char eeprom_id[32];
-};
-
-extern char txgbe_driver_name[];
-
-#endif /* _TXGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
new file mode 100644
index 000000000000..d914e9a05404
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include <linux/phylink.h>
+#include <linux/netdevice.h>
+
+#include "../libwx/wx_ethtool.h"
+#include "txgbe_ethtool.h"
+
+static const struct ethtool_ops txgbe_ethtool_ops = {
+ .get_drvinfo = wx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+void txgbe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &txgbe_ethtool_ops;
+}
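The ops table delegates get_drvinfo to libwx. The body of wx_get_drvinfo is not part of this excerpt, so the following is only a plausible sketch of what it reports, inferred from the wx fields the probe functions fill in (driver_name, eeprom_id):

/* assumed shape of the libwx helper; details are an inference, not the patch */
static void wx_get_drvinfo_sketch(struct net_device *netdev,
				  struct ethtool_drvinfo *info)
{
	struct wx *wx = netdev_priv(netdev);

	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
}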
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
new file mode 100644
index 000000000000..ace1b3571012
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_ETHTOOL_H_
+#define _TXGBE_ETHTOOL_H_
+
+void txgbe_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _TXGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 167f7ff73192..ebc46f3be056 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -12,70 +12,67 @@
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
-#include "txgbe.h"
/**
* txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Inits the thermal sensor thresholds according to the NVM map
* and saves off the threshold and location values into mac.thermal_sensor_data
**/
-static void txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw)
+static void txgbe_init_thermal_sensor_thresh(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
- struct wx_thermal_sensor_data *data = &wxhw->mac.sensor;
+ struct wx_thermal_sensor_data *data = &wx->mac.sensor;
memset(data, 0, sizeof(struct wx_thermal_sensor_data));
/* Only support thermal sensors attached to SP physical port 0 */
- if (wxhw->bus.func)
+ if (wx->bus.func)
return;
- wr32(wxhw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD);
+ wr32(wx, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD);
- wr32(wxhw, WX_TS_INT_EN,
+ wr32(wx, WX_TS_INT_EN,
WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN);
- wr32(wxhw, WX_TS_EN, WX_TS_EN_ENA);
+ wr32(wx, WX_TS_EN, WX_TS_EN_ENA);
data->alarm_thresh = 100;
- wr32(wxhw, WX_TS_ALARM_THRE, 677);
+ wr32(wx, WX_TS_ALARM_THRE, 677);
data->dalarm_thresh = 90;
- wr32(wxhw, WX_TS_DALARM_THRE, 614);
+ wr32(wx, WX_TS_DALARM_THRE, 614);
}
/**
* txgbe_read_pba_string - Reads part number string from EEPROM
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @pba_num: stores the part number string from the EEPROM
* @pba_num_size: part number string buffer length
*
* Reads the part number string from the EEPROM.
**/
-int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size)
{
u16 pba_ptr, offset, length, data;
- struct wx_hw *wxhw = &hw->wxhw;
int ret_val;
if (!pba_num) {
- wx_err(wxhw, "PBA string buffer was null\n");
+ wx_err(wx, "PBA string buffer was null\n");
return -EINVAL;
}
- ret_val = wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR,
+ ret_val = wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR,
&data);
if (ret_val != 0) {
- wx_err(wxhw, "NVM Read Error\n");
+ wx_err(wx, "NVM Read Error\n");
return ret_val;
}
- ret_val = wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR,
+ ret_val = wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR,
&pba_ptr);
if (ret_val != 0) {
- wx_err(wxhw, "NVM Read Error\n");
+ wx_err(wx, "NVM Read Error\n");
return ret_val;
}
@@ -84,11 +81,11 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
* and we can decode it into an ASCII string
*/
if (data != TXGBE_PBANUM_PTR_GUARD) {
- wx_err(wxhw, "NVM PBA number is not stored as string\n");
+ wx_err(wx, "NVM PBA number is not stored as string\n");
/* we will need 11 characters to store the PBA */
if (pba_num_size < 11) {
- wx_err(wxhw, "PBA string buffer too small\n");
+ wx_err(wx, "PBA string buffer too small\n");
return -ENOMEM;
}
@@ -118,20 +115,20 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
return 0;
}
- ret_val = wx_read_ee_hostif(wxhw, pba_ptr, &length);
+ ret_val = wx_read_ee_hostif(wx, pba_ptr, &length);
if (ret_val != 0) {
- wx_err(wxhw, "NVM Read Error\n");
+ wx_err(wx, "NVM Read Error\n");
return ret_val;
}
if (length == 0xFFFF || length == 0) {
- wx_err(wxhw, "NVM PBA number section invalid length\n");
+ wx_err(wx, "NVM PBA number section invalid length\n");
return -EINVAL;
}
/* check if pba_num buffer is big enough */
if (pba_num_size < (((u32)length * 2) - 1)) {
- wx_err(wxhw, "PBA string buffer too small\n");
+ wx_err(wx, "PBA string buffer too small\n");
return -ENOMEM;
}
@@ -140,9 +137,9 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
length--;
for (offset = 0; offset < length; offset++) {
- ret_val = wx_read_ee_hostif(wxhw, pba_ptr + offset, &data);
+ ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data);
if (ret_val != 0) {
- wx_err(wxhw, "NVM Read Error\n");
+ wx_err(wx, "NVM Read Error\n");
return ret_val;
}
pba_num[offset * 2] = (u8)(data >> 8);
@@ -155,14 +152,13 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
/**
* txgbe_calc_eeprom_checksum - Calculates and returns the checksum
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @checksum: pointer to checksum
*
* Returns a negative error code on error
**/
-static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
+static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
{
- struct wx_hw *wxhw = &hw->wxhw;
u16 *eeprom_ptrs = NULL;
u32 buffer_size = 0;
u16 *buffer = NULL;
@@ -170,7 +166,7 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
int status;
u16 i;
- wx_init_eeprom_params(wxhw);
+ wx_init_eeprom_params(wx);
if (!buffer) {
eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16),
@@ -178,11 +174,11 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
if (!eeprom_ptrs)
return -ENOMEM;
/* Read pointer area */
- status = wx_read_ee_hostif_buffer(wxhw, 0,
+ status = wx_read_ee_hostif_buffer(wx, 0,
TXGBE_EEPROM_LAST_WORD,
eeprom_ptrs);
if (status != 0) {
- wx_err(wxhw, "Failed to read EEPROM image\n");
+ wx_err(wx, "Failed to read EEPROM image\n");
kvfree(eeprom_ptrs);
return status;
}
@@ -194,7 +190,7 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
}
for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
- if (i != wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
+ if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
if (eeprom_ptrs)
@@ -210,15 +206,14 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
/**
* txgbe_validate_eeprom_checksum - Validate EEPROM checksum
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @checksum_val: calculated checksum
*
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
+int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val)
{
- struct wx_hw *wxhw = &hw->wxhw;
u16 read_checksum = 0;
u16 checksum;
int status;
@@ -227,18 +222,18 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
- status = wx_read_ee_hostif(wxhw, 0, &checksum);
+ status = wx_read_ee_hostif(wx, 0, &checksum);
if (status) {
- wx_err(wxhw, "EEPROM read failed\n");
+ wx_err(wx, "EEPROM read failed\n");
return status;
}
checksum = 0;
- status = txgbe_calc_eeprom_checksum(hw, &checksum);
+ status = txgbe_calc_eeprom_checksum(wx, &checksum);
if (status != 0)
return status;
- status = wx_read_ee_hostif(wxhw, wxhw->eeprom.sw_region_offset +
+ status = wx_read_ee_hostif(wx, wx->eeprom.sw_region_offset +
TXGBE_EEPROM_CHECKSUM, &read_checksum);
if (status != 0)
return status;
@@ -248,7 +243,7 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
*/
if (read_checksum != checksum) {
status = -EIO;
- wx_err(wxhw, "Invalid EEPROM checksum\n");
+ wx_err(wx, "Invalid EEPROM checksum\n");
}
/* If the user cares, return the calculated checksum */
@@ -258,55 +253,52 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
return status;
}
-static void txgbe_reset_misc(struct txgbe_hw *hw)
+static void txgbe_reset_misc(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
-
- wx_reset_misc(wxhw);
- txgbe_init_thermal_sensor_thresh(hw);
+ wx_reset_misc(wx);
+ txgbe_init_thermal_sensor_thresh(wx);
}
/**
* txgbe_reset_hw - Perform hardware reset
- * @hw: pointer to hardware structure
+ * @wx: pointer to wx structure
*
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-int txgbe_reset_hw(struct txgbe_hw *hw)
+int txgbe_reset_hw(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
- status = wx_stop_adapter(wxhw);
+ status = wx_stop_adapter(wx);
if (status != 0)
return status;
- if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
- ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP)))
- wx_reset_hostif(wxhw);
+ if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP)))
+ wx_reset_hostif(wx);
usleep_range(10, 100);
- status = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wxhw->bus.func));
+ status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func));
if (status != 0)
return status;
- txgbe_reset_misc(hw);
+ txgbe_reset_misc(wx);
/* Store the permanent mac address */
- wx_get_mac_addr(wxhw, wxhw->mac.perm_addr);
+ wx_get_mac_addr(wx, wx->mac.perm_addr);
/* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
- wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
- wx_init_rx_addrs(wxhw);
+ wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx_init_rx_addrs(wx);
- pci_set_master(wxhw->pdev);
+ pci_set_master(wx->pdev);
return 0;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index 6a751a69177b..e82f65dff8a6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -4,8 +4,8 @@
#ifndef _TXGBE_HW_H_
#define _TXGBE_HW_H_
-int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
-int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val);
-int txgbe_reset_hw(struct txgbe_hw *hw);
+int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size);
+int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val);
+int txgbe_reset_hw(struct wx *wx);
#endif /* _TXGBE_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 36780e7f05b7..6c0a98230557 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -11,10 +11,11 @@
#include <net/ip.h>
#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
-#include "txgbe.h"
+#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -35,26 +36,26 @@ static const struct pci_device_id txgbe_pci_tbl[] = {
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-static void txgbe_check_minimum_link(struct txgbe_adapter *adapter)
+static void txgbe_check_minimum_link(struct wx *wx)
{
struct pci_dev *pdev;
- pdev = adapter->pdev;
+ pdev = wx->pdev;
pcie_print_link_status(pdev);
}
/**
* txgbe_enumerate_functions - Get the number of ports this device has
- * @adapter: adapter structure
+ * @wx: wx structure
*
* This function enumerates the physical functions co-located on a single slot,
* in order to determine how many ports a device has. This is most useful in
* determining the required GT/s of PCIe bandwidth necessary for optimal
* performance.
**/
-static int txgbe_enumerate_functions(struct txgbe_adapter *adapter)
+static int txgbe_enumerate_functions(struct wx *wx)
{
- struct pci_dev *entry, *pdev = adapter->pdev;
+ struct pci_dev *entry, *pdev = wx->pdev;
int physfns = 0;
list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
@@ -73,196 +74,299 @@ static int txgbe_enumerate_functions(struct txgbe_adapter *adapter)
return physfns;
}
-static void txgbe_sync_mac_table(struct txgbe_adapter *adapter)
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @wx: pointer to private structure
+ * @queues: enable irqs for queues
+ **/
+static void txgbe_irq_enable(struct wx *wx, bool queues)
{
- struct txgbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
- int i;
-
- for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) {
- if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) {
- wx_set_rar(wxhw, i,
- adapter->mac_table[i].addr,
- adapter->mac_table[i].pools,
- WX_PSR_MAC_SWC_AD_H_AV);
- } else {
- wx_clear_rar(wxhw, i);
- }
- adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED);
- }
- }
+ /* unmask interrupt */
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ if (queues)
+ wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
-/* this function destroys the first RAR entry */
-static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter,
- u8 *addr)
+/**
+ * txgbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
{
- struct wx_hw *wxhw = &adapter->hw.wxhw;
-
- memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
- adapter->mac_table[0].pools = 1ULL;
- adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT |
- TXGBE_MAC_STATE_IN_USE);
- wx_set_rar(wxhw, 0, adapter->mac_table[0].addr,
- adapter->mac_table[0].pools,
- WX_PSR_MAC_SWC_AD_H_AV);
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * re-enable the interrupt that we masked before the ICR read.
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ /* re-enable link (maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
}
-static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
+static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data)
{
- struct wx_hw *wxhw = &adapter->hw.wxhw;
- u32 i;
+ struct wx *wx = data;
- for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
- adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].pools = 0;
- }
- txgbe_sync_mac_table(adapter);
+ /* re-enable the original interrupt state */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
}
-static int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool)
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * Allocate MSI-X vectors and request interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct wx *wx)
{
- struct wx_hw *wxhw = &adapter->hw.wxhw;
- u32 i;
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* search table for addr, if found, set to 0 and sync */
- for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
- if (ether_addr_equal(addr, adapter->mac_table[i].addr)) {
- if (adapter->mac_table[i].pools & (1ULL << pool)) {
- adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
- adapter->mac_table[i].pools &= ~(1ULL << pool);
- txgbe_sync_mac_table(adapter);
- }
- return 0;
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
}
+ }
- if (adapter->mac_table[i].pools != (1 << pool))
- continue;
- if (!ether_addr_equal(addr, adapter->mac_table[i].addr))
- continue;
+ err = request_irq(wx->msix_entries[vector].vector,
+ txgbe_msix_other, 0, netdev->name, wx);
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
- adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].pools = 0;
- txgbe_sync_mac_table(adapter);
- return 0;
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_entries[vector].vector,
+ wx->q_vector[vector]);
}
- return -ENOMEM;
+ wx_reset_interrupt_capability(wx);
+ return err;
}
-static void txgbe_up_complete(struct txgbe_adapter *adapter)
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int txgbe_request_irq(struct wx *wx)
{
- struct txgbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
- wx_control_hw(wxhw, true);
+ if (pdev->msix_enabled)
+ err = txgbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
}
-static void txgbe_reset(struct txgbe_adapter *adapter)
+static void txgbe_up_complete(struct wx *wx)
{
- struct net_device *netdev = adapter->netdev;
- struct txgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ wx_control_hw(wx, true);
+ wx_configure_vectors(wx);
+
+ /* make sure to complete pre-operations */
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+
+ /* clear any pending interrupts, may auto mask */
+ rd32(wx, WX_PX_IC);
+ rd32(wx, WX_PX_MISC_IC);
+ txgbe_irq_enable(wx, true);
+
+ /* Configure MAC Rx and Tx when link is up */
+ reg = rd32(wx, WX_MAC_RX_CFG);
+ wr32(wx, WX_MAC_RX_CFG, reg);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+ reg = rd32(wx, WX_MAC_TX_CFG);
+ wr32(wx, WX_MAC_TX_CFG, (reg & ~WX_MAC_TX_CFG_SPEED_MASK) | WX_MAC_TX_CFG_SPEED_10G);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+ netif_carrier_on(wx->netdev);
+}
+
+static void txgbe_reset(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
u8 old_addr[ETH_ALEN];
int err;
- err = txgbe_reset_hw(hw);
+ err = txgbe_reset_hw(wx);
if (err != 0)
- dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+ wx_err(wx, "Hardware Error: %d\n", err);
/* do not flush user set addresses */
- memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
- txgbe_flush_sw_mac_table(adapter);
- txgbe_mac_set_default_filter(adapter, old_addr);
+ memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
+ wx_flush_sw_mac_table(wx);
+ wx_mac_set_default_filter(wx, old_addr);
}
-static void txgbe_disable_device(struct txgbe_adapter *adapter)
+static void txgbe_disable_device(struct wx *wx)
{
- struct net_device *netdev = adapter->netdev;
- struct wx_hw *wxhw = &adapter->hw.wxhw;
+ struct net_device *netdev = wx->netdev;
+ u32 i;
- wx_disable_pcie_master(wxhw);
+ wx_disable_pcie_master(wx);
/* disable receives */
- wx_disable_rx(wxhw);
+ wx_disable_rx(wx);
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ wx_disable_rx_queue(wx, wx->rx_ring[i]);
+ netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- if (wxhw->bus.func < 2)
- wr32m(wxhw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wxhw->bus.func), 0);
+ wx_irq_disable(wx);
+ wx_napi_disable_all(wx);
+
+ if (wx->bus.func < 2)
+ wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
else
- dev_err(&adapter->pdev->dev,
- "%s: invalid bus lan id %d\n",
- __func__, wxhw->bus.func);
+ wx_err(wx, "%s: invalid bus lan id %d\n",
+ __func__, wx->bus.func);
- if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
- ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
+ if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable MAC transmitter */
- wr32m(wxhw, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+ wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+ }
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ u8 reg_idx = wx->tx_ring[i]->reg_idx;
+
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
}
/* Disable the Tx DMA engine */
- wr32m(wxhw, WX_TDM_CTL, WX_TDM_CTL_TE, 0);
+ wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0);
}
-static void txgbe_down(struct txgbe_adapter *adapter)
+static void txgbe_down(struct wx *wx)
{
- txgbe_disable_device(adapter);
- txgbe_reset(adapter);
+ txgbe_disable_device(wx);
+ txgbe_reset(wx);
+
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
}
/**
- * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter)
- * @adapter: board private structure to initialize
+ * txgbe_sw_init - Initialize general software structures (struct wx)
+ * @wx: board private structure to initialize
**/
-static int txgbe_sw_init(struct txgbe_adapter *adapter)
+static int txgbe_sw_init(struct wx *wx)
{
- struct pci_dev *pdev = adapter->pdev;
- struct txgbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ u16 msix_count = 0;
int err;
- wxhw->hw_addr = adapter->io_addr;
- wxhw->pdev = pdev;
+ wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
+ wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
+ wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
+ wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
+ wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
/* PCI config space info */
- err = wx_sw_init(wxhw);
+ err = wx_sw_init(wx);
if (err < 0) {
- netif_err(adapter, probe, adapter->netdev,
- "read of internal subsystem device id failed\n");
+ wx_err(wx, "read of internal subsystem device id failed\n");
return err;
}
- switch (wxhw->device_id) {
+ switch (wx->device_id) {
case TXGBE_DEV_ID_SP1000:
case TXGBE_DEV_ID_WX1820:
- wxhw->mac.type = wx_mac_sp;
+ wx->mac.type = wx_mac_sp;
break;
default:
- wxhw->mac.type = wx_mac_unknown;
+ wx->mac.type = wx_mac_unknown;
break;
}
- wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
- wxhw->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
- wxhw->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
- wxhw->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
-
- adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries,
- sizeof(struct txgbe_mac_addr),
- GFP_KERNEL);
- if (!adapter->mac_table) {
- netif_err(adapter, probe, adapter->netdev,
- "mac_table allocation failed\n");
- return -ENOMEM;
- }
+ /* Set common capability flags and settings */
+ wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS;
+ err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS);
+ if (err)
+ wx_err(wx, "Do not support MSI-X\n");
+ wx->mac.max_msix_vectors = msix_count;
+
+ /* enable itr by default in dynamic mode */
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+
+ /* set default ring sizes */
+ wx->tx_ring_count = TXGBE_DEFAULT_TXD;
+ wx->rx_ring_count = TXGBE_DEFAULT_RXD;
+
+ /* set default work limits */
+ wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
+ wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
return 0;
}
@@ -278,23 +382,53 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
**/
static int txgbe_open(struct net_device *netdev)
{
- struct txgbe_adapter *adapter = netdev_priv(netdev);
+ struct wx *wx = netdev_priv(netdev);
+ int err;
+
+ err = wx_setup_resources(wx);
+ if (err)
+ goto err_reset;
+
+ wx_configure(wx);
- txgbe_up_complete(adapter);
+ err = txgbe_request_irq(wx);
+ if (err)
+ goto err_free_isb;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_free_irq;
+
+ txgbe_up_complete(wx);
return 0;
+
+err_free_irq:
+ wx_free_irq(wx);
+err_free_isb:
+ wx_free_isb_resources(wx);
+err_reset:
+ txgbe_reset(wx);
+
+ return err;
}
/**
* txgbe_close_suspend - actions necessary to both suspend and close flows
- * @adapter: the private adapter struct
+ * @wx: the private wx struct
*
* This function should contain the necessary work common to both suspending
* and closing of the device.
*/
-static void txgbe_close_suspend(struct txgbe_adapter *adapter)
+static void txgbe_close_suspend(struct wx *wx)
{
- txgbe_disable_device(adapter);
+ txgbe_disable_device(wx);
+ wx_free_resources(wx);
}
/**
@@ -310,29 +444,30 @@ static void txgbe_close_suspend(struct txgbe_adapter *adapter)
**/
static int txgbe_close(struct net_device *netdev)
{
- struct txgbe_adapter *adapter = netdev_priv(netdev);
+ struct wx *wx = netdev_priv(netdev);
- txgbe_down(adapter);
- wx_control_hw(&adapter->hw.wxhw, false);
+ txgbe_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
+ wx_control_hw(wx, false);
return 0;
}
static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
- struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
- struct txgbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ struct wx *wx = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+ netdev = wx->netdev;
netif_device_detach(netdev);
rtnl_lock();
if (netif_running(netdev))
- txgbe_close_suspend(adapter);
+ txgbe_close_suspend(wx);
rtnl_unlock();
- wx_control_hw(wxhw, false);
+ wx_control_hw(wx, false);
pci_disable_device(pdev);
}
@@ -349,45 +484,14 @@ static void txgbe_shutdown(struct pci_dev *pdev)
}
}
-static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- return NETDEV_TX_OK;
-}
-
-/**
- * txgbe_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int txgbe_set_mac(struct net_device *netdev, void *p)
-{
- struct txgbe_adapter *adapter = netdev_priv(netdev);
- struct wx_hw *wxhw = &adapter->hw.wxhw;
- struct sockaddr *addr = p;
- int retval;
-
- retval = eth_prepare_mac_addr_change(netdev, addr);
- if (retval)
- return retval;
-
- txgbe_del_mac_filter(adapter, wxhw->mac.addr, 0);
- eth_hw_addr_set(netdev, addr->sa_data);
- memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len);
-
- txgbe_mac_set_default_filter(adapter, wxhw->mac.addr);
-
- return 0;
-}
-
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
- .ndo_start_xmit = txgbe_xmit_frame,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = txgbe_set_mac,
+ .ndo_set_mac_address = wx_set_mac,
+ .ndo_get_stats64 = wx_get_stats64,
};
/**
@@ -398,17 +502,15 @@ static const struct net_device_ops txgbe_netdev_ops = {
* Returns 0 on success, negative on failure
*
* txgbe_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
+ * The OS initialization, configuring of the wx private structure,
* and a hardware reset occur.
**/
static int txgbe_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
- struct txgbe_adapter *adapter = NULL;
- struct txgbe_hw *hw = NULL;
- struct wx_hw *wxhw = NULL;
struct net_device *netdev;
int err, expected_gts;
+ struct wx *wx = NULL;
u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
@@ -440,7 +542,7 @@ static int txgbe_probe(struct pci_dev *pdev,
pci_set_master(pdev);
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
- sizeof(struct txgbe_adapter),
+ sizeof(struct wx),
TXGBE_MAX_TX_QUEUES,
TXGBE_MAX_RX_QUEUES);
if (!netdev) {
@@ -450,81 +552,96 @@ static int txgbe_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- hw = &adapter->hw;
- wxhw = &hw->wxhw;
- adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
-
- adapter->io_addr = devm_ioremap(&pdev->dev,
- pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!adapter->io_addr) {
+ wx = netdev_priv(netdev);
+ wx->netdev = netdev;
+ wx->pdev = pdev;
+
+ wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ wx->hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!wx->hw_addr) {
err = -EIO;
goto err_pci_release_regions;
}
+ wx->driver_name = txgbe_driver_name;
+ txgbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &txgbe_netdev_ops;
/* setup the private structure */
- err = txgbe_sw_init(adapter);
+ err = txgbe_sw_init(wx);
if (err)
goto err_free_mac_table;
/* check if flash load is done after hw power up */
- err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PERST);
+ err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
if (err)
goto err_free_mac_table;
- err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PWRRST);
+ err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST);
if (err)
goto err_free_mac_table;
- err = wx_mng_present(wxhw);
+ err = wx_mng_present(wx);
if (err) {
dev_err(&pdev->dev, "Management capability is not present\n");
goto err_free_mac_table;
}
- err = txgbe_reset_hw(hw);
+ err = txgbe_reset_hw(wx);
if (err) {
dev_err(&pdev->dev, "HW Init failed: %d\n", err);
goto err_free_mac_table;
}
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = NETIF_F_SG;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features | NETIF_F_RXALL;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
/* make sure the EEPROM is good */
- err = txgbe_validate_eeprom_checksum(hw, NULL);
+ err = txgbe_validate_eeprom_checksum(wx, NULL);
if (err != 0) {
dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
- wr32(wxhw, WX_MIS_RST, WX_MIS_RST_SW_RST);
+ wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST);
err = -EIO;
goto err_free_mac_table;
}
- eth_hw_addr_set(netdev, wxhw->mac.perm_addr);
- txgbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr);
+ eth_hw_addr_set(netdev, wx->mac.perm_addr);
+ wx_mac_set_default_filter(wx, wx->mac.perm_addr);
+
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_mac_table;
/* Save off EEPROM version number and Option Rom version which
* together make a unique identifier for the eeprom
*/
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
&eeprom_verh);
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
&eeprom_verl);
etrack_id = (eeprom_verh << 16) | eeprom_verl;
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG,
&offset);
/* Make sure offset to SCSI block is valid */
if (!(offset == 0x0) && !(offset == 0xffff)) {
- wx_read_ee_hostif(wxhw, offset + 0x84, &eeprom_cfg_blkh);
- wx_read_ee_hostif(wxhw, offset + 0x83, &eeprom_cfg_blkl);
+ wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh);
+ wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl);
/* Only display Option ROM version if it exists */
if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
@@ -532,15 +649,15 @@ static int txgbe_probe(struct pci_dev *pdev,
build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
patch = eeprom_cfg_blkh & 0x00ff;
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
"0x%08x, %d.%d.%d", etrack_id, major, build,
patch);
} else {
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
"0x%08x", etrack_id);
}
} else {
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
"0x%08x", etrack_id);
}
@@ -548,7 +665,9 @@ static int txgbe_probe(struct pci_dev *pdev,
if (err)
goto err_release_hw;
- pci_set_drvdata(pdev, adapter);
+ pci_set_drvdata(pdev, wx);
+
+ netif_tx_stop_all_queues(netdev);
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
@@ -556,27 +675,28 @@ static int txgbe_probe(struct pci_dev *pdev,
* parts to ensure that no warning is displayed, as this could confuse
* users otherwise.
*/
- expected_gts = txgbe_enumerate_functions(adapter) * 10;
+ expected_gts = txgbe_enumerate_functions(wx) * 10;
/* don't check link if we failed to enumerate functions */
if (expected_gts > 0)
- txgbe_check_minimum_link(adapter);
+ txgbe_check_minimum_link(wx);
else
dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n");
/* First try to read PBA as a string */
- err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH);
+ err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH);
if (err)
strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH);
- netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr);
+ netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr);
return 0;
err_release_hw:
- wx_control_hw(wxhw, false);
+ wx_clear_interrupt_scheme(wx);
+ wx_control_hw(wx, false);
err_free_mac_table:
- kfree(adapter->mac_table);
+ kfree(wx->mac_table);
err_pci_release_regions:
pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
@@ -597,16 +717,17 @@ err_pci_disable_dev:
**/
static void txgbe_remove(struct pci_dev *pdev)
{
- struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct wx *wx = pci_get_drvdata(pdev);
struct net_device *netdev;
- netdev = adapter->netdev;
+ netdev = wx->netdev;
unregister_netdev(netdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
- kfree(adapter->mac_table);
+ kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 740a1c447e20..563ea51deca6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -67,8 +67,37 @@
#define TXGBE_PBANUM1_PTR 0x06
#define TXGBE_PBANUM_PTR_GUARD 0xFAFA
-struct txgbe_hw {
- struct wx_hw wxhw;
-};
+#define TXGBE_MAX_MSIX_VECTORS 64
+#define TXGBE_MAX_FDIR_INDICES 63
+
+#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+
+#define TXGBE_SP_MAX_TX_QUEUES 128
+#define TXGBE_SP_MAX_RX_QUEUES 128
+#define TXGBE_SP_RAR_ENTRIES 128
+#define TXGBE_SP_MC_TBL_SIZE 128
+#define TXGBE_SP_RX_PB_SIZE 512
+#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
+
+/* TX/RX descriptor defines */
+#define TXGBE_DEFAULT_TXD 512
+#define TXGBE_DEFAULT_TX_WORK 256
+
+#if (PAGE_SIZE < 8192)
+#define TXGBE_DEFAULT_RXD 512
+#define TXGBE_DEFAULT_RX_WORK 256
+#else
+#define TXGBE_DEFAULT_RXD 256
+#define TXGBE_DEFAULT_RX_WORK 128
+#endif
+
+#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
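Illustrative arithmetic: the queue vectors occupy the low bits, so with wx->num_q_vectors == 4, TXGBE_INTR_MISC() == BIT(4) == 0x10 and TXGBE_INTR_QALL() == 0x10 - 1 == 0x0f, i.e. a mask of every queue bit below the misc bit.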
+
+#define TXGBE_MAX_EITR GENMASK(11, 3)
+
+extern char txgbe_driver_name[];
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index bd3b0c2655a2..83ff882f5d97 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -623,16 +623,10 @@ static int receive(struct net_device *dev, int cnt)
/* --------------------------------------------------------------------- */
-#if defined(__i386__) && !defined(CONFIG_UML)
-#include <asm/msr.h>
#define GETTICK(x) \
({ \
- if (boot_cpu_has(X86_FEATURE_TSC)) \
- x = (unsigned int)rdtsc(); \
+ x = (unsigned int)get_cycles(); \
})
-#else /* __i386__ && !CONFIG_UML */
-#define GETTICK(x)
-#endif /* __i386__ && !CONFIG_UML */
static void epp_bh(struct work_struct *work)
{
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 79f4e13620a4..da737d959e81 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -851,6 +851,7 @@ static void netvsc_send_completion(struct net_device *ndev,
u32 msglen = hv_pkt_datalen(desc);
struct nvsp_message *pkt_rqst;
u64 cmd_rqst;
+ u32 status;
/* First check if this is a VMBUS completion without data payload */
if (!msglen) {
@@ -922,6 +923,23 @@ static void netvsc_send_completion(struct net_device *ndev,
break;
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
+ msglen);
+ return;
+ }
+
+ /* If status indicates an error, output a message so we know
+ * there's a problem. But process the completion anyway so the
+ * resources are released.
+ */
+ status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
+ if (status != NVSP_STAT_SUCCESS && net_ratelimit())
+ netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
+ status);
+
netvsc_send_tx_complete(ndev, net_device, incoming_channel,
desc, budget);
break;
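The added check follows the usual rule for host-supplied VMBus messages: validate the advertised length before dereferencing the typed payload. Reduced to a hypothetical predicate (the hunk open-codes this same comparison; the helper name is invented here):

static bool nvsp_rndis_pkt_complete_len_ok(u32 msglen)
{
	return msglen >= sizeof(struct nvsp_message_header) +
			 sizeof(struct nvsp_1_message_send_rndis_packet_complete);
}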
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 42162167b5b4..0103ff914024 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2559,6 +2559,9 @@ static int netvsc_probe(struct hv_device *dev,
netdev_lockdep_set_classes(net);
+ net->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 15f283b26721..62b984f84d9f 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -17,8 +17,8 @@
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/delay.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
-#include <linux/spi/at86rf230.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
@@ -82,7 +82,7 @@ struct at86rf230_local {
struct ieee802154_hw *hw;
struct at86rf2xx_chip_data *data;
struct regmap *regmap;
- int slp_tr;
+ struct gpio_desc *slp_tr;
bool sleep;
struct completion state_complete;
@@ -107,8 +107,8 @@ at86rf230_async_state_change(struct at86rf230_local *lp,
static inline void
at86rf230_sleep(struct at86rf230_local *lp)
{
- if (gpio_is_valid(lp->slp_tr)) {
- gpio_set_value(lp->slp_tr, 1);
+ if (lp->slp_tr) {
+ gpiod_set_value(lp->slp_tr, 1);
usleep_range(lp->data->t_off_to_sleep,
lp->data->t_off_to_sleep + 10);
lp->sleep = true;
@@ -118,8 +118,8 @@ at86rf230_sleep(struct at86rf230_local *lp)
static inline void
at86rf230_awake(struct at86rf230_local *lp)
{
- if (gpio_is_valid(lp->slp_tr)) {
- gpio_set_value(lp->slp_tr, 0);
+ if (lp->slp_tr) {
+ gpiod_set_value(lp->slp_tr, 0);
usleep_range(lp->data->t_sleep_to_off,
lp->data->t_sleep_to_off + 100);
lp->sleep = false;
@@ -204,9 +204,9 @@ at86rf230_write_subreg(struct at86rf230_local *lp,
static inline void
at86rf230_slp_tr_rising_edge(struct at86rf230_local *lp)
{
- gpio_set_value(lp->slp_tr, 1);
+ gpiod_set_value(lp->slp_tr, 1);
udelay(1);
- gpio_set_value(lp->slp_tr, 0);
+ gpiod_set_value(lp->slp_tr, 0);
}
static bool
@@ -819,7 +819,7 @@ at86rf230_write_frame_complete(void *context)
ctx->trx.len = 2;
- if (gpio_is_valid(lp->slp_tr))
+ if (lp->slp_tr)
at86rf230_slp_tr_rising_edge(lp);
else
at86rf230_async_write_reg(lp, RG_TRX_STATE, STATE_BUSY_TX, ctx,
@@ -1416,32 +1416,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
}
static int
-at86rf230_get_pdata(struct spi_device *spi, int *rstn, int *slp_tr,
- u8 *xtal_trim)
-{
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) {
- if (!pdata)
- return -ENOENT;
-
- *rstn = pdata->rstn;
- *slp_tr = pdata->slp_tr;
- *xtal_trim = pdata->xtal_trim;
- return 0;
- }
-
- *rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
- *slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
- ret = of_property_read_u8(spi->dev.of_node, "xtal-trim", xtal_trim);
- if (ret < 0 && ret != -EINVAL)
- return ret;
-
- return 0;
-}
-
-static int
at86rf230_detect_device(struct at86rf230_local *lp)
{
unsigned int part, version, val;
@@ -1546,41 +1520,47 @@ static int at86rf230_probe(struct spi_device *spi)
{
struct ieee802154_hw *hw;
struct at86rf230_local *lp;
+ struct gpio_desc *slp_tr;
+ struct gpio_desc *rstn;
unsigned int status;
- int rc, irq_type, rstn, slp_tr;
- u8 xtal_trim = 0;
+ int rc, irq_type;
+ u8 xtal_trim;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ specified\n");
return -EINVAL;
}
- rc = at86rf230_get_pdata(spi, &rstn, &slp_tr, &xtal_trim);
+ rc = device_property_read_u8(&spi->dev, "xtal-trim", &xtal_trim);
if (rc < 0) {
- dev_err(&spi->dev, "failed to parse platform_data: %d\n", rc);
- return rc;
- }
-
- if (gpio_is_valid(rstn)) {
- rc = devm_gpio_request_one(&spi->dev, rstn,
- GPIOF_OUT_INIT_HIGH, "rstn");
- if (rc)
+ if (rc != -EINVAL) {
+ dev_err(&spi->dev,
+ "failed to parse xtal-trim: %d\n", rc);
return rc;
+ }
+ xtal_trim = 0;
}
- if (gpio_is_valid(slp_tr)) {
- rc = devm_gpio_request_one(&spi->dev, slp_tr,
- GPIOF_OUT_INIT_LOW, "slp_tr");
- if (rc)
- return rc;
- }
+ rstn = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
+ rc = PTR_ERR_OR_ZERO(rstn);
+ if (rc)
+ return rc;
+
+ gpiod_set_consumer_name(rstn, "rstn");
+
+ slp_tr = devm_gpiod_get_optional(&spi->dev, "sleep", GPIOD_OUT_LOW);
+ rc = PTR_ERR_OR_ZERO(slp_tr);
+ if (rc)
+ return rc;
+
+ gpiod_set_consumer_name(slp_tr, "slp_tr");
/* Reset */
- if (gpio_is_valid(rstn)) {
+ if (rstn) {
udelay(1);
- gpio_set_value_cansleep(rstn, 0);
+ gpiod_set_value_cansleep(rstn, 1);
udelay(1);
- gpio_set_value_cansleep(rstn, 1);
+ gpiod_set_value_cansleep(rstn, 0);
usleep_range(120, 240);
}
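
The conversion above replaces integer GPIO numbers with gpio_desc handles: devm_gpiod_get_optional() returns NULL when the line is absent (so the gpio_is_valid() checks disappear), and gpiod_set_value() takes the logical level, which is why the reset writes flip from 0/1 to 1/0 — the active-low polarity now comes from the firmware description rather than the driver. A minimal sketch of the optional-line pattern, with a hypothetical consumer:

#include <linux/gpio/consumer.h>

static int example_get_reset(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *rstn;

        /* NULL (not an error) when no "reset-gpios" line is described */
        rstn = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(rstn))
                return PTR_ERR(rstn);

        *out = rstn;
        return 0;
}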
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index edc769daad07..a94d8dd71aad 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -7,14 +7,13 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/spi/cc2520.h>
+#include <linux/property.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
-#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>
@@ -206,7 +205,7 @@ struct cc2520_private {
struct mutex buffer_mutex; /* SPI buffer mutex */
bool is_tx; /* Flag for sync b/w Tx and Rx */
bool amplified; /* Flag for CC2591 */
- int fifo_pin; /* FIFO GPIO pin number */
+ struct gpio_desc *fifo_pin; /* FIFO GPIO descriptor */
struct work_struct fifop_irqwork;/* Workqueue for FIFOP */
spinlock_t lock; /* Lock for is_tx*/
struct completion tx_complete; /* Work completion for Tx */
@@ -875,7 +874,7 @@ static void cc2520_fifop_irqwork(struct work_struct *work)
dev_dbg(&priv->spi->dev, "fifop interrupt received\n");
- if (gpio_get_value(priv->fifo_pin))
+ if (gpiod_get_value(priv->fifo_pin))
cc2520_rx(priv);
else
dev_dbg(&priv->spi->dev, "rxfifo overflow\n");
@@ -912,49 +911,11 @@ static irqreturn_t cc2520_sfd_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int cc2520_get_platform_data(struct spi_device *spi,
- struct cc2520_platform_data *pdata)
-{
- struct device_node *np = spi->dev.of_node;
- struct cc2520_private *priv = spi_get_drvdata(spi);
-
- if (!np) {
- struct cc2520_platform_data *spi_pdata = spi->dev.platform_data;
-
- if (!spi_pdata)
- return -ENOENT;
- *pdata = *spi_pdata;
- priv->fifo_pin = pdata->fifo;
- return 0;
- }
-
- pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0);
- priv->fifo_pin = pdata->fifo;
-
- pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0);
-
- pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0);
- pdata->cca = of_get_named_gpio(np, "cca-gpio", 0);
- pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
- pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
-
- /* CC2591 front end for CC2520 */
- if (of_property_read_bool(np, "amplified"))
- priv->amplified = true;
-
- return 0;
-}
-
static int cc2520_hw_init(struct cc2520_private *priv)
{
u8 status = 0, state = 0xff;
int ret;
int timeout = 100;
- struct cc2520_platform_data pdata;
-
- ret = cc2520_get_platform_data(priv->spi, &pdata);
- if (ret)
- goto err_ret;
ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state);
if (ret)
@@ -1071,7 +1032,11 @@ err_ret:
static int cc2520_probe(struct spi_device *spi)
{
struct cc2520_private *priv;
- struct cc2520_platform_data pdata;
+ struct gpio_desc *fifop;
+ struct gpio_desc *cca;
+ struct gpio_desc *sfd;
+ struct gpio_desc *reset;
+ struct gpio_desc *vreg;
int ret;
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
@@ -1080,11 +1045,11 @@ static int cc2520_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
- ret = cc2520_get_platform_data(spi, &pdata);
- if (ret < 0) {
- dev_err(&spi->dev, "no platform data\n");
- return -EINVAL;
- }
+ /* CC2591 front end for CC2520 */
+ /* Assumption that CC2591 is not connected */
+ priv->amplified = false;
+ if (device_property_read_bool(&spi->dev, "amplified"))
+ priv->amplified = true;
priv->spi = spi;
@@ -1098,80 +1063,53 @@ static int cc2520_probe(struct spi_device *spi)
spin_lock_init(&priv->lock);
init_completion(&priv->tx_complete);
- /* Assumption that CC2591 is not connected */
- priv->amplified = false;
-
/* Request all the gpio's */
- if (!gpio_is_valid(pdata.fifo)) {
+ priv->fifo_pin = devm_gpiod_get(&spi->dev, "fifo", GPIOD_IN);
+ if (IS_ERR(priv->fifo_pin)) {
dev_err(&spi->dev, "fifo gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(priv->fifo_pin);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.fifo,
- GPIOF_IN, "fifo");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.cca)) {
+ cca = devm_gpiod_get(&spi->dev, "cca", GPIOD_IN);
+ if (IS_ERR(cca)) {
dev_err(&spi->dev, "cca gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(cca);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.cca,
- GPIOF_IN, "cca");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.fifop)) {
+ fifop = devm_gpiod_get(&spi->dev, "fifop", GPIOD_IN);
+ if (IS_ERR(fifop)) {
dev_err(&spi->dev, "fifop gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(fifop);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.fifop,
- GPIOF_IN, "fifop");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.sfd)) {
+ sfd = devm_gpiod_get(&spi->dev, "sfd", GPIOD_IN);
+ if (IS_ERR(sfd)) {
dev_err(&spi->dev, "sfd gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(sfd);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.sfd,
- GPIOF_IN, "sfd");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.reset)) {
+ reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset)) {
dev_err(&spi->dev, "reset gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(reset);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.reset,
- GPIOF_OUT_INIT_LOW, "reset");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.vreg)) {
+ vreg = devm_gpiod_get(&spi->dev, "vreg", GPIOD_OUT_LOW);
+ if (IS_ERR(vreg)) {
dev_err(&spi->dev, "vreg gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(vreg);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.vreg,
- GPIOF_OUT_INIT_LOW, "vreg");
- if (ret)
- goto err_hw_init;
-
- gpio_set_value(pdata.vreg, HIGH);
+ gpiod_set_value(vreg, HIGH);
usleep_range(100, 150);
- gpio_set_value(pdata.reset, HIGH);
+ gpiod_set_value(reset, HIGH);
usleep_range(200, 250);
ret = cc2520_hw_init(priv);
@@ -1180,7 +1118,7 @@ static int cc2520_probe(struct spi_device *spi)
/* Set up fifop interrupt */
ret = devm_request_irq(&spi->dev,
- gpio_to_irq(pdata.fifop),
+ gpiod_to_irq(fifop),
cc2520_fifop_isr,
IRQF_TRIGGER_RISING,
dev_name(&spi->dev),
@@ -1192,7 +1130,7 @@ static int cc2520_probe(struct spi_device *spi)
/* Set up sfd interrupt */
ret = devm_request_irq(&spi->dev,
- gpio_to_irq(pdata.sfd),
+ gpiod_to_irq(sfd),
cc2520_sfd_isr,
IRQF_TRIGGER_FALLING,
dev_name(&spi->dev),
@@ -1241,7 +1179,7 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids);
static struct spi_driver cc2520_driver = {
.driver = {
.name = "cc2520",
- .of_match_table = of_match_ptr(cc2520_of_ids),
+ .of_match_table = cc2520_of_ids,
},
.id_table = cc2520_ids,
.probe = cc2520_probe,
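
cc2520 gets the same descriptor conversion, with required lines fetched via devm_gpiod_get() and interrupt numbers derived with gpiod_to_irq() instead of gpio_to_irq(). A hedged sketch combining the two steps (handler and names hypothetical):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static int example_irq_on_line(struct device *dev, irq_handler_t handler,
                               void *data)
{
        struct gpio_desc *line;
        int irq;

        /* Required line: an error here is fatal, unlike _optional() */
        line = devm_gpiod_get(dev, "fifop", GPIOD_IN);
        if (IS_ERR(line))
                return PTR_ERR(line);

        irq = gpiod_to_irq(line);
        if (irq < 0)
                return irq;

        return devm_request_irq(dev, irq, handler, IRQF_TRIGGER_RISING,
                                dev_name(dev), data);
}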
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 8cdcaaf58ae3..cba199422f47 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -4,15 +4,20 @@
IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11
+# Some IPA versions can reuse another set of GSI register definitions.
+GSI_IPA_VERSIONS := 3.1 3.5.1 4.0 4.5 4.9 4.11
+
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
- ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
- ipa_gsi.o ipa_smp2p.o ipa_uc.o \
+ ipa_table.o ipa_interrupt.o gsi.o gsi_reg.o \
+ gsi_trans.o ipa_gsi.o ipa_smp2p.o ipa_uc.o \
ipa_endpoint.o ipa_cmd.o ipa_modem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o
+ipa-y += $(GSI_IPA_VERSIONS:%=reg/gsi_reg-v%.o)
+
ipa-y += $(IPA_VERSIONS:%=reg/ipa_reg-v%.o)
ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index bea2da1c4c51..9a0b1fe4a93a 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include "gsi.h"
+#include "reg.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
@@ -162,12 +163,6 @@ static void gsi_validate_build(void)
* ensure the elements themselves meet the requirement.
*/
BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
-
- /* The channel element size must fit in this field */
- BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
-
- /* The event ring element size must fit in this field */
- BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}
/* Return the channel id associated with a given channel */
@@ -182,21 +177,39 @@ static bool gsi_channel_initialized(struct gsi_channel *channel)
return !!channel->gsi;
}
+/* Encode the channel protocol for the CH_C_CNTXT_0 register */
+static u32 ch_c_cntxt_0_type_encode(enum ipa_version version,
+ const struct reg *reg,
+ enum gsi_channel_type type)
+{
+ u32 val;
+
+ val = reg_encode(reg, CHTYPE_PROTOCOL, type);
+ if (version < IPA_VERSION_4_5 || version >= IPA_VERSION_5_0)
+ return val;
+
+ type >>= hweight32(reg_fmask(reg, CHTYPE_PROTOCOL));
+
+ return val | reg_encode(reg, CHTYPE_PROTOCOL_MSB, type);
+}
+
/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
+ const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK);
+
gsi->type_enabled_bitmap = val;
- iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+ iowrite32(val, gsi->virt + reg_offset(reg));
}
static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id);
}
static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id);
}
/* Event ring commands are performed one at a time. Their completion
@@ -207,22 +220,29 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
u32 val = BIT(evt_ring_id);
+ const struct reg *reg;
/* There's a small chance that a previous command completed
* after the interrupt was disabled, so make sure we have no
* pending interrupts before we enable them.
*/
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(reg));
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(val, gsi->virt + reg_offset(reg));
gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}
/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
+ const struct reg *reg;
+
gsi_irq_type_disable(gsi, GSI_EV_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
}
/* Channel commands are performed one at a time. Their completion is
@@ -233,32 +253,43 @@ static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
u32 val = BIT(channel_id);
+ const struct reg *reg;
/* There's a small chance that a previous command completed
* after the interrupt was disabled, so make sure we have no
* pending interrupts before we enable them.
*/
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(val, gsi->virt + reg_offset(reg));
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}
/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
+ const struct reg *reg;
+
gsi_irq_type_disable(gsi, GSI_CH_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
}
static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
+ const struct reg *reg;
u32 val;
gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
val = gsi->ieob_enabled_bitmap;
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ iowrite32(val, gsi->virt + reg_offset(reg));
/* Enable the interrupt type if this is the first channel enabled */
if (enable_ieob)
@@ -267,6 +298,7 @@ static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
+ const struct reg *reg;
u32 val;
gsi->ieob_enabled_bitmap &= ~event_mask;
@@ -275,8 +307,9 @@ static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
if (!gsi->ieob_enabled_bitmap)
gsi_irq_type_disable(gsi, GSI_IEOB);
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
val = gsi->ieob_enabled_bitmap;
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ iowrite32(val, gsi->virt + reg_offset(reg));
}
static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
@@ -287,34 +320,44 @@ static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
+ const struct reg *reg;
u32 val;
/* Global interrupts include hardware error reports. Enable
* that so we can at least report the error should it occur.
*/
- iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
+
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);
/* General GSI interrupts are reported to all EEs; if they occur
* they are unrecoverable (without reset). A breakpoint interrupt
* also exists, but we don't support that. We want to be notified
* of errors so we can report them, even if they can't be handled.
*/
- val = BIT(BUS_ERROR);
- val |= BIT(CMD_FIFO_OVRFLOW);
- val |= BIT(MCS_STACK_OVRFLOW);
- iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ val = BUS_ERROR;
+ val |= CMD_FIFO_OVRFLOW;
+ val |= MCS_STACK_OVRFLOW;
+ iowrite32(val, gsi->virt + reg_offset(reg));
+
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
}
/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
+ const struct reg *reg;
+
gsi_irq_type_update(gsi, 0);
/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
- iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
}
/* Return the virtual address associated with a ring index */
@@ -356,11 +399,12 @@ static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
+ const struct reg *reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
u32 val;
- val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ val = ioread32(gsi->virt + reg_n_offset(reg, evt_ring_id));
- return u32_get_bits(val, EV_CHSTATE_FMASK);
+ return reg_decode(reg, EV_CHSTATE, val);
}
/* Issue an event ring command and wait for it to complete */
@@ -368,16 +412,18 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
struct device *dev = gsi->dev;
+ const struct reg *reg;
bool timeout;
u32 val;
/* Enable the completion interrupt for the command */
gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
- val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
- val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
+ reg = gsi_reg(gsi, EV_CH_CMD);
+ val = reg_encode(reg, EV_CHID, evt_ring_id);
+ val |= reg_encode(reg, EV_OPCODE, opcode);
- timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
gsi_irq_ev_ctrl_disable(gsi);
@@ -464,13 +510,16 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
+ const struct reg *reg = gsi_reg(channel->gsi, CH_C_CNTXT_0);
u32 channel_id = gsi_channel_id(channel);
- void __iomem *virt = channel->gsi->virt;
+ struct gsi *gsi = channel->gsi;
+ void __iomem *virt = gsi->virt;
u32 val;
- val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+ reg = gsi_reg(gsi, CH_C_CNTXT_0);
+ val = ioread32(virt + reg_n_offset(reg, channel_id));
- return u32_get_bits(val, CHSTATE_FMASK);
+ return reg_decode(reg, CHSTATE, val);
}
/* Issue a channel command and wait for it to complete */
@@ -480,15 +529,18 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
+ const struct reg *reg;
bool timeout;
u32 val;
/* Enable the completion interrupt for the command */
gsi_irq_ch_ctrl_enable(gsi, channel_id);
- val = u32_encode_bits(channel_id, CH_CHID_FMASK);
- val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
+ reg = gsi_reg(gsi, CH_CMD);
+ val = reg_encode(reg, CH_CHID, channel_id);
+ val |= reg_encode(reg, CH_OPCODE, opcode);
+
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
gsi_irq_ch_ctrl_disable(gsi);
@@ -651,6 +703,7 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
*/
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
+ const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0);
struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
u32 val;
@@ -658,7 +711,7 @@ static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
/* Note: index *must* be used modulo the ring count here */
val = gsi_ring_addr(ring, (index - 1) % ring->count);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
}
/* Program an event ring for use */
@@ -666,41 +719,56 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
- size_t size;
+ const struct reg *reg;
u32 val;
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
/* We program all event rings as GPI type/protocol */
- val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
- val |= EV_INTYPE_FMASK;
- val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ val = reg_encode(reg, EV_CHTYPE, GSI_CHANNEL_TYPE_GPI);
+ /* EV_EE field is 0 (GSI_EE_AP) */
+ val |= reg_bit(reg, EV_INTYPE);
+ val |= reg_encode(reg, EV_ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
- size = ring->count * GSI_RING_ELEMENT_SIZE;
- val = ev_r_length_encoded(gsi->version, size);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_1);
+ val = reg_encode(reg, R_LENGTH, ring->count * GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the event ring,
* respectively.
*/
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_2);
val = lower_32_bits(ring->addr);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_3);
val = upper_32_bits(ring->addr);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
- val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
- val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_8);
+ val = reg_encode(reg, EV_MODT, GSI_EVT_RING_INT_MODT);
+ val |= reg_encode(reg, EV_MODC, 1); /* comes from channel */
+ /* EV_MOD_CNT is 0 (no counter-based interrupt coalescing) */
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ /* No MSI write data, and MSI high and low address is 0 */
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_9);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
- /* No MSI write data, and MSI address high and low address is 0 */
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_10);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_11);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* We don't need to get event read pointer updates */
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_12);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_13);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* Finally, tell the hardware our "last processed" event (arbitrary) */
gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
@@ -761,39 +829,52 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
union gsi_channel_scratch scr = { };
struct gsi_channel_scratch_gpi *gpi;
struct gsi *gsi = channel->gsi;
+ const struct reg *reg;
u32 wrr_weight = 0;
+ u32 offset;
u32 val;
+ reg = gsi_reg(gsi, CH_C_CNTXT_0);
+
/* We program all channels as GPI type/protocol */
- val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
+ val = ch_c_cntxt_0_type_encode(gsi->version, reg, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
- val |= CHTYPE_DIR_FMASK;
- val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
- val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
- iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
-
- val = r_length_encoded(gsi->version, size);
- iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
+ val |= reg_bit(reg, CHTYPE_DIR);
+ if (gsi->version < IPA_VERSION_5_0)
+ val |= reg_encode(reg, ERINDEX, channel->evt_ring_id);
+ val |= reg_encode(reg, ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_CNTXT_1);
+ val = reg_encode(reg, CH_R_LENGTH, size);
+ if (gsi->version >= IPA_VERSION_5_0)
+ val |= reg_encode(reg, CH_ERINDEX, channel->evt_ring_id);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the channel ring,
* respectively.
*/
+ reg = gsi_reg(gsi, CH_C_CNTXT_2);
val = lower_32_bits(channel->tre_ring.addr);
- iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_CNTXT_3);
val = upper_32_bits(channel->tre_ring.addr);
- iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_QOS);
/* Command channel gets low weighted round-robin priority */
if (channel->command)
- wrr_weight = field_max(WRR_WEIGHT_FMASK);
- val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
+ wrr_weight = reg_field_max(reg, WRR_WEIGHT);
+ val = reg_encode(reg, WRR_WEIGHT, wrr_weight);
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
/* No need to use the doorbell engine starting at IPA v4.0 */
if (gsi->version < IPA_VERSION_4_0 && doorbell)
- val |= USE_DB_ENG_FMASK;
+ val |= reg_bit(reg, USE_DB_ENG);
/* v4.0 introduces an escape buffer for prefetch. We use it
* on all but the AP command channel.
@@ -801,16 +882,15 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
/* If not otherwise set, prefetch buffers are used */
if (gsi->version < IPA_VERSION_4_5)
- val |= USE_ESCAPE_BUF_ONLY_FMASK;
+ val |= reg_bit(reg, USE_ESCAPE_BUF_ONLY);
else
- val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
- PREFETCH_MODE_FMASK);
+ val |= reg_encode(reg, PREFETCH_MODE, ESCAPE_BUF_ONLY);
}
/* All channels set DB_IN_BYTES */
if (gsi->version >= IPA_VERSION_4_9)
- val |= DB_IN_BYTES;
+ val |= reg_bit(reg, DB_IN_BYTES);
- iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
@@ -818,22 +898,27 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
+ reg = gsi_reg(gsi, CH_C_SCRATCH_0);
val = scr.data.word1;
- iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+ reg = gsi_reg(gsi, CH_C_SCRATCH_1);
val = scr.data.word2;
- iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+ reg = gsi_reg(gsi, CH_C_SCRATCH_2);
val = scr.data.word3;
- iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* We must preserve the upper 16 bits of the last scratch register.
* The next sequence assumes those bits remain unchanged between the
* read and the write.
*/
- val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+ reg = gsi_reg(gsi, CH_C_SCRATCH_3);
+ offset = reg_n_offset(reg, channel_id);
+ val = ioread32(gsi->virt + offset);
val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
- iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + offset);
/* All done! */
}
@@ -1049,10 +1134,14 @@ static void gsi_trans_tx_completed(struct gsi_trans *trans)
/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
+ const struct reg *reg;
u32 channel_mask;
- channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
- iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ);
+ channel_mask = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
+ iowrite32(channel_mask, gsi->virt + reg_offset(reg));
while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
@@ -1066,10 +1155,14 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
+ const struct reg *reg;
u32 event_mask;
- event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
- iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ);
+ event_mask = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
+ iowrite32(event_mask, gsi->virt + reg_offset(reg));
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
@@ -1117,21 +1210,29 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
+ const struct reg *log_reg;
+ const struct reg *clr_reg;
enum gsi_err_type type;
enum gsi_err_code code;
+ u32 offset;
u32 which;
u32 val;
u32 ee;
/* Get the logged error, then reinitialize the log */
- val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
- iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
- iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
+ log_reg = gsi_reg(gsi, ERROR_LOG);
+ offset = reg_offset(log_reg);
+ val = ioread32(gsi->virt + offset);
+ iowrite32(0, gsi->virt + offset);
+
+ clr_reg = gsi_reg(gsi, ERROR_LOG_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(clr_reg));
- ee = u32_get_bits(val, ERR_EE_FMASK);
- type = u32_get_bits(val, ERR_TYPE_FMASK);
- which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
- code = u32_get_bits(val, ERR_CODE_FMASK);
+ /* Parse the error value */
+ ee = reg_decode(log_reg, ERR_EE, val);
+ type = reg_decode(log_reg, ERR_TYPE, val);
+ which = reg_decode(log_reg, ERR_VIRT_IDX, val);
+ code = reg_decode(log_reg, ERR_CODE, val);
if (type == GSI_ERR_TYPE_CHAN)
gsi_isr_glob_chan_err(gsi, ee, which, code);
@@ -1144,6 +1245,7 @@ static void gsi_isr_glob_err(struct gsi *gsi)
/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
+ const struct reg *reg;
u32 result;
u32 val;
@@ -1166,8 +1268,9 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
* In either case, we silently ignore an INCORRECT_CHANNEL_STATE
* error if we receive it.
*/
- val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
- result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
+ reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
+ val = ioread32(gsi->virt + reg_offset(reg));
+ result = reg_decode(reg, GENERIC_EE_RESULT, val);
switch (result) {
case GENERIC_EE_SUCCESS:
@@ -1191,19 +1294,22 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
+ const struct reg *reg;
u32 val;
- val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_STTS);
+ val = ioread32(gsi->virt + reg_offset(reg));
- if (val & BIT(ERROR_INT))
+ if (val & ERROR_INT)
gsi_isr_glob_err(gsi);
- iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_CLR);
+ iowrite32(val, gsi->virt + reg_offset(reg));
- val &= ~BIT(ERROR_INT);
+ val &= ~ERROR_INT;
- if (val & BIT(GP_INT1)) {
- val ^= BIT(GP_INT1);
+ if (val & GP_INT1) {
+ val ^= GP_INT1;
gsi_isr_gp_int1(gsi);
}
@@ -1214,11 +1320,16 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
+ const struct reg *reg;
u32 event_mask;
- event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ);
+ event_mask = ioread32(gsi->virt + reg_offset(reg));
+
gsi_irq_ieob_disable(gsi, event_mask);
- iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_CLR);
+ iowrite32(event_mask, gsi->virt + reg_offset(reg));
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
@@ -1233,10 +1344,14 @@ static void gsi_isr_ieob(struct gsi *gsi)
static void gsi_isr_general(struct gsi *gsi)
{
struct device *dev = gsi->dev;
+ const struct reg *reg;
u32 val;
- val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
- iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_STTS);
+ val = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_CLR);
+ iowrite32(val, gsi->virt + reg_offset(reg));
dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
@@ -1252,31 +1367,39 @@ static void gsi_isr_general(struct gsi *gsi)
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
struct gsi *gsi = dev_id;
+ const struct reg *reg;
u32 intr_mask;
u32 cnt = 0;
+ u32 offset;
+
+ reg = gsi_reg(gsi, CNTXT_TYPE_IRQ);
+ offset = reg_offset(reg);
/* enum gsi_irq_type_id defines GSI interrupt types */
- while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
+ while ((intr_mask = ioread32(gsi->virt + offset))) {
/* intr_mask contains bitmask of pending GSI interrupts */
do {
u32 gsi_intr = BIT(__ffs(intr_mask));
intr_mask ^= gsi_intr;
+ /* Note: the IRQ condition for each type is cleared
+ * when the type-specific register is updated.
+ */
switch (gsi_intr) {
- case BIT(GSI_CH_CTRL):
+ case GSI_CH_CTRL:
gsi_isr_chan_ctrl(gsi);
break;
- case BIT(GSI_EV_CTRL):
+ case GSI_EV_CTRL:
gsi_isr_evt_ctrl(gsi);
break;
- case BIT(GSI_GLOB_EE):
+ case GSI_GLOB_EE:
gsi_isr_glob_ee(gsi);
break;
- case BIT(GSI_IEOB):
+ case GSI_IEOB:
gsi_isr_ieob(gsi);
break;
- case BIT(GSI_GENERAL):
+ case GSI_GENERAL:
gsi_isr_general(gsi);
break;
default:
@@ -1467,11 +1590,13 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
struct gsi_ring *tre_ring = &channel->tre_ring;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
+ const struct reg *reg;
u32 val;
+ reg = gsi_reg(gsi, CH_C_DOORBELL_0);
/* Note: index *must* be used modulo the ring count here */
val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
- iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
}
/* Consult hardware, move newly completed transactions to completed state */
@@ -1482,6 +1607,7 @@ void gsi_channel_update(struct gsi_channel *channel)
struct gsi_evt_ring *evt_ring;
struct gsi_trans *trans;
struct gsi_ring *ring;
+ const struct reg *reg;
u32 offset;
u32 index;
@@ -1491,7 +1617,8 @@ void gsi_channel_update(struct gsi_channel *channel)
/* See if there's anything new to process; if not, we're done. Note
* that index always refers to an entry *within* the event ring.
*/
- offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_4);
+ offset = reg_n_offset(reg, evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
return;
@@ -1642,7 +1769,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode,
u8 params)
{
+ const struct reg *reg;
bool timeout;
+ u32 offset;
u32 val;
/* The error global interrupt type is always enabled (until we tear
@@ -1654,24 +1783,31 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
* channel), and only from this function. So we enable the GP_INT1
* IRQ type here, and disable it again after the command completes.
*/
- val = BIT(ERROR_INT) | BIT(GP_INT1);
- iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ val = ERROR_INT | GP_INT1;
+ iowrite32(val, gsi->virt + reg_offset(reg));
/* First zero the result code field */
- val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
- val &= ~GENERIC_EE_RESULT_FMASK;
- iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
+ offset = reg_offset(reg);
+ val = ioread32(gsi->virt + offset);
+
+ val &= ~reg_fmask(reg, GENERIC_EE_RESULT);
+ iowrite32(val, gsi->virt + offset);
/* Now issue the command */
- val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
- val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
- val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
+ reg = gsi_reg(gsi, GENERIC_CMD);
+ val = reg_encode(reg, GENERIC_OPCODE, opcode);
+ val |= reg_encode(reg, GENERIC_CHID, channel_id);
+ val |= reg_encode(reg, GENERIC_EE, GSI_EE_MODEM);
+ if (gsi->version >= IPA_VERSION_4_11)
+ val |= reg_encode(reg, GENERIC_PARAMS, params);
- timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
/* Disable the GP_INT1 IRQ type again */
- iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
if (!timeout)
return gsi->result;
@@ -1828,32 +1964,40 @@ static void gsi_channel_teardown(struct gsi *gsi)
/* Turn off all GSI interrupts initially */
static int gsi_irq_setup(struct gsi *gsi)
{
+ const struct reg *reg;
int ret;
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
- iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_INTSET);
+ iowrite32(reg_bit(reg, INTYPE), gsi->virt + reg_offset(reg));
/* Disable all interrupt types */
gsi_irq_type_update(gsi, 0);
/* Clear all type-specific interrupt masks */
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
if (gsi->version > IPA_VERSION_3_1) {
- u32 offset;
+ reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
- /* These registers are in the non-adjusted address range */
- offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
- iowrite32(0, gsi->virt_raw + offset);
- offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
- iowrite32(0, gsi->virt_raw + offset);
+ reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
}
- iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
if (ret)
@@ -1871,6 +2015,7 @@ static void gsi_irq_teardown(struct gsi *gsi)
static int gsi_ring_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
+ const struct reg *reg;
u32 count;
u32 val;
@@ -1882,9 +2027,10 @@ static int gsi_ring_setup(struct gsi *gsi)
return 0;
}
- val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+ reg = gsi_reg(gsi, HW_PARAM_2);
+ val = ioread32(gsi->virt + reg_offset(reg));
- count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ count = reg_decode(reg, NUM_CH_PER_EE, val);
if (!count) {
dev_err(dev, "GSI reports zero channels supported\n");
return -EINVAL;
@@ -1896,7 +2042,12 @@ static int gsi_ring_setup(struct gsi *gsi)
}
gsi->channel_count = count;
- count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ if (gsi->version < IPA_VERSION_5_0) {
+ count = reg_decode(reg, NUM_EV_PER_EE, val);
+ } else {
+ reg = gsi_reg(gsi, HW_PARAM_4);
+ val = ioread32(gsi->virt + reg_offset(reg));
+ count = reg_decode(reg, EV_PER_EE, val);
+ }
if (!count) {
dev_err(dev, "GSI reports zero event rings supported\n");
return -EINVAL;
@@ -1915,12 +2066,14 @@ static int gsi_ring_setup(struct gsi *gsi)
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
+ const struct reg *reg;
u32 val;
int ret;
/* Here is where we first touch the GSI hardware */
- val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
- if (!(val & ENABLED_FMASK)) {
+ reg = gsi_reg(gsi, GSI_STATUS);
+ val = ioread32(gsi->virt + reg_offset(reg));
+ if (!(val & reg_bit(reg, ENABLED))) {
dev_err(gsi->dev, "GSI has not been enabled\n");
return -EIO;
}
@@ -1934,7 +2087,8 @@ int gsi_setup(struct gsi *gsi)
goto err_irq_teardown;
/* Initialize the error log */
- iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+ reg = gsi_reg(gsi, ERROR_LOG);
+ iowrite32(0, gsi->virt + reg_offset(reg));
ret = gsi_channel_setup(gsi);
if (ret)
@@ -2205,67 +2359,37 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
enum ipa_version version, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
- struct device *dev = &pdev->dev;
- struct resource *res;
- resource_size_t size;
- u32 adjust;
int ret;
gsi_validate_build();
- gsi->dev = dev;
+ gsi->dev = &pdev->dev;
gsi->version = version;
/* GSI uses NAPI on all channels. Create a dummy network device
* for the channel NAPI contexts to be associated with.
*/
init_dummy_netdev(&gsi->dummy_dev);
-
- /* Get GSI memory range and map it */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
- if (!res) {
- dev_err(dev, "DT error getting \"gsi\" memory property\n");
- return -ENODEV;
- }
-
- size = resource_size(res);
- if (res->start > U32_MAX || size > U32_MAX - res->start) {
- dev_err(dev, "DT memory resource \"gsi\" out of range\n");
- return -EINVAL;
- }
-
- /* Make sure we can make our pointer adjustment if necessary */
- adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
- if (res->start < adjust) {
- dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
- adjust);
- return -EINVAL;
- }
-
- gsi->virt_raw = ioremap(res->start, size);
- if (!gsi->virt_raw) {
- dev_err(dev, "unable to remap \"gsi\" memory\n");
- return -ENOMEM;
- }
- /* Most registers are accessed using an adjusted register range */
- gsi->virt = gsi->virt_raw - adjust;
-
init_completion(&gsi->completion);
+ ret = gsi_reg_init(gsi, pdev);
+ if (ret)
+ return ret;
+
ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
if (ret)
- goto err_iounmap;
+ goto err_reg_exit;
ret = gsi_channel_init(gsi, count, data);
if (ret)
- goto err_iounmap;
+ goto err_reg_exit;
mutex_init(&gsi->mutex);
return 0;
-err_iounmap:
- iounmap(gsi->virt_raw);
+err_reg_exit:
+ gsi_reg_exit(gsi);
return ret;
}
@@ -2275,7 +2399,7 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- iounmap(gsi->virt_raw);
+ gsi_reg_exit(gsi);
}
/* The maximum number of outstanding TREs on a channel. This limits
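
Taken together, the gsi.c changes replace compile-time GSI_*_OFFSET and *_FMASK constants with per-version register descriptions looked up at run time: gsi_reg() fetches the description, reg_offset()/reg_n_offset() yield the (possibly parameterized) offset, and reg_encode()/reg_decode()/reg_bit()/reg_fmask() handle fields whose position varies across IPA versions. A minimal sketch of the new access idiom, using only helpers that appear in this patch:

static void example_mask_channel_irqs(struct gsi *gsi)
{
        const struct reg *reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);

        /* The offset comes from the version-specific table, not a macro */
        iowrite32(0, gsi->virt + reg_offset(reg));
}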
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 49dcadba4e0b..50bc80cb167c 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
@@ -140,8 +140,9 @@ struct gsi_evt_ring {
struct gsi {
struct device *dev; /* Same as IPA device */
enum ipa_version version;
- void __iomem *virt_raw; /* I/O mapped address range */
- void __iomem *virt; /* Adjusted for most registers */
+ void __iomem *virt; /* I/O mapped registers */
+ const struct regs *regs;
+
u32 irq;
u32 channel_count;
u32 evt_ring_count;
diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c
new file mode 100644
index 000000000000..1412b67304c8
--- /dev/null
+++ b/drivers/net/ipa/gsi_reg.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include "gsi.h"
+#include "reg.h"
+#include "gsi_reg.h"
+
+/* Is this register ID valid for the current GSI version? */
+static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
+{
+ switch (reg_id) {
+ case INTER_EE_SRC_CH_IRQ_MSK:
+ case INTER_EE_SRC_EV_CH_IRQ_MSK:
+ case CH_C_CNTXT_0:
+ case CH_C_CNTXT_1:
+ case CH_C_CNTXT_2:
+ case CH_C_CNTXT_3:
+ case CH_C_QOS:
+ case CH_C_SCRATCH_0:
+ case CH_C_SCRATCH_1:
+ case CH_C_SCRATCH_2:
+ case CH_C_SCRATCH_3:
+ case EV_CH_E_CNTXT_0:
+ case EV_CH_E_CNTXT_1:
+ case EV_CH_E_CNTXT_2:
+ case EV_CH_E_CNTXT_3:
+ case EV_CH_E_CNTXT_4:
+ case EV_CH_E_CNTXT_8:
+ case EV_CH_E_CNTXT_9:
+ case EV_CH_E_CNTXT_10:
+ case EV_CH_E_CNTXT_11:
+ case EV_CH_E_CNTXT_12:
+ case EV_CH_E_CNTXT_13:
+ case EV_CH_E_SCRATCH_0:
+ case EV_CH_E_SCRATCH_1:
+ case CH_C_DOORBELL_0:
+ case EV_CH_E_DOORBELL_0:
+ case GSI_STATUS:
+ case CH_CMD:
+ case EV_CH_CMD:
+ case GENERIC_CMD:
+ case HW_PARAM_2:
+ case CNTXT_TYPE_IRQ:
+ case CNTXT_TYPE_IRQ_MSK:
+ case CNTXT_SRC_CH_IRQ:
+ case CNTXT_SRC_CH_IRQ_MSK:
+ case CNTXT_SRC_CH_IRQ_CLR:
+ case CNTXT_SRC_EV_CH_IRQ:
+ case CNTXT_SRC_EV_CH_IRQ_MSK:
+ case CNTXT_SRC_EV_CH_IRQ_CLR:
+ case CNTXT_SRC_IEOB_IRQ:
+ case CNTXT_SRC_IEOB_IRQ_MSK:
+ case CNTXT_SRC_IEOB_IRQ_CLR:
+ case CNTXT_GLOB_IRQ_STTS:
+ case CNTXT_GLOB_IRQ_EN:
+ case CNTXT_GLOB_IRQ_CLR:
+ case CNTXT_GSI_IRQ_STTS:
+ case CNTXT_GSI_IRQ_EN:
+ case CNTXT_GSI_IRQ_CLR:
+ case CNTXT_INTSET:
+ case ERROR_LOG:
+ case ERROR_LOG_CLR:
+ case CNTXT_SCRATCH_0:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id)
+{
+ if (WARN(!gsi_reg_id_valid(gsi, reg_id), "invalid reg %u\n", reg_id))
+ return NULL;
+
+ return reg(gsi->regs, reg_id);
+}
+
+static const struct regs *gsi_regs(struct gsi *gsi)
+{
+ switch (gsi->version) {
+ case IPA_VERSION_3_1:
+ return &gsi_regs_v3_1;
+
+ case IPA_VERSION_3_5_1:
+ return &gsi_regs_v3_5_1;
+
+ case IPA_VERSION_4_2:
+ return &gsi_regs_v4_0;
+
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_7:
+ return &gsi_regs_v4_5;
+
+ case IPA_VERSION_4_9:
+ return &gsi_regs_v4_9;
+
+ case IPA_VERSION_4_11:
+ return &gsi_regs_v4_11;
+
+ default:
+ return NULL;
+ }
+}
+
+/* Sets gsi->virt and I/O maps the "gsi" memory range for registers */
+int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ resource_size_t size;
+
+ /* Get GSI memory range and map it */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
+ if (!res) {
+ dev_err(dev, "DT error getting \"gsi\" memory property\n");
+ return -ENODEV;
+ }
+
+ size = resource_size(res);
+ if (res->start > U32_MAX || size > U32_MAX - res->start) {
+ dev_err(dev, "DT memory resource \"gsi\" out of range\n");
+ return -EINVAL;
+ }
+
+ gsi->regs = gsi_regs(gsi);
+ if (!gsi->regs) {
+ dev_err(dev, "unsupported IPA version %u (?)\n", gsi->version);
+ return -EINVAL;
+ }
+
+ gsi->virt = ioremap(res->start, size);
+ if (!gsi->virt) {
+ dev_err(dev, "unable to remap \"gsi\" memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Inverse of gsi_reg_init() */
+void gsi_reg_exit(struct gsi *gsi)
+{
+ iounmap(gsi->virt);
+ gsi->virt = NULL;
+ gsi->regs = NULL;
+}
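
The new gsi_reg.c centralizes both the version-to-table mapping and the I/O mapping that gsi_init() previously did inline; because each per-version table bakes in the correct offsets, the old GSI_EE_REG_ADJUST pointer arithmetic is no longer needed. A sketch of reading a hardware parameter through the table, mirroring the gsi_ring_setup() change above:

static u32 example_channel_count(struct gsi *gsi)
{
        const struct reg *reg = gsi_reg(gsi, HW_PARAM_2);
        u32 val = ioread32(gsi->virt + reg_offset(reg));

        /* Field position may differ per IPA version; the table knows */
        return reg_decode(reg, NUM_CH_PER_EE, val);
}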
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 3763359f208f..f62f0a5c653d 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#ifndef _GSI_REG_H_
#define _GSI_REG_H_
-/* === Only "gsi.c" should include this file === */
+/* === Only "gsi.c" and "gsi_reg.c" should include this file === */
#include <linux/bits.h>
@@ -38,29 +38,75 @@
* (though the actual limit is hardware-dependent).
*/
-/* GSI EE registers as a group are shifted downward by a fixed constant amount
- * for IPA versions 4.5 and beyond. This applies to all GSI registers we use
- * *except* the ones that disable inter-EE interrupts for channels and event
- * channels.
- *
- * The "raw" (not adjusted) GSI register range is mapped, and a pointer to
- * the mapped range is held in gsi->virt_raw. The inter-EE interrupt
- * registers are accessed using that pointer.
- *
- * Most registers are accessed using gsi->virt, which is a copy of the "raw"
- * pointer, adjusted downward by the fixed amount.
- */
-#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
-
-/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
-
-#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
- (0x0000c020 + 0x1000 * GSI_EE_AP)
-
-#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
- (0x0000c024 + 0x1000 * GSI_EE_AP)
+/* enum gsi_reg_id - GSI register IDs */
+enum gsi_reg_id {
+ INTER_EE_SRC_CH_IRQ_MSK, /* IPA v3.5+ */
+ INTER_EE_SRC_EV_CH_IRQ_MSK, /* IPA v3.5+ */
+ CH_C_CNTXT_0,
+ CH_C_CNTXT_1,
+ CH_C_CNTXT_2,
+ CH_C_CNTXT_3,
+ CH_C_QOS,
+ CH_C_SCRATCH_0,
+ CH_C_SCRATCH_1,
+ CH_C_SCRATCH_2,
+ CH_C_SCRATCH_3,
+ EV_CH_E_CNTXT_0,
+ EV_CH_E_CNTXT_1,
+ EV_CH_E_CNTXT_2,
+ EV_CH_E_CNTXT_3,
+ EV_CH_E_CNTXT_4,
+ EV_CH_E_CNTXT_8,
+ EV_CH_E_CNTXT_9,
+ EV_CH_E_CNTXT_10,
+ EV_CH_E_CNTXT_11,
+ EV_CH_E_CNTXT_12,
+ EV_CH_E_CNTXT_13,
+ EV_CH_E_SCRATCH_0,
+ EV_CH_E_SCRATCH_1,
+ CH_C_DOORBELL_0,
+ EV_CH_E_DOORBELL_0,
+ GSI_STATUS,
+ CH_CMD,
+ EV_CH_CMD,
+ GENERIC_CMD,
+ HW_PARAM_2, /* IPA v3.5.1+ */
+ HW_PARAM_4, /* IPA v5.0+ */
+ CNTXT_TYPE_IRQ,
+ CNTXT_TYPE_IRQ_MSK,
+ CNTXT_SRC_CH_IRQ,
+ CNTXT_SRC_CH_IRQ_MSK,
+ CNTXT_SRC_CH_IRQ_CLR,
+ CNTXT_SRC_EV_CH_IRQ,
+ CNTXT_SRC_EV_CH_IRQ_MSK,
+ CNTXT_SRC_EV_CH_IRQ_CLR,
+ CNTXT_SRC_IEOB_IRQ,
+ CNTXT_SRC_IEOB_IRQ_MSK,
+ CNTXT_SRC_IEOB_IRQ_CLR,
+ CNTXT_GLOB_IRQ_STTS,
+ CNTXT_GLOB_IRQ_EN,
+ CNTXT_GLOB_IRQ_CLR,
+ CNTXT_GSI_IRQ_STTS,
+ CNTXT_GSI_IRQ_EN,
+ CNTXT_GSI_IRQ_CLR,
+ CNTXT_INTSET,
+ ERROR_LOG,
+ ERROR_LOG_CLR,
+ CNTXT_SCRATCH_0,
+ GSI_REG_ID_COUNT, /* Last; not an ID */
+};
-/* All other register offsets are relative to gsi->virt */
+/* CH_C_CNTXT_0 register */
+enum gsi_reg_ch_c_cntxt_0_field_id {
+ CHTYPE_PROTOCOL,
+ CHTYPE_DIR,
+ CH_EE,
+ CHID,
+ CHTYPE_PROTOCOL_MSB, /* IPA v4.5-4.11 */
+ ERINDEX, /* Not IPA v5.0+ */
+ CHSTATE,
+ ELEMENT_SIZE,
+};
/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
enum gsi_channel_type {
@@ -76,155 +122,64 @@ enum gsi_channel_type {
GSI_CHANNEL_TYPE_11AD = 0x9,
};
-#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
- (0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
-#define CHTYPE_DIR_FMASK GENMASK(3, 3)
-#define EE_FMASK GENMASK(7, 4)
-#define CHID_FMASK GENMASK(12, 8)
-/* The next field is present for IPA v4.5 and above */
-#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
-#define ERINDEX_FMASK GENMASK(18, 14)
-#define CHSTATE_FMASK GENMASK(23, 20)
-#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
-
-/* Encoded value for CH_C_CNTXT_0 register channel protocol fields */
-static inline u32
-chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type)
-{
- u32 val;
-
- val = u32_encode_bits(type, CHTYPE_PROTOCOL_FMASK);
- if (version < IPA_VERSION_4_5)
- return val;
-
- /* Encode upper bit(s) as well */
- type >>= hweight32(CHTYPE_PROTOCOL_FMASK);
- val |= u32_encode_bits(type, CHTYPE_PROTOCOL_MSB_FMASK);
-
- return val;
-}
-
-#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
- (0x0001c004 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */
-static inline u32 r_length_encoded(enum ipa_version version, u32 length)
-{
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(length, GENMASK(15, 0));
- return u32_encode_bits(length, GENMASK(19, 0));
-}
-
-#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
- (0x0001c008 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
- (0x0001c00c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_QOS_OFFSET(ch) \
- (0x0001c05c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define WRR_WEIGHT_FMASK GENMASK(3, 0)
-#define MAX_PREFETCH_FMASK GENMASK(8, 8)
-#define USE_DB_ENG_FMASK GENMASK(9, 9)
-/* The next field is only present for IPA v4.0, v4.1, and v4.2 */
-#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
-/* The next two fields are present for IPA v4.5 and above */
-#define PREFETCH_MODE_FMASK GENMASK(13, 10)
-#define EMPTY_LVL_THRSHOLD_FMASK GENMASK(23, 16)
-/* The next field is present for IPA v4.9 and above */
-#define DB_IN_BYTES GENMASK(24, 24)
+/* CH_C_CNTXT_1 register */
+enum gsi_reg_ch_c_cntxt_1_field_id {
+ CH_R_LENGTH,
+ CH_ERINDEX, /* IPA v5.0+ */
+};
+
+/* CH_C_QOS register */
+enum gsi_reg_ch_c_qos_field_id {
+ WRR_WEIGHT,
+ MAX_PREFETCH,
+ USE_DB_ENG,
+ USE_ESCAPE_BUF_ONLY, /* IPA v4.0-4.2 */
+ PREFETCH_MODE, /* IPA v4.5+ */
+ EMPTY_LVL_THRSHOLD, /* IPA v4.5+ */
+ DB_IN_BYTES, /* IPA v4.9+ */
+ LOW_LATENCY_EN, /* IPA v5.0+ */
+};
/** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */
enum gsi_prefetch_mode {
- GSI_USE_PREFETCH_BUFS = 0x0,
- GSI_ESCAPE_BUF_ONLY = 0x1,
- GSI_SMART_PREFETCH = 0x2,
- GSI_FREE_PREFETCH = 0x3,
+ USE_PREFETCH_BUFS = 0,
+ ESCAPE_BUF_ONLY = 1,
+ SMART_PREFETCH = 2,
+ FREE_PREFETCH = 3,
};
-#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
- (0x0001c060 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
- (0x0001c064 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
- (0x0001c068 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
- (0x0001c06c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
- (0x0001d000 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
-#define EV_CHTYPE_FMASK GENMASK(3, 0)
-#define EV_EE_FMASK GENMASK(7, 4)
-#define EV_EVCHID_FMASK GENMASK(15, 8)
-#define EV_INTYPE_FMASK GENMASK(16, 16)
-#define EV_CHSTATE_FMASK GENMASK(23, 20)
-#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
-
-#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
- (0x0001d004 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-/* Encoded value for EV_CH_C_CNTXT_1 register EV_R_LENGTH field */
-static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
-{
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(length, GENMASK(15, 0));
- return u32_encode_bits(length, GENMASK(19, 0));
-}
-
-#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
- (0x0001d008 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
- (0x0001d00c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
- (0x0001d010 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
- (0x0001d020 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-#define MODT_FMASK GENMASK(15, 0)
-#define MODC_FMASK GENMASK(23, 16)
-#define MOD_CNT_FMASK GENMASK(31, 24)
-
-#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
- (0x0001d024 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
- (0x0001d028 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
- (0x0001d02c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
- (0x0001d030 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
- (0x0001d034 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
- (0x0001d048 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
- (0x0001d04c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
+/* EV_CH_E_CNTXT_0 register */
+enum gsi_reg_ev_ch_e_cntxt_0_field_id {
+ EV_CHTYPE, /* enum gsi_channel_type */
+ EV_EE, /* enum gsi_ee_id; always GSI_EE_AP for us */
+ EV_EVCHID,
+ EV_INTYPE,
+ EV_CHSTATE,
+ EV_ELEMENT_SIZE,
+};
-#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
- (0x0001e000 + 0x4000 * GSI_EE_AP + 0x08 * (ch))
+/* EV_CH_E_CNTXT_1 register */
+enum gsi_reg_ev_ch_e_cntxt_1_field_id {
+ R_LENGTH,
+};
-#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
- (0x0001e100 + 0x4000 * GSI_EE_AP + 0x08 * (ev))
+/* EV_CH_E_CNTXT_8 register */
+enum gsi_reg_ev_ch_e_cntxt_8_field_id {
+ EV_MODT,
+ EV_MODC,
+ EV_MOD_CNT,
+};
-#define GSI_GSI_STATUS_OFFSET \
- (0x0001f000 + 0x4000 * GSI_EE_AP)
-#define ENABLED_FMASK GENMASK(0, 0)
+/* GSI_STATUS register */
+enum gsi_reg_gsi_status_field_id {
+ ENABLED,
+};
-#define GSI_CH_CMD_OFFSET \
- (0x0001f008 + 0x4000 * GSI_EE_AP)
-#define CH_CHID_FMASK GENMASK(7, 0)
-#define CH_OPCODE_FMASK GENMASK(31, 24)
+/* CH_CMD register */
+enum gsi_reg_gsi_ch_cmd_field_id {
+ CH_CHID,
+ CH_OPCODE,
+};
/** enum gsi_ch_cmd_opcode - CH_OPCODE field values in CH_CMD */
enum gsi_ch_cmd_opcode {
@@ -236,10 +191,11 @@ enum gsi_ch_cmd_opcode {
GSI_CH_DB_STOP = 0xb,
};
-#define GSI_EV_CH_CMD_OFFSET \
- (0x0001f010 + 0x4000 * GSI_EE_AP)
-#define EV_CHID_FMASK GENMASK(7, 0)
-#define EV_OPCODE_FMASK GENMASK(31, 24)
+/* EV_CH_CMD register */
+enum gsi_ev_ch_cmd_field_id {
+ EV_CHID,
+ EV_OPCODE,
+};
/** enum gsi_evt_cmd_opcode - EV_OPCODE field values in EV_CH_CMD */
enum gsi_evt_cmd_opcode {
@@ -248,12 +204,13 @@ enum gsi_evt_cmd_opcode {
GSI_EVT_DE_ALLOC = 0xa,
};
-#define GSI_GENERIC_CMD_OFFSET \
- (0x0001f018 + 0x4000 * GSI_EE_AP)
-#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
-#define GENERIC_CHID_FMASK GENMASK(9, 5)
-#define GENERIC_EE_FMASK GENMASK(13, 10)
-#define GENERIC_PARAMS_FMASK GENMASK(31, 24) /* IPA v4.11+ */
+/* GENERIC_CMD register */
+enum gsi_generic_cmd_field_id {
+ GENERIC_OPCODE,
+ GENERIC_CHID,
+ GENERIC_EE,
+ GENERIC_PARAMS, /* IPA v4.11+ */
+};
/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
enum gsi_generic_cmd_opcode {
@@ -264,22 +221,20 @@ enum gsi_generic_cmd_opcode {
GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */
};
-/* The next register is present for IPA v3.5.1 and above */
-#define GSI_GSI_HW_PARAM_2_OFFSET \
- (0x0001f040 + 0x4000 * GSI_EE_AP)
-#define IRAM_SIZE_FMASK GENMASK(2, 0)
-#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
-#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
-#define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
-#define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
-/* Fields below are present for IPA v4.0 and above */
-#define GSI_USE_SDMA_FMASK GENMASK(15, 15)
-#define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
-#define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
-#define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
-/* Fields below are present for IPA v4.2 and above */
-#define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
-#define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
+/* HW_PARAM_2 register */ /* IPA v3.5.1+ */
+enum gsi_hw_param_2_field_id {
+ IRAM_SIZE,
+ NUM_CH_PER_EE,
+ NUM_EV_PER_EE, /* Not IPA v5.0+ */
+ GSI_CH_PEND_TRANSLATE,
+ GSI_CH_FULL_LOGIC,
+ GSI_USE_SDMA, /* IPA v4.0+ */
+ GSI_SDMA_N_INT, /* IPA v4.0+ */
+ GSI_SDMA_MAX_BURST, /* IPA v4.0+ */
+ GSI_SDMA_N_IOVEC, /* IPA v4.0+ */
+ GSI_USE_RD_WR_ENG, /* IPA v4.2+ */
+ GSI_USE_INTER_EE, /* IPA v4.2+ */
+};
/** enum gsi_iram_size - IRAM_SIZE field values in HW_PARAM_2 */
enum gsi_iram_size {
@@ -293,93 +248,66 @@ enum gsi_iram_size {
IRAM_SIZE_FOUR_KB = 0x5,
};
-/* IRQ condition for each type is cleared by writing type-specific register */
-#define GSI_CNTXT_TYPE_IRQ_OFFSET \
- (0x0001f080 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
- (0x0001f088 + 0x4000 * GSI_EE_AP)
+/* HW_PARAM_4 register */ /* IPA v5.0+ */
+enum gsi_hw_param_4_field_id {
+ EV_PER_EE,
+ IRAM_PROTOCOL_COUNT,
+};
-/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
+/**
+ * enum gsi_irq_type_id: GSI IRQ types
+ * @GSI_CH_CTRL: Channel allocation, deallocation, etc.
+ * @GSI_EV_CTRL: Event ring allocation, deallocation, etc.
+ * @GSI_GLOB_EE: Global/general event
+ * @GSI_IEOB: Transfer (TRE) completion
+ * @GSI_INTER_EE_CH_CTRL: Remote-issued stop/reset (unused)
+ * @GSI_INTER_EE_EV_CTRL: Remote-issued event reset (unused)
+ * @GSI_GENERAL: General hardware event (bus error, etc.)
+ */
enum gsi_irq_type_id {
- GSI_CH_CTRL = 0x0, /* channel allocation, etc. */
- GSI_EV_CTRL = 0x1, /* event ring allocation, etc. */
- GSI_GLOB_EE = 0x2, /* global/general event */
- GSI_IEOB = 0x3, /* TRE completion */
- GSI_INTER_EE_CH_CTRL = 0x4, /* remote-issued stop/reset (unused) */
- GSI_INTER_EE_EV_CTRL = 0x5, /* remote-issued event reset (unused) */
- GSI_GENERAL = 0x6, /* general-purpose event */
+ GSI_CH_CTRL = BIT(0),
+ GSI_EV_CTRL = BIT(1),
+ GSI_GLOB_EE = BIT(2),
+ GSI_IEOB = BIT(3),
+ GSI_INTER_EE_CH_CTRL = BIT(4),
+ GSI_INTER_EE_EV_CTRL = BIT(5),
+ GSI_GENERAL = BIT(6),
+ /* IRQ types 7-31 (and their bit values) are reserved */
};
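
Note the semantic shift: these enumerators now carry BIT() values
rather than bit positions, so callers can OR them together directly
when programming the TYPE_IRQ mask registers. A hypothetical
illustration (the variable name is ours):

	/* Enable channel-control, event-control and IEOB interrupts;
	 * no BIT() wrapping of the enumerators is needed any more.
	 */
	u32 enabled = GSI_CH_CTRL | GSI_EV_CTRL | GSI_IEOB;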
-#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
- (0x0001f090 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
- (0x0001f094 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
- (0x0001f098 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
- (0x0001f09c + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
- (0x0001f0a0 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
- (0x0001f0a4 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
- (0x0001f0b0 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
- (0x0001f0b8 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
- (0x0001f0c0 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
- (0x0001f100 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
- (0x0001f108 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
- (0x0001f110 + 0x4000 * GSI_EE_AP)
-/* Values here are bit positions in the GLOB_IRQ_* registers */
+/** enum gsi_global_irq_id: Global GSI interrupt events */
enum gsi_global_irq_id {
- ERROR_INT = 0x0,
- GP_INT1 = 0x1,
- GP_INT2 = 0x2,
- GP_INT3 = 0x3,
+ ERROR_INT = BIT(0),
+ GP_INT1 = BIT(1),
+ GP_INT2 = BIT(2),
+ GP_INT3 = BIT(3),
+ /* Global IRQ types 4-31 (and their bit values) are reserved */
};
-#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
- (0x0001f118 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
- (0x0001f120 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
- (0x0001f128 + 0x4000 * GSI_EE_AP)
-/* Values here are bit positions in the (general) GSI_IRQ_* registers */
-enum gsi_general_id {
- BREAK_POINT = 0x0,
- BUS_ERROR = 0x1,
- CMD_FIFO_OVRFLOW = 0x2,
- MCS_STACK_OVRFLOW = 0x3,
+/** enum gsi_general_irq_id: GSI general IRQ conditions */
+enum gsi_general_irq_id {
+ BREAK_POINT = BIT(0),
+ BUS_ERROR = BIT(1),
+ CMD_FIFO_OVRFLOW = BIT(2),
+ MCS_STACK_OVRFLOW = BIT(3),
+ /* General IRQ types 4-31 (and their bit values) are reserved */
};
-#define GSI_CNTXT_INTSET_OFFSET \
- (0x0001f180 + 0x4000 * GSI_EE_AP)
-#define INTYPE_FMASK GENMASK(0, 0)
-
-#define GSI_ERROR_LOG_OFFSET \
- (0x0001f200 + 0x4000 * GSI_EE_AP)
+/* CNTXT_INTSET register */
+enum gsi_cntxt_intset_field_id {
+ INTYPE,
+};
-/* Fields below are present for IPA v3.5.1 and above */
-#define ERR_ARG3_FMASK GENMASK(3, 0)
-#define ERR_ARG2_FMASK GENMASK(7, 4)
-#define ERR_ARG1_FMASK GENMASK(11, 8)
-#define ERR_CODE_FMASK GENMASK(15, 12)
-#define ERR_VIRT_IDX_FMASK GENMASK(23, 19)
-#define ERR_TYPE_FMASK GENMASK(27, 24)
-#define ERR_EE_FMASK GENMASK(31, 28)
+/* ERROR_LOG register */
+enum gsi_error_log_field_id {
+ ERR_ARG3,
+ ERR_ARG2,
+ ERR_ARG1,
+ ERR_CODE,
+ ERR_VIRT_IDX,
+ ERR_TYPE,
+ ERR_EE,
+};
/** enum gsi_err_code - ERR_CODE field values in EE_ERR_LOG */
enum gsi_err_code {
@@ -400,13 +328,11 @@ enum gsi_err_type {
GSI_ERR_TYPE_EVT = 0x3,
};
-#define GSI_ERROR_LOG_CLR_OFFSET \
- (0x0001f210 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SCRATCH_0_OFFSET \
- (0x0001f400 + 0x4000 * GSI_EE_AP)
-#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
-#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
+/* CNTXT_SCRATCH_0 register */
+enum gsi_cntxt_scratch_0_field_id {
+ INTER_EE_RESULT,
+ GENERIC_EE_RESULT,
+};
/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
enum gsi_generic_ee_result {
@@ -419,4 +345,34 @@ enum gsi_generic_ee_result {
GENERIC_EE_NO_RESOURCES = 0x7,
};
+extern const struct regs gsi_regs_v3_1;
+extern const struct regs gsi_regs_v3_5_1;
+extern const struct regs gsi_regs_v4_0;
+extern const struct regs gsi_regs_v4_5;
+extern const struct regs gsi_regs_v4_9;
+extern const struct regs gsi_regs_v4_11;
+
+/**
+ * gsi_reg() - Return the structure describing a GSI register
+ * @gsi: GSI pointer
+ * @reg_id: GSI register ID
+ */
+const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id);
+
+/**
+ * gsi_reg_init() - Perform GSI register initialization
+ * @gsi: GSI pointer
+ * @pdev: GSI (IPA) platform device
+ *
+ * Initialize GSI registers, including looking up and I/O mapping
+ * the "gsi" memory space.
+ */
+int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev);
+
+/**
+ * gsi_reg_exit() - Inverse of gsi_reg_init()
+ * @gsi: GSI pointer
+ */
+void gsi_reg_exit(struct gsi *gsi);
+
#endif /* _GSI_REG_H_ */
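
Tying the new interface together: gsi_reg_init() selects one of the
gsi_regs_v* tables above to match the hardware, and gsi_reg() returns
the descriptor for a given register ID. A hedged usage sketch, assuming
gsi->virt is the mapped register space and channel_id is a valid AP
channel number:

	/* Sketch: program a channel's weighted round-robin weight */
	const struct reg *reg = gsi_reg(gsi, CH_C_QOS);
	u32 val = reg_encode(reg, WRR_WEIGHT, 1);

	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));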
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 5372db58b5bd..f3355e040a9e 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -45,7 +45,6 @@ struct ipa_interrupt;
* @interrupt: IPA Interrupt information
* @uc_powered: true if power is active by proxy for microcontroller
* @uc_loaded: true after microcontroller has reported it's ready
- * @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
* @regs: IPA register definitions
* @mem_addr: DMA address of IPA-local memory space
@@ -97,9 +96,8 @@ struct ipa {
bool uc_powered;
bool uc_loaded;
- dma_addr_t reg_addr;
void __iomem *reg_virt;
- const struct ipa_regs *regs;
+ const struct regs *regs;
dma_addr_t mem_addr;
void *mem_virt;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index bb3dfa9a2bc8..f1419fbd776c 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -94,11 +94,11 @@ struct ipa_cmd_register_write {
/* IPA_CMD_IP_PACKET_INIT */
struct ipa_cmd_ip_packet_init {
- u8 dest_endpoint;
+ u8 dest_endpoint; /* Full 8 bits used for IPA v5.0+ */
u8 reserved[7];
};
-/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
+/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
/* IPA_CMD_DMA_SHARED_MEM */
@@ -157,9 +157,14 @@ static void ipa_cmd_validate_build(void)
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
- /* Valid endpoint numbers must fit in the IP packet init command */
- BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
- IPA_ENDPOINT_MAX - 1);
+ /* Prior to IPA v5.0, we supported no more than 32 endpoints,
+ * and this was reflected in some 5-bit fields that held
+ * endpoint numbers. Starting with IPA v5.0, the widths of
+ * these fields were extended to 8 bits, meaning up to 256
+ * endpoints. If the driver claims to support more than
+ * that it's an error.
+ */
+ BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
}
/* Validate a memory region holding a table */
@@ -282,7 +287,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
const char *name;
u32 offset;
@@ -290,8 +295,12 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- offset = ipa_reg_offset(reg);
+ if (ipa->version < IPA_VERSION_5_0)
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ else
+ reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
+
+ offset = reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -305,7 +314,7 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* fits in the register write command field(s) that must hold it.
*/
reg = ipa_reg(ipa, ENDP_STATUS);
- offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
+ offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -486,8 +495,13 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_init;
- payload->dest_endpoint = u8_encode_bits(endpoint_id,
- IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+ if (ipa->version < IPA_VERSION_5_0) {
+ payload->dest_endpoint =
+ u8_encode_bits(endpoint_id,
+ IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+ } else {
+ payload->dest_endpoint = endpoint_id;
+ }
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 136932464261..2ee80ed140b7 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -34,41 +34,181 @@
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-/** enum ipa_status_opcode - status element opcode hardware values */
-enum ipa_status_opcode {
- IPA_STATUS_OPCODE_PACKET = 0x01,
- IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
- IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
- IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
+/** enum ipa_status_opcode - IPA status opcode field hardware values */
+enum ipa_status_opcode { /* *Not* a bitmask */
+ IPA_STATUS_OPCODE_PACKET = 1,
+ IPA_STATUS_OPCODE_NEW_RULE_PACKET = 2,
+ IPA_STATUS_OPCODE_DROPPED_PACKET = 4,
+ IPA_STATUS_OPCODE_SUSPENDED_PACKET = 8,
+ IPA_STATUS_OPCODE_LOG = 16,
+ IPA_STATUS_OPCODE_DCMP = 32,
+ IPA_STATUS_OPCODE_PACKET_2ND_PASS = 64,
};
-/** enum ipa_status_exception - status element exception type */
-enum ipa_status_exception {
+/** enum ipa_status_exception - IPA status exception field hardware values */
+enum ipa_status_exception { /* *Not* a bitmask */
/* 0 means no exception */
- IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
+ IPA_STATUS_EXCEPTION_DEAGGR = 1,
+ IPA_STATUS_EXCEPTION_IPTYPE = 4,
+ IPA_STATUS_EXCEPTION_PACKET_LENGTH = 8,
+ IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 16,
+ IPA_STATUS_EXCEPTION_SW_FILTER = 32,
+ IPA_STATUS_EXCEPTION_NAT = 64, /* IPv4 */
+ IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK = 64, /* IPv6 */
+ IPA_STATUS_EXCEPTION_UC = 128,
+ IPA_STATUS_EXCEPTION_INVALID_ENDPOINT = 129,
+ IPA_STATUS_EXCEPTION_HEADER_INSERT = 136,
+ IPA_STATUS_EXCEPTION_CHECKSUM = 229,
};
-/* Status element provided by hardware */
-struct ipa_status {
- u8 opcode; /* enum ipa_status_opcode */
- u8 exception; /* enum ipa_status_exception */
- __le16 mask;
- __le16 pkt_len;
- u8 endp_src_idx;
- u8 endp_dst_idx;
- __le32 metadata;
- __le32 flags1;
- __le64 flags2;
- __le32 flags3;
- __le32 flags4;
+/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
+enum ipa_status_mask {
+ IPA_STATUS_MASK_FRAG_PROCESS = BIT(0),
+ IPA_STATUS_MASK_FILT_PROCESS = BIT(1),
+ IPA_STATUS_MASK_NAT_PROCESS = BIT(2),
+ IPA_STATUS_MASK_ROUTE_PROCESS = BIT(3),
+ IPA_STATUS_MASK_TAG_VALID = BIT(4),
+ IPA_STATUS_MASK_FRAGMENT = BIT(5),
+ IPA_STATUS_MASK_FIRST_FRAGMENT = BIT(6),
+ IPA_STATUS_MASK_V4 = BIT(7),
+ IPA_STATUS_MASK_CKSUM_PROCESS = BIT(8),
+ IPA_STATUS_MASK_AGGR_PROCESS = BIT(9),
+ IPA_STATUS_MASK_DEST_EOT = BIT(10),
+ IPA_STATUS_MASK_DEAGGR_PROCESS = BIT(11),
+ IPA_STATUS_MASK_DEAGG_FIRST = BIT(12),
+ IPA_STATUS_MASK_SRC_EOT = BIT(13),
+ IPA_STATUS_MASK_PREV_EOT = BIT(14),
+ IPA_STATUS_MASK_BYTE_LIMIT = BIT(15),
};
-/* Field masks for struct ipa_status structure fields */
-#define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
-#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
-#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
-#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
-#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
+/* Special IPA filter/router rule field value indicating "rule miss" */
+#define IPA_STATUS_RULE_MISS 0x3ff /* 10-bit filter/router rule fields */
+
+/* The IPA status nat_type field uses enum ipa_nat_type hardware values */
+
+/* enum ipa_status_field_id - IPA packet status structure field identifiers */
+enum ipa_status_field_id {
+ STATUS_OPCODE, /* enum ipa_status_opcode */
+ STATUS_EXCEPTION, /* enum ipa_status_exception */
+ STATUS_MASK, /* enum ipa_status_mask (bitmask) */
+ STATUS_LENGTH,
+ STATUS_SRC_ENDPOINT,
+ STATUS_DST_ENDPOINT,
+ STATUS_METADATA,
+ STATUS_FILTER_LOCAL, /* Boolean */
+ STATUS_FILTER_HASH, /* Boolean */
+ STATUS_FILTER_GLOBAL, /* Boolean */
+ STATUS_FILTER_RETAIN, /* Boolean */
+ STATUS_FILTER_RULE_INDEX,
+ STATUS_ROUTER_LOCAL, /* Boolean */
+ STATUS_ROUTER_HASH, /* Boolean */
+ STATUS_UCP, /* Boolean */
+ STATUS_ROUTER_TABLE,
+ STATUS_ROUTER_RULE_INDEX,
+ STATUS_NAT_HIT, /* Boolean */
+ STATUS_NAT_INDEX,
+ STATUS_NAT_TYPE, /* enum ipa_nat_type */
+ STATUS_TAG_LOW32, /* Low-order 32 bits of 48-bit tag */
+ STATUS_TAG_HIGH16, /* High-order 16 bits of 48-bit tag */
+ STATUS_SEQUENCE,
+ STATUS_TIME_OF_DAY,
+ STATUS_HEADER_LOCAL, /* Boolean */
+ STATUS_HEADER_OFFSET,
+ STATUS_FRAG_HIT, /* Boolean */
+ STATUS_FRAG_RULE_INDEX,
+};
+
+/* Size in bytes of an IPA packet status structure */
+#define IPA_STATUS_SIZE sizeof(__le32[8])
+
+/* IPA status structure decoder; looks up field values for a structure */
+static u32 ipa_status_extract(struct ipa *ipa, const void *data,
+ enum ipa_status_field_id field)
+{
+ enum ipa_version version = ipa->version;
+ const __le32 *word = data;
+
+ switch (field) {
+ case STATUS_OPCODE:
+ return le32_get_bits(word[0], GENMASK(7, 0));
+ case STATUS_EXCEPTION:
+ return le32_get_bits(word[0], GENMASK(15, 8));
+ case STATUS_MASK:
+ return le32_get_bits(word[0], GENMASK(31, 16));
+ case STATUS_LENGTH:
+ return le32_get_bits(word[1], GENMASK(15, 0));
+ case STATUS_SRC_ENDPOINT:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[1], GENMASK(20, 16));
+ return le32_get_bits(word[1], GENMASK(23, 16));
+ /* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
+ /* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
+ case STATUS_DST_ENDPOINT:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[1], GENMASK(28, 24));
+ return le32_get_bits(word[7], GENMASK(23, 16));
+ /* Status word 1, bits 29-31 are reserved */
+ case STATUS_METADATA:
+ return le32_to_cpu(word[2]);
+ case STATUS_FILTER_LOCAL:
+ return le32_get_bits(word[3], GENMASK(0, 0));
+ case STATUS_FILTER_HASH:
+ return le32_get_bits(word[3], GENMASK(1, 1));
+ case STATUS_FILTER_GLOBAL:
+ return le32_get_bits(word[3], GENMASK(2, 2));
+ case STATUS_FILTER_RETAIN:
+ return le32_get_bits(word[3], GENMASK(3, 3));
+ case STATUS_FILTER_RULE_INDEX:
+ return le32_get_bits(word[3], GENMASK(13, 4));
+ /* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
+ case STATUS_ROUTER_LOCAL:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(14, 14));
+ return le32_get_bits(word[1], GENMASK(27, 27));
+ case STATUS_ROUTER_HASH:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(15, 15));
+ return le32_get_bits(word[1], GENMASK(28, 28));
+ case STATUS_UCP:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(16, 16));
+ return le32_get_bits(word[7], GENMASK(31, 31));
+ case STATUS_ROUTER_TABLE:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(21, 17));
+ return le32_get_bits(word[3], GENMASK(21, 14));
+ case STATUS_ROUTER_RULE_INDEX:
+ return le32_get_bits(word[3], GENMASK(31, 22));
+ case STATUS_NAT_HIT:
+ return le32_get_bits(word[4], GENMASK(0, 0));
+ case STATUS_NAT_INDEX:
+ return le32_get_bits(word[4], GENMASK(13, 1));
+ case STATUS_NAT_TYPE:
+ return le32_get_bits(word[4], GENMASK(15, 14));
+ case STATUS_TAG_LOW32:
+ return le32_get_bits(word[4], GENMASK(31, 16)) |
+ (le32_get_bits(word[5], GENMASK(15, 0)) << 16);
+ case STATUS_TAG_HIGH16:
+ return le32_get_bits(word[5], GENMASK(31, 16));
+ case STATUS_SEQUENCE:
+ return le32_get_bits(word[6], GENMASK(7, 0));
+ case STATUS_TIME_OF_DAY:
+ return le32_get_bits(word[6], GENMASK(31, 8));
+ case STATUS_HEADER_LOCAL:
+ return le32_get_bits(word[7], GENMASK(0, 0));
+ case STATUS_HEADER_OFFSET:
+ return le32_get_bits(word[7], GENMASK(10, 1));
+ case STATUS_FRAG_HIT:
+ return le32_get_bits(word[7], GENMASK(11, 11));
+ case STATUS_FRAG_RULE_INDEX:
+ return le32_get_bits(word[7], GENMASK(15, 12));
+ /* Status word 7, bits 16-30 are reserved */
+ /* Status word 7, bit 31 is reserved (not IPA v5.0+) */
+ default:
+ WARN(true, "%s: bad field_id %u\n", __func__, field);
+ return 0;
+ }
+}
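
With the fixed struct ipa_status gone, callers hand ipa_status_extract()
the raw buffer and a field ID, and all version-specific layout knowledge
stays inside the decoder. A hypothetical caller (the function name and
dev_dbg() message are ours):

	static void ipa_status_debug(struct ipa *ipa, const void *data)
	{
		u32 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
		u32 length = ipa_status_extract(ipa, data, STATUS_LENGTH);

		dev_dbg(&ipa->pdev->dev, "status: opcode %u, length %u\n",
			opcode, length);
	}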
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
@@ -101,7 +241,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (!data->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 buffer_size;
u32 aggr_size;
u32 limit;
@@ -164,7 +304,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
rx_config->aggr_hard_limit);
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
- limit = ipa_reg_field_max(reg, BYTE_LIMIT);
+ limit = reg_field_max(reg, BYTE_LIMIT);
if (aggr_size > limit) {
dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
data->endpoint_id, aggr_size, limit);
@@ -307,7 +447,7 @@ static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 field_id;
u32 offset;
bool state;
@@ -320,11 +460,11 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
WARN_ON(ipa->version >= IPA_VERSION_4_0);
reg = ipa_reg(ipa, ENDP_INIT_CTRL);
- offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
- mask = ipa_reg_bit(reg, field_id);
+ mask = reg_bit(reg, field_id);
state = !!(val & mask);
@@ -353,13 +493,13 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
- val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
return !!(val & BIT(endpoint_id % 32));
}
@@ -370,12 +510,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
u32 mask = BIT(endpoint_id % 32);
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
- iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
}
/**
@@ -473,7 +613,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
/* We only reset modem TX endpoints */
@@ -482,7 +622,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
continue;
reg = ipa_reg(ipa, ENDP_STATUS);
- offset = ipa_reg_n_offset(reg, endpoint_id);
+ offset = reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
@@ -505,7 +645,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_CFG);
@@ -518,7 +658,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
/* Checksum header offset is in 4-byte units */
off = sizeof(struct rmnet_map_header) / sizeof(u32);
- val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
+ val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
@@ -531,26 +671,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
- val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
+ val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
reg = ipa_reg(ipa, ENDP_INIT_NAT);
- val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
+ val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static u32
@@ -576,13 +716,13 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
static u32 ipa_header_size_encode(enum ipa_version version,
- const struct ipa_reg *reg, u32 header_size)
+ const struct reg *reg, u32 header_size)
{
- u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
+ u32 field_max = reg_field_max(reg, HDR_LEN);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
- val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
+ val = reg_encode(reg, HDR_LEN, header_size & field_max);
if (version < IPA_VERSION_4_5) {
WARN_ON(header_size > field_max);
return val;
@@ -590,21 +730,21 @@ static u32 ipa_header_size_encode(enum ipa_version version,
/* IPA v4.5 adds a few more most-significant bits */
header_size >>= hweight32(field_max);
- WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
- val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
+ WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
+ val |= reg_encode(reg, HDR_LEN_MSB, header_size);
return val;
}
/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
- const struct ipa_reg *reg, u32 offset)
+ const struct reg *reg, u32 offset)
{
- u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
+ u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
- val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
+ val = reg_encode(reg, HDR_OFST_METADATA, offset);
if (version < IPA_VERSION_4_5) {
WARN_ON(offset > field_max);
return val;
@@ -612,8 +752,8 @@ static u32 ipa_metadata_offset_encode(enum ipa_version version,
/* IPA v4.5 adds a few more most-significant bits */
offset >>= hweight32(field_max);
- WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
- val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+ WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
return val;
}
@@ -643,7 +783,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR);
@@ -666,13 +806,13 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
- off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
- val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
- val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
+ val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
- val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
+ val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
@@ -680,7 +820,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
@@ -688,13 +828,13 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
u32 pad_align = endpoint->config.rx.pad_align;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
- val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
+ val |= reg_bit(reg, HDR_ENDIANNESS); /* big endian */
/* A QMAP header contains a 6 bit pad field at offset 0.
* The RMNet driver assumes this field is meaningful in
@@ -704,16 +844,16 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
* (although 0) should be ignored.
*/
if (!endpoint->toward_ipa) {
- val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
+ val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
+ val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
- val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
+ val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
@@ -721,25 +861,25 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->config.qmap && !endpoint->toward_ipa) {
- u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
u32 off; /* Field offset within header */
off = offsetof(struct rmnet_map_header, pkt_len);
/* Low bits are in the ENDP_INIT_HDR register */
off >>= hweight32(mask);
- val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
u32 offset;
@@ -747,7 +887,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
return; /* Register not valid for TX endpoints */
reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
- offset = ipa_reg_n_offset(reg, endpoint_id);
+ offset = reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
@@ -759,7 +899,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -771,82 +911,90 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
- val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
- val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
+ val = reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
- val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
+ val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
- offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
iowrite32(val, ipa->reg_virt + offset);
}
-/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
- * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
- * they're configured to have granularity 100 usec and 1 msec, respectively.
- *
- * The return value is the positive or negative Qtime value to use to
- * express the (microsecond) time provided. A positive return value
- * means pulse generator 0 can be used; otherwise use pulse generator 1.
+/* For IPA v4.5+, times are expressed using Qtime. A time is represented
+ * at one of several available granularities, which are configured in
+ * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
+ * generators are set up with different "tick" periods. A Qtime value
+ * encodes a tick count along with an indication of a pulse generator
+ * (which has a fixed tick period). Two pulse generators are always
+ * available to the AP; a third is available starting with IPA v5.0.
+ * This function determines which pulse generator most accurately
+ * represents the time period provided, and returns the tick count to
+ * use to represent that time.
*/
-static int ipa_qtime_val(u32 microseconds, u32 max)
-{
- u32 val;
-
- /* Use 100 microsecond granularity if possible */
- val = DIV_ROUND_CLOSEST(microseconds, 100);
- if (val <= max)
- return (int)val;
-
- /* Have to use pulse generator 1 (millisecond granularity) */
- val = DIV_ROUND_CLOSEST(microseconds, 1000);
- WARN_ON(val > max);
+static u32
+ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
+{
+ u32 which = 0;
+ u32 ticks;
+
+ /* Pulse generator 0 has 100 microsecond granularity */
+ ticks = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (ticks <= max)
+ goto out;
+
+ /* Pulse generator 1 has millisecond granularity */
+ which = 1;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
+ if (ticks <= max)
+ goto out;
+
+ if (ipa->version >= IPA_VERSION_5_0) {
+ /* Pulse generator 2 has 10 millisecond granularity */
+ which = 2;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
+ }
+ WARN_ON(ticks > max);
+out:
+ *select = which;
- return (int)-val;
+ return ticks;
}
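
To make the selection concrete, a worked example assuming an 8-bit
TIME_LIMIT field (max = 255):

	/* Illustrative only:
	 *   10,000 us ->  100 ticks @ 100 us -> fits, *select = 0
	 *  100,000 us -> 1000 ticks @ 100 us -> too big;
	 *                 100 ticks @ 1 ms   -> fits, *select = 1
	 */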
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
+ u32 ticks;
u32 max;
- u32 val;
if (!microseconds)
return 0; /* Nothing to compute if time limit is 0 */
- max = ipa_reg_field_max(reg, TIME_LIMIT);
+ max = reg_field_max(reg, TIME_LIMIT);
if (ipa->version >= IPA_VERSION_4_5) {
- u32 gran_sel;
- int ret;
-
- /* Compute the Qtime limit value to use */
- ret = ipa_qtime_val(microseconds, max);
- if (ret < 0) {
- val = -ret;
- gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
- } else {
- val = ret;
- gran_sel = 0;
- }
+ u32 select;
- return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
+
+ return reg_encode(reg, AGGR_GRAN_SEL, select) |
+ reg_encode(reg, TIME_LIMIT, ticks);
}
/* We program aggregation granularity in ipa_hardware_config() */
- val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
- WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+ ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
microseconds, max * IPA_AGGR_GRANULARITY);
- return ipa_reg_encode(reg, TIME_LIMIT, val);
+ return reg_encode(reg, TIME_LIMIT, ticks);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
@@ -857,13 +1005,13 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
u32 limit;
rx_config = &endpoint->config.rx;
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
- val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
- val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
+ val |= reg_encode(reg, BYTE_LIMIT, limit);
limit = rx_config->aggr_time_limit;
val |= aggr_time_limit_encode(ipa, reg, limit);
@@ -871,20 +1019,20 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* AGGR_PKT_LIMIT is 0 (unlimited) */
if (rx_config->aggr_close_eof)
- val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
+ val |= reg_bit(reg, SW_EOF_ACTIVE);
} else {
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
- val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
+ val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
@@ -895,7 +1043,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
* Return the encoded value representing the timeout period provided
* that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
-static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
u32 width;
@@ -909,21 +1057,14 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
return 0; /* Nothing to compute if timer period is 0 */
if (ipa->version >= IPA_VERSION_4_5) {
- u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
- u32 gran_sel;
- int ret;
-
- /* Compute the Qtime limit value to use */
- ret = ipa_qtime_val(microseconds, max);
- if (ret < 0) {
- val = -ret;
- gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
- } else {
- val = ret;
- gran_sel = 0;
- }
+ u32 max = reg_field_max(reg, TIMER_LIMIT);
+ u32 select;
+ u32 ticks;
+
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
- return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
+ return reg_encode(reg, TIMER_GRAN_SEL, select) |
+ reg_encode(reg, TIMER_LIMIT, ticks);
}
/* Use 64 bit arithmetic to avoid overflow */
@@ -931,11 +1072,11 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
/* We still need the result to fit into the field */
- WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
+ WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
- return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
+ return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
@@ -946,7 +1087,7 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
* such that high bit is included.
*/
high = fls(ticks); /* 1..32 (or warning above) */
- width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
+ width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
@@ -956,8 +1097,8 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
scale++;
}
- val = ipa_reg_encode(reg, TIMER_SCALE, scale);
- val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
+ val = reg_encode(reg, TIMER_SCALE, scale);
+ val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
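
For the IPA v4.2 base/scale path, the programmed period is roughly
TIMER_BASE_VALUE << TIMER_SCALE ticks. A worked example assuming a
5-bit base field:

	/* Illustrative only, ticks = 4660 (0x1234), field width 5:
	 *   high = fls(4660) = 13, so scale = 13 - 5 = 8
	 *   round: 4660 + BIT(7) = 4788; base = 4788 >> 8 = 18
	 * The hardware counts about base << scale = 4608 ticks.
	 */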
@@ -968,14 +1109,14 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
val = hol_block_timer_encode(ipa, reg, microseconds);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void
@@ -983,13 +1124,13 @@ ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
- offset = ipa_reg_n_offset(reg, endpoint_id);
- val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
+ offset = reg_n_offset(reg, endpoint_id);
+ val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
iowrite32(val, ipa->reg_virt + offset);
@@ -1030,7 +1171,7 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
@@ -1042,7 +1183,7 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
@@ -1050,20 +1191,20 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
u32 resource_group = endpoint->config.resource_group;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
- val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+ val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
@@ -1072,14 +1213,14 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
reg = ipa_reg(ipa, ENDP_INIT_SEQ);
/* Low-order byte configures primary packet processing */
- val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
+ val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
/* Second byte (if supported) configures replicated packet processing */
if (ipa->version < IPA_VERSION_4_5)
- val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
- endpoint->config.tx.seq_rep_type);
+ val |= reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/**
@@ -1129,12 +1270,12 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_STATUS);
if (endpoint->config.status_enable) {
- val |= ipa_reg_bit(reg, STATUS_EN);
+ val |= reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
@@ -1142,16 +1283,15 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
- val |= ipa_reg_encode(reg, STATUS_ENDP,
- status_endpoint_id);
+ val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
}
- /* STATUS_LOCATION is 0, meaning status element precedes
- * packet (not present for IPA v4.5+)
+ /* STATUS_LOCATION is 0, meaning IPA packet status
+ * precedes the packet (not present for IPA v4.5+)
*/
/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
@@ -1302,8 +1442,8 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
return skb != NULL;
}
-/* The format of a packet status element is the same for several status
- * types (opcodes). Other types aren't currently supported.
+/* The format of an IPA packet status structure is the same for several
+ * status types (opcodes). Other types aren't currently supported.
*/
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
@@ -1318,31 +1458,34 @@ static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
}
}
-static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
- const struct ipa_status *status)
+static bool
+ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
{
+ struct ipa *ipa = endpoint->ipa;
+ enum ipa_status_opcode opcode;
u32 endpoint_id;
- if (!ipa_status_format_packet(status->opcode))
+ opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
+ if (!ipa_status_format_packet(opcode))
return true;
- if (!status->pkt_len)
- return true;
- endpoint_id = u8_get_bits(status->endp_dst_idx,
- IPA_STATUS_DST_IDX_FMASK);
+
+ endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
if (endpoint_id != endpoint->endpoint_id)
return true;
return false; /* Don't skip this packet, process it */
}
-static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
- const struct ipa_status *status)
+static bool
+ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
{
struct ipa_endpoint *command_endpoint;
+ enum ipa_status_mask status_mask;
struct ipa *ipa = endpoint->ipa;
u32 endpoint_id;
- if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
+ status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
+ if (!(status_mask & IPA_STATUS_MASK_TAG_VALID))
return false; /* No valid tag */
/* The status contains a valid tag. We know the packet was sent to
@@ -1350,8 +1493,7 @@ static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
* If the packet came from the AP->command TX endpoint we know
* this packet was sent as part of the pipeline clear process.
*/
- endpoint_id = u8_get_bits(status->endp_src_idx,
- IPA_STATUS_SRC_IDX_FMASK);
+ endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
if (endpoint_id == command_endpoint->endpoint_id) {
complete(&ipa->completion);
@@ -1365,23 +1507,26 @@ static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
}
/* Return whether the status indicates the packet should be dropped */
-static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
- const struct ipa_status *status)
+static bool
+ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
{
- u32 val;
+ enum ipa_status_exception exception;
+ struct ipa *ipa = endpoint->ipa;
+ u32 rule;
/* If the status indicates a tagged transfer, we'll drop the packet */
- if (ipa_endpoint_status_tag(endpoint, status))
+ if (ipa_endpoint_status_tag_valid(endpoint, data))
return true;
/* Deaggregation exceptions we drop; all other types we consume */
- if (status->exception)
- return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
+ exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
+ if (exception)
+ return exception == IPA_STATUS_EXCEPTION_DEAGGR;
/* Drop the packet if it fails to match a routing rule; otherwise no */
- val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+ rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
- return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+ return rule == IPA_STATUS_RULE_MISS;
}
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
@@ -1390,47 +1535,46 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
u32 buffer_size = endpoint->config.rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
+ struct ipa *ipa = endpoint->ipa;
u32 resid = total_len;
while (resid) {
- const struct ipa_status *status = data;
+ u32 length;
u32 align;
u32 len;
- if (resid < sizeof(*status)) {
+ if (resid < IPA_STATUS_SIZE) {
dev_err(&endpoint->ipa->pdev->dev,
"short message (%u bytes < %zu byte status)\n",
- resid, sizeof(*status));
+ resid, IPA_STATUS_SIZE);
break;
}
/* Skip over status packets that lack packet data */
- if (ipa_endpoint_status_skip(endpoint, status)) {
- data += sizeof(*status);
- resid -= sizeof(*status);
+ length = ipa_status_extract(ipa, data, STATUS_LENGTH);
+ if (!length || ipa_endpoint_status_skip(endpoint, data)) {
+ data += IPA_STATUS_SIZE;
+ resid -= IPA_STATUS_SIZE;
continue;
}
/* Compute the amount of buffer space consumed by the packet,
- * including the status element. If the hardware is configured
- * to pad packet data to an aligned boundary, account for that.
+ * including the status. If the hardware is configured to
+ * pad packet data to an aligned boundary, account for that.
* And if checksum offload is enabled a trailer containing
* computed checksum information will be appended.
*/
align = endpoint->config.rx.pad_align ? : 1;
- len = le16_to_cpu(status->pkt_len);
- len = sizeof(*status) + ALIGN(len, align);
+ len = IPA_STATUS_SIZE + ALIGN(length, align);
if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
- if (!ipa_endpoint_status_drop(endpoint, status)) {
+ if (!ipa_endpoint_status_drop(endpoint, data)) {
void *data2;
u32 extra;
- u32 len2;
/* Client receives only packet data (no status) */
- data2 = data + sizeof(*status);
- len2 = le16_to_cpu(status->pkt_len);
+ data2 = data + IPA_STATUS_SIZE;
/* Have the true size reflect the extra unused space in
* the original receive buffer. Distribute the "cost"
@@ -1438,7 +1582,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
* buffer.
*/
extra = DIV_ROUND_CLOSEST(unused * len, total_len);
- ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
+ ipa_endpoint_skb_copy(endpoint, data2, length, extra);
}
/* Consume status and the full packet it describes */
@@ -1491,18 +1635,18 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
- val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
- val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
/* ROUTE_DEF_HDR_OFST is 0 */
- val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
- val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
+ val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
@@ -1840,8 +1984,9 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 endpoint_id;
+ u32 hw_limit;
u32 tx_count;
u32 rx_count;
u32 rx_base;
@@ -1873,12 +2018,12 @@ int ipa_endpoint_config(struct ipa *ipa)
* the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
/* Our RX is an IPA producer; our TX is an IPA consumer. */
- tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
- rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
- rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
+ tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
+ rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
+ rx_base = reg_decode(reg, PROD_LOWEST, val);
limit = rx_base + rx_count;
if (limit > IPA_ENDPOINT_MAX) {
@@ -1887,6 +2032,14 @@ int ipa_endpoint_config(struct ipa *ipa)
return -EINVAL;
}
+ /* Until IPA v5.0, hardware supported at most 32 endpoints */
+ hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
+ if (limit > hw_limit) {
+ dev_err(dev, "unexpected endpoint count, %u > %u\n",
+ limit, hw_limit);
+ return -EINVAL;
+ }
+
/* Allocate and initialize the available endpoint bitmap */
ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
if (!ipa->available)
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 4a5c3bc549df..3ad2e802040a 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
@@ -38,7 +38,7 @@ enum ipa_endpoint_name {
IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
};
-#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
+#define IPA_ENDPOINT_MAX 36 /* Max supported by driver */
/**
* struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c1b3977e1ae4..4bc05948f772 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -22,10 +22,13 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_endpoint.h"
+#include "ipa_power.h"
+#include "ipa_uc.h"
#include "ipa_interrupt.h"
/**
@@ -33,47 +36,47 @@
* @ipa: IPA pointer
* @irq: Linux IRQ number used for IPA interrupts
* @enabled: Mask indicating which interrupts are enabled
- * @handler: Array of handlers indexed by IPA interrupt ID
*/
struct ipa_interrupt {
struct ipa *ipa;
u32 irq;
u32 enabled;
- ipa_irq_handler_t handler[IPA_IRQ_COUNT];
};
-/* Returns true if the interrupt type is associated with the microcontroller */
-static bool ipa_interrupt_uc(struct ipa_interrupt *interrupt, u32 irq_id)
-{
- return irq_id == IPA_IRQ_UC_0 || irq_id == IPA_IRQ_UC_1;
-}
-
/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
- bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
- /* For microcontroller interrupts, clear the interrupt right away,
- * "to avoid clearing unhandled interrupts."
- */
reg = ipa_reg(ipa, IPA_IRQ_CLR);
- offset = ipa_reg_offset(reg);
- if (uc_irq)
+ offset = reg_offset(reg);
+
+ switch (irq_id) {
+ case IPA_IRQ_UC_0:
+ case IPA_IRQ_UC_1:
+ /* For microcontroller interrupts, clear the interrupt right
+ * away, "to avoid clearing unhandled interrupts."
+ */
iowrite32(mask, ipa->reg_virt + offset);
-
- if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id])
- interrupt->handler[irq_id](interrupt->ipa, irq_id);
-
- /* Clearing the SUSPEND_TX interrupt also clears the register
- * that tells us which suspended endpoint(s) caused the interrupt,
- * so defer clearing until after the handler has been called.
- */
- if (!uc_irq)
+ ipa_uc_interrupt_handler(ipa, irq_id);
+ break;
+
+ case IPA_IRQ_TX_SUSPEND:
+ /* Clearing the SUSPEND_TX interrupt also clears the
+ * register that tells us which suspended endpoint(s)
+ * caused the interrupt, so defer clearing until after
+ * the handler has been called.
+ */
+ ipa_power_suspend_handler(ipa, irq_id);
+ fallthrough;
+
+ default: /* Silently ignore (and clear) any other condition */
iowrite32(mask, ipa->reg_virt + offset);
+ break;
+ }
}
/* IPA IRQ handler is threaded */
@@ -82,7 +85,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
- const struct ipa_reg *reg;
+ const struct reg *reg;
struct device *dev;
u32 pending;
u32 offset;
@@ -99,7 +102,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
* only the enabled ones.
*/
reg = ipa_reg(ipa, IPA_IRQ_STTS);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
@@ -117,8 +120,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
reg = ipa_reg(ipa, IPA_IRQ_CLR);
- offset = ipa_reg_offset(reg);
- iowrite32(pending, ipa->reg_virt + offset);
+ iowrite32(pending, ipa->reg_virt + reg_offset(reg));
}
out_power_put:
pm_runtime_mark_last_busy(dev);
@@ -127,6 +129,29 @@ out_power_put:
return IRQ_HANDLED;
}
+static void ipa_interrupt_enabled_update(struct ipa *ipa)
+{
+ const struct reg *reg = ipa_reg(ipa, IPA_IRQ_EN);
+
+ iowrite32(ipa->interrupt->enabled, ipa->reg_virt + reg_offset(reg));
+}
+
+/* Enable an IPA interrupt type */
+void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq)
+{
+ /* Update the IPA interrupt mask to enable it */
+ ipa->interrupt->enabled |= BIT(ipa_irq);
+ ipa_interrupt_enabled_update(ipa);
+}
+
+/* Disable an IPA interrupt type */
+void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq)
+{
+ /* Update the IPA interrupt mask to disable it */
+ ipa->interrupt->enabled &= ~BIT(ipa_irq);
+ ipa_interrupt_enabled_update(ipa);
+}
+
void ipa_interrupt_irq_disable(struct ipa *ipa)
{
disable_irq(ipa->interrupt->irq);
@@ -144,7 +169,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id % 32);
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -155,7 +180,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
return;
reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
- offset = ipa_reg_n_offset(reg, unit);
+ offset = reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
if (enable)
@@ -189,18 +214,18 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
unit_count = roundup(ipa->endpoint_count, 32);
for (unit = 0; unit < unit_count; unit++) {
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
continue;
reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
}
}
@@ -210,50 +235,12 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
}
-/* Add a handler for an IPA interrupt */
-void ipa_interrupt_add(struct ipa_interrupt *interrupt,
- enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
-{
- struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
-
- if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
- return;
-
- interrupt->handler[ipa_irq] = handler;
-
- /* Update the IPA interrupt mask to enable it */
- interrupt->enabled |= BIT(ipa_irq);
-
- reg = ipa_reg(ipa, IPA_IRQ_EN);
- iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
-}
-
-/* Remove the handler for an IPA interrupt type */
-void
-ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
-{
- struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
-
- if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
- return;
-
- /* Update the IPA interrupt mask to disable it */
- interrupt->enabled &= ~BIT(ipa_irq);
-
- reg = ipa_reg(ipa, IPA_IRQ_EN);
- iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
-
- interrupt->handler[ipa_irq] = NULL;
-}
-
/* Configure the IPA interrupt framework */
struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
- const struct ipa_reg *reg;
+ const struct reg *reg;
unsigned int irq;
int ret;
@@ -273,7 +260,7 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
/* Start with all IPA interrupts disabled */
reg = ipa_reg(ipa, IPA_IRQ_EN);
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
@@ -282,9 +269,9 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
goto err_kfree;
}
- ret = enable_irq_wake(irq);
+ ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
+ dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
goto err_free_irq;
}
@@ -302,11 +289,8 @@ err_kfree:
void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
{
struct device *dev = &interrupt->ipa->pdev->dev;
- int ret;
- ret = disable_irq_wake(interrupt->irq);
- if (ret)
- dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
+ dev_pm_clear_wake_irq(dev);
free_irq(interrupt->irq, interrupt);
kfree(interrupt);
}
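
With the handler array gone, dispatch is hard-wired into ipa_interrupt_process() above, so clients only toggle the enable bit for the interrupt they care about. A minimal sketch of the new client-side pattern (the example functions are hypothetical; the real users are ipa_power_setup() and ipa_uc_config() later in this patch):

	static int example_client_setup(struct ipa *ipa)
	{
		/* Arm the interrupt; no callback registration is needed */
		ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);

		return 0;
	}

	static void example_client_teardown(struct ipa *ipa)
	{
		ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
	}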
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 8a1bd5b89393..12e3e798ccb3 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -11,39 +11,7 @@
struct ipa;
struct ipa_interrupt;
-
-/**
- * typedef ipa_irq_handler_t - IPA interrupt handler function type
- * @ipa: IPA pointer
- * @irq_id: interrupt type
- *
- * Callback function registered by ipa_interrupt_add() to handle a specific
- * IPA interrupt type
- */
-typedef void (*ipa_irq_handler_t)(struct ipa *ipa, enum ipa_irq_id irq_id);
-
-/**
- * ipa_interrupt_add() - Register a handler for an IPA interrupt type
- * @interrupt: IPA interrupt structure
- * @irq_id: IPA interrupt type
- * @handler: Handler function for the interrupt
- *
- * Add a handler for an IPA interrupt and enable it. IPA interrupt
- * handlers are run in threaded interrupt context, so are allowed to
- * block.
- */
-void ipa_interrupt_add(struct ipa_interrupt *interrupt, enum ipa_irq_id irq_id,
- ipa_irq_handler_t handler);
-
-/**
- * ipa_interrupt_remove() - Remove the handler for an IPA interrupt type
- * @interrupt: IPA interrupt structure
- * @irq_id: IPA interrupt type
- *
- * Remove an IPA interrupt handler and disable it.
- */
-void ipa_interrupt_remove(struct ipa_interrupt *interrupt,
- enum ipa_irq_id irq_id);
+enum ipa_irq_id;
/**
* ipa_interrupt_suspend_enable - Enable TX_SUSPEND for an endpoint
@@ -86,6 +54,20 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
/**
+ * ipa_interrupt_enable() - Enable an IPA interrupt type
+ * @ipa: IPA pointer
+ * @ipa_irq: IPA interrupt ID
+ */
+void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq);
+
+/**
+ * ipa_interrupt_disable() - Disable an IPA interrupt type
+ * @ipa: IPA pointer
+ * @ipa_irq: IPA interrupt ID
+ */
+void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq);
+
+/**
* ipa_interrupt_irq_enable() - Enable IPA interrupts
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 4fb92f771974..6cb7bf96a626 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -203,7 +203,7 @@ static void ipa_teardown(struct ipa *ipa)
static void
ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* IPA v4.5+ has no backward compatibility register */
@@ -212,13 +212,13 @@ ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
reg = ipa_reg(ipa, IPA_BCR);
val = data->backward_compat;
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
static void ipa_hardware_config_tx(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -227,11 +227,11 @@ static void ipa_hardware_config_tx(struct ipa *ipa)
/* Disable PA mask to allow HOLB drop */
reg = ipa_reg(ipa, IPA_TX_CFG);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
- val &= ~ipa_reg_bit(reg, PA_MASK_EN);
+ val &= ~reg_bit(reg, PA_MASK_EN);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -239,7 +239,7 @@ static void ipa_hardware_config_tx(struct ipa *ipa)
static void ipa_hardware_config_clkon(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (version >= IPA_VERSION_4_5)
@@ -252,20 +252,20 @@ static void ipa_hardware_config_clkon(struct ipa *ipa)
reg = ipa_reg(ipa, CLKON_CFG);
if (version == IPA_VERSION_3_1) {
/* Disable MISC clock gating */
- val = ipa_reg_bit(reg, CLKON_MISC);
+ val = reg_bit(reg, CLKON_MISC);
} else { /* IPA v4.0+ */
/* Enable open global clocks in the CLKON configuration */
- val = ipa_reg_bit(reg, CLKON_GLOBAL);
- val |= ipa_reg_bit(reg, GLOBAL_2X_CLK);
+ val = reg_bit(reg, CLKON_GLOBAL);
+ val |= reg_bit(reg, GLOBAL_2X_CLK);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -274,21 +274,22 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
return;
reg = ipa_reg(ipa, COMP_CFG);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
+
val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
} else if (ipa->version < IPA_VERSION_4_5) {
- val |= ipa_reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
+ val |= reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
} else {
/* For IPA v4.5 FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
}
- val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
- val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
+ val |= reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+ val |= reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -299,7 +300,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
@@ -310,29 +311,27 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
/* Max outstanding write accesses for QSB masters */
reg = ipa_reg(ipa, QSB_MAX_WRITES);
- val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
+ val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
if (data->qsb_count > 1)
- val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES,
- data1->max_writes);
+ val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, data1->max_writes);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
reg = ipa_reg(ipa, QSB_MAX_READS);
- val = ipa_reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
+ val = reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= ipa_reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
- data0->max_reads_beats);
+ val |= reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+ data0->max_reads_beats);
if (data->qsb_count > 1) {
- val = ipa_reg_encode(reg, GEN_QMB_1_MAX_READS,
- data1->max_reads);
+ val = reg_encode(reg, GEN_QMB_1_MAX_READS, data1->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
- data1->max_reads_beats);
+ val |= reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+ data1->max_reads_beats);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
@@ -368,41 +367,47 @@ static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
*/
static void ipa_qtime_config(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
- val = ipa_reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
- val |= ipa_reg_bit(reg, DPL_TIMESTAMP_SEL);
+ val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
/* Configure tag and NAT Qtime timestamp resolution as well */
- val = ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
- val = ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+ val |= reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+ val |= reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
- val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
- val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
- val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+ val = reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+ val |= reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+ if (ipa->version >= IPA_VERSION_5_0) {
+ val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
+ val |= reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
+ } else {
+ val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+ }
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
- offset = ipa_reg_offset(reg);
- val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+ offset = reg_offset(reg);
+
+ val = reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
- val |= ipa_reg_bit(reg, DIV_ENABLE);
+ val |= reg_bit(reg, DIV_ENABLE);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -411,13 +416,13 @@ static void ipa_qtime_config(struct ipa *ipa)
static void ipa_hardware_config_counter(struct ipa *ipa)
{
u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, COUNTER_CFG);
/* If defined, EOT_COAL_GRANULARITY is 0 */
- val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ val = reg_encode(reg, AGGR_GRANULARITY, granularity);
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
static void ipa_hardware_config_timing(struct ipa *ipa)
@@ -430,8 +435,13 @@ static void ipa_hardware_config_timing(struct ipa *ipa)
static void ipa_hardware_config_hashing(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
+ /* Other than IPA v4.2, all versions enable "hashing". Starting
+ * with IPA v5.0, the filter and router tables are implemented
+ * differently, but the default configuration enables this feature
+ * (now referred to as "caching"), so there's nothing to do here.
+ */
if (ipa->version != IPA_VERSION_4_2)
return;
@@ -441,26 +451,26 @@ static void ipa_hardware_config_hashing(struct ipa *ipa)
/* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
* IPV4_FILTER_HASH are all zero.
*/
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (ipa->version < IPA_VERSION_3_5_1)
return;
reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
- val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
- enter_idle_debounce_thresh);
+ val = reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+ enter_idle_debounce_thresh);
if (const_non_idle_enable)
- val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE);
+ val |= reg_bit(reg, CONST_NON_IDLE_ENABLE);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/**
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 9ec5af323f73..85096d1efe5b 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -75,7 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
- const struct ipa_reg *reg;
+ const struct reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
@@ -115,8 +115,8 @@ int ipa_mem_setup(struct ipa *ipa)
offset = ipa->mem_offset + mem->offset;
reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
- val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ val = reg_encode(reg, IPA_BASE_ADDR, offset);
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
return 0;
}
@@ -163,6 +163,12 @@ static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
return false;
break;
+ case IPA_MEM_AP_V4_FILTER:
+ case IPA_MEM_AP_V6_FILTER:
+ if (version != IPA_VERSION_5_0)
+ return false;
+ break;
+
case IPA_MEM_NAT_TABLE:
case IPA_MEM_STATS_FILTER_ROUTE:
if (version < IPA_VERSION_4_5)
@@ -312,8 +318,8 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_reg *reg;
const struct ipa_mem *mem;
+ const struct reg *reg;
dma_addr_t addr;
u32 mem_size;
void *virt;
@@ -322,13 +328,13 @@ int ipa_mem_config(struct ipa *ipa)
/* Check the advertised location and size of the shared memory area */
reg = ipa_reg(ipa, SHARED_MEM_SIZE);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
/* The fields in the register are in 8 byte units */
- ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+ ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);
/* Make sure the end is within the region's mapped space */
- mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);
+ mem_size = 8 * reg_decode(reg, MEM_SIZE, val);
/* If the sizes don't match, issue a warning */
if (ipa->mem_offset + mem_size < ipa->mem_size) {
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 570bfdd99bff..868e9c20e8c4 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
@@ -62,13 +62,15 @@ enum ipa_mem_id {
IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0+) */
IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0+) */
IPA_MEM_STATS_QUOTA_AP, /* 0 canaries, optional (IPA v4.0+) */
- IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries, optional (IPA v4.0+) */
IPA_MEM_STATS_DROP, /* 0 canaries, optional (IPA v4.0+) */
- /* The next 5 filter and route statistics regions are optional */
+ /* The next 7 filter and route statistics regions are optional */
IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_AP_V4_FILTER, /* 2 canaries (IPA v5.0) */
+ IPA_MEM_AP_V6_FILTER, /* 0 canaries (IPA v5.0) */
IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5+) */
IPA_MEM_NAT_TABLE, /* 4 canaries, optional (IPA v4.5+) */
IPA_MEM_END_MARKER, /* 1 canary (not a real region) */
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index 8057be8cda80..921eecf3eff6 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -219,17 +219,7 @@ u32 ipa_core_clock_rate(struct ipa *ipa)
return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}
-/**
- * ipa_suspend_handler() - Handle the suspend IPA interrupt
- * @ipa: IPA pointer
- * @irq_id: IPA interrupt type (unused)
- *
- * If an RX endpoint is suspended, and the IPA has a packet destined for
- * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
- * that it should resume the endpoint. If we get one of these interrupts
- * we just wake up the system.
- */
-static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
/* To handle an IPA interrupt we will have resumed the hardware
* just to handle the interrupt, so we're done. If we are in a
@@ -352,12 +342,11 @@ int ipa_power_setup(struct ipa *ipa)
{
int ret;
- ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
- ipa_suspend_handler);
+ ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
ret = device_init_wakeup(&ipa->pdev->dev, true);
if (ret)
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
return ret;
}
@@ -365,7 +354,7 @@ int ipa_power_setup(struct ipa *ipa)
void ipa_power_teardown(struct ipa *ipa)
{
(void)device_init_wakeup(&ipa->pdev->dev, false);
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}
/* Initialize IPA power management */
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 896f052e51a1..3a4c59ea1222 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -10,6 +10,7 @@ struct device;
struct ipa;
struct ipa_power_data;
+enum ipa_irq_id;
/* IPA device power management function block */
extern const struct dev_pm_ops ipa_pm_ops;
@@ -48,6 +49,17 @@ void ipa_power_modem_queue_active(struct ipa *ipa);
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
+ * ipa_power_suspend_handler() - Handler for SUSPEND IPA interrupts
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt ID (unused)
+ *
+ * If an RX endpoint is suspended, and the IPA has a packet destined for
+ * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
+ * that it should resume the endpoint.
+ */
+void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
+
+/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index ddd529153e15..735fa6591609 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -9,73 +9,96 @@
#include "ipa.h"
#include "ipa_reg.h"
-/* Is this register valid and defined for the current IPA version? */
-static bool ipa_reg_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
+/* Is this register ID valid for the current IPA version? */
+static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
{
enum ipa_version version = ipa->version;
- bool valid;
-
- /* Check for bogus (out of range) register IDs */
- if ((u32)reg_id >= ipa->regs->reg_count)
- return false;
switch (reg_id) {
case IPA_BCR:
case COUNTER_CFG:
- valid = version < IPA_VERSION_4_5;
- break;
+ return version < IPA_VERSION_4_5;
case IPA_TX_CFG:
case FLAVOR_0:
case IDLE_INDICATION_CFG:
- valid = version >= IPA_VERSION_3_5;
- break;
+ return version >= IPA_VERSION_3_5;
case QTIME_TIMESTAMP_CFG:
case TIMERS_XO_CLK_DIV_CFG:
case TIMERS_PULSE_GRAN_CFG:
- valid = version >= IPA_VERSION_4_5;
- break;
+ return version >= IPA_VERSION_4_5;
case SRC_RSRC_GRP_45_RSRC_TYPE:
case DST_RSRC_GRP_45_RSRC_TYPE:
- valid = version <= IPA_VERSION_3_1 ||
- version == IPA_VERSION_4_5;
- break;
+ return version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_4_5;
case SRC_RSRC_GRP_67_RSRC_TYPE:
case DST_RSRC_GRP_67_RSRC_TYPE:
- valid = version <= IPA_VERSION_3_1;
- break;
+ return version <= IPA_VERSION_3_1;
case ENDP_FILTER_ROUTER_HSH_CFG:
- valid = version != IPA_VERSION_4_2;
- break;
+ return version != IPA_VERSION_4_2;
case IRQ_SUSPEND_EN:
case IRQ_SUSPEND_CLR:
- valid = version >= IPA_VERSION_3_1;
- break;
+ return version >= IPA_VERSION_3_1;
+
+ case COMP_CFG:
+ case CLKON_CFG:
+ case ROUTE:
+ case SHARED_MEM_SIZE:
+ case QSB_MAX_WRITES:
+ case QSB_MAX_READS:
+ case FILT_ROUT_HASH_EN:
+ case FILT_ROUT_CACHE_CFG:
+ case FILT_ROUT_HASH_FLUSH:
+ case FILT_ROUT_CACHE_FLUSH:
+ case STATE_AGGR_ACTIVE:
+ case LOCAL_PKT_PROC_CNTXT:
+ case AGGR_FORCE_CLOSE:
+ case SRC_RSRC_GRP_01_RSRC_TYPE:
+ case SRC_RSRC_GRP_23_RSRC_TYPE:
+ case DST_RSRC_GRP_01_RSRC_TYPE:
+ case DST_RSRC_GRP_23_RSRC_TYPE:
+ case ENDP_INIT_CTRL:
+ case ENDP_INIT_CFG:
+ case ENDP_INIT_NAT:
+ case ENDP_INIT_HDR:
+ case ENDP_INIT_HDR_EXT:
+ case ENDP_INIT_HDR_METADATA_MASK:
+ case ENDP_INIT_MODE:
+ case ENDP_INIT_AGGR:
+ case ENDP_INIT_HOL_BLOCK_EN:
+ case ENDP_INIT_HOL_BLOCK_TIMER:
+ case ENDP_INIT_DEAGGR:
+ case ENDP_INIT_RSRC_GRP:
+ case ENDP_INIT_SEQ:
+ case ENDP_STATUS:
+ case ENDP_FILTER_CACHE_CFG:
+ case ENDP_ROUTER_CACHE_CFG:
+ case IPA_IRQ_STTS:
+ case IPA_IRQ_EN:
+ case IPA_IRQ_CLR:
+ case IPA_IRQ_UC:
+ case IRQ_SUSPEND_INFO:
+ return true; /* These should be defined for all versions */
default:
- valid = true; /* Others should be defined for all versions */
- break;
+ return false;
}
-
- /* To be valid, it must be defined */
-
- return valid && ipa->regs->reg[reg_id];
}
-const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
+const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
{
- if (WARN_ON(!ipa_reg_valid(ipa, reg_id)))
+ if (WARN(!ipa_reg_id_valid(ipa, reg_id), "invalid reg %u\n", reg_id))
return NULL;
- return ipa->regs->reg[reg_id];
+ return reg(ipa->regs, reg_id);
}
-static const struct ipa_regs *ipa_regs(enum ipa_version version)
+static const struct regs *ipa_regs(enum ipa_version version)
{
switch (version) {
case IPA_VERSION_3_1:
@@ -100,7 +123,7 @@ static const struct ipa_regs *ipa_regs(enum ipa_version version)
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_regs *regs;
+ const struct regs *regs;
struct resource *res;
regs = ipa_regs(ipa->version);
@@ -123,7 +146,6 @@ int ipa_reg_init(struct ipa *ipa)
dev_err(dev, "unable to remap \"ipa-reg\" memory\n");
return -ENOMEM;
}
- ipa->reg_addr = res->start;
ipa->regs = regs;
return 0;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index ff64b19a4df8..28aa1351dd48 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
@@ -10,6 +10,7 @@
#include <linux/bug.h>
#include "ipa_version.h"
+#include "reg.h"
struct ipa;
@@ -35,7 +36,7 @@ struct ipa;
* by register ID. Each entry in the array specifies the base offset and
* (for parameterized registers) a non-zero stride value. Not all versions
* of IPA define all registers. The offset for a register is returned by
- * ipa_reg_offset() when the register's ipa_reg structure is supplied;
+ * reg_offset() when the register's reg structure is supplied;
* zero is returned for an undefined register (this should never happen).
*
* Some registers encode multiple fields within them. Each field in
@@ -44,9 +45,9 @@ struct ipa;
* an array of field masks, indexed by field ID. Two functions are
* used to access register fields; both take a reg structure as
* argument. To encode a value to be represented in a register field,
- * the value and field ID are passed to ipa_reg_encode(). To extract
+ * the value and field ID are passed to reg_encode(). To extract
* a value encoded in a register field, the field ID is passed to
- * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit()
+ * reg_decode(). In addition, for single-bit fields, reg_bit()
* can be used to either encode the bit value, or to generate a mask
* used to extract the bit value.
*/
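
As a concrete illustration of the renamed helpers, this sketch reads FLAVOR_0 and extracts one field, mirroring ipa_endpoint_config() earlier in the patch (the wrapper function itself is hypothetical):

	static u32 example_rx_pipe_count(struct ipa *ipa)
	{
		const struct reg *reg = ipa_reg(ipa, FLAVOR_0);
		u32 val = ioread32(ipa->reg_virt + reg_offset(reg));

		/* RX endpoints are IPA producers */
		return reg_decode(reg, MAX_PROD_PIPES, val);
	}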
@@ -59,8 +60,10 @@ enum ipa_reg_id {
SHARED_MEM_SIZE,
QSB_MAX_WRITES,
QSB_MAX_READS,
- FILT_ROUT_HASH_EN,
- FILT_ROUT_HASH_FLUSH,
+ FILT_ROUT_HASH_EN, /* Not IPA v5.0+ */
+ FILT_ROUT_CACHE_CFG, /* IPA v5.0+ */
+ FILT_ROUT_HASH_FLUSH, /* Not IPA v5.0+ */
+ FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */
STATE_AGGR_ACTIVE,
IPA_BCR, /* Not IPA v4.5+ */
LOCAL_PKT_PROC_CNTXT,
@@ -95,7 +98,9 @@ enum ipa_reg_id {
ENDP_INIT_SEQ, /* TX only */
ENDP_STATUS,
ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
- /* The IRQ registers are only used for GSI_EE_AP */
+ ENDP_FILTER_CACHE_CFG, /* IPA v5.0+ */
+ ENDP_ROUTER_CACHE_CFG, /* IPA v5.0+ */
+ /* The IRQ registers that follow are only used for GSI_EE_AP */
IPA_IRQ_STTS,
IPA_IRQ_EN,
IPA_IRQ_CLR,
@@ -106,56 +111,6 @@ enum ipa_reg_id {
IPA_REG_ID_COUNT, /* Last; not an ID */
};
-/**
- * struct ipa_reg - An IPA register descriptor
- * @offset: Register offset relative to base of the "ipa-reg" memory
- * @stride: Distance between two instances, if parameterized
- * @fcount: Number of entries in the @fmask array
- * @fmask: Array of mask values defining position and width of fields
- * @name: Upper-case name of the IPA register
- */
-struct ipa_reg {
- u32 offset;
- u32 stride;
- u32 fcount;
- const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
- const char *name;
-};
-
-/* Helper macro for defining "simple" (non-parameterized) registers */
-#define IPA_REG(__NAME, __reg_id, __offset) \
- IPA_REG_STRIDE(__NAME, __reg_id, __offset, 0)
-
-/* Helper macro for defining parameterized registers, specifying stride */
-#define IPA_REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
- static const struct ipa_reg ipa_reg_ ## __reg_id = { \
- .name = #__NAME, \
- .offset = __offset, \
- .stride = __stride, \
- }
-
-#define IPA_REG_FIELDS(__NAME, __name, __offset) \
- IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
-
-#define IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
- static const struct ipa_reg ipa_reg_ ## __name = { \
- .name = #__NAME, \
- .offset = __offset, \
- .stride = __stride, \
- .fcount = ARRAY_SIZE(ipa_reg_ ## __name ## _fmask), \
- .fmask = ipa_reg_ ## __name ## _fmask, \
- }
-
-/**
- * struct ipa_regs - Description of registers supported by hardware
- * @reg_count: Number of registers in the @reg[] array
- * @reg: Array of register descriptors
- */
-struct ipa_regs {
- u32 reg_count;
- const struct ipa_reg **reg;
-};
-
/* COMP_CFG register */
enum ipa_reg_comp_cfg_field_id {
COMP_CFG_ENABLE, /* Not IPA v4.0+ */
@@ -251,14 +206,28 @@ enum ipa_reg_qsb_max_reads_field_id {
GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
};
+/* FILT_ROUT_CACHE_CFG register */
+enum ipa_reg_filt_rout_cache_cfg_field_id {
+ ROUTER_CACHE_EN,
+ FILTER_CACHE_EN,
+ LOW_PRI_HASH_HIT_DISABLE,
+ LRU_EVICTION_THRESHOLD,
+};
+
/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
-enum ipa_reg_rout_hash_field_id {
+enum ipa_reg_filt_rout_hash_field_id {
IPV6_ROUTER_HASH,
IPV6_FILTER_HASH,
IPV4_ROUTER_HASH,
IPV4_FILTER_HASH,
};
+/* FILT_ROUT_CACHE_FLUSH register */
+enum ipa_reg_filt_rout_cache_field_id {
+ ROUTER_CACHE,
+ FILTER_CACHE,
+};
+
/* BCR register */
enum ipa_bcr_compat {
BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
@@ -298,6 +267,7 @@ enum ipa_reg_ipa_tx_cfg_field_id {
DUAL_TX_ENABLE, /* v4.5+ */
SSPND_PA_NO_START_STATE, /* v4,2+, not v4.5 */
SSPND_PA_NO_BQ_STATE, /* v4.2 only */
+ HOLB_STICKY_DROP_EN, /* v5.0+ */
};
/* FLAVOR_0 register */
@@ -333,6 +303,7 @@ enum ipa_reg_timers_pulse_gran_cfg_field_id {
PULSE_GRAN_0,
PULSE_GRAN_1,
PULSE_GRAN_2,
+ PULSE_GRAN_3,
};
/* Values for IPA_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
@@ -382,11 +353,11 @@ enum ipa_reg_endp_init_nat_field_id {
NAT_EN,
};
-/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */
-enum ipa_nat_en {
- IPA_NAT_BYPASS = 0x0,
- IPA_NAT_SRC = 0x1,
- IPA_NAT_DST = 0x2,
+/** enum ipa_nat_type - ENDP_INIT_NAT register NAT_EN field value */
+enum ipa_nat_type {
+ IPA_NAT_TYPE_BYPASS = 0,
+ IPA_NAT_TYPE_SRC = 1,
+ IPA_NAT_TYPE_DST = 2,
};
/* ENDP_INIT_HDR register */
@@ -415,6 +386,8 @@ enum ipa_reg_endp_init_hdr_ext_field_id {
HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
+ HDR_BYTES_TO_REMOVE_VALID, /* v5.0+ */
+ HDR_BYTES_TO_REMOVE, /* v5.0+ */
};
/* ENDP_INIT_MODE register */
@@ -573,6 +546,17 @@ enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
ROUTER_HASH_MSK_ALL, /* Bitwise OR of the above 6 fields */
};
+/* ENDP_FILTER_CACHE_CFG and ENDP_ROUTER_CACHE_CFG registers */
+enum ipa_reg_endp_cache_cfg_field_id {
+ CACHE_MSK_SRC_ID,
+ CACHE_MSK_SRC_IP,
+ CACHE_MSK_DST_IP,
+ CACHE_MSK_SRC_PORT,
+ CACHE_MSK_DST_PORT,
+ CACHE_MSK_PROTOCOL,
+ CACHE_MSK_METADATA,
+};
+
/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
@@ -654,79 +638,15 @@ enum ipa_reg_ipa_irq_uc_field_id {
UC_INTR,
};
-extern const struct ipa_regs ipa_regs_v3_1;
-extern const struct ipa_regs ipa_regs_v3_5_1;
-extern const struct ipa_regs ipa_regs_v4_2;
-extern const struct ipa_regs ipa_regs_v4_5;
-extern const struct ipa_regs ipa_regs_v4_7;
-extern const struct ipa_regs ipa_regs_v4_9;
-extern const struct ipa_regs ipa_regs_v4_11;
-
-/* Return the field mask for a field in a register */
-static inline u32 ipa_reg_fmask(const struct ipa_reg *reg, u32 field_id)
-{
- if (!reg || WARN_ON(field_id >= reg->fcount))
- return 0;
-
- return reg->fmask[field_id];
-}
-
-/* Return the mask for a single-bit field in a register */
-static inline u32 ipa_reg_bit(const struct ipa_reg *reg, u32 field_id)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- WARN_ON(!is_power_of_2(fmask));
-
- return fmask;
-}
-
-/* Encode a value into the given field of a register */
-static inline u32
-ipa_reg_encode(const struct ipa_reg *reg, u32 field_id, u32 val)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- if (!fmask)
- return 0;
-
- val <<= __ffs(fmask);
- if (WARN_ON(val & ~fmask))
- return 0;
-
- return val;
-}
-
-/* Given a register value, decode (extract) the value in the given field */
-static inline u32
-ipa_reg_decode(const struct ipa_reg *reg, u32 field_id, u32 val)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- return fmask ? (val & fmask) >> __ffs(fmask) : 0;
-}
-
-/* Return the maximum value representable by the given field; always 2^n - 1 */
-static inline u32 ipa_reg_field_max(const struct ipa_reg *reg, u32 field_id)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- return fmask ? fmask >> __ffs(fmask) : 0;
-}
-
-const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
-
-/* Returns 0 for NULL reg; warning will have already been issued */
-static inline u32 ipa_reg_offset(const struct ipa_reg *reg)
-{
- return reg ? reg->offset : 0;
-}
+extern const struct regs ipa_regs_v3_1;
+extern const struct regs ipa_regs_v3_5_1;
+extern const struct regs ipa_regs_v4_2;
+extern const struct regs ipa_regs_v4_5;
+extern const struct regs ipa_regs_v4_7;
+extern const struct regs ipa_regs_v4_9;
+extern const struct regs ipa_regs_v4_11;
-/* Returns 0 for NULL reg; warning will have already been issued */
-static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
-{
- return reg ? reg->offset + n * reg->stride : 0;
-}
+const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
int ipa_reg_init(struct ipa *ipa);
void ipa_reg_exit(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index a257f0e5e361..82c88a744d10 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -70,20 +70,20 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
static void
ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
- const struct ipa_reg *reg,
+ const struct reg *reg,
const struct ipa_resource_limits *xlimits,
const struct ipa_resource_limits *ylimits)
{
u32 val;
- val = ipa_reg_encode(reg, X_MIN_LIM, xlimits->min);
- val |= ipa_reg_encode(reg, X_MAX_LIM, xlimits->max);
+ val = reg_encode(reg, X_MIN_LIM, xlimits->min);
+ val |= reg_encode(reg, X_MAX_LIM, xlimits->max);
if (ylimits) {
- val |= ipa_reg_encode(reg, Y_MIN_LIM, ylimits->min);
- val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max);
+ val |= reg_encode(reg, Y_MIN_LIM, ylimits->min);
+ val |= reg_encode(reg, Y_MAX_LIM, ylimits->max);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, resource_type));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, resource_type));
}
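
Parameterized registers such as these resource-group registers are addressed with reg_n_offset(), which simply scales the instance number by the register's stride. A toy illustration (the offsets are hypothetical):

	static const struct reg reg_demo = { .offset = 0x400, .stride = 0x20 };

	/* reg_n_offset(&reg_demo, 3) == 0x400 + 3 * 0x20 == 0x460 */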
static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
@@ -92,7 +92,7 @@ static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- const struct ipa_reg *reg;
+ const struct reg *reg;
resource = &data->resource_src[resource_type];
@@ -129,7 +129,7 @@ static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- const struct ipa_reg *reg;
+ const struct reg *reg;
resource = &data->resource_dst[resource_type];
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index b81e27b61354..f0529c31d0b6 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -345,9 +345,8 @@ void ipa_table_reset(struct ipa *ipa, bool modem)
int ipa_table_hash_flush(struct ipa *ipa)
{
- const struct ipa_reg *reg;
struct gsi_trans *trans;
- u32 offset;
+ const struct reg *reg;
u32 val;
if (!ipa_table_hash_support(ipa))
@@ -359,15 +358,22 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- offset = ipa_reg_offset(reg);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
- val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
- val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
- val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
+ val = reg_bit(reg, IPV6_ROUTER_HASH);
+ val |= reg_bit(reg, IPV6_FILTER_HASH);
+ val |= reg_bit(reg, IPV4_ROUTER_HASH);
+ val |= reg_bit(reg, IPV4_FILTER_HASH);
+ } else {
+ reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
- ipa_cmd_register_write_add(trans, offset, val, val, false);
+ /* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */
+ val = reg_bit(reg, ROUTER_CACHE);
+ val |= reg_bit(reg, FILTER_CACHE);
+ }
+
+ ipa_cmd_register_write_add(trans, reg_offset(reg), val, val, false);
gsi_trans_commit_wait(trans);
@@ -486,17 +492,26 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
- reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+
+ offset = reg_n_offset(reg, endpoint_id);
+ val = ioread32(endpoint->ipa->reg_virt + offset);
- offset = ipa_reg_n_offset(reg, endpoint_id);
- val = ioread32(endpoint->ipa->reg_virt + offset);
+ /* Zero all filter-related fields, preserving the rest */
+ val &= ~reg_fmask(reg, FILTER_HASH_MSK_ALL);
+ } else {
+ /* IPA v5.0 separates filter and router cache configuration */
+ reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG);
+ offset = reg_n_offset(reg, endpoint_id);
- /* Zero all filter-related fields, preserving the rest */
- val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
+ /* Zero all filter-related fields */
+ val = 0;
+ }
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -536,17 +551,26 @@ static bool ipa_route_id_modem(struct ipa *ipa, u32 route_id)
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
- reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
- offset = ipa_reg_n_offset(reg, route_id);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = reg_n_offset(reg, route_id);
- val = ioread32(ipa->reg_virt + offset);
+ val = ioread32(ipa->reg_virt + offset);
- /* Zero all route-related fields, preserving the rest */
- val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
+ /* Zero all route-related fields, preserving the rest */
+ val &= ~reg_fmask(reg, ROUTER_HASH_MSK_ALL);
+ } else {
+ /* IPA v5.0 separates filter and router cache configuration */
+ reg = ipa_reg(ipa, ENDP_ROUTER_CACHE_CFG);
+ offset = reg_n_offset(reg, route_id);
+
+ /* Zero all route-related fields */
+ val = 0;
+ }
iowrite32(val, ipa->reg_virt + offset);
}
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index f0ee47281015..7eaa0b4ebed9 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -124,7 +124,7 @@ static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
}
/* Microcontroller event IPA interrupt handler */
-static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+static void ipa_uc_event_handler(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
struct device *dev = &ipa->pdev->dev;
@@ -138,7 +138,7 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
}
/* Microcontroller response IPA interrupt handler */
-static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
+static void ipa_uc_response_hdlr(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
struct device *dev = &ipa->pdev->dev;
@@ -170,13 +170,22 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
}
}
+void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ /* Silently ignore anything unrecognized */
+ if (irq_id == IPA_IRQ_UC_0)
+ ipa_uc_event_handler(ipa);
+ else if (irq_id == IPA_IRQ_UC_1)
+ ipa_uc_response_hdlr(ipa);
+}
+
/* Configure the IPA microcontroller subsystem */
void ipa_uc_config(struct ipa *ipa)
{
ipa->uc_powered = false;
ipa->uc_loaded = false;
- ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler);
- ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr);
+ ipa_interrupt_enable(ipa, IPA_IRQ_UC_0);
+ ipa_interrupt_enable(ipa, IPA_IRQ_UC_1);
}
/* Inverse of ipa_uc_config() */
@@ -184,8 +193,8 @@ void ipa_uc_deconfig(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
+ ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
if (ipa->uc_loaded)
ipa_power_retention(ipa, false);
@@ -222,7 +231,7 @@ void ipa_uc_power(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* Fill in the command data */
@@ -234,9 +243,9 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
/* Use an interrupt to tell the microcontroller the command is ready */
reg = ipa_reg(ipa, IPA_IRQ_UC);
- val = ipa_reg_bit(reg, UC_INTR);
+ val = reg_bit(reg, UC_INTR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index 8514096e6f36..85aa0df818c2 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -7,6 +7,14 @@
#define _IPA_UC_H_
struct ipa;
+enum ipa_irq_id;
+
+/**
+ * ipa_uc_interrupt_handler() - Handler for microcontroller IPA interrupts
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt ID
+ */
+void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
/**
* ipa_uc_config() - Configure the IPA microcontroller subsystem
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index d15821467743..06e75b8ece7e 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -9,7 +9,7 @@
/**
* enum ipa_version
* @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0
- * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.1
+ * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.0
* @IPA_VERSION_3_5: IPA version 3.5/GSI version 1.2
* @IPA_VERSION_3_5_1: IPA version 3.5.1/GSI version 1.3
* @IPA_VERSION_4_0: IPA version 4.0/GSI version 2.0
@@ -20,6 +20,8 @@
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
* @IPA_VERSION_5_0: IPA version 5.0/GSI version 3.0
+ * @IPA_VERSION_5_1: IPA version 5.1/GSI version 3.0
+ * @IPA_VERSION_5_5: IPA version 5.5/GSI version 5.5
* @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
@@ -38,6 +40,8 @@ enum ipa_version {
IPA_VERSION_4_9,
IPA_VERSION_4_11,
IPA_VERSION_5_0,
+ IPA_VERSION_5_1,
+ IPA_VERSION_5_5,
IPA_VERSION_COUNT, /* Last; not a version */
};
diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h
new file mode 100644
index 000000000000..57b457f39b6e
--- /dev/null
+++ b/drivers/net/ipa/reg.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2022-2023 Linaro Ltd. */
+
+#ifndef _REG_H_
+#define _REG_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+/**
+ * struct reg - A register descriptor
+ * @offset: Register offset relative to base of register memory
+ * @stride: Distance between two instances, if parameterized
+ * @fcount: Number of entries in the @fmask array
+ * @fmask: Array of mask values defining position and width of fields
+ * @name: Upper-case name of the register
+ */
+struct reg {
+ u32 offset;
+ u32 stride;
+ u32 fcount;
+ const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
+ const char *name;
+};
+
+/* Helper macro for defining "simple" (non-parameterized) registers */
+#define REG(__NAME, __reg_id, __offset) \
+ REG_STRIDE(__NAME, __reg_id, __offset, 0)
+
+/* Helper macro for defining parameterized registers, specifying stride */
+#define REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
+ static const struct reg reg_ ## __reg_id = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ }
+
+#define REG_FIELDS(__NAME, __name, __offset) \
+ REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
+
+#define REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
+ static const struct reg reg_ ## __name = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ .fcount = ARRAY_SIZE(reg_ ## __name ## _fmask), \
+ .fmask = reg_ ## __name ## _fmask, \
+ }
+
+/**
+ * struct regs - Description of registers supported by hardware
+ * @reg_count: Number of registers in the @reg[] array
+ * @reg: Array of register descriptors
+ */
+struct regs {
+ u32 reg_count;
+ const struct reg **reg;
+};
+
+static inline const struct reg *reg(const struct regs *regs, u32 reg_id)
+{
+ if (WARN(reg_id >= regs->reg_count,
+ "reg out of range (%u > %u)\n", reg_id, regs->reg_count - 1))
+ return NULL;
+
+ return regs->reg[reg_id];
+}
+
+/* Return the field mask for a field in a register, or 0 on error */
+static inline u32 reg_fmask(const struct reg *reg, u32 field_id)
+{
+ if (!reg || WARN_ON(field_id >= reg->fcount))
+ return 0;
+
+ return reg->fmask[field_id];
+}
+
+/* Return the mask for a single-bit field in a register, or 0 on error */
+static inline u32 reg_bit(const struct reg *reg, u32 field_id)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ if (WARN_ON(!is_power_of_2(fmask)))
+ return 0;
+
+ return fmask;
+}
+
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 reg_field_max(const struct reg *reg, u32 field_id)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+/* Encode a value into the given field of a register */
+static inline u32 reg_encode(const struct reg *reg, u32 field_id, u32 val)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ if (!fmask)
+ return 0;
+
+ val <<= __ffs(fmask);
+ if (WARN_ON(val & ~fmask))
+ return 0;
+
+ return val;
+}
+
+/* Given a register value, decode (extract) the value in the given field */
+static inline u32 reg_decode(const struct reg *reg, u32 field_id, u32 val)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ return fmask ? (val & fmask) >> __ffs(fmask) : 0;
+}
+
+/* Returns 0 for NULL reg; warning should have already been issued */
+static inline u32 reg_offset(const struct reg *reg)
+{
+ return reg ? reg->offset : 0;
+}
+
+/* Returns 0 for NULL reg; warning should have already been issued */
+static inline u32 reg_n_offset(const struct reg *reg, u32 n)
+{
+ return reg ? reg->offset + n * reg->stride : 0;
+}
+
+#endif /* _REG_H_ */
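
Putting the pieces of this new header together: a register with fields is declared via an fmask array plus REG_FIELDS(), and values round-trip through reg_encode()/reg_decode(). A self-contained sketch, assuming only reg.h above (the EXAMPLE register and its fields are hypothetical):

	enum example_field_id {
		EXAMPLE_ENABLE,
		EXAMPLE_COUNT,
	};

	static const u32 reg_example_fmask[] = {
		[EXAMPLE_ENABLE]	= BIT(0),
		/* Bits 1-3 reserved */
		[EXAMPLE_COUNT]		= GENMASK(7, 4),
	};

	REG_FIELDS(EXAMPLE, example, 0x00001000);

	static u32 example_round_trip(void)
	{
		u32 val;

		val = reg_bit(&reg_example, EXAMPLE_ENABLE);		/* 0x01 */
		val |= reg_encode(&reg_example, EXAMPLE_COUNT, 5);	/* 0x51 */

		/* (0x51 & GENMASK(7, 4)) >> 4 == 5 */
		return reg_decode(&reg_example, EXAMPLE_COUNT, val);
	}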
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.1.c b/drivers/net/ipa/reg/gsi_reg-v3.1.c
new file mode 100644
index 000000000000..e036805a7882
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v3.1.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
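
For context, each fmask[] table above maps a field ID to a GENMASK()/BIT() mask within its 32-bit register, and the per-version struct regs at the end is what the rest of the driver indexes. A minimal sketch of how such a mask might be applied when building a register value, using the standard <linux/bitfield.h> helper (illustrative only, not part of this patch; the driver's own wrapper may differ in detail):

	/* Hypothetical helper: pack a field value using its table mask.
	 * u32_encode_bits() shifts val into the position selected by fmask,
	 * e.g. fmask GENMASK(15, 12) (ERR_CODE above) places val in bits 12-15.
	 */
	static inline u32 example_field_encode(const u32 *fmask, u32 field_id, u32 val)
	{
		return u32_encode_bits(val, fmask[field_id]);
	}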
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
new file mode 100644
index 000000000000..8c3ab3a5288e
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ /* Bits 15-31 reserved */
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.0.c b/drivers/net/ipa/reg/gsi_reg-v4.0.c
new file mode 100644
index 000000000000..7cc7a21d07f9
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.0.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [USE_ESCAPE_BUF_ONLY] = BIT(10),
+ /* Bits 11-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ /* Bits 30-31 reserved */
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_0 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.11.c b/drivers/net/ipa/reg/gsi_reg-v4.11.c
new file mode 100644
index 000000000000..01696519032f
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.11.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(19, 0),
+ /* Bits 20-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ [DB_IN_BYTES] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(19, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-23 reserved */
+ [GENERIC_PARAMS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x00012098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001209c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
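
The REG_STRIDE() and REG_STRIDE_FIELDS() entries describe banks of per-channel (C) or per-event-ring (E) registers: instance n lives at the base offset plus n times the stride. A sketch of the offset arithmetic this encodes, assuming a struct reg carrying offset and stride members as the macros suggest (illustrative, not part of this patch):

	/* Hypothetical: offset of the nth instance of a strided register.
	 * For CH_C_CNTXT_0 above (base 0x0000f000 + 0x4000 * GSI_EE_AP,
	 * stride 0x80), channel 2's copy sits 0x100 past channel 0's.
	 */
	static u32 example_n_offset(const struct reg *reg, u32 n)
	{
		return reg->offset + n * reg->stride;
	}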
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c
new file mode 100644
index 000000000000..648b51b88d4e
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ /* Bits 24-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c
new file mode 100644
index 000000000000..4bf45d264d6b
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(19, 0),
+ /* Bits 20-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ [DB_IN_BYTES] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x00012098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001209c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
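
Each of the new files exports one const struct regs whose reg[] array is indexed by register ID, so a single lookup resolves a version-specific descriptor (or NULL for registers a version lacks, such as HW_PARAM_2 on v3.1, since unset designated-initializer slots are zeroed). A sketch of that lookup (illustrative; the driver's actual helper may add more checking):

	/* Hypothetical lookup: register ID -> per-version descriptor. */
	static const struct reg *example_reg_lookup(const struct regs *regs, u32 reg_id)
	{
		if (WARN_ON(reg_id >= regs->reg_count || !regs->reg[reg_id]))
			return NULL;

		return regs->reg[reg_id];	/* e.g. &reg_ch_cmd for CH_CMD */
	}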
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
index 677ece3bce9e..648dbfe1fce3 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -16,9 +16,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -39,9 +39,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -52,31 +52,31 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -87,9 +87,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -100,121 +100,121 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
[EOT_COAL_GRANULARITY] = GENMASK(3, 0),
[AGGR_GRANULARITY] = GENMASK(8, 4),
 /* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
- 0x00000408, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
- 0x0000040c, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000040c, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
- 0x00000508, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
- 0x0000050c, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
-static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+static const u32 reg_endp_init_ctrl_fmask[] = {
[ENDP_SUSPEND] = BIT(0),
[ENDP_DELAY] = BIT(1),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -223,16 +223,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -245,9 +245,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -257,12 +257,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -274,9 +274,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -289,25 +289,25 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
/* Entire register is a tick count */
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(31, 0),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -317,25 +317,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -343,9 +342,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -366,84 +365,84 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
- [SRC_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_67_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
- [DST_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_67_rsrc_type,
- [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v3_1 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
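
The *_fmask[] arrays in these files map each per-register field ID to the bit
mask that field occupies: BIT() for single-bit flags, GENMASK(high, low) for
wider fields. Reserved spans are noted in comments rather than given entries.
A hedged example of consuming such a mask with the standard
<linux/bitfield.h> helpers, using the SHARED_MEM_SIZE layout defined above
(the helper function and variable names are illustrative only):

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/printk.h>

	static void shared_mem_report(u32 val)
	{
		/* MEM_SIZE occupies bits 15:0 and MEM_BADDR bits 31:16,
		 * matching reg_shared_mem_size_fmask[] above.
		 */
		u32 mem_size = FIELD_GET(GENMASK(15, 0), val);
		u32 mem_baddr = FIELD_GET(GENMASK(31, 16), val);

		pr_info("IPA shared memory: size %u, base offset %u\n",
			mem_size, mem_baddr);
	}
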
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
index b9c6a50de243..78b1bf60cd02 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -16,9 +16,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -44,9 +44,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -57,31 +57,31 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -92,9 +92,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -105,42 +105,42 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
[AGGR_GRANULARITY] = GENMASK(8, 4),
 /* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
[TX0_PREFETCH_DISABLE] = BIT(0),
[TX1_PREFETCH_DISABLE] = BIT(1),
[PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -151,17 +151,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -172,10 +172,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -186,10 +186,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -200,10 +200,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -214,18 +214,18 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+static const u32 reg_endp_init_ctrl_fmask[] = {
[ENDP_SUSPEND] = BIT(0),
[ENDP_DELAY] = BIT(1),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -234,16 +234,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -256,9 +256,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -268,12 +268,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -285,9 +285,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -300,25 +300,25 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
/* Entire register is a tick count */
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(31, 0),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -328,25 +328,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -354,9 +353,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -377,83 +376,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v3_5_1 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
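
The REG(), REG_FIELDS(), REG_STRIDE() and REG_STRIDE_FIELDS() macros each
define one static struct reg descriptor named reg_<name>, which is what the
reg_array[] initializers above take the address of. A plausible expansion of
the most general variant, assuming the descriptor records the name, offset,
stride, and field-mask array (the member names are assumptions, not quoted
from this patch):

	#define REG_STRIDE_FIELDS(_id, _name, _offset, _stride)		\
		static const struct reg reg_ ## _name = {		\
			.name	= #_name,				\
			.fcount	= ARRAY_SIZE(reg_ ## _name ## _fmask),	\
			.fmask	= reg_ ## _name ## _fmask,		\
			.offset	= _offset,				\
			.stride	= _stride,				\
		}

The simpler REG() and REG_STRIDE() variants would omit the fmask members for
registers whose fields are defined elsewhere (for example, by ipa->available
or enum ipa_irq_id, as the comments in these files note).
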
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
index 9a315130530d..29e71cce4a84 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.11.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -36,9 +36,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -73,9 +73,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -86,24 +86,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -111,9 +111,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -124,9 +124,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -137,23 +137,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -166,9 +166,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -179,17 +179,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -199,26 +199,26 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
/* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -229,10 +229,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -243,10 +243,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -257,10 +257,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -271,10 +271,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -283,16 +283,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -305,9 +305,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -321,12 +321,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -338,9 +338,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -355,27 +355,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -385,24 +385,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -410,9 +409,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -433,83 +432,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_11 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
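
Note the version drift visible in the tables: unlike v3.1 and v3.5.1, the
v4.11 array above has no IPA_BCR or COUNTER_CFG entry, and instead carries
the QTIME timestamp and clock-divider registers. A sketch of how the right
table might be selected at probe time (the selector helper is an assumption;
the enum ipa_version identifiers are real driver values):

	static const struct regs *ipa_regs(enum ipa_version version)
	{
		switch (version) {
		case IPA_VERSION_3_1:
			return &ipa_regs_v3_1;
		case IPA_VERSION_3_5_1:
			return &ipa_regs_v3_5_1;
		case IPA_VERSION_4_11:
			return &ipa_regs_v4_11;
		default:
			return NULL;	/* versions not covered by this sketch */
		}
	}
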
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
index 7a95149f8ec7..bb7cf488144d 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.2.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -29,9 +29,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -65,9 +65,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -78,24 +78,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -103,9 +103,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -116,9 +116,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -129,33 +129,33 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -169,9 +169,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 20-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -182,17 +182,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -203,10 +203,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -217,10 +217,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -231,10 +231,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -245,10 +245,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -257,16 +257,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -279,9 +279,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -291,12 +291,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -308,9 +308,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -323,27 +323,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_SCALE] = GENMASK(12, 8),
/* Bits 13-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -353,25 +353,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -380,80 +379,80 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_2 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_2 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
index 587eb8d4e00f..1c58f78851c2 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -30,9 +30,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -67,9 +67,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -80,24 +80,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -105,9 +105,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -118,9 +118,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -131,23 +131,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -159,9 +159,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 18-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -172,17 +172,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -192,25 +192,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -221,10 +221,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -235,10 +235,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -249,10 +249,10 @@ static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
- 0x00000408, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -263,10 +263,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -277,10 +277,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -291,10 +291,10 @@ static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
- 0x00000508, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -303,16 +303,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -325,9 +325,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -341,12 +341,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -357,9 +357,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -374,27 +374,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -404,24 +404,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -429,9 +428,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -452,85 +451,85 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_5 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
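
As context for the *_fmask[] tables renamed above: each entry is a GENMASK()/BIT() mask giving a field's position within its 32-bit register, and values are shifted into place through that mask. The helper below is a hypothetical stand-in written purely for illustration; the in-tree driver uses u32_encode_bits() from <linux/bitfield.h> rather than a function like this.

/* Hypothetical helper, for illustration only: place 'val' into the
 * field described by fmask (e.g. GENMASK(15, 0) for MEM_SIZE in the
 * SHARED_MEM_SIZE register above), discarding bits that don't fit.
 */
static inline u32 reg_field_encode(u32 fmask, u32 val)
{
	return (val << __builtin_ctz(fmask)) & fmask;
}

For example, reg_field_encode(GENMASK(31, 16), 0x10) yields 0x00100000, i.e. the value 0x10 positioned in the upper half-word (MEM_BADDR above).
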
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.7.c b/drivers/net/ipa/reg/ipa_reg-v4.7.c
index 21f8a58e59a0..731824fce1d4 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.7.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.7.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -30,9 +30,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -67,9 +67,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -80,24 +80,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -105,9 +105,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -118,9 +118,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -131,23 +131,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -160,9 +160,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -173,17 +173,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -193,25 +193,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -222,10 +222,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -236,10 +236,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -250,10 +250,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -264,10 +264,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -276,16 +276,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -298,9 +298,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -314,12 +314,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -330,9 +330,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -347,27 +347,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -377,24 +377,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -402,9 +401,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -425,83 +424,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_7 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_7 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
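The v4.7 table above (and the v4.9 one that follows) keeps the same shape after the rename: a sparse array of descriptor pointers indexed by a shared enum, wrapped in a per-version struct. A minimal user-space sketch of that lookup pattern, with illustrative names (struct reg/regs and reg_get() here are stand-ins, not the in-tree definitions):

#include <stddef.h>
#include <stdio.h>

enum reg_id { COMP_CFG, ROUTE, REG_ID_COUNT };

struct reg { unsigned int offset; };
struct regs { size_t reg_count; const struct reg **reg; };

static const struct reg reg_comp_cfg = { .offset = 0x3c };
static const struct reg reg_route    = { .offset = 0x48 };

static const struct reg *reg_array[] = {
	[COMP_CFG] = &reg_comp_cfg,
	[ROUTE]    = &reg_route,
};

static const struct regs regs_v4_7 = {
	.reg_count = sizeof(reg_array) / sizeof(reg_array[0]),
	.reg = reg_array,
};

/* Look up a register descriptor for one IP version; NULL if absent. */
static const struct reg *reg_get(const struct regs *regs, enum reg_id id)
{
	return (size_t)id < regs->reg_count ? regs->reg[id] : NULL;
}

int main(void)
{
	const struct reg *r = reg_get(&regs_v4_7, ROUTE);

	if (r)
		printf("ROUTE at 0x%04x\n", r->offset);
	return 0;
}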
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
index 1f67a03fe599..01f87b5290e0 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.9.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -35,9 +35,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -72,9 +72,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -85,24 +85,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -110,9 +110,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -123,9 +123,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -136,23 +136,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -165,9 +165,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -178,17 +178,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -198,25 +198,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -227,10 +227,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -241,10 +241,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -255,10 +255,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -269,10 +269,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -281,16 +281,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -302,9 +302,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -318,12 +318,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -335,9 +335,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -352,27 +352,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -382,24 +382,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -407,9 +406,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -430,83 +429,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_9 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index bb1c298c1e78..460b3d4f2245 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -157,7 +157,7 @@ void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
return NULL;
ip4h = ip_hdr(skb);
- pktlen = ntohs(ip4h->tot_len);
+ pktlen = skb_ip_totlen(skb);
if (ip4h->ihl < 5 || ip4h->version != 4)
return NULL;
if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
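The ipvlan hunk swaps a raw ntohs(ip4h->tot_len) read for skb_ip_totlen(). With IPv4 BIG TCP, a GSO packet larger than 64kB carries tot_len == 0 in the header, so length checks must fall back to what the skb itself knows. A sketch of that rationale (assumed here; the in-tree helper may differ in detail):

/* Assumed rationale sketch; see the tree for the real skb_ip_totlen().
 * BIG TCP IPv4 GSO packets over 64kB set the header's tot_len to 0. */
static unsigned int ip_totlen(unsigned int hdr_tot_len,
			      unsigned int skb_l3_len)
{
	return hdr_tot_len ? hdr_tot_len : skb_l3_len;
}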
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index becb04123d3e..25616247d7a5 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2583,16 +2583,56 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
return false;
}
+static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
+{
+ enum macsec_offload prev_offload;
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ struct macsec_dev *macsec;
+ int ret = 0;
+
+ macsec = macsec_priv(dev);
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (offload != MACSEC_OFFLOAD_OFF &&
+ !macsec_check_offload(offload, macsec))
+ return -EOPNOTSUPP;
+
+ /* Check if the net device is busy. */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Check if the device already has rules configured: we do not support
+ * rules migration.
+ */
+ if (macsec_is_configured(macsec))
+ return -EBUSY;
+
+ prev_offload = macsec->offload;
+
+ ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+ macsec, &ctx);
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ macsec->offload = offload;
+
+ ctx.secy = &macsec->secy;
+ ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
+ : macsec_offload(ops->mdo_add_secy, &ctx);
+ if (ret)
+ macsec->offload = prev_offload;
+
+ return ret;
+}
+
static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
- enum macsec_offload offload, prev_offload;
- int (*func)(struct macsec_context *ctx);
struct nlattr **attrs = info->attrs;
- struct net_device *dev;
- const struct macsec_ops *ops;
- struct macsec_context ctx;
+ enum macsec_offload offload;
struct macsec_dev *macsec;
+ struct net_device *dev;
int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -2621,55 +2661,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
}
offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
- if (macsec->offload == offload)
- goto out;
-
- /* Check if the offloading mode is supported by the underlying layers */
- if (offload != MACSEC_OFFLOAD_OFF &&
- !macsec_check_offload(offload, macsec)) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- /* Check if the net device is busy. */
- if (netif_running(dev)) {
- ret = -EBUSY;
- goto out;
- }
-
- prev_offload = macsec->offload;
- macsec->offload = offload;
- /* Check if the device already has rules configured: we do not support
- * rules migration.
- */
- if (macsec_is_configured(macsec)) {
- ret = -EBUSY;
- goto rollback;
- }
-
- ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
- macsec, &ctx);
- if (!ops) {
- ret = -EOPNOTSUPP;
- goto rollback;
- }
-
- if (prev_offload == MACSEC_OFFLOAD_OFF)
- func = ops->mdo_add_secy;
- else
- func = ops->mdo_del_secy;
-
- ctx.secy = &macsec->secy;
- ret = macsec_offload(func, &ctx);
- if (ret)
- goto rollback;
-
- rtnl_unlock();
- return 0;
-
-rollback:
- macsec->offload = prev_offload;
+ if (macsec->offload != offload)
+ ret = macsec_update_offload(dev, offload);
out:
rtnl_unlock();
return ret;
@@ -3817,6 +3811,8 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
+ bool macsec_offload_state_change = false;
+ enum macsec_offload offload;
struct macsec_tx_sc tx_sc;
struct macsec_secy secy;
int ret;
@@ -3840,8 +3836,18 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
if (ret)
goto cleanup;
+ if (data[IFLA_MACSEC_OFFLOAD]) {
+ offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
+ if (macsec->offload != offload) {
+ macsec_offload_state_change = true;
+ ret = macsec_update_offload(dev, offload);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
/* If h/w offloading is available, propagate to the device */
- if (macsec_is_offloaded(macsec)) {
+ if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
@@ -4240,16 +4246,22 @@ static size_t macsec_get_size(const struct net_device *dev)
nla_total_size(1) + /* IFLA_MACSEC_SCB */
nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
+ nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
0;
}
static int macsec_fill_info(struct sk_buff *skb,
const struct net_device *dev)
{
- struct macsec_secy *secy = &macsec_priv(dev)->secy;
- struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_dev *macsec;
+ struct macsec_secy *secy;
u64 csid;
+ macsec = macsec_priv(dev);
+ secy = &macsec->secy;
+ tx_sc = &secy->tx_sc;
+
switch (secy->key_len) {
case MACSEC_GCM_AES_128_SAK_LEN:
csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
@@ -4274,6 +4286,7 @@ static int macsec_fill_info(struct sk_buff *skb,
nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
+ nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
0)
goto nla_put_failure;
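The macsec refactor above centralizes mode switching in macsec_update_offload(), whose core is a stash-commit-rollback sequence: record the previous offload mode, commit the new one before programming the hardware, and restore the old mode if programming fails. A minimal sketch of that pattern, with hypothetical names:

/* Stash-commit-rollback, as macsec_update_offload() does (hypothetical
 * names; program_hw() stands in for the mdo_add_secy/mdo_del_secy call). */
enum offload_mode { MODE_OFF, MODE_MAC };

struct dev_state {
	enum offload_mode mode;
};

static int program_hw(struct dev_state *d, enum offload_mode mode)
{
	(void)d; (void)mode;
	return 0;			/* stand-in for the offload call */
}

static int update_mode(struct dev_state *d, enum offload_mode mode)
{
	enum offload_mode prev = d->mode;
	int ret;

	d->mode = mode;			/* commit before programming */
	ret = program_hw(d, mode);
	if (ret)
		d->mode = prev;		/* roll back on failure */
	return ret;
}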
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index bfa16826a6e1..90309980686e 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -215,6 +215,17 @@ config MDIO_BUS_MUX_MESON_G12A
 the amlogic g12a SoC. The multiplexer connects either the external
or the internal MDIO bus to the parent bus.
+config MDIO_BUS_MUX_MESON_GXL
+ tristate "Amlogic GXL based MDIO bus multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+ select MDIO_BUS_MUX
+ default m if ARCH_MESON
+ help
+ This module provides a driver for the MDIO multiplexer/glue of
+ the amlogic GXL SoC. The multiplexer connects either the external
+ or the internal MDIO bus to the parent bus.
+
config MDIO_BUS_MUX_BCM6368
tristate "Broadcom BCM6368 MDIO bus multiplexers"
depends on OF && OF_MDIO && (BMIPS_GENERIC || COMPILE_TEST)
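For reference, building the new GXL multiplexer as a module would take a fragment along these lines (illustrative; the option names are the ones defined above):

# Illustrative .config fragment; MDIO_BUS_MUX is selected automatically
CONFIG_MDIO_BUS_MUX_MESON_GXL=m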
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index 15f8dc4042ce..7d4cb4c11e4e 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -28,5 +28,6 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM6368) += mdio-mux-bcm6368.o
obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_GXL) += mdio-mux-meson-gxl.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index b782c35c4ac1..1183ef5e203e 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -115,7 +115,7 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
struct mii_timestamper *mii_ts = NULL;
struct pse_control *psec = NULL;
struct phy_device *phy;
- bool is_c45 = false;
+ bool is_c45;
u32 phy_id;
int rc;
@@ -129,11 +129,7 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
goto clean_pse;
}
- rc = fwnode_property_match_string(child, "compatible",
- "ethernet-phy-ieee802.3-c45");
- if (rc >= 0)
- is_c45 = true;
-
+ is_c45 = fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45");
if (is_c45 || fwnode_get_phy_id(child, &phy_id))
phy = get_phy_device(bus, addr, is_c45);
else
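The fwnode_mdio change replaces an open-coded compatible-string match with a dedicated helper. Roughly what the helper does (assumed equivalence, for illustration only; the real definition lives in the property API):

/* Assumed equivalence, sketched: */
static inline bool device_is_compatible_sketch(const struct fwnode_handle *fwnode,
					       const char *compat)
{
	return fwnode_property_match_string(fwnode, "compatible", compat) >= 0;
}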
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index 944d005d2bd1..c727103c8b05 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -104,61 +104,36 @@ static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum,
addr, regnum, val);
}
-static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum)
+static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int devad,
+ int regnum)
{
- u8 c45_dev = (regnum >> 16) & 0x1F;
- u16 c45_addr = regnum & 0xFFFF;
int rc;
rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
- addr, c45_dev, c45_addr);
+ addr, devad, regnum);
if (rc < 0)
return rc;
rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ,
- addr, c45_dev, 0);
+ addr, devad, 0);
if (rc < 0)
return rc;
return aspeed_mdio_get_data(bus);
}
-static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum,
- u16 val)
+static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int devad,
+ int regnum, u16 val)
{
- u8 c45_dev = (regnum >> 16) & 0x1F;
- u16 c45_addr = regnum & 0xFFFF;
int rc;
rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
- addr, c45_dev, c45_addr);
+ addr, devad, regnum);
if (rc < 0)
return rc;
return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE,
- addr, c45_dev, val);
-}
-
-static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
- regnum);
-
- if (regnum & MII_ADDR_C45)
- return aspeed_mdio_read_c45(bus, addr, regnum);
-
- return aspeed_mdio_read_c22(bus, addr, regnum);
-}
-
-static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
-{
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
- __func__, addr, regnum, val);
-
- if (regnum & MII_ADDR_C45)
- return aspeed_mdio_write_c45(bus, addr, regnum, val);
-
- return aspeed_mdio_write_c22(bus, addr, regnum, val);
+ addr, devad, val);
}
static int aspeed_mdio_probe(struct platform_device *pdev)
@@ -185,9 +160,10 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
bus->name = DRV_NAME;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
bus->parent = &pdev->dev;
- bus->read = aspeed_mdio_read;
- bus->write = aspeed_mdio_write;
- bus->probe_capabilities = MDIOBUS_C22_C45;
+ bus->read = aspeed_mdio_read_c22;
+ bus->write = aspeed_mdio_write_c22;
+ bus->read_c45 = aspeed_mdio_read_c45;
+ bus->write_c45 = aspeed_mdio_write_c45;
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 07609114a26b..b83932562be2 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -127,14 +127,12 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg)
/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the
lower 16 bits of the 21 bit address. This transfer is done identically to a
- MDIO_WRITE except for a different code. To enable clause 45 mode or
- MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices
- can exist on the same bus. Normal devices should ignore the MDIO_ADDR
+ MDIO_WRITE except for a different code. Theoretically clause 45 and normal
+ devices can exist on the same bus. Normal devices should ignore the MDIO_ADDR
phase. */
-static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
+static void mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, int dev_addr,
+ int reg)
{
- unsigned int dev_addr = (addr >> 16) & 0x1F;
- unsigned int reg = addr & 0xFFFF;
mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr);
/* send the turnaround (10) */
@@ -145,21 +143,13 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
ctrl->ops->set_mdio_dir(ctrl, 0);
mdiobb_get_bit(ctrl);
-
- return dev_addr;
}
-int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+static int mdiobb_read_common(struct mii_bus *bus, int phy)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
- if (reg & MII_ADDR_C45) {
- reg = mdiobb_cmd_addr(ctrl, phy, reg);
- mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
- } else
- mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg);
-
ctrl->ops->set_mdio_dir(ctrl, 0);
/* check the turnaround bit: the PHY should be driving it to zero, if this
@@ -180,17 +170,31 @@ int mdiobb_read(struct mii_bus *bus, int phy, int reg)
mdiobb_get_bit(ctrl);
return ret;
}
-EXPORT_SYMBOL(mdiobb_read);
-int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
- if (reg & MII_ADDR_C45) {
- reg = mdiobb_cmd_addr(ctrl, phy, reg);
- mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
- } else
- mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg);
+ mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg);
+
+ return mdiobb_read_common(bus, phy);
+}
+EXPORT_SYMBOL(mdiobb_read_c22);
+
+int mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+
+ mdiobb_cmd_addr(ctrl, phy, devad, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
+
+ return mdiobb_read_common(bus, phy);
+}
+EXPORT_SYMBOL(mdiobb_read_c45);
+
+static int mdiobb_write_common(struct mii_bus *bus, u16 val)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
/* send the turnaround (10) */
mdiobb_send_bit(ctrl, 1);
@@ -202,7 +206,27 @@ int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
mdiobb_get_bit(ctrl);
return 0;
}
-EXPORT_SYMBOL(mdiobb_write);
+
+int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+
+ mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg);
+
+ return mdiobb_write_common(bus, val);
+}
+EXPORT_SYMBOL(mdiobb_write_c22);
+
+int mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, int reg, u16 val)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+
+ mdiobb_cmd_addr(ctrl, phy, devad, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
+
+ return mdiobb_write_common(bus, val);
+}
+EXPORT_SYMBOL(mdiobb_write_c45);
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
@@ -214,8 +238,11 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
__module_get(ctrl->ops->owner);
- bus->read = mdiobb_read;
- bus->write = mdiobb_write;
+ bus->read = mdiobb_read_c22;
+ bus->write = mdiobb_write_c22;
+ bus->read_c45 = mdiobb_read_c45;
+ bus->write_c45 = mdiobb_write_c45;
+
bus->priv = ctrl;
if (!ctrl->override_op_c22) {
ctrl->op_c22_read = MDIO_READ;
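The bitbang conversion above is the template for the rest of this series: the combined read/write entry points that sniffed MII_ADDR_C45 in regnum are split into dedicated c22 and c45 bus ops that receive devad as its own argument. A sketch of the legacy encoding being unwound (constants as historically defined):

/* Legacy packing removed throughout this series (sketch): clause-45
 * accesses rode through the clause-22 op with a flag bit set and the
 * device address folded into regnum. */
#define MII_ADDR_C45		(1 << 30)	/* historical flag bit */
#define MII_DEVADDR_C45_SHIFT	16

static unsigned int c45_pack(unsigned int devad, unsigned int regnum)
{
	return MII_ADDR_C45 | (devad << MII_DEVADDR_C45_SHIFT) |
	       (regnum & 0xffff);
}

static unsigned int c45_devad(unsigned int packed)
{
	return (packed >> MII_DEVADDR_C45_SHIFT) & 0x1f;	/* 5-bit devad */
}

static unsigned int c45_regnum(unsigned int packed)
{
	return packed & 0xffff;		/* 16-bit register number */
}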
diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c
index 95ce274c1be1..100e46a702ee 100644
--- a/drivers/net/mdio/mdio-cavium.c
+++ b/drivers/net/mdio/mdio-cavium.c
@@ -26,7 +26,7 @@ static void cavium_mdiobus_set_mode(struct cavium_mdiobus *p,
}
static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p,
- int phy_id, int regnum)
+ int phy_id, int devad, int regnum)
{
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_wr_dat smi_wr;
@@ -38,12 +38,10 @@ static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p,
smi_wr.s.dat = regnum & 0xffff;
oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
- regnum = (regnum >> 16) & 0x1f;
-
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = regnum;
+ smi_cmd.s.reg_adr = devad;
oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
@@ -59,28 +57,51 @@ static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p,
return 0;
}
-int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
+int cavium_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct cavium_mdiobus *p = bus->priv;
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_rd_dat smi_rd;
- unsigned int op = 1; /* MDIO_CLAUSE_22_READ */
int timeout = 1000;
- if (regnum & MII_ADDR_C45) {
- int r = cavium_mdiobus_c45_addr(p, phy_id, regnum);
+ cavium_mdiobus_set_mode(p, C22);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
+
+ do {
+ /* Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ __delay(1000);
+ smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
+ } while (smi_rd.s.pending && --timeout);
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return -EIO;
+}
+EXPORT_SYMBOL(cavium_mdiobus_read_c22);
- if (r < 0)
- return r;
+int cavium_mdiobus_read_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum)
+{
+ struct cavium_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+ int timeout = 1000;
+ int r;
- regnum = (regnum >> 16) & 0x1f;
- op = 3; /* MDIO_CLAUSE_45_READ */
- } else {
- cavium_mdiobus_set_mode(p, C22);
- }
+ r = cavium_mdiobus_c45_addr(p, phy_id, devad, regnum);
+ if (r < 0)
+ return r;
smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = op;
+ smi_cmd.s.phy_op = 3; /* MDIO_CLAUSE_45_READ */
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
@@ -98,36 +119,64 @@ int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
else
return -EIO;
}
-EXPORT_SYMBOL(cavium_mdiobus_read);
+EXPORT_SYMBOL(cavium_mdiobus_read_c45);
-int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
+int cavium_mdiobus_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 val)
{
struct cavium_mdiobus *p = bus->priv;
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_wr_dat smi_wr;
- unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */
int timeout = 1000;
- if (regnum & MII_ADDR_C45) {
- int r = cavium_mdiobus_c45_addr(p, phy_id, regnum);
+ cavium_mdiobus_set_mode(p, C22);
- if (r < 0)
- return r;
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
- regnum = (regnum >> 16) & 0x1f;
- op = 1; /* MDIO_CLAUSE_45_WRITE */
- } else {
- cavium_mdiobus_set_mode(p, C22);
- }
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
+
+ do {
+ /* Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ __delay(1000);
+ smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
+ } while (smi_wr.s.pending && --timeout);
+
+ if (timeout <= 0)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(cavium_mdiobus_write_c22);
+
+int cavium_mdiobus_write_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum, u16 val)
+{
+ struct cavium_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+ int r;
+
+ r = cavium_mdiobus_c45_addr(p, phy_id, devad, regnum);
+ if (r < 0)
+ return r;
smi_wr.u64 = 0;
smi_wr.s.dat = val;
oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = op;
+ smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_45_WRITE */
smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = regnum;
+ smi_cmd.s.reg_adr = devad;
oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
@@ -143,7 +192,7 @@ int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
return 0;
}
-EXPORT_SYMBOL(cavium_mdiobus_write);
+EXPORT_SYMBOL(cavium_mdiobus_write_c45);
MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers");
MODULE_AUTHOR("David Daney");
diff --git a/drivers/net/mdio/mdio-cavium.h b/drivers/net/mdio/mdio-cavium.h
index a2245d436f5d..71b8e20cd664 100644
--- a/drivers/net/mdio/mdio-cavium.h
+++ b/drivers/net/mdio/mdio-cavium.h
@@ -114,5 +114,10 @@ static inline u64 oct_mdio_readq(void __iomem *addr)
#define oct_mdio_readq(addr) readq(addr)
#endif
-int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum);
-int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val);
+int cavium_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int regnum);
+int cavium_mdiobus_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 val);
+int cavium_mdiobus_read_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum);
+int cavium_mdiobus_write_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum, u16 val);
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index bf8bf5e20faf..1e0c206d0f2e 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -30,7 +30,8 @@ static unsigned int i2c_mii_phy_addr(int phy_id)
return phy_id + 0x40;
}
-static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
+static int i2c_mii_read_default_c45(struct mii_bus *bus, int phy_id, int devad,
+ int reg)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msgs[2];
@@ -41,8 +42,8 @@ static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
return 0xffff;
p = addr;
- if (reg & MII_ADDR_C45) {
- *p++ = 0x20 | ((reg >> 16) & 31);
+ if (devad >= 0) {
+ *p++ = 0x20 | devad;
*p++ = reg >> 8;
}
*p++ = reg;
@@ -64,8 +65,8 @@ static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
return data[0] << 8 | data[1];
}
-static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
- u16 val)
+static int i2c_mii_write_default_c45(struct mii_bus *bus, int phy_id,
+ int devad, int reg, u16 val)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msg;
@@ -76,8 +77,8 @@ static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
return 0;
p = data;
- if (reg & MII_ADDR_C45) {
- *p++ = (reg >> 16) & 31;
+ if (devad >= 0) {
+ *p++ = devad;
*p++ = reg >> 8;
}
*p++ = reg;
@@ -94,6 +95,17 @@ static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
return ret < 0 ? ret : 0;
}
+static int i2c_mii_read_default_c22(struct mii_bus *bus, int phy_id, int reg)
+{
+ return i2c_mii_read_default_c45(bus, phy_id, -1, reg);
+}
+
+static int i2c_mii_write_default_c22(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ return i2c_mii_write_default_c45(bus, phy_id, -1, reg, val);
+}
+
/* RollBall SFPs do not access internal PHY via I2C address 0x56, but
* instead via address 0x51, when SFP page is set to 0x03 and password to
* 0xffffffff.
@@ -285,9 +297,6 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
int bus_addr, ret;
u16 val;
- if (!(reg & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
bus_addr = i2c_mii_phy_addr(phy_id);
if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
return 0xffff;
@@ -319,9 +328,6 @@ static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
int bus_addr, ret;
u8 buf[6];
- if (!(reg & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
bus_addr = i2c_mii_phy_addr(phy_id);
if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
return 0;
@@ -403,8 +409,10 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
mii->write = i2c_mii_write_rollball;
break;
default:
- mii->read = i2c_mii_read_default;
- mii->write = i2c_mii_write_default;
+ mii->read = i2c_mii_read_default_c22;
+ mii->write = i2c_mii_write_default_c22;
+ mii->read_c45 = i2c_mii_read_default_c45;
+ mii->write_c45 = i2c_mii_write_default_c45;
break;
}
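In the I2C MDIO conversion above, the default accessors take devad directly and the clause-22 entry points reuse them with devad = -1 meaning "no clause-45 preamble". A small sketch of what that sentinel changes on the wire, mirroring the buffer-building code above:

/* devad < 0 means clause 22: no device-address preamble is emitted.
 * Mirrors the p-buffer building in i2c_mii_read/write_default_c45(). */
static int mii_i2c_frame_len(int devad)
{
	int len = 1;			/* low byte of the register number */

	if (devad >= 0)
		len += 2;		/* 0x20 | devad, then reg >> 8 */
	return len;
}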
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 4eba5a91075c..78b93de636f5 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -53,7 +53,8 @@ static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
IPQ4019_MDIO_SLEEP, IPQ4019_MDIO_TIMEOUT);
}
-static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int ipq4019_mdio_read_c45(struct mii_bus *bus, int mii_id, int mmd,
+ int reg)
{
struct ipq4019_mdio_data *priv = bus->priv;
unsigned int data;
@@ -62,61 +63,71 @@ static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* Clause 45 support */
- if (regnum & MII_ADDR_C45) {
- unsigned int mmd = (regnum >> 16) & 0x1F;
- unsigned int reg = regnum & 0xFFFF;
+ data = readl(priv->membase + MDIO_MODE_REG);
- /* Enter Clause 45 mode */
- data = readl(priv->membase + MDIO_MODE_REG);
+ data |= MDIO_MODE_C45;
- data |= MDIO_MODE_C45;
+ writel(data, priv->membase + MDIO_MODE_REG);
- writel(data, priv->membase + MDIO_MODE_REG);
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
- /* issue the phy address and mmd */
- writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
- /* issue reg */
- writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
- } else {
- /* Enter Clause 22 mode */
- data = readl(priv->membase + MDIO_MODE_REG);
+ /* issue read command */
+ writel(cmd, priv->membase + MDIO_CMD_REG);
- data &= ~MDIO_MODE_C45;
+ /* Wait read complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
- writel(data, priv->membase + MDIO_MODE_REG);
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ writel(cmd, priv->membase + MDIO_CMD_REG);
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
- }
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
- /* issue read command */
- writel(cmd, priv->membase + MDIO_CMD_REG);
+ /* Read and return data */
+ return readl(priv->membase + MDIO_DATA_READ_REG);
+}
+
+static int ipq4019_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
+ unsigned int cmd;
- /* Wait read complete */
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- if (regnum & MII_ADDR_C45) {
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ;
+ data = readl(priv->membase + MDIO_MODE_REG);
- writel(cmd, priv->membase + MDIO_CMD_REG);
+ data &= ~MDIO_MODE_C45;
- if (ipq4019_mdio_wait_busy(bus))
- return -ETIMEDOUT;
- }
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+
+ /* issue read command */
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ /* Wait read complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
/* Read and return data */
return readl(priv->membase + MDIO_DATA_READ_REG);
}
-static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int ipq4019_mdio_write_c45(struct mii_bus *bus, int mii_id, int mmd,
+ int reg, u16 value)
{
struct ipq4019_mdio_data *priv = bus->priv;
unsigned int data;
@@ -125,50 +136,63 @@ static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* Clause 45 support */
- if (regnum & MII_ADDR_C45) {
- unsigned int mmd = (regnum >> 16) & 0x1F;
- unsigned int reg = regnum & 0xFFFF;
+ data = readl(priv->membase + MDIO_MODE_REG);
- /* Enter Clause 45 mode */
- data = readl(priv->membase + MDIO_MODE_REG);
+ data |= MDIO_MODE_C45;
- data |= MDIO_MODE_C45;
+ writel(data, priv->membase + MDIO_MODE_REG);
- writel(data, priv->membase + MDIO_MODE_REG);
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
- /* issue the phy address and mmd */
- writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
- /* issue reg */
- writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
+ writel(cmd, priv->membase + MDIO_CMD_REG);
- writel(cmd, priv->membase + MDIO_CMD_REG);
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
- if (ipq4019_mdio_wait_busy(bus))
- return -ETIMEDOUT;
- } else {
- /* Enter Clause 22 mode */
- data = readl(priv->membase + MDIO_MODE_REG);
+ /* issue write data */
+ writel(value, priv->membase + MDIO_DATA_WRITE_REG);
- data &= ~MDIO_MODE_C45;
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE;
+ writel(cmd, priv->membase + MDIO_CMD_REG);
- writel(data, priv->membase + MDIO_MODE_REG);
+ /* Wait write complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
- }
+ return 0;
+}
+
+static int ipq4019_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
+ unsigned int cmd;
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ /* Enter Clause 22 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data &= ~MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
/* issue write data */
writel(value, priv->membase + MDIO_DATA_WRITE_REG);
/* issue write command */
- if (regnum & MII_ADDR_C45)
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE;
- else
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
writel(cmd, priv->membase + MDIO_CMD_REG);
@@ -235,8 +259,10 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
bus->name = "ipq4019_mdio";
- bus->read = ipq4019_mdio_read;
- bus->write = ipq4019_mdio_write;
+ bus->read = ipq4019_mdio_read_c22;
+ bus->write = ipq4019_mdio_write_c22;
+ bus->read_c45 = ipq4019_mdio_read_c45;
+ bus->write_c45 = ipq4019_mdio_write_c45;
bus->reset = ipq_mdio_reset;
bus->parent = &pdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index 37e0d8b6da07..fd9716960106 100644
--- a/drivers/net/mdio/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
@@ -57,10 +57,6 @@ ipq8064_mdio_read(struct mii_bus *bus, int phy_addr, int reg_offset)
u32 ret_val;
int err;
- /* Reject clause 45 */
- if (reg_offset & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
@@ -81,10 +77,6 @@ ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data)
u32 miiaddr = MII_WRITE | MII_BUSY | MII_CLKRANGE_250_300M;
struct ipq8064_mdio *priv = bus->priv;
- /* Reject clause 45 */
- if (reg_offset & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
regmap_write(priv->base, MII_DATA_REG_ADDR, data);
miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 51f68daac152..c87e991d1a17 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -108,9 +108,6 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
u32 val;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = mscc_miim_wait_pending(bus);
if (ret)
goto out;
@@ -154,9 +151,6 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
struct mscc_miim_dev *miim = bus->priv;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = mscc_miim_wait_pending(bus);
if (ret < 0)
goto out;
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 014c0baedbd2..956d54846b62 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -98,7 +98,7 @@ static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
* Return value: Successful Read operation returns read reg values and write
* operation returns 0. Failure operation returns negative error code.
*/
-static int start_miim_ops(void __iomem *base,
+static int start_miim_ops(void __iomem *base, bool c45,
u16 phyid, u32 reg, u16 val, u32 op)
{
u32 param;
@@ -112,7 +112,7 @@ static int start_miim_ops(void __iomem *base,
param = readl(base + MDIO_PARAM_OFFSET);
param |= phyid << MDIO_PARAM_PHY_ID;
param |= val << MDIO_PARAM_PHY_DATA;
- if (reg & MII_ADDR_C45)
+ if (c45)
param |= BIT(MDIO_PARAM_C45_SEL);
writel(param, base + MDIO_PARAM_OFFSET);
@@ -131,28 +131,58 @@ err:
return ret;
}
-static int iproc_mdiomux_read(struct mii_bus *bus, int phyid, int reg)
+static int iproc_mdiomux_read_c22(struct mii_bus *bus, int phyid, int reg)
{
struct iproc_mdiomux_desc *md = bus->priv;
int ret;
- ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP);
+ ret = start_miim_ops(md->base, false, phyid, reg, 0, MDIO_CTRL_READ_OP);
if (ret < 0)
- dev_err(&bus->dev, "mdiomux read operation failed!!!");
+ dev_err(&bus->dev, "mdiomux c22 read operation failed!!!");
return ret;
}
-static int iproc_mdiomux_write(struct mii_bus *bus,
- int phyid, int reg, u16 val)
+static int iproc_mdiomux_read_c45(struct mii_bus *bus, int phyid, int devad,
+ int reg)
+{
+ struct iproc_mdiomux_desc *md = bus->priv;
+ int ret;
+
+ ret = start_miim_ops(md->base, true, phyid, reg | devad << 16, 0,
+ MDIO_CTRL_READ_OP);
+ if (ret < 0)
+ dev_err(&bus->dev, "mdiomux read c45 operation failed!!!");
+
+ return ret;
+}
+
+static int iproc_mdiomux_write_c22(struct mii_bus *bus,
+ int phyid, int reg, u16 val)
+{
+ struct iproc_mdiomux_desc *md = bus->priv;
+ int ret;
+
+ /* Write val at reg offset */
+ ret = start_miim_ops(md->base, false, phyid, reg, val,
+ MDIO_CTRL_WRITE_OP);
+ if (ret < 0)
+ dev_err(&bus->dev, "mdiomux write c22 operation failed!!!");
+
+ return ret;
+}
+
+static int iproc_mdiomux_write_c45(struct mii_bus *bus,
+ int phyid, int devad, int reg, u16 val)
{
struct iproc_mdiomux_desc *md = bus->priv;
int ret;
/* Write val at reg offset */
- ret = start_miim_ops(md->base, phyid, reg, val, MDIO_CTRL_WRITE_OP);
+ ret = start_miim_ops(md->base, true, phyid, reg | devad << 16, val,
+ MDIO_CTRL_WRITE_OP);
if (ret < 0)
- dev_err(&bus->dev, "mdiomux write operation failed!!!");
+ dev_err(&bus->dev, "mdiomux write c45 operation failed!!!");
return ret;
}
@@ -223,8 +253,10 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
bus->name = "iProc MDIO mux bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
bus->parent = &pdev->dev;
- bus->read = iproc_mdiomux_read;
- bus->write = iproc_mdiomux_write;
+ bus->read = iproc_mdiomux_read_c22;
+ bus->write = iproc_mdiomux_write_c22;
+ bus->read_c45 = iproc_mdiomux_read_c45;
+ bus->write_c45 = iproc_mdiomux_write_c45;
bus->phy_mask = ~0;
bus->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index c4542ecf5623..910e5cf74e89 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -53,10 +53,8 @@
#define MESON_G12A_MDIO_INTERNAL_ID 1
struct g12a_mdio_mux {
- bool pll_is_enabled;
void __iomem *regs;
void *mux_handle;
- struct clk *pclk;
struct clk *pll;
};
@@ -155,14 +153,12 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
int ret;
/* Enable the phy clock */
- if (!priv->pll_is_enabled) {
+ if (!__clk_is_enabled(priv->pll)) {
ret = clk_prepare_enable(priv->pll);
if (ret)
return ret;
}
- priv->pll_is_enabled = true;
-
/* Initialize ephy control */
writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
@@ -193,10 +189,8 @@ static int g12a_enable_external_mdio(struct g12a_mdio_mux *priv)
writel_relaxed(0x0, priv->regs + ETH_PHY_CNTL2);
/* Disable the phy clock if enabled */
- if (priv->pll_is_enabled) {
+ if (__clk_is_enabled(priv->pll))
clk_disable_unprepare(priv->pll);
- priv->pll_is_enabled = false;
- }
return 0;
}
@@ -311,6 +305,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct g12a_mdio_mux *priv;
+ struct clk *pclk;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -323,34 +318,21 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
- priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk))
- return dev_err_probe(dev, PTR_ERR(priv->pclk),
+ pclk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(pclk))
+ return dev_err_probe(dev, PTR_ERR(pclk),
"failed to get peripheral clock\n");
- /* Make sure the device registers are clocked */
- ret = clk_prepare_enable(priv->pclk);
- if (ret) {
- dev_err(dev, "failed to enable peripheral clock");
- return ret;
- }
-
/* Register PLL in CCF */
ret = g12a_ephy_glue_clk_register(dev);
if (ret)
- goto err;
+ return ret;
ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
&priv->mux_handle, dev, NULL);
- if (ret) {
+ if (ret)
dev_err_probe(dev, ret, "mdio multiplexer init failed\n");
- goto err;
- }
- return 0;
-
-err:
- clk_disable_unprepare(priv->pclk);
return ret;
}
@@ -360,11 +342,9 @@ static int g12a_mdio_mux_remove(struct platform_device *pdev)
mdio_mux_uninit(priv->mux_handle);
- if (priv->pll_is_enabled)
+ if (__clk_is_enabled(priv->pll))
clk_disable_unprepare(priv->pll);
- clk_disable_unprepare(priv->pclk);
-
return 0;
}
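
Editor's sketch: the g12a rework above is a straight conversion to devm_clk_get_enabled(), which folds clock lookup, prepare/enable and automatic disable/unprepare on driver detach into one call, plus __clk_is_enabled() in place of the hand-rolled pll_is_enabled flag. The resulting probe-time idiom:

	/* The clock stays enabled for the device's lifetime, so no
	 * clk_disable_unprepare() on probe error paths or in remove. */
	struct clk *pclk = devm_clk_get_enabled(dev, "pclk");

	if (IS_ERR(pclk))
		return dev_err_probe(dev, PTR_ERR(pclk),
				     "failed to get peripheral clock\n");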
diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c
new file mode 100644
index 000000000000..76188575ca1f
--- /dev/null
+++ b/drivers/net/mdio/mdio-mux-meson-gxl.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Baylibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define ETH_REG2 0x0
+#define REG2_PHYID GENMASK(21, 0)
+#define EPHY_GXL_ID 0x110181
+#define REG2_LEDACT GENMASK(23, 22)
+#define REG2_LEDLINK GENMASK(25, 24)
+#define REG2_DIV4SEL BIT(27)
+#define REG2_ADCBYPASS BIT(30)
+#define REG2_CLKINSEL BIT(31)
+#define ETH_REG3 0x4
+#define REG3_ENH BIT(3)
+#define REG3_CFGMODE GENMASK(6, 4)
+#define REG3_AUTOMDIX BIT(7)
+#define REG3_PHYADDR GENMASK(12, 8)
+#define REG3_PWRUPRST BIT(21)
+#define REG3_PWRDOWN BIT(22)
+#define REG3_LEDPOL BIT(23)
+#define REG3_PHYMDI BIT(26)
+#define REG3_CLKINEN BIT(29)
+#define REG3_PHYIP BIT(30)
+#define REG3_PHYEN BIT(31)
+#define ETH_REG4 0x8
+#define REG4_PWRUPRSTSIG BIT(0)
+
+#define MESON_GXL_MDIO_EXTERNAL_ID 0
+#define MESON_GXL_MDIO_INTERNAL_ID 1
+
+struct gxl_mdio_mux {
+ void __iomem *regs;
+ void *mux_handle;
+};
+
+static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
+{
+ u32 val;
+
+ /* Setup the internal phy */
+ val = (REG3_ENH |
+ FIELD_PREP(REG3_CFGMODE, 0x7) |
+ REG3_AUTOMDIX |
+ FIELD_PREP(REG3_PHYADDR, 8) |
+ REG3_LEDPOL |
+ REG3_PHYMDI |
+ REG3_CLKINEN |
+ REG3_PHYIP);
+
+ writel(REG4_PWRUPRSTSIG, priv->regs + ETH_REG4);
+ writel(val, priv->regs + ETH_REG3);
+ mdelay(10);
+
+ /* NOTE: The HW keeps the phy id configurable at runtime.
+ * The id below is arbitrary. It is the one used in the vendor code.
+ * The only constraint is that it must match the one in
+ * drivers/net/phy/meson-gxl.c to properly match the PHY.
+ */
+ writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
+ priv->regs + ETH_REG2);
+
+ /* Enable the internal phy */
+ val |= REG3_PHYEN;
+ writel(val, priv->regs + ETH_REG3);
+ writel(0, priv->regs + ETH_REG4);
+
+ /* The phy needs a bit of time to power up */
+ mdelay(10);
+}
+
+static void gxl_enable_external_mdio(struct gxl_mdio_mux *priv)
+{
+ /* Reset the mdio bus mux to the external phy */
+ writel(0, priv->regs + ETH_REG3);
+}
+
+static int gxl_mdio_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct gxl_mdio_mux *priv = dev_get_drvdata(data);
+
+ if (current_child == desired_child)
+ return 0;
+
+ switch (desired_child) {
+ case MESON_GXL_MDIO_EXTERNAL_ID:
+ gxl_enable_external_mdio(priv);
+ break;
+ case MESON_GXL_MDIO_INTERNAL_ID:
+ gxl_enable_internal_mdio(priv);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id gxl_mdio_mux_match[] = {
+ { .compatible = "amlogic,gxl-mdio-mux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gxl_mdio_mux_match);
+
+static int gxl_mdio_mux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gxl_mdio_mux *priv;
+ struct clk *rclk;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ rclk = devm_clk_get_enabled(dev, "ref");
+ if (IS_ERR(rclk))
+ return dev_err_probe(dev, PTR_ERR(rclk),
+ "failed to get reference clock\n");
+
+ ret = mdio_mux_init(dev, dev->of_node, gxl_mdio_switch_fn,
+ &priv->mux_handle, dev, NULL);
+ if (ret)
+ dev_err_probe(dev, ret, "mdio multiplexer init failed\n");
+
+ return ret;
+}
+
+static int gxl_mdio_mux_remove(struct platform_device *pdev)
+{
+ struct gxl_mdio_mux *priv = platform_get_drvdata(pdev);
+
+ mdio_mux_uninit(priv->mux_handle);
+
+ return 0;
+}
+
+static struct platform_driver gxl_mdio_mux_driver = {
+ .probe = gxl_mdio_mux_probe,
+ .remove = gxl_mdio_mux_remove,
+ .driver = {
+ .name = "gxl-mdio-mux",
+ .of_match_table = gxl_mdio_mux_match,
+ },
+};
+module_platform_driver(gxl_mdio_mux_driver);
+
+MODULE_DESCRIPTION("Amlogic GXL MDIO multiplexer driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/mdio/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index d5eabddfdf51..68fc55906e78 100644
--- a/drivers/net/mdio/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
@@ -34,9 +34,6 @@ static int mvusb_mdio_read(struct mii_bus *mdio, int dev, int reg)
struct mvusb_mdio *mvusb = mdio->priv;
int err, alen;
- if (dev & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0xa400 | (dev << 5) | reg);
err = usb_bulk_msg(mvusb->udev, usb_sndbulkpipe(mvusb->udev, 2),
@@ -57,9 +54,6 @@ static int mvusb_mdio_write(struct mii_bus *mdio, int dev, int reg, u16 val)
struct mvusb_mdio *mvusb = mdio->priv;
int alen;
- if (dev & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0x8000 | (dev << 5) | reg);
mvusb->buf[MVUSB_CMD_VAL] = cpu_to_le16(val);
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index e096e68ac667..7c65c547d377 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -58,8 +58,10 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%px", bus->register_base);
bus->mii_bus->parent = &pdev->dev;
- bus->mii_bus->read = cavium_mdiobus_read;
- bus->mii_bus->write = cavium_mdiobus_write;
+ bus->mii_bus->read = cavium_mdiobus_read_c22;
+ bus->mii_bus->write = cavium_mdiobus_write_c22;
+ bus->mii_bus->read_c45 = cavium_mdiobus_read_c45;
+ bus->mii_bus->write_c45 = cavium_mdiobus_write_c45;
platform_set_drvdata(pdev, bus);
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 822d2cdd2f35..3847ee92c109 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -93,8 +93,10 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
bus->mii_bus->name = KBUILD_MODNAME;
snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", r.start);
bus->mii_bus->parent = &pdev->dev;
- bus->mii_bus->read = cavium_mdiobus_read;
- bus->mii_bus->write = cavium_mdiobus_write;
+ bus->mii_bus->read = cavium_mdiobus_read_c22;
+ bus->mii_bus->write = cavium_mdiobus_write_c22;
+ bus->mii_bus->read_c45 = cavium_mdiobus_read_c45;
+ bus->mii_bus->write_c45 = cavium_mdiobus_write_c45;
err = of_mdiobus_register(bus->mii_bus, node);
if (err)
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 50854265864d..f60eb97e3a62 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -315,10 +315,6 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
return -EINVAL;
}
- if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
- NSIM_EA(bpf->extack, "program bound to different dev");
- return -EINVAL;
- }
state = bpf->prog->aux->offload->dev_priv;
if (WARN_ON(strcmp(state->state, "xlated"))) {
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b962fc8e1397..6045bece2654 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -527,13 +527,13 @@ static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev,
union devlink_param_value value;
value.vu32 = nsim_dev->max_macs;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = nsim_dev->test1;
- devlink_param_driverinit_value_set(devlink,
- NSIM_DEVLINK_PARAM_ID_TEST1,
- value);
+ devl_param_driverinit_value_set(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ value);
}
static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
@@ -542,14 +542,14 @@ static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
union devlink_param_value saved_value;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
if (!err)
nsim_dev->max_macs = saved_value.vu32;
- err = devlink_param_driverinit_value_get(devlink,
- NSIM_DEVLINK_PARAM_ID_TEST1,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ &saved_value);
if (!err)
nsim_dev->test1 = saved_value.vbool;
}
@@ -1556,14 +1556,18 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
goto err_devlink_unlock;
}
- err = nsim_dev_resources_register(devlink);
+ err = devl_register(devlink);
if (err)
goto err_vfc_free;
- err = devlink_params_register(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
+ err = nsim_dev_resources_register(devlink);
if (err)
goto err_dl_unregister;
+
+ err = devl_params_register(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ if (err)
+ goto err_resource_unregister;
nsim_devlink_set_params_init_values(nsim_dev, devlink);
err = nsim_dev_dummy_region_init(nsim_dev, devlink);
@@ -1605,9 +1609,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
goto err_hwstats_exit;
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devl_unlock(devlink);
- devlink_register(devlink);
return 0;
err_hwstats_exit:
@@ -1627,10 +1629,12 @@ err_traps_exit:
err_dummy_region_exit:
nsim_dev_dummy_region_exit(nsim_dev);
err_params_unregister:
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
-err_dl_unregister:
+ devl_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+err_resource_unregister:
devl_resources_unregister(devlink);
+err_dl_unregister:
+ devl_unregister(devlink);
err_vfc_free:
kfree(nsim_dev->vfconfigs);
err_devlink_unlock:
@@ -1668,15 +1672,15 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct devlink *devlink = priv_to_devlink(nsim_dev);
- devlink_unregister(devlink);
devl_lock(devlink);
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
+ devl_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
devl_resources_unregister(devlink);
+ devl_unregister(devlink);
kfree(nsim_dev->vfconfigs);
kfree(nsim_dev->fa_cookie);
devl_unlock(devlink);
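
Editor's sketch: netdevsim moves here from the devlink_* helpers, which take the devlink instance lock internally, to the devl_* variants that assume the caller already holds it via devl_lock(); the health reporter conversion below follows the same rule. The calling convention, with error unwinding elided:

	devl_lock(devlink);
	err = devl_register(devlink);
	if (!err)
		err = devl_params_register(devlink, params,
					   ARRAY_SIZE(params));
	devl_unlock(devlink);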
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index aa77af4a68df..eb04ed715d2d 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -233,16 +233,16 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
int err;
health->empty_reporter =
- devlink_health_reporter_create(devlink,
- &nsim_dev_empty_reporter_ops,
- 0, health);
+ devl_health_reporter_create(devlink,
+ &nsim_dev_empty_reporter_ops,
+ 0, health);
if (IS_ERR(health->empty_reporter))
return PTR_ERR(health->empty_reporter);
health->dummy_reporter =
- devlink_health_reporter_create(devlink,
- &nsim_dev_dummy_reporter_ops,
- 0, health);
+ devl_health_reporter_create(devlink,
+ &nsim_dev_dummy_reporter_ops,
+ 0, health);
if (IS_ERR(health->dummy_reporter)) {
err = PTR_ERR(health->dummy_reporter);
goto err_empty_reporter_destroy;
@@ -266,9 +266,9 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
return 0;
err_dummy_reporter_destroy:
- devlink_health_reporter_destroy(health->dummy_reporter);
+ devl_health_reporter_destroy(health->dummy_reporter);
err_empty_reporter_destroy:
- devlink_health_reporter_destroy(health->empty_reporter);
+ devl_health_reporter_destroy(health->empty_reporter);
return err;
}
@@ -278,6 +278,6 @@ void nsim_dev_health_exit(struct nsim_dev *nsim_dev)
debugfs_remove_recursive(health->ddir);
kfree(health->recovered_break_msg);
- devlink_health_reporter_destroy(health->dummy_reporter);
- devlink_health_reporter_destroy(health->empty_reporter);
+ devl_health_reporter_destroy(health->dummy_reporter);
+ devl_health_reporter_destroy(health->empty_reporter);
}
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index b93baf5c8bee..f0d58092e7e9 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -125,7 +125,8 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
return 0;
}
-static int nsim_ipsec_add_sa(struct xfrm_state *xs)
+static int nsim_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct nsim_ipsec *ipsec;
struct net_device *dev;
@@ -139,25 +140,24 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
ipsec = &ns->ipsec;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
- netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
- xs->id.proto);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");
return -EINVAL;
}
if (xs->calg) {
- netdev_err(dev, "Compression offload not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- netdev_err(dev, "Unsupported ipsec offload type\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
return -EINVAL;
}
/* find the first unused index */
ret = nsim_ipsec_find_empty_idx(ipsec);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Rx table!\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
@@ -172,7 +172,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
/* get the key and salt */
ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for SA table");
return ret;
}
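
Editor's sketch: the ipsec conversion swaps netdev_err() for NL_SET_ERR_MSG_MOD(), so a rejected offload request is reported through the netlink extended ack to the requesting process rather than only to the kernel log. The shape of the pattern (names hypothetical):

	static int example_add_sa(struct xfrm_state *xs,
				  struct netlink_ext_ack *extack)
	{
		if (xs->id.proto != IPPROTO_ESP) {
			/* extack travels with the netlink request; the
			 * message reaches userspace alongside -EINVAL. */
			NL_SET_ERR_MSG_MOD(extack,
					   "Unsupported protocol for ipsec offload");
			return -EINVAL;
		}
		return 0;
	}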
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 6db6a75ff9b9..35fa1ca98671 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -286,6 +286,7 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_TSO;
dev->hw_features |= NETIF_F_HW_TC;
dev->max_mtu = ETH_MAX_MTU;
+ dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
}
static int nsim_init_netdevsim(struct netdevsim *ns)
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index 7d5fc7f54b2f..3903f3baba2b 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -10,9 +10,6 @@
#define SGMII_CLOCK_PERIOD_NS 8 /* PCS is clocked at 125 MHz */
#define LINK_TIMER_VAL(ns) ((u32)((ns) / SGMII_CLOCK_PERIOD_NS))
-#define SGMII_AN_LINK_TIMER_NS 1600000 /* defined by SGMII spec */
-#define IEEE8023_LINK_TIMER_NS 10000000
-
#define LINK_TIMER_LO 0x12
#define LINK_TIMER_HI 0x13
#define IF_MODE 0x14
@@ -126,26 +123,25 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising)
{
+ int link_timer_ns;
u32 link_timer;
u16 if_mode;
int err;
- if (interface == PHY_INTERFACE_MODE_1000BASEX) {
- link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
+ link_timer_ns = phylink_get_link_timer_ns(interface);
+ if (link_timer_ns > 0) {
+ link_timer = LINK_TIMER_VAL(link_timer_ns);
+
mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
+ }
+ if (interface == PHY_INTERFACE_MODE_1000BASEX) {
if_mode = 0;
} else {
if_mode = IF_MODE_SGMII_EN;
- if (mode == MLO_AN_INBAND) {
+ if (mode == MLO_AN_INBAND)
if_mode |= IF_MODE_USE_SGMII_AN;
-
- /* Adjust link timer for SGMII */
- link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
- mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
- mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
- }
}
err = mdiodev_modify(pcs, IF_MODE,
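
Editor's note: LINK_TIMER_VAL() converts the duration reported by phylink_get_link_timer_ns() into 8 ns PCS clock ticks, split across the two 16-bit registers. Worked through for SGMII's 1.6 ms timer:

	link_timer = LINK_TIMER_VAL(1600000); /* 1600000 / 8 = 200000 = 0x30d40 */
	mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff); /* 0x0d40 */
	mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);    /* 0x0003 */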
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index c1424119e821..323bec5e57f8 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -121,15 +121,11 @@ static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
* struct miic - MII converter structure
* @base: base address of the MII converter
* @dev: Device associated to the MII converter
- * @clks: Clocks used for this device
- * @nclk: Number of clocks
* @lock: Lock used for read-modify-write access
*/
struct miic {
void __iomem *base;
struct device *dev;
- struct clk_bulk_data *clks;
- int nclk;
spinlock_t lock;
};
@@ -232,7 +228,7 @@ static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
}
miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val);
- miic_converter_enable(miic_port->miic, miic_port->port, 1);
+ miic_converter_enable(miic, miic_port->port, 1);
return 0;
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index f6a038a1d51e..bc428a816719 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -199,9 +199,7 @@ int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val)
static int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg,
u16 mask, u16 set)
{
- u32 reg_addr = mdiobus_c45_addr(dev, reg);
-
- return mdiodev_modify_changed(xpcs->mdiodev, reg_addr, mask, set);
+ return mdiodev_c45_modify_changed(xpcs->mdiodev, dev, reg, mask, set);
}
static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1327290decab..54874555c921 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -257,7 +257,7 @@ config MOTORCOMM_PHY
tristate "Motorcomm PHYs"
help
Enables support for Motorcomm network PHYs.
- Currently supports the YT8511, YT8521, YT8531S Gigabit Ethernet PHYs.
+ Currently supports YT85xx Gigabit Ethernet PHYs.
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
@@ -277,6 +277,13 @@ config NXP_TJA11XX_PHY
help
Currently supports the NXP TJA1100 and TJA1101 PHY.
+config NCN26000_PHY
+ tristate "Onsemi 10BASE-T1S Ethernet PHY"
+ help
+ Adds support for the onsemi 10BASE-T1S Ethernet PHY.
+ Currently supports the NCN26000 10BASE-T1S Industrial PHY
+ with MII interface.
+
config AT803X_PHY
tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
depends on REGULATOR
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index f7138d3c896b..b5138066ba04 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
+obj-$(CONFIG_NCN26000_PHY) += ncn26000.o
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0d706ee266af..63a3644d86c9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1467,7 +1467,7 @@ static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
/* According to the Marvell data sheet EEE must be disabled for
* Fast Link Down detection to work properly
*/
- ret = phy_ethtool_get_eee(phydev, &eee);
+ ret = genphy_c45_ethtool_get_eee(phydev, &eee);
if (!ret && eee.eee_enabled) {
phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
return -EBUSY;
diff --git a/drivers/net/phy/mdio-open-alliance.h b/drivers/net/phy/mdio-open-alliance.h
new file mode 100644
index 000000000000..931e14660d75
--- /dev/null
+++ b/drivers/net/phy/mdio-open-alliance.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mdio-open-alliance.h - definition of OPEN Alliance SIG standard registers
+ */
+
+#ifndef __MDIO_OPEN_ALLIANCE__
+#define __MDIO_OPEN_ALLIANCE__
+
+#include <linux/mdio.h>
+
+/* NOTE: all OATC14 registers are located in MDIO_MMD_VEND2 */
+
+/* Open Alliance TC14 (10BASE-T1S) registers */
+#define MDIO_OATC14_PLCA_IDVER 0xca00 /* PLCA ID and version */
+#define MDIO_OATC14_PLCA_CTRL0 0xca01 /* PLCA Control register 0 */
+#define MDIO_OATC14_PLCA_CTRL1 0xca02 /* PLCA Control register 1 */
+#define MDIO_OATC14_PLCA_STATUS 0xca03 /* PLCA Status register */
+#define MDIO_OATC14_PLCA_TOTMR 0xca04 /* PLCA TO Timer register */
+#define MDIO_OATC14_PLCA_BURST 0xca05 /* PLCA BURST mode register */
+
+/* Open Alliance TC14 PLCA IDVER register */
+#define MDIO_OATC14_PLCA_IDM 0xff00 /* PLCA MAP ID */
+#define MDIO_OATC14_PLCA_VER 0x00ff /* PLCA MAP version */
+
+/* Open Alliance TC14 PLCA CTRL0 register */
+#define MDIO_OATC14_PLCA_EN BIT(15) /* PLCA enable */
+#define MDIO_OATC14_PLCA_RST BIT(14) /* PLCA reset */
+
+/* Open Alliance TC14 PLCA CTRL1 register */
+#define MDIO_OATC14_PLCA_NCNT 0xff00 /* PLCA node count */
+#define MDIO_OATC14_PLCA_ID 0x00ff /* PLCA local node ID */
+
+/* Open Alliance TC14 PLCA STATUS register */
+#define MDIO_OATC14_PLCA_PST BIT(15) /* PLCA status indication */
+
+/* Open Alliance TC14 PLCA TOTMR register */
+#define MDIO_OATC14_PLCA_TOT 0x00ff
+
+/* Open Alliance TC14 PLCA BURST register */
+#define MDIO_OATC14_PLCA_MAXBC 0xff00
+#define MDIO_OATC14_PLCA_BTMR 0x00ff
+
+/* Version Identifiers */
+#define OATC14_IDM 0x0a00
+
+#endif /* __MDIO_OPEN_ALLIANCE__ */
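
Editor's sketch: since every OATC14 register sits in MDIO_MMD_VEND2, a PLCA-aware driver reaches them through the C45 MMD accessors. A hedged sketch of decoding the CTRL1 fields, error handling abbreviated:

	u32 id, cnt;
	int val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
			       MDIO_OATC14_PLCA_CTRL1);

	if (val >= 0) {
		id = FIELD_GET(MDIO_OATC14_PLCA_ID, val);
		cnt = FIELD_GET(MDIO_OATC14_PLCA_NCNT, val);
		phydev_info(phydev, "PLCA node %u of %u\n", id, cnt);
	}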
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 16e021b477f0..00d5bcdf0e6f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/micrel_phy.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -108,9 +109,10 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
{
+ bool addr_valid = addr >= 0 && addr < ARRAY_SIZE(bus->mdio_map);
struct mdio_device *mdiodev;
- if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
+ if (WARN_ONCE(!addr_valid, "addr %d out of range\n", addr))
return NULL;
mdiodev = bus->mdio_map[addr];
@@ -511,6 +513,126 @@ static int mdiobus_create_device(struct mii_bus *bus,
return ret;
}
+static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
+{
+ struct phy_device *phydev = ERR_PTR(-ENODEV);
+ int err;
+
+ phydev = get_phy_device(bus, addr, c45);
+ if (IS_ERR(phydev))
+ return phydev;
+
+ /* For DT, see if the auto-probed phy has a corresponding child
+ * in the bus node, and set the of_node pointer in this case.
+ */
+ of_mdiobus_link_mdiodev(bus, &phydev->mdio);
+
+ err = phy_device_register(phydev);
+ if (err) {
+ phy_device_free(phydev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return phydev;
+}
+
+/**
+ * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, false);
+}
+EXPORT_SYMBOL(mdiobus_scan_c22);
+
+/**
+ * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, true);
+}
+
+static int mdiobus_scan_bus_c22(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan_c22(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+static int mdiobus_scan_bus_c45(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ /* Don't scan C45 if we already have a C22 device */
+ if (bus->mdio_map[i])
+ continue;
+
+ phydev = mdiobus_scan_c45(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+/* There are some C22 PHYs which do bad things when there is a C45
+ * transaction on the bus, like accepting a read themselves, and
+ * stomping over the true device's reply, or performing a write to
+ * themselves which was intended for another device. Now that C22
+ * devices have been found, see if any of them are bad for C45, and if
+ * we should skip the C45 scan.
+ */
+static bool mdiobus_prevent_c45_scan(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+ u32 oui;
+
+ phydev = mdiobus_get_phy(bus, i);
+ if (!phydev)
+ continue;
+ oui = phydev->phy_id >> 10;
+
+ if (oui == MICREL_OUI)
+ return true;
+ }
+ return false;
+}
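
Editor's note on the >> 10 shift above — the 32-bit PHY ID assembled from registers 2 and 3 is laid out so that the shift isolates the vendor portion compared against MICREL_OUI:

	/* phy_id layout (IEEE 802.3 clause 22, registers 2/3):
	 *   [31:10] OUI-derived bits   [9:4] model   [3:0] revision
	 */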
+
/**
* __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
* @bus: target mii_bus
@@ -528,11 +650,19 @@ static int mdiobus_create_device(struct mii_bus *bus,
int __mdiobus_register(struct mii_bus *bus, struct module *owner)
{
struct mdio_device *mdiodev;
- int i, err;
struct gpio_desc *gpiod;
+ bool prevent_c45_scan;
+ int i, err;
+
+ if (!bus || !bus->name)
+ return -EINVAL;
- if (NULL == bus || NULL == bus->name ||
- NULL == bus->read || NULL == bus->write)
+ /* An access method always needs both read and write operations */
+ if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45)
+ return -EINVAL;
+
+ /* At least one method is mandatory */
+ if (!bus->read && !bus->read_c45)
return -EINVAL;
if (bus->parent && bus->parent->of_node)
@@ -587,16 +717,18 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
goto error_reset_gpiod;
}
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & BIT(i)) == 0) {
- struct phy_device *phydev;
+ if (bus->read) {
+ err = mdiobus_scan_bus_c22(bus);
+ if (err)
+ goto error;
+ }
- phydev = mdiobus_scan(bus, i);
- if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) {
- err = PTR_ERR(phydev);
- goto error;
- }
- }
+ prevent_c45_scan = mdiobus_prevent_c45_scan(bus);
+
+ if (!prevent_c45_scan && bus->read_c45) {
+ err = mdiobus_scan_bus_c45(bus);
+ if (err)
+ goto error;
}
mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
@@ -606,7 +738,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
return 0;
error:
- while (--i >= 0) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
mdiodev = bus->mdio_map[i];
if (!mdiodev)
continue;
@@ -677,57 +809,6 @@ void mdiobus_free(struct mii_bus *bus)
}
EXPORT_SYMBOL(mdiobus_free);
-/**
- * mdiobus_scan - scan a bus for MDIO devices.
- * @bus: mii_bus to scan
- * @addr: address on bus to scan
- *
- * This function scans the MDIO bus, looking for devices which can be
- * identified using a vendor/product ID in registers 2 and 3. Not all
- * MDIO devices have such registers, but PHY devices typically
- * do. Hence this function assumes anything found is a PHY, or can be
- * treated as a PHY. Other MDIO devices, such as switches, will
- * probably not be found during the scan.
- */
-struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
-{
- struct phy_device *phydev = ERR_PTR(-ENODEV);
- int err;
-
- switch (bus->probe_capabilities) {
- case MDIOBUS_NO_CAP:
- case MDIOBUS_C22:
- phydev = get_phy_device(bus, addr, false);
- break;
- case MDIOBUS_C45:
- phydev = get_phy_device(bus, addr, true);
- break;
- case MDIOBUS_C22_C45:
- phydev = get_phy_device(bus, addr, false);
- if (IS_ERR(phydev))
- phydev = get_phy_device(bus, addr, true);
- break;
- }
-
- if (IS_ERR(phydev))
- return phydev;
-
- /*
- * For DT, see if the auto-probed phy has a correspoding child
- * in the bus node, and set the of_node pointer in this case.
- */
- of_mdiobus_link_mdiodev(bus, &phydev->mdio);
-
- err = phy_device_register(phydev);
- if (err) {
- phy_device_free(phydev);
- return ERR_PTR(-ENODEV);
- }
-
- return phydev;
-}
-EXPORT_SYMBOL(mdiobus_scan);
-
static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
{
preempt_disable();
@@ -764,7 +845,10 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
- retval = bus->read(bus, addr, regnum);
+ if (bus->read)
+ retval = bus->read(bus, addr, regnum);
+ else
+ retval = -EOPNOTSUPP;
trace_mdio_access(bus, 1, addr, regnum, retval, retval);
mdiobus_stats_acct(&bus->stats[addr], true, retval);
@@ -790,7 +874,10 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
lockdep_assert_held_once(&bus->mdio_lock);
- err = bus->write(bus, addr, regnum, val);
+ if (bus->write)
+ err = bus->write(bus, addr, regnum, val);
+ else
+ err = -EOPNOTSUPP;
trace_mdio_access(bus, 0, addr, regnum, val, err);
mdiobus_stats_acct(&bus->stats[addr], false, err);
@@ -832,6 +919,99 @@ int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
EXPORT_SYMBOL_GPL(__mdiobus_modify_changed);
/**
+ * __mdiobus_c45_read - Unlocked version of the mdiobus_c45_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to read
+ *
+ * Read a MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
+{
+ int retval;
+
+ lockdep_assert_held_once(&bus->mdio_lock);
+
+ if (bus->read_c45)
+ retval = bus->read_c45(bus, addr, devad, regnum);
+ else
+ retval = -EOPNOTSUPP;
+
+ trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+ mdiobus_stats_acct(&bus->stats[addr], true, retval);
+
+ return retval;
+}
+EXPORT_SYMBOL(__mdiobus_c45_read);
+
+/**
+ * __mdiobus_c45_write - Unlocked version of the mdiobus_c45_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to write
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Write a MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val)
+{
+ int err;
+
+ lockdep_assert_held_once(&bus->mdio_lock);
+
+ if (bus->write_c45)
+ err = bus->write_c45(bus, addr, devad, regnum, val);
+ else
+ err = -EOPNOTSUPP;
+
+ trace_mdio_access(bus, 0, addr, regnum, val, err);
+ mdiobus_stats_acct(&bus->stats[addr], false, err);
+
+ return err;
+}
+EXPORT_SYMBOL(__mdiobus_c45_write);
+
+/**
+ * __mdiobus_c45_modify_changed - Unlocked version of the mdiobus_c45_modify_changed function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Read, modify, and if any change, write the register value back to the
+ * device. Any error returns a negative number.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+static int __mdiobus_c45_modify_changed(struct mii_bus *bus, int addr,
+ int devad, u32 regnum, u16 mask,
+ u16 set)
+{
+ int new, ret;
+
+ ret = __mdiobus_c45_read(bus, addr, devad, regnum);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ ret = __mdiobus_c45_write(bus, addr, devad, regnum, new);
+
+ return ret < 0 ? ret : 1;
+}
+
+/**
* mdiobus_read_nested - Nested version of the mdiobus_read function
* @bus: the mii_bus struct
* @addr: the phy address
@@ -879,6 +1059,56 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
EXPORT_SYMBOL(mdiobus_read);
/**
+ * mdiobus_c45_read - Convenience function for reading a given MII mgmt register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to read
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
+{
+ int retval;
+
+ mutex_lock(&bus->mdio_lock);
+ retval = __mdiobus_c45_read(bus, addr, devad, regnum);
+ mutex_unlock(&bus->mdio_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(mdiobus_c45_read);
+
+/**
+ * mdiobus_c45_read_nested - Nested version of the mdiobus_c45_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to read
+ *
+ * In case of nested MDIO bus access avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum)
+{
+ int retval;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ retval = __mdiobus_c45_read(bus, addr, devad, regnum);
+ mutex_unlock(&bus->mdio_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(mdiobus_c45_read_nested);
+
+/**
* mdiobus_write_nested - Nested version of the mdiobus_write function
* @bus: the mii_bus struct
* @addr: the phy address
@@ -928,6 +1158,59 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
EXPORT_SYMBOL(mdiobus_write);
/**
+ * mdiobus_c45_write - Convenience function for writing a given MII mgmt register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to write
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_c45_write(bus, addr, devad, regnum, val);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mdiobus_c45_write);
+
+/**
+ * mdiobus_c45_write_nested - Nested version of the mdiobus_c45_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to write
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * In case of nested MDIO bus access avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 val)
+{
+ int err;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ err = __mdiobus_c45_write(bus, addr, devad, regnum, val);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mdiobus_c45_write_nested);
+
+/**
* mdiobus_modify - Convenience function for modifying a given mdio device
* register
* @bus: the mii_bus struct
@@ -949,6 +1232,30 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
EXPORT_SYMBOL_GPL(mdiobus_modify);
/**
+ * mdiobus_c45_modify - Convenience function for modifying a given mdio device
+ * register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 mask, u16 set)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_c45_modify_changed(bus, addr, devad, regnum,
+ mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err < 0 ? err : 0;
+}
+EXPORT_SYMBOL_GPL(mdiobus_c45_modify);
+
+/**
* mdiobus_modify_changed - Convenience function for modifying a given mdio
* device register and returning if it changed
* @bus: the mii_bus struct
@@ -971,6 +1278,29 @@ int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
EXPORT_SYMBOL_GPL(mdiobus_modify_changed);
/**
+ * mdiobus_c45_modify_changed - Convenience function for modifying a given mdio
+ * device register and returning if it changed
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 mask, u16 set)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_c45_modify_changed(bus, addr, devad, regnum, mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mdiobus_c45_modify_changed);
+
+/**
* mdio_bus_match - determine if given MDIO driver supports the given
* MDIO device
* @dev: target MDIO device
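
Editor's sketch: taken together, these exports give MDIO consumers a C45 API mirroring the C22 one, with the same three locking tiers — __mdiobus_c45_* for callers already holding mdio_lock, plain mdiobus_c45_* which take it, and the _nested variants for muxed topologies. A hedged usage sketch:

	/* Read the PMA/PMD ID high word (MMD 1, register 2). */
	int id_hi = mdiobus_c45_read(bus, addr, MDIO_MMD_PMAPMD, MDIO_DEVID1);

	/* Set low-power mode without touching other CTRL1 bits; returns 0
	 * on success even when the bit was already set. */
	int err = mdiobus_c45_modify(bus, addr, MDIO_MMD_PMAPMD, MDIO_CTRL1,
				     MDIO_CTRL1_LPOWER, MDIO_CTRL1_LPOWER);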
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 26ce0c5defcd..2c84fccef4f6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -268,6 +268,9 @@ struct kszphy_type {
u16 interrupt_level_mask;
u16 cable_diag_reg;
unsigned long pair_mask;
+ u16 disable_dll_tx_bit;
+ u16 disable_dll_rx_bit;
+ u16 disable_dll_mask;
bool has_broadcast_disable;
bool has_nand_tree_disable;
bool has_rmii_ref_clk_sel;
@@ -310,6 +313,11 @@ struct kszphy_ptp_priv {
enum hwtstamp_rx_filters rx_filter;
int layer;
int version;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ /* Lock for ptp_clock */
+ struct mutex ptp_lock;
};
struct kszphy_priv {
@@ -364,6 +372,21 @@ static const struct kszphy_type ksz9021_type = {
.interrupt_level_mask = BIT(14),
};
+static const struct kszphy_type ksz9131_type = {
+ .interrupt_level_mask = BIT(14),
+ .disable_dll_tx_bit = BIT(12),
+ .disable_dll_rx_bit = BIT(12),
+ .disable_dll_mask = BIT_MASK(12),
+};
+
+static const struct kszphy_type lan8841_type = {
+ .disable_dll_tx_bit = BIT(14),
+ .disable_dll_rx_bit = BIT(14),
+ .disable_dll_mask = BIT_MASK(14),
+ .cable_diag_reg = LAN8814_CABLE_DIAG,
+ .pair_mask = LAN8814_WIRE_PAIR_MASK,
+};
+
static int kszphy_extended_write(struct phy_device *phydev,
u32 regnum, u16 val)
{
@@ -1172,19 +1195,18 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
#define KSZ9131RN_MMD_COMMON_CTRL_REG 2
#define KSZ9131RN_RXC_DLL_CTRL 76
#define KSZ9131RN_TXC_DLL_CTRL 77
-#define KSZ9131RN_DLL_CTRL_BYPASS BIT_MASK(12)
#define KSZ9131RN_DLL_ENABLE_DELAY 0
-#define KSZ9131RN_DLL_DISABLE_DELAY BIT(12)
static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
u16 rxcdll_val, txcdll_val;
int ret;
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
- rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
- txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ rxcdll_val = type->disable_dll_rx_bit;
+ txcdll_val = type->disable_dll_tx_bit;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
@@ -1192,10 +1214,10 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
- txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ txcdll_val = type->disable_dll_tx_bit;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
- rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ rxcdll_val = type->disable_dll_rx_bit;
txcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
break;
default:
@@ -1203,13 +1225,13 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
}
ret = phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
- KSZ9131RN_RXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ KSZ9131RN_RXC_DLL_CTRL, type->disable_dll_mask,
rxcdll_val);
if (ret < 0)
return ret;
return phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
- KSZ9131RN_TXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ KSZ9131RN_TXC_DLL_CTRL, type->disable_dll_mask,
txcdll_val);
}
@@ -1370,6 +1392,26 @@ static int ksz9131_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz9477_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The "EEE control and capability 1" (Register 3.20) seems to be
+ * influenced by the "EEE advertisement 1" (Register 7.60). Changes
+ * on the 7.60 will affect 3.20. So, we need to construct our own list
+ * of caps.
+ * KSZ8563R should have 100BaseTX/Full only.
+ */
+ linkmode_and(phydev->supported_eee, phydev->supported,
+ PHY_EEE_CAP1_FEATURES);
+
+ return 0;
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4)
@@ -2088,7 +2130,8 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
const struct kszphy_type *type = phydev->drv->driver_data;
unsigned long pair_mask = type->pair_mask;
int retries = 20;
- int pair, ret;
+ int ret = 0;
+ int pair;
*finished = false;
@@ -2380,8 +2423,8 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
}
-static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
- struct sk_buff *skb)
+static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
+ struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shhwtstamps;
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
@@ -2430,7 +2473,7 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
/* If we failed to match then add it to the queue for when the timestamp
* will come
*/
- if (!lan8814_match_rx_ts(ptp_priv, skb))
+ if (!lan8814_match_rx_skb(ptp_priv, skb))
skb_queue_tail(&ptp_priv->rx_queue, skb);
return true;
@@ -2680,18 +2723,14 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
}
-static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
+ u32 seconds, u32 nsec, u16 seq_id)
{
- struct phy_device *phydev = ptp_priv->phydev;
struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb, *skb_tmp;
unsigned long flags;
- u32 seconds, nsec;
bool ret = false;
u16 skb_sig;
- u16 seq_id;
-
- lan8814_ptp_tx_ts_get(phydev, &seconds, &nsec, &seq_id);
spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
@@ -2713,6 +2752,16 @@ static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
}
}
+static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ u32 seconds, nsec;
+ u16 seq_id;
+
+ lan8814_ptp_tx_ts_get(phydev, &seconds, &nsec, &seq_id);
+ lan8814_match_tx_skb(ptp_priv, seconds, nsec, seq_id);
+}
+
static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
{
struct phy_device *phydev = ptp_priv->phydev;
@@ -2761,11 +2810,27 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
return ret;
}
+static void lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
+ struct lan8814_ptp_rx_ts *rx_ts)
+{
+ unsigned long flags;
+
+ /* If we failed to match the skb, add it to the queue for when
+ * the frame arrives
+ */
+ if (!lan8814_match_skb(ptp_priv, rx_ts)) {
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &ptp_priv->rx_ts_list);
+ spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+}
+
static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
{
struct phy_device *phydev = ptp_priv->phydev;
struct lan8814_ptp_rx_ts *rx_ts;
- unsigned long flags;
u32 reg;
do {
@@ -2775,17 +2840,7 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
lan8814_ptp_rx_ts_get(phydev, &rx_ts->seconds, &rx_ts->nsec,
&rx_ts->seq_id);
-
- /* If we failed to match the skb add it to the queue for when
- * the frame will come
- */
- if (!lan8814_match_skb(ptp_priv, rx_ts)) {
- spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
- list_add(&rx_ts->list, &ptp_priv->rx_ts_list);
- spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
- } else {
- kfree(rx_ts);
- }
+ lan8814_match_rx_ts(ptp_priv, rx_ts);
/* If other timestamps are available in the FIFO,
* process them.
@@ -2794,13 +2849,11 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
} while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
}
-static void lan8814_handle_ptp_interrupt(struct phy_device *phydev)
+static void lan8814_handle_ptp_interrupt(struct phy_device *phydev, u16 status)
{
struct kszphy_priv *priv = phydev->priv;
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
- u16 status;
- status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_)
lan8814_get_tx_ts(ptp_priv);
@@ -2899,8 +2952,8 @@ static int lan8804_config_intr(struct phy_device *phydev)
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
- int irq_status, tsu_irq_status;
int ret = IRQ_NONE;
+ int irq_status;
irq_status = phy_read(phydev, LAN8814_INTS);
if (irq_status < 0) {
@@ -2913,20 +2966,13 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
ret = IRQ_HANDLED;
}
- while (1) {
- tsu_irq_status = lanphy_read_page_reg(phydev, 4,
- LAN8814_INTR_STS_REG);
-
- if (tsu_irq_status > 0 &&
- (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
- LAN8814_INTR_STS_REG_1588_TSU1_ |
- LAN8814_INTR_STS_REG_1588_TSU2_ |
- LAN8814_INTR_STS_REG_1588_TSU3_))) {
- lan8814_handle_ptp_interrupt(phydev);
- ret = IRQ_HANDLED;
- } else {
+ while (true) {
+ irq_status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ if (!irq_status)
break;
- }
+
+ lan8814_handle_ptp_interrupt(phydev, irq_status);
+ ret = IRQ_HANDLED;
}
return ret;
@@ -3016,10 +3062,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
{
struct lan8814_shared_priv *shared = phydev->shared->priv;
- if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
- !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
- return 0;
-
/* Initialise shared lock for clock */
mutex_init(&shared->shared_lock);
@@ -3039,12 +3081,16 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
&phydev->mdio.dev);
- if (IS_ERR_OR_NULL(shared->ptp_clock)) {
+ if (IS_ERR(shared->ptp_clock)) {
phydev_err(phydev, "ptp_clock_register failed %lu\n",
PTR_ERR(shared->ptp_clock));
return -EINVAL;
}
+ /* Check if PHC support is missing at the configuration level */
+ if (!shared->ptp_clock)
+ return 0;
+
phydev_dbg(phydev, "successfully registered ptp clock\n");
shared->phydev = phydev;
@@ -3160,6 +3206,704 @@ static int lan8814_probe(struct phy_device *phydev)
return 0;
}
+#define LAN8841_MMD_TIMER_REG 0
+#define LAN8841_MMD0_REGISTER_17 17
+#define LAN8841_MMD0_REGISTER_17_DROP_OPT(x) ((x) & 0x3)
+#define LAN8841_MMD0_REGISTER_17_XMIT_TOG_TX_DIS BIT(3)
+#define LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG 2
+#define LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG_MAGJACK BIT(14)
+#define LAN8841_MMD_ANALOG_REG 28
+#define LAN8841_ANALOG_CONTROL_1 1
+#define LAN8841_ANALOG_CONTROL_1_PLL_TRIM(x) (((x) & 0x3) << 5)
+#define LAN8841_ANALOG_CONTROL_10 13
+#define LAN8841_ANALOG_CONTROL_10_PLL_DIV(x) ((x) & 0x3)
+#define LAN8841_ANALOG_CONTROL_11 14
+#define LAN8841_ANALOG_CONTROL_11_LDO_REF(x) (((x) & 0x7) << 12)
+#define LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT 69
+#define LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT_VAL 0xbffc
+#define LAN8841_BTRX_POWER_DOWN 70
+#define LAN8841_BTRX_POWER_DOWN_QBIAS_CH_A BIT(0)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_A BIT(1)
+#define LAN8841_BTRX_POWER_DOWN_QBIAS_CH_B BIT(2)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_B BIT(3)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_C BIT(5)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_D BIT(7)
+#define LAN8841_ADC_CHANNEL_MASK 198
+#define LAN8841_PTP_RX_PARSE_L2_ADDR_EN 370
+#define LAN8841_PTP_RX_PARSE_IP_ADDR_EN 371
+#define LAN8841_PTP_TX_PARSE_L2_ADDR_EN 434
+#define LAN8841_PTP_TX_PARSE_IP_ADDR_EN 435
+#define LAN8841_PTP_CMD_CTL 256
+#define LAN8841_PTP_CMD_CTL_PTP_ENABLE BIT(2)
+#define LAN8841_PTP_CMD_CTL_PTP_DISABLE BIT(1)
+#define LAN8841_PTP_CMD_CTL_PTP_RESET BIT(0)
+#define LAN8841_PTP_RX_PARSE_CONFIG 368
+#define LAN8841_PTP_TX_PARSE_CONFIG 432
+
+static int lan8841_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9131_config_init(phydev);
+ if (ret)
+ return ret;
+
+ /* Initialize the HW by resetting everything */
+ phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_RESET,
+ LAN8841_PTP_CMD_CTL_PTP_RESET);
+
+ phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_ENABLE,
+ LAN8841_PTP_CMD_CTL_PTP_ENABLE);
+
+ /* Don't process any frames */
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_RX_PARSE_CONFIG, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_TX_PARSE_CONFIG, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_TX_PARSE_L2_ADDR_EN, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_RX_PARSE_L2_ADDR_EN, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_TX_PARSE_IP_ADDR_EN, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_RX_PARSE_IP_ADDR_EN, 0);
+
+ /* 100BT Clause 40 improvement errata */
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_1,
+ LAN8841_ANALOG_CONTROL_1_PLL_TRIM(0x2));
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_10,
+ LAN8841_ANALOG_CONTROL_10_PLL_DIV(0x1));
+
+ /* 10M/100M Ethernet Signal Tuning Errata for Shorted-Center Tap
+ * Magnetics
+ */
+ ret = phy_read_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG);
+ if (ret & LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG_MAGJACK) {
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT,
+ LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT_VAL);
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_BTRX_POWER_DOWN,
+ LAN8841_BTRX_POWER_DOWN_QBIAS_CH_A |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_A |
+ LAN8841_BTRX_POWER_DOWN_QBIAS_CH_B |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_B |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_C |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_D);
+ }
+
+ /* LDO Adjustment errata */
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_11,
+ LAN8841_ANALOG_CONTROL_11_LDO_REF(1));
+
+ /* 100BT RGMII latency tuning errata */
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
+ LAN8841_ADC_CHANNEL_MASK, 0x0);
+ phy_write_mmd(phydev, LAN8841_MMD_TIMER_REG,
+ LAN8841_MMD0_REGISTER_17,
+ LAN8841_MMD0_REGISTER_17_DROP_OPT(2) |
+ LAN8841_MMD0_REGISTER_17_XMIT_TOG_TX_DIS);
+
+ return 0;
+}
+
+#define LAN8841_OUTPUT_CTRL 25
+#define LAN8841_OUTPUT_CTRL_INT_BUFFER BIT(14)
+#define LAN8841_INT_PTP BIT(9)
+
+static int lan8841_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ phy_modify(phydev, LAN8841_OUTPUT_CTRL,
+ LAN8841_OUTPUT_CTRL_INT_BUFFER, 0);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ /* Enable / disable interrupts. It is OK to enable the PTP
+ * interrupt even if PTP is not enabled, because the underlying
+ * blocks will not enable PTP, so we will never get the PTP
+ * interrupt.
+ */
+ err = phy_write(phydev, LAN8814_INTC,
+ LAN8814_INT_LINK | LAN8841_INT_PTP);
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ err = 0;
+ }
+
+ return err;
+}
+
+#define LAN8841_PTP_TX_EGRESS_SEC_LO 453
+#define LAN8841_PTP_TX_EGRESS_SEC_HI 452
+#define LAN8841_PTP_TX_EGRESS_NS_LO 451
+#define LAN8841_PTP_TX_EGRESS_NS_HI 450
+#define LAN8841_PTP_TX_EGRESS_NSEC_HI_VALID BIT(15)
+#define LAN8841_PTP_TX_MSG_HEADER2 455
+
+static bool lan8841_ptp_get_tx_ts(struct kszphy_ptp_priv *ptp_priv,
+ u32 *sec, u32 *nsec, u16 *seq)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+
+ *nsec = phy_read_mmd(phydev, 2, LAN8841_PTP_TX_EGRESS_NS_HI);
+ if (!(*nsec & LAN8841_PTP_TX_EGRESS_NSEC_HI_VALID))
+ return false;
+
+ *nsec = ((*nsec & 0x3fff) << 16);
+ *nsec = *nsec | phy_read_mmd(phydev, 2, LAN8841_PTP_TX_EGRESS_NS_LO);
+
+ *sec = phy_read_mmd(phydev, 2, LAN8841_PTP_TX_EGRESS_SEC_HI);
+ *sec = *sec << 16;
+ *sec = *sec | phy_read_mmd(phydev, 2, LAN8841_PTP_TX_EGRESS_SEC_LO);
+
+ *seq = phy_read_mmd(phydev, 2, LAN8841_PTP_TX_MSG_HEADER2);
+
+ return true;
+}
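+
+Editor's note: the valid bit doubles as a FIFO-empty indicator here — bit 15 of the NS_HI word gates the whole read, the low 14 bits carry the upper half of a 30-bit nanoseconds value, and seconds are a plain 32-bit split; the RX ingress registers below follow the same layout. Roughly:
+
+	/* NS_HI: [15] valid, [13:0] nsec[29:16]; NS_LO: nsec[15:0]
+	 * => nsec = ((hi & 0x3fff) << 16) | lo, max 2^30 ns (~1.07 s)
+	 */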
+
+static void lan8841_ptp_process_tx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ u32 sec, nsec;
+ u16 seq;
+
+ while (lan8841_ptp_get_tx_ts(ptp_priv, &sec, &nsec, &seq))
+ lan8814_match_tx_skb(ptp_priv, sec, nsec, seq);
+}
+
+#define LAN8841_PTP_RX_INGRESS_SEC_LO 389
+#define LAN8841_PTP_RX_INGRESS_SEC_HI 388
+#define LAN8841_PTP_RX_INGRESS_NS_LO 387
+#define LAN8841_PTP_RX_INGRESS_NS_HI 386
+#define LAN8841_PTP_RX_INGRESS_NSEC_HI_VALID BIT(15)
+#define LAN8841_PTP_RX_MSG_HEADER2 391
+
+static struct lan8814_ptp_rx_ts *lan8841_ptp_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_ptp_rx_ts *rx_ts;
+ u32 sec, nsec;
+ u16 seq;
+
+ nsec = phy_read_mmd(phydev, 2, LAN8841_PTP_RX_INGRESS_NS_HI);
+ if (!(nsec & LAN8841_PTP_RX_INGRESS_NSEC_HI_VALID))
+ return NULL;
+
+ nsec = ((nsec & 0x3fff) << 16);
+ nsec = nsec | phy_read_mmd(phydev, 2, LAN8841_PTP_RX_INGRESS_NS_LO);
+
+ sec = phy_read_mmd(phydev, 2, LAN8841_PTP_RX_INGRESS_SEC_HI);
+ sec = sec << 16;
+ sec = sec | phy_read_mmd(phydev, 2, LAN8841_PTP_RX_INGRESS_SEC_LO);
+
+ seq = phy_read_mmd(phydev, 2, LAN8841_PTP_RX_MSG_HEADER2);
+
+ rx_ts = kzalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return NULL;
+
+ rx_ts->seconds = sec;
+ rx_ts->nsec = nsec;
+ rx_ts->seq_id = seq;
+
+ return rx_ts;
+}
+
+static void lan8841_ptp_process_rx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct lan8814_ptp_rx_ts *rx_ts;
+
+ while ((rx_ts = lan8841_ptp_get_rx_ts(ptp_priv)) != NULL)
+ lan8814_match_rx_ts(ptp_priv, rx_ts);
+}
+
+#define LAN8841_PTP_INT_STS 259
+#define LAN8841_PTP_INT_STS_PTP_TX_TS_OVRFL_INT BIT(13)
+#define LAN8841_PTP_INT_STS_PTP_TX_TS_INT BIT(12)
+#define LAN8841_PTP_INT_STS_PTP_RX_TS_OVRFL_INT BIT(9)
+#define LAN8841_PTP_INT_STS_PTP_RX_TS_INT BIT(8)
+
+static void lan8841_ptp_flush_fifo(struct kszphy_ptp_priv *ptp_priv, bool egress)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ int i;
+
+ for (i = 0; i < FIFO_SIZE; ++i)
+ phy_read_mmd(phydev, 2,
+ egress ? LAN8841_PTP_TX_MSG_HEADER2 :
+ LAN8841_PTP_RX_MSG_HEADER2);
+
+ phy_read_mmd(phydev, 2, LAN8841_PTP_INT_STS);
+}
+
+static void lan8841_handle_ptp_interrupt(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ int status;
+
+ do {
+ status = phy_read_mmd(phydev, 2, LAN8841_PTP_INT_STS);
+ if (status & LAN8841_PTP_INT_STS_PTP_TX_TS_INT)
+ lan8841_ptp_process_tx_ts(ptp_priv);
+
+ if (status & LAN8841_PTP_INT_STS_PTP_RX_TS_INT)
+ lan8841_ptp_process_rx_ts(ptp_priv);
+
+ if (status & LAN8841_PTP_INT_STS_PTP_TX_TS_OVRFL_INT) {
+ lan8841_ptp_flush_fifo(ptp_priv, true);
+ skb_queue_purge(&ptp_priv->tx_queue);
+ }
+
+ if (status & LAN8841_PTP_INT_STS_PTP_RX_TS_OVRFL_INT) {
+ lan8841_ptp_flush_fifo(ptp_priv, false);
+ skb_queue_purge(&ptp_priv->rx_queue);
+ }
+
+ } while (status > 0);
+}
+
+#define LAN8841_INTS_PTP BIT(9)
+
+static irqreturn_t lan8841_handle_interrupt(struct phy_device *phydev)
+{
+ irqreturn_t ret = IRQ_NONE;
+ int irq_status;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status & LAN8814_INT_LINK) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
+ if (irq_status & LAN8841_INTS_PTP) {
+ lan8841_handle_ptp_interrupt(phydev);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int lan8841_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
+{
+ struct kszphy_ptp_priv *ptp_priv;
+
+ ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ info->phc_index = ptp_priv->ptp_clock ?
+ ptp_clock_index(ptp_priv->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+#define LAN8841_PTP_INT_EN 260
+#define LAN8841_PTP_INT_EN_PTP_TX_TS_OVRFL_EN BIT(13)
+#define LAN8841_PTP_INT_EN_PTP_TX_TS_EN BIT(12)
+#define LAN8841_PTP_INT_EN_PTP_RX_TS_OVRFL_EN BIT(9)
+#define LAN8841_PTP_INT_EN_PTP_RX_TS_EN BIT(8)
+
+static void lan8841_ptp_enable_int(struct kszphy_ptp_priv *ptp_priv,
+ bool enable)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+
+ if (enable)
+ /* Enable interrupts */
+ phy_modify_mmd(phydev, 2, LAN8841_PTP_INT_EN,
+ LAN8841_PTP_INT_EN_PTP_TX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_TX_TS_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_EN,
+ LAN8841_PTP_INT_EN_PTP_TX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_TX_TS_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_EN);
+ else
+ /* Disable interrupts */
+ phy_modify_mmd(phydev, 2, LAN8841_PTP_INT_EN,
+ LAN8841_PTP_INT_EN_PTP_TX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_TX_TS_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_EN, 0);
+}
+
+#define LAN8841_PTP_RX_TIMESTAMP_EN 379
+#define LAN8841_PTP_TX_TIMESTAMP_EN 443
+#define LAN8841_PTP_TX_MOD 445
+
+static int lan8841_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_ptp_rx_ts *rx_ts, *tmp;
+ struct hwtstamp_config config;
+ int txcfg = 0, rxcfg = 0;
+ int pkt_ts_enable;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ptp_priv->hwts_tx_type = config.tx_type;
+ ptp_priv->rx_filter = config.rx_filter;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ptp_priv->layer = 0;
+ ptp_priv->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Set up frame parsing and enable timestamping for PTP frames */
+ if (ptp_priv->layer & PTP_CLASS_L2) {
+ rxcfg |= PTP_RX_PARSE_CONFIG_LAYER2_EN_;
+ txcfg |= PTP_TX_PARSE_CONFIG_LAYER2_EN_;
+ } else if (ptp_priv->layer & PTP_CLASS_L4) {
+ rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
+ txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
+ }
+
+ phy_write_mmd(phydev, 2, LAN8841_PTP_RX_PARSE_CONFIG, rxcfg);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_TX_PARSE_CONFIG, txcfg);
+
+ pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
+ PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
+ phy_write_mmd(phydev, 2, LAN8841_PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+
+ /* Enable / disable insertion of the TX timestamp in SYNC frames */
+ phy_modify_mmd(phydev, 2, LAN8841_PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
+ ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC ?
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ : 0);
+
+ /* Now enable/disable the timestamp interrupts */
+ lan8841_ptp_enable_int(ptp_priv,
+ config.rx_filter != HWTSTAMP_FILTER_NONE);
+
+ /* In case of multiple starts and stops, these need to be cleared */
+ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+
+ skb_queue_purge(&ptp_priv->rx_queue);
+ skb_queue_purge(&ptp_priv->tx_queue);
+
+ lan8841_ptp_flush_fifo(ptp_priv, false);
+ lan8841_ptp_flush_fifo(ptp_priv, true);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+}
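For context, a hedged user-space sketch of how this handler is reached: the SIOCSHWTSTAMP ioctl carries a struct hwtstamp_config, which the networking core forwards to the PHY's mii_ts->hwtstamp hook when the PHY does the timestamping. The interface name "eth0" is an assumption; sock is any open socket (e.g. AF_INET/SOCK_DGRAM):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_phy_timestamping(int sock)
{
        struct hwtstamp_config cfg = {
                .tx_type = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
        };
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        /* lands in lan8841_hwtstamp() for this PHY */
        return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}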
+
+#define LAN8841_PTP_LTC_SET_SEC_HI 262
+#define LAN8841_PTP_LTC_SET_SEC_MID 263
+#define LAN8841_PTP_LTC_SET_SEC_LO 264
+#define LAN8841_PTP_LTC_SET_NS_HI 265
+#define LAN8841_PTP_LTC_SET_NS_LO 266
+#define LAN8841_PTP_CMD_CTL_PTP_LTC_LOAD BIT(4)
+
+static int lan8841_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(ptp, struct kszphy_ptp_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = ptp_priv->phydev;
+
+ /* Set the value to be stored */
+ mutex_lock(&ptp_priv->ptp_lock);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_SEC_LO, lower_16_bits(ts->tv_sec));
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_SEC_MID, upper_16_bits(ts->tv_sec));
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_SEC_HI, upper_32_bits(ts->tv_sec) & 0xffff);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_NS_LO, lower_16_bits(ts->tv_nsec));
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_NS_HI, upper_16_bits(ts->tv_nsec) & 0x3fff);
+
+ /* Set the command to load the LTC */
+ phy_write_mmd(phydev, 2, LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_LTC_LOAD);
+ mutex_unlock(&ptp_priv->ptp_lock);
+
+ return 0;
+}
+
+#define LAN8841_PTP_LTC_RD_SEC_HI 358
+#define LAN8841_PTP_LTC_RD_SEC_MID 359
+#define LAN8841_PTP_LTC_RD_SEC_LO 360
+#define LAN8841_PTP_LTC_RD_NS_HI 361
+#define LAN8841_PTP_LTC_RD_NS_LO 362
+#define LAN8841_PTP_CMD_CTL_PTP_LTC_READ BIT(3)
+
+static int lan8841_ptp_gettime64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(ptp, struct kszphy_ptp_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = ptp_priv->phydev;
+ time64_t s;
+ s64 ns;
+
+ mutex_lock(&ptp_priv->ptp_lock);
+ /* Issue the command to read the LTC */
+ phy_write_mmd(phydev, 2, LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_LTC_READ);
+
+ /* Read the LTC */
+ s = phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_SEC_HI);
+ s <<= 16;
+ s |= phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_SEC_MID);
+ s <<= 16;
+ s |= phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_SEC_LO);
+
+ ns = phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_NS_HI) & 0x3fff;
+ ns <<= 16;
+ ns |= phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_NS_LO);
+ mutex_unlock(&ptp_priv->ptp_lock);
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
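A hedged user-space sketch of reading this clock once it is registered (see lan8841_probe() below): POSIX dynamic clocks map a /dev/ptpN file descriptor to a clockid_t; the device index 0 is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* same fd-to-clockid mapping as the kernel's testptp tool (CLOCKFD = 3) */
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
        struct timespec ts;
        int fd = open("/dev/ptp0", O_RDONLY);

        if (fd < 0)
                return 1;

        /* ends up in lan8841_ptp_gettime64() via the PTP core */
        if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
                printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);

        close(fd);
        return 0;
}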
+
+#define LAN8841_PTP_LTC_STEP_ADJ_LO 276
+#define LAN8841_PTP_LTC_STEP_ADJ_HI 275
+#define LAN8841_PTP_LTC_STEP_ADJ_DIR BIT(15)
+#define LAN8841_PTP_CMD_CTL_PTP_LTC_STEP_SECONDS BIT(5)
+#define LAN8841_PTP_CMD_CTL_PTP_LTC_STEP_NANOSECONDS BIT(6)
+
+static int lan8841_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(ptp, struct kszphy_ptp_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct timespec64 ts;
+ bool add = true;
+ u32 nsec;
+ s32 sec;
+
+ /* The HW allows stepping the time by up to 15 sec, but limit the
+ * adjustment here to 10 sec. The reason: for an adjustment of 14 sec
+ * and 999999999 nsec, the 8 ns added below to compensate for the
+ * LTC's own increment would push the value past 15 sec. Limiting the
+ * range avoids such corner cases.
+ */
+ if (delta > 10000000000LL || delta < -10000000000LL) {
+ /* The time adjustment is too big, so fall back to setting the time */
+ u64 now;
+
+ ptp->gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ptp->settime64(ptp, &ts);
+ return 0;
+ }
+
+ sec = div_u64_rem(delta < 0 ? -delta : delta, NSEC_PER_SEC, &nsec);
+ if (delta < 0 && nsec != 0) {
+ /* The nsec part can only be added, never subtracted, so
+ * subtract one more from the seconds part and add the
+ * nanosecond complement instead: -(sec + 1) + (1e9 - nsec)
+ * equals the requested negative adjustment.
+ */
+ sec++;
+ nsec = NSEC_PER_SEC - nsec;
+ }
+
+ /* Calculate the direction of the seconds adjustment */
+ if (delta < 0)
+ add = false;
+
+ if (nsec > 0)
+ /* add 8 ns to cover the likely normal increment */
+ nsec += 8;
+
+ if (nsec >= NSEC_PER_SEC) {
+ /* Carry into seconds. As the nsec part is always added, a
+ * rollover shrinks the seconds step when it is a subtraction.
+ */
+ if (add)
+ sec++;
+ else
+ sec--;
+ nsec -= NSEC_PER_SEC;
+ }
+
+ mutex_lock(&ptp_priv->ptp_lock);
+ if (sec) {
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_STEP_ADJ_LO, sec);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_STEP_ADJ_HI,
+ add ? LAN8841_PTP_LTC_STEP_ADJ_DIR : 0);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_LTC_STEP_SECONDS);
+ }
+
+ if (nsec) {
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_STEP_ADJ_LO,
+ nsec & 0xffff);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_STEP_ADJ_HI,
+ (nsec >> 16) & 0x3fff);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_LTC_STEP_NANOSECONDS);
+ }
+ mutex_unlock(&ptp_priv->ptp_lock);
+
+ return 0;
+}
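A worked example of the borrow above: for delta = -1250000000 ns, div_u64_rem() yields sec = 1, nsec = 250000000; the borrow rewrites this as 2 seconds subtracted and 750000000 ns added, and with the 8 ns compensation the programmed step is -2 s + 750000008 ns = -1.249999992 s. A standalone sketch of the same split (compensation and carry left out), with illustrative names:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static void split_step(int64_t delta, uint32_t *sec, uint32_t *nsec, int *add)
{
        uint64_t abs_delta = delta < 0 ? -delta : delta;

        *add = delta >= 0;
        *sec = abs_delta / NSEC_PER_SEC;
        *nsec = abs_delta % NSEC_PER_SEC;

        if (delta < 0 && *nsec) {
                /* borrow: -(sec + 1) + (NSEC_PER_SEC - nsec) */
                (*sec)++;
                *nsec = NSEC_PER_SEC - *nsec;
        }
}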
+
+#define LAN8841_PTP_LTC_RATE_ADJ_HI 269
+#define LAN8841_PTP_LTC_RATE_ADJ_HI_DIR BIT(15)
+#define LAN8841_PTP_LTC_RATE_ADJ_LO 270
+
+static int lan8841_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(ptp, struct kszphy_ptp_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = ptp_priv->phydev;
+ bool faster = true;
+ u32 rate;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ faster = false;
+ }
+
+ rate = LAN8814_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (LAN8814_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+
+ mutex_lock(&ptp_priv->ptp_lock);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_RATE_ADJ_HI,
+ faster ? LAN8841_PTP_LTC_RATE_ADJ_HI_DIR | (upper_16_bits(rate) & 0x3fff)
+ : upper_16_bits(rate) & 0x3fff);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_RATE_ADJ_LO, lower_16_bits(rate));
+ mutex_unlock(&ptp_priv->ptp_lock);
+
+ return 0;
+}
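For reference, the scaled_ppm argument of adjfine encodes parts-per-million with 16 fractional bits, which is why the code above scales the upper and lower 16-bit halves separately: it keeps the intermediate products within 32 bits. A sketch with the per-ppm register step passed in as a parameter (in the driver it is the LAN8814_1PPM_FORMAT constant defined elsewhere in this file):

#include <stdint.h>

static uint32_t scaled_ppm_to_rate(long scaled_ppm, uint32_t one_ppm_step)
{
        uint32_t rate;

        if (scaled_ppm < 0)
                scaled_ppm = -scaled_ppm;       /* direction is handled separately */

        rate = one_ppm_step * (uint32_t)(scaled_ppm >> 16);             /* whole ppm */
        rate += (one_ppm_step * (uint32_t)(scaled_ppm & 0xffff)) >> 16; /* 1/65536 ppm */

        return rate;
}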
+
+static struct ptp_clock_info lan8841_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "lan8841 ptp",
+ .max_adj = 31249999,
+ .gettime64 = lan8841_ptp_gettime64,
+ .settime64 = lan8841_ptp_settime64,
+ .adjtime = lan8841_ptp_adjtime,
+ .adjfine = lan8841_ptp_adjfine,
+};
+
+#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER 3
+#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER_STRAP_RGMII_EN BIT(0)
+
+static int lan8841_probe(struct phy_device *phydev)
+{
+ struct kszphy_ptp_priv *ptp_priv;
+ struct kszphy_priv *priv;
+ int err;
+
+ err = kszphy_probe(phydev);
+ if (err)
+ return err;
+
+ if (phy_read_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER) &
+ LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER_STRAP_RGMII_EN)
+ phydev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
+
+ /* Register the clock */
+ if (!IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
+ return 0;
+
+ priv = phydev->priv;
+ ptp_priv = &priv->ptp_priv;
+
+ ptp_priv->ptp_clock_info = lan8841_ptp_clock_info;
+ ptp_priv->ptp_clock = ptp_clock_register(&ptp_priv->ptp_clock_info,
+ &phydev->mdio.dev);
+ if (IS_ERR(ptp_priv->ptp_clock)) {
+ phydev_err(phydev, "ptp_clock_register failed: %ld\n",
+ PTR_ERR(ptp_priv->ptp_clock));
+ return -EINVAL;
+ }
+
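+ /* ptp_clock_register() returns NULL when PTP clock support is not
+ * reachable in this build; the PHY then works without timestamping
+ */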
+ if (!ptp_priv->ptp_clock)
+ return 0;
+
+ /* Initialize the SW */
+ skb_queue_head_init(&ptp_priv->tx_queue);
+ skb_queue_head_init(&ptp_priv->rx_queue);
+ INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
+ spin_lock_init(&ptp_priv->rx_ts_lock);
+ ptp_priv->phydev = phydev;
+ mutex_init(&ptp_priv->ptp_lock);
+
+ ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp;
+ ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
+ ptp_priv->mii_ts.hwtstamp = lan8841_hwtstamp;
+ ptp_priv->mii_ts.ts_info = lan8841_ts_info;
+
+ phydev->mii_ts = &ptp_priv->mii_ts;
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -3370,12 +4114,30 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
+ .phy_id = PHY_ID_LAN8841,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip LAN8841 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
+ .driver_data = &lan8841_type,
+ .config_init = lan8841_config_init,
+ .probe = lan8841_probe,
+ .soft_reset = genphy_soft_reset,
+ .config_intr = lan8841_config_intr,
+ .handle_interrupt = lan8841_handle_interrupt,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
- .driver_data = &ksz9021_type,
+ .driver_data = &ksz9131_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
.config_intr = kszphy_config_intr,
@@ -3430,6 +4192,7 @@ static struct phy_driver ksphy_driver[] = {
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .get_features = ksz9477_get_features,
} };
module_phy_driver(ksphy_driver);
@@ -3454,6 +4217,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 8569a545e0a3..a838b61cd844 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -245,15 +245,42 @@ static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
PHYACC_ATTR_BANK_MISC, LAN87XX_CTRL_1, rc);
}
+static int lan87xx_phy_init_cmd(struct phy_device *phydev,
+ const struct access_ereg_val *cmd_seq, int cnt)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < cnt; i++) {
+ if (cmd_seq[i].mode == PHYACC_ATTR_MODE_POLL &&
+ cmd_seq[i].bank == PHYACC_ATTR_BANK_SMI) {
+ ret = access_smi_poll_timeout(phydev,
+ cmd_seq[i].offset,
+ cmd_seq[i].val,
+ cmd_seq[i].mask);
+ } else {
+ ret = access_ereg(phydev, cmd_seq[i].mode,
+ cmd_seq[i].bank, cmd_seq[i].offset,
+ cmd_seq[i].val);
+ }
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
static int lan87xx_phy_init(struct phy_device *phydev)
{
- static const struct access_ereg_val init[] = {
+ static const struct access_ereg_val hw_init[] = {
/* TXPD/TXAMP6 Configs */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE,
T1_AFE_PORT_CFG1_REG, 0x002D, 0 },
/* HW_Init Hi and Force_ED */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
T1_POWER_DOWN_CONTROL_REG, 0x0308, 0 },
+ };
+
+ static const struct access_ereg_val slave_init[] = {
/* Equalizer Full Duplex Freeze - T1 Slave */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_EQ_FD_STG1_FRZ_CFG, 0x0002, 0 },
@@ -267,6 +294,9 @@ static int lan87xx_phy_init(struct phy_device *phydev)
T1_EQ_WT_FD_LCK_FRZ_CFG, 0x0002, 0 },
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_PST_EQ_LCK_STG1_FRZ_CFG, 0x0002, 0 },
+ };
+
+ static const struct access_ereg_val phy_init[] = {
/* Slave Full Duplex Multi Configs */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_SLV_FD_MULT_CFG_REG, 0x0D53, 0 },
@@ -397,7 +427,7 @@ static int lan87xx_phy_init(struct phy_device *phydev)
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
T1_POWER_DOWN_CONTROL_REG, 0x0300, 0 },
};
- int rc, i;
+ int rc;
/* phy Soft reset */
rc = genphy_soft_reset(phydev);
@@ -405,21 +435,28 @@ static int lan87xx_phy_init(struct phy_device *phydev)
return rc;
/* PHY Initialization */
- for (i = 0; i < ARRAY_SIZE(init); i++) {
- if (init[i].mode == PHYACC_ATTR_MODE_POLL &&
- init[i].bank == PHYACC_ATTR_BANK_SMI) {
- rc = access_smi_poll_timeout(phydev,
- init[i].offset,
- init[i].val,
- init[i].mask);
- } else {
- rc = access_ereg(phydev, init[i].mode, init[i].bank,
- init[i].offset, init[i].val);
- }
+ rc = lan87xx_phy_init_cmd(phydev, hw_init, ARRAY_SIZE(hw_init));
+ if (rc < 0)
+ return rc;
+
+ rc = genphy_read_master_slave(phydev);
+ if (rc)
+ return rc;
+
+ /* The following sequence needs to run only if phydev is in
+ * slave mode.
+ */
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_SLAVE) {
+ rc = lan87xx_phy_init_cmd(phydev, slave_init,
+ ARRAY_SIZE(slave_init));
if (rc < 0)
return rc;
}
+ rc = lan87xx_phy_init_cmd(phydev, phy_init, ARRAY_SIZE(phy_init));
+ if (rc < 0)
+ return rc;
+
return lan87xx_config_rgmii_delay(phydev);
}
@@ -775,6 +812,7 @@ static int lan87xx_read_status(struct phy_device *phydev)
static int lan87xx_config_aneg(struct phy_device *phydev)
{
u16 ctl = 0;
+ int ret;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -790,7 +828,11 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
return -EOPNOTSUPP;
}
- return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+ ret = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
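+ /* phy_modify_changed() returns 1 only if the register content actually
+ * changed, so the re-init below runs only on a real master/slave role
+ * switch
+ */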
+ if (ret == 1)
+ return phy_init_hw(phydev);
+
+ return ret;
}
static int lan87xx_get_sqi(struct phy_device *phydev)
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 685190db72de..2fa5a90e073b 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Motorcomm 8511/8521/8531S PHY driver.
+ * Motorcomm 8511/8521/8531/8531S PHY driver.
*
* Author: Peter Geis <pgwipeout@gmail.com>
* Author: Frank <Frank.Sae@motor-comm.com>
@@ -10,10 +10,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/of.h>
#define PHY_ID_YT8511 0x0000010a
-#define PHY_ID_YT8521 0x0000011A
-#define PHY_ID_YT8531S 0x4F51E91A
+#define PHY_ID_YT8521 0x0000011a
+#define PHY_ID_YT8531 0x4f51e91b
+#define PHY_ID_YT8531S 0x4f51e91a
/* YT8521/YT8531S Register Overview
* UTP Register space | FIBER Register space
@@ -161,6 +163,11 @@
#define YT8521_CHIP_CONFIG_REG 0xA001
#define YT8521_CCR_SW_RST BIT(15)
+/* 1b0 disable 1.9ns rxc clock delay *default*
+ * 1b1 enable 1.9ns rxc clock delay
+ */
+#define YT8521_CCR_RXC_DLY_EN BIT(8)
+#define YT8521_CCR_RXC_DLY_1_900_NS 1900
#define YT8521_CCR_MODE_SEL_MASK (BIT(2) | BIT(1) | BIT(0))
#define YT8521_CCR_MODE_UTP_TO_RGMII 0
@@ -178,22 +185,29 @@
#define YT8521_MODE_POLL 0x3
#define YT8521_RGMII_CONFIG1_REG 0xA003
-
-/* TX Gig-E Delay is bits 3:0, default 0x1
- * TX Fast-E Delay is bits 7:4, default 0xf
- * RX Delay is bits 13:10, default 0x0
- * Delay = 150ps * N
- * On = 2250ps, off = 0ps
+/* 1b0 use original tx_clk_rgmii *default*
+ * 1b1 use inverted tx_clk_rgmii.
*/
-#define YT8521_RC1R_RX_DELAY_MASK (0xF << 10)
-#define YT8521_RC1R_RX_DELAY_EN (0xF << 10)
-#define YT8521_RC1R_RX_DELAY_DIS (0x0 << 10)
-#define YT8521_RC1R_FE_TX_DELAY_MASK (0xF << 4)
-#define YT8521_RC1R_FE_TX_DELAY_EN (0xF << 4)
-#define YT8521_RC1R_FE_TX_DELAY_DIS (0x0 << 4)
-#define YT8521_RC1R_GE_TX_DELAY_MASK (0xF << 0)
-#define YT8521_RC1R_GE_TX_DELAY_EN (0xF << 0)
-#define YT8521_RC1R_GE_TX_DELAY_DIS (0x0 << 0)
+#define YT8521_RC1R_TX_CLK_SEL_INVERTED BIT(14)
+#define YT8521_RC1R_RX_DELAY_MASK GENMASK(13, 10)
+#define YT8521_RC1R_FE_TX_DELAY_MASK GENMASK(7, 4)
+#define YT8521_RC1R_GE_TX_DELAY_MASK GENMASK(3, 0)
+#define YT8521_RC1R_RGMII_0_000_NS 0
+#define YT8521_RC1R_RGMII_0_150_NS 1
+#define YT8521_RC1R_RGMII_0_300_NS 2
+#define YT8521_RC1R_RGMII_0_450_NS 3
+#define YT8521_RC1R_RGMII_0_600_NS 4
+#define YT8521_RC1R_RGMII_0_750_NS 5
+#define YT8521_RC1R_RGMII_0_900_NS 6
+#define YT8521_RC1R_RGMII_1_050_NS 7
+#define YT8521_RC1R_RGMII_1_200_NS 8
+#define YT8521_RC1R_RGMII_1_350_NS 9
+#define YT8521_RC1R_RGMII_1_500_NS 10
+#define YT8521_RC1R_RGMII_1_650_NS 11
+#define YT8521_RC1R_RGMII_1_800_NS 12
+#define YT8521_RC1R_RGMII_1_950_NS 13
+#define YT8521_RC1R_RGMII_2_100_NS 14
+#define YT8521_RC1R_RGMII_2_250_NS 15
#define YTPHY_MISC_CONFIG_REG 0xA006
#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0)
@@ -222,11 +236,36 @@
*/
#define YTPHY_WCR_TYPE_PULSE BIT(0)
-#define YT8531S_SYNCE_CFG_REG 0xA012
-#define YT8531S_SCR_SYNCE_ENABLE BIT(6)
+#define YTPHY_SYNCE_CFG_REG 0xA012
+#define YT8521_SCR_SYNCE_ENABLE BIT(5)
+/* 1b0 output 25m clock
+ * 1b1 output 125m clock *default*
+ */
+#define YT8521_SCR_CLK_FRE_SEL_125M BIT(3)
+#define YT8521_SCR_CLK_SRC_MASK GENMASK(2, 1)
+#define YT8521_SCR_CLK_SRC_PLL_125M 0
+#define YT8521_SCR_CLK_SRC_UTP_RX 1
+#define YT8521_SCR_CLK_SRC_SDS_RX 2
+#define YT8521_SCR_CLK_SRC_REF_25M 3
+#define YT8531_SCR_SYNCE_ENABLE BIT(6)
+/* 1b0 output 25m clock *default*
+ * 1b1 output 125m clock
+ */
+#define YT8531_SCR_CLK_FRE_SEL_125M BIT(4)
+#define YT8531_SCR_CLK_SRC_MASK GENMASK(3, 1)
+#define YT8531_SCR_CLK_SRC_PLL_125M 0
+#define YT8531_SCR_CLK_SRC_UTP_RX 1
+#define YT8531_SCR_CLK_SRC_SDS_RX 2
+#define YT8531_SCR_CLK_SRC_CLOCK_FROM_DIGITAL 3
+#define YT8531_SCR_CLK_SRC_REF_25M 4
+#define YT8531_SCR_CLK_SRC_SSC_25M 5
/* Extended Register end */
+#define YTPHY_DTS_OUTPUT_CLK_DIS 0
+#define YTPHY_DTS_OUTPUT_CLK_25M 25000000
+#define YTPHY_DTS_OUTPUT_CLK_125M 125000000
+
struct yt8521_priv {
/* combo_advertising is used for case of YT8521 in combo mode,
* this means that yt8521 may work in utp or fiber mode which depends
@@ -479,6 +518,61 @@ err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static int yt8531_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ const u16 mac_addr_reg[] = {
+ YTPHY_WOL_MACADDR2_REG,
+ YTPHY_WOL_MACADDR1_REG,
+ YTPHY_WOL_MACADDR0_REG,
+ };
+ const u8 *mac_addr;
+ u16 mask, val;
+ int ret;
+ u8 i;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ mac_addr = phydev->attached_dev->dev_addr;
+
+ /* Store the device address for the magic packet */
+ for (i = 0; i < 3; i++) {
+ ret = ytphy_write_ext_with_lock(phydev, mac_addr_reg[i],
+ ((mac_addr[i * 2] << 8)) |
+ (mac_addr[i * 2 + 1]));
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable WOL feature */
+ mask = YTPHY_WCR_PULSE_WIDTH_MASK | YTPHY_WCR_INTR_SEL;
+ val = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL;
+ val |= YTPHY_WCR_TYPE_PULSE | YTPHY_WCR_PULSE_WIDTH_672MS;
+ ret = ytphy_modify_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG,
+ mask, val);
+ if (ret < 0)
+ return ret;
+
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, 0,
+ YTPHY_IER_WOL);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Disable WOL feature */
+ mask = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL;
+ ret = ytphy_modify_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG,
+ mask, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG,
+ YTPHY_IER_WOL, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int yt8511_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, YT8511_PAGE_SELECT);
@@ -594,6 +688,153 @@ static int yt8521_write_page(struct phy_device *phydev, int page)
};
/**
+ * struct ytphy_cfg_reg_map - map a config value to a register value
+ * @cfg: value in device configuration
+ * @reg: value in the register
+ */
+struct ytphy_cfg_reg_map {
+ u32 cfg;
+ u32 reg;
+};
+
+static const struct ytphy_cfg_reg_map ytphy_rgmii_delays[] = {
+ /* tx delay / rx delay when YT8521_CCR_RXC_DLY_EN is not set. */
+ { 0, YT8521_RC1R_RGMII_0_000_NS },
+ { 150, YT8521_RC1R_RGMII_0_150_NS },
+ { 300, YT8521_RC1R_RGMII_0_300_NS },
+ { 450, YT8521_RC1R_RGMII_0_450_NS },
+ { 600, YT8521_RC1R_RGMII_0_600_NS },
+ { 750, YT8521_RC1R_RGMII_0_750_NS },
+ { 900, YT8521_RC1R_RGMII_0_900_NS },
+ { 1050, YT8521_RC1R_RGMII_1_050_NS },
+ { 1200, YT8521_RC1R_RGMII_1_200_NS },
+ { 1350, YT8521_RC1R_RGMII_1_350_NS },
+ { 1500, YT8521_RC1R_RGMII_1_500_NS },
+ { 1650, YT8521_RC1R_RGMII_1_650_NS },
+ { 1800, YT8521_RC1R_RGMII_1_800_NS },
+ { 1950, YT8521_RC1R_RGMII_1_950_NS }, /* default tx/rx delay */
+ { 2100, YT8521_RC1R_RGMII_2_100_NS },
+ { 2250, YT8521_RC1R_RGMII_2_250_NS },
+
+ /* rx delay only, when YT8521_CCR_RXC_DLY_EN is set. */
+ { 0 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_000_NS },
+ { 150 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_150_NS },
+ { 300 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_300_NS },
+ { 450 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_450_NS },
+ { 600 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_600_NS },
+ { 750 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_750_NS },
+ { 900 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_900_NS },
+ { 1050 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_050_NS },
+ { 1200 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_200_NS },
+ { 1350 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_350_NS },
+ { 1500 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_500_NS },
+ { 1650 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_650_NS },
+ { 1800 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_800_NS },
+ { 1950 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_950_NS },
+ { 2100 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_2_100_NS },
+ { 2250 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_2_250_NS }
+};
+
+static u32 ytphy_get_delay_reg_value(struct phy_device *phydev,
+ const char *prop_name,
+ const struct ytphy_cfg_reg_map *tbl,
+ int tb_size,
+ u16 *rxc_dly_en,
+ u32 dflt)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ int tb_size_half = tb_size / 2;
+ u32 val;
+ int i;
+
+ if (of_property_read_u32(node, prop_name, &val))
+ goto err_dts_val;
+
+ /* when rxc_dly_en is NULL, the tx delay is being looked up, and only
+ * the first half of the table is valid.
+ */
+ if (!rxc_dly_en)
+ tb_size = tb_size_half;
+
+ for (i = 0; i < tb_size; i++) {
+ if (tbl[i].cfg == val) {
+ if (rxc_dly_en && i < tb_size_half)
+ *rxc_dly_en = 0;
+ return tbl[i].reg;
+ }
+ }
+
+ phydev_warn(phydev, "Unsupported value %d for %s using default (%u)\n",
+ val, prop_name, dflt);
+
+err_dts_val:
+ /* when rxc_dly_en is not NULL, the rx delay is being looked up.
+ * The rx default in the dts and in ytphy_rgmii_clk_delay_config() is
+ * 1950 ps, so YT8521_CCR_RXC_DLY_EN should not be set.
+ */
+ if (rxc_dly_en)
+ *rxc_dly_en = 0;
+
+ return dflt;
+}
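A worked example of the lookup above: a dts entry rx-internal-delay-ps = <1950> matches the first half of the table, returns YT8521_RC1R_RGMII_1_950_NS and clears *rxc_dly_en; rx-internal-delay-ps = <4150> matches the second half (4150 = 2250 + 1900) and returns YT8521_RC1R_RGMII_2_250_NS while leaving YT8521_CCR_RXC_DLY_EN set, so the fixed 1.9 ns rxc delay supplies the difference. For tx lookups (rxc_dly_en == NULL) only the first half is searched, since the extra 1.9 ns stage exists only on the rx clock path.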
+
+static int ytphy_rgmii_clk_delay_config(struct phy_device *phydev)
+{
+ int tb_size = ARRAY_SIZE(ytphy_rgmii_delays);
+ u16 rxc_dly_en = YT8521_CCR_RXC_DLY_EN;
+ u32 rx_reg, tx_reg;
+ u16 mask, val = 0;
+ int ret;
+
+ rx_reg = ytphy_get_delay_reg_value(phydev, "rx-internal-delay-ps",
+ ytphy_rgmii_delays, tb_size,
+ &rxc_dly_en,
+ YT8521_RC1R_RGMII_1_950_NS);
+ tx_reg = ytphy_get_delay_reg_value(phydev, "tx-internal-delay-ps",
+ ytphy_rgmii_delays, tb_size, NULL,
+ YT8521_RC1R_RGMII_1_950_NS);
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ rxc_dly_en = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rxc_dly_en = 0;
+ val |= FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, tx_reg);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg) |
+ FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, tx_reg);
+ break;
+ default: /* do not support other modes */
+ return -EOPNOTSUPP;
+ }
+
+ ret = ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_RXC_DLY_EN, rxc_dly_en);
+ if (ret < 0)
+ return ret;
+
+ /* Generally, it is not necessary to adjust YT8521_RC1R_FE_TX_DELAY */
+ mask = YT8521_RC1R_RX_DELAY_MASK | YT8521_RC1R_GE_TX_DELAY_MASK;
+ return ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG, mask, val);
+}
+
+static int ytphy_rgmii_clk_delay_config_with_lock(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = ytphy_rgmii_clk_delay_config(phydev);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
* yt8521_probe() - read chip config then set suitable polling_mode
* @phydev: a pointer to a &struct phy_device
*
@@ -601,9 +842,12 @@ static int yt8521_write_page(struct phy_device *phydev, int page)
*/
static int yt8521_probe(struct phy_device *phydev)
{
+ struct device_node *node = phydev->mdio.dev.of_node;
struct device *dev = &phydev->mdio.dev;
struct yt8521_priv *priv;
int chip_config;
+ u16 mask, val;
+ u32 freq;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -648,27 +892,107 @@ static int yt8521_probe(struct phy_device *phydev)
return ret;
}
- return 0;
+ if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq))
+ freq = YTPHY_DTS_OUTPUT_CLK_DIS;
+
+ if (phydev->drv->phy_id == PHY_ID_YT8521) {
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8521_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_SRC_MASK |
+ YT8521_SCR_CLK_FRE_SEL_125M;
+ val = YT8521_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8521_SCR_CLK_SRC_MASK,
+ YT8521_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_SRC_MASK |
+ YT8521_SCR_CLK_FRE_SEL_125M;
+ val = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8521_SCR_CLK_SRC_MASK,
+ YT8521_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+ phydev_warn(phydev, "Freq err:%u\n", freq);
+ return -EINVAL;
+ }
+ } else if (phydev->drv->phy_id == PHY_ID_YT8531S) {
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8531_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+ phydev_warn(phydev, "Freq err:%u\n", freq);
+ return -EINVAL;
+ }
+ } else {
+ phydev_warn(phydev, "PHY id err\n");
+ return -EINVAL;
+ }
+
+ return ytphy_modify_ext_with_lock(phydev, YTPHY_SYNCE_CFG_REG, mask,
+ val);
}
-/**
- * yt8531s_probe() - read chip config then set suitable polling_mode
- * @phydev: a pointer to a &struct phy_device
- *
- * returns 0 or negative errno code
- */
-static int yt8531s_probe(struct phy_device *phydev)
+static int yt8531_probe(struct phy_device *phydev)
{
- int ret;
+ struct device_node *node = phydev->mdio.dev.of_node;
+ u16 mask, val;
+ u32 freq;
- /* Disable SyncE clock output by default */
- ret = ytphy_modify_ext_with_lock(phydev, YT8531S_SYNCE_CFG_REG,
- YT8531S_SCR_SYNCE_ENABLE, 0);
- if (ret < 0)
- return ret;
+ if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq))
+ freq = YTPHY_DTS_OUTPUT_CLK_DIS;
+
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8531_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+ phydev_warn(phydev, "Freq err:%u\n", freq);
+ return -EINVAL;
+ }
- /* same as yt8521_probe */
- return yt8521_probe(phydev);
+ return ytphy_modify_ext_with_lock(phydev, YTPHY_SYNCE_CFG_REG, mask,
+ val);
}
/**
@@ -1133,65 +1457,128 @@ static int yt8521_resume(struct phy_device *phydev)
*/
static int yt8521_config_init(struct phy_device *phydev)
{
+ struct device_node *node = phydev->mdio.dev.of_node;
int old_page;
int ret = 0;
- u16 val;
old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
if (old_page < 0)
goto err_restore_page;
- switch (phydev->interface) {
- case PHY_INTERFACE_MODE_RGMII:
- val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_FE_TX_DELAY_DIS;
- val |= YT8521_RC1R_RX_DELAY_DIS;
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_FE_TX_DELAY_DIS;
- val |= YT8521_RC1R_RX_DELAY_EN;
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_FE_TX_DELAY_EN;
- val |= YT8521_RC1R_RX_DELAY_DIS;
- break;
- case PHY_INTERFACE_MODE_RGMII_ID:
- val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_FE_TX_DELAY_EN;
- val |= YT8521_RC1R_RX_DELAY_EN;
- break;
- case PHY_INTERFACE_MODE_SGMII:
- break;
- default: /* do not support other modes */
- ret = -EOPNOTSUPP;
- goto err_restore_page;
- }
-
/* set rgmii delay mode */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII) {
- ret = ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG,
- (YT8521_RC1R_RX_DELAY_MASK |
- YT8521_RC1R_FE_TX_DELAY_MASK |
- YT8521_RC1R_GE_TX_DELAY_MASK),
- val);
+ ret = ytphy_rgmii_clk_delay_config(phydev);
if (ret < 0)
goto err_restore_page;
}
- /* disable auto sleep */
- ret = ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG,
- YT8521_ESC1R_SLEEP_SW, 0);
- if (ret < 0)
- goto err_restore_page;
-
- /* enable RXC clock when no wire plug */
- ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG,
- YT8521_CGR_RX_CLK_EN, 0);
- if (ret < 0)
- goto err_restore_page;
+ if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) {
+ /* disable auto sleep */
+ ret = ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW, 0);
+ if (ret < 0)
+ goto err_restore_page;
+ }
+ if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) {
+ /* enable RXC clock when no wire plug */
+ ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG,
+ YT8521_CGR_RX_CLK_EN, 0);
+ if (ret < 0)
+ goto err_restore_page;
+ }
err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static int yt8531_config_init(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ int ret;
+
+ ret = ytphy_rgmii_clk_delay_config_with_lock(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) {
+ /* disable auto sleep */
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) {
+ /* enable RXC clock when no wire plug */
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_CLOCK_GATING_REG,
+ YT8521_CGR_RX_CLK_EN, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * yt8531_link_change_notify() - Adjust the tx clock direction according to
+ * the current speed and dts config.
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * NOTE: This function is only needed to adapt to the VF2 board with the
+ * JH7110 SoC. Keep "motorcomm,tx-clk-adj-enabled" out of the dts when the
+ * SoC is not the JH7110.
+ */
+static void yt8531_link_change_notify(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ bool tx_clk_1000_inverted = false;
+ bool tx_clk_100_inverted = false;
+ bool tx_clk_10_inverted = false;
+ bool tx_clk_adj_enabled = false;
+ u16 val = 0;
+ int ret;
+
+ if (of_property_read_bool(node, "motorcomm,tx-clk-adj-enabled"))
+ tx_clk_adj_enabled = true;
+
+ if (!tx_clk_adj_enabled)
+ return;
+
+ if (of_property_read_bool(node, "motorcomm,tx-clk-10-inverted"))
+ tx_clk_10_inverted = true;
+ if (of_property_read_bool(node, "motorcomm,tx-clk-100-inverted"))
+ tx_clk_100_inverted = true;
+ if (of_property_read_bool(node, "motorcomm,tx-clk-1000-inverted"))
+ tx_clk_1000_inverted = true;
+
+ if (phydev->speed < 0)
+ return;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ if (tx_clk_1000_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ case SPEED_100:
+ if (tx_clk_100_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ case SPEED_10:
+ if (tx_clk_10_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ default:
+ return;
+ }
+
+ ret = ytphy_modify_ext_with_lock(phydev, YT8521_RGMII_CONFIG1_REG,
+ YT8521_RC1R_TX_CLK_SEL_INVERTED, val);
+ if (ret < 0)
+ phydev_warn(phydev, "Modify TX_CLK_SEL err:%d\n", ret);
+}
+
/**
* yt8521_prepare_fiber_features() - A small helper function that setup
* fiber's features.
@@ -1775,10 +2162,21 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.resume = yt8521_resume,
},
{
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8531),
+ .name = "YT8531 Gigabit Ethernet",
+ .probe = yt8531_probe,
+ .config_init = yt8531_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .get_wol = ytphy_get_wol,
+ .set_wol = yt8531_set_wol,
+ .link_change_notify = yt8531_link_change_notify,
+ },
+ {
PHY_ID_MATCH_EXACT(PHY_ID_YT8531S),
.name = "YT8531S Gigabit Ethernet",
.get_features = yt8521_get_features,
- .probe = yt8531s_probe,
+ .probe = yt8521_probe,
.read_page = yt8521_read_page,
.write_page = yt8521_write_page,
.get_wol = ytphy_get_wol,
@@ -1795,7 +2193,7 @@ static struct phy_driver motorcomm_phy_drvs[] = {
module_phy_driver(motorcomm_phy_drvs);
-MODULE_DESCRIPTION("Motorcomm 8511/8521/8531S PHY driver");
+MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S PHY driver");
MODULE_AUTHOR("Peter Geis");
MODULE_AUTHOR("Frank");
MODULE_LICENSE("GPL");
@@ -1803,8 +2201,9 @@ MODULE_LICENSE("GPL");
static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8511) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8521) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8531) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) },
- { /* sentinal */ }
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(mdio, motorcomm_tbl);
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 147d7a5a9b35..e5972b4ef6e8 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -12,6 +12,7 @@
#include <linux/mutex.h>
#include <linux/phy.h>
#include <linux/polynomial.h>
+#include <linux/property.h>
#include <linux/netdevice.h>
/* PHY ID */
@@ -292,6 +293,10 @@ static int gpy_probe(struct phy_device *phydev)
phydev->priv = priv;
mutex_init(&priv->mbox_lock);
+ if (gpy_has_broken_mdint(phydev) &&
+ !device_property_present(dev, "maxlinear,use-broken-interrupts"))
+ phydev->dev_flags |= PHY_F_NO_IRQ;
+
fw_version = phy_read(phydev, PHY_FWV);
if (fw_version < 0)
return fw_version;
diff --git a/drivers/net/phy/ncn26000.c b/drivers/net/phy/ncn26000.c
new file mode 100644
index 000000000000..5680584f659e
--- /dev/null
+++ b/drivers/net/phy/ncn26000.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Driver for the onsemi 10BASE-T1S NCN26000 PHYs family.
+ *
+ * Copyright 2022 onsemi
+ */
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "mdio-open-alliance.h"
+
+#define PHY_ID_NCN26000 0x180FF5A1
+
+#define NCN26000_REG_IRQ_CTL 16
+#define NCN26000_REG_IRQ_STATUS 17
+
+// the NCN26000 maps link_ctrl to BMCR_ANENABLE
+#define NCN26000_BCMR_LINK_CTRL_BIT BMCR_ANENABLE
+
+// the NCN26000 maps link_status to BMSR_ANEGCOMPLETE
+#define NCN26000_BMSR_LINK_STATUS_BIT BMSR_ANEGCOMPLETE
+
+#define NCN26000_IRQ_LINKST_BIT BIT(0)
+#define NCN26000_IRQ_PLCAST_BIT BIT(1)
+#define NCN26000_IRQ_LJABBER_BIT BIT(2)
+#define NCN26000_IRQ_RJABBER_BIT BIT(3)
+#define NCN26000_IRQ_PLCAREC_BIT BIT(4)
+#define NCN26000_IRQ_PHYSCOL_BIT BIT(5)
+#define NCN26000_IRQ_RESET_BIT BIT(15)
+
+#define TO_TMR_DEFAULT 32
+
+static int ncn26000_config_init(struct phy_device *phydev)
+{
+ /* HW bug workaround: the default value of the PLCA TO_TIMER should be
+ * 32, whereas the current version of the NCN26000 reports 24. This will be
+ * fixed in future PHY versions. For the time being, we force the
+ * correct default here.
+ */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_TOTMR,
+ TO_TMR_DEFAULT);
+}
+
+static int ncn26000_config_aneg(struct phy_device *phydev)
+{
+ /* Note: the NCN26000 supports only P2MP link mode. Therefore, AN is not
+ * supported. However, this function is invoked by phylib to enable the
+ * PHY, regardless of the AN support.
+ */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ phydev->mdix = ETH_TP_MDI;
+
+ // bring up the link
+ return phy_write(phydev, MII_BMCR, NCN26000_BCMR_LINK_CTRL_BIT);
+}
+
+static int ncn26000_read_status(struct phy_device *phydev)
+{
+ /* The NCN26000 reports NCN26000_BMSR_LINK_STATUS_BIT if the link status of
+ * the PHY is up. It further reports the logical AND of the link status
+ * and the PLCA status in the BMSR_LSTATUS bit.
+ */
+ int ret;
+
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+ * in polling mode, so that such short link drops are not
+ * missed, unless the link was already down.
+ */
+ if (!phy_polling_mode(phydev) || !phydev->link) {
+ ret = phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ return ret;
+ else if (ret & NCN26000_BMSR_LINK_STATUS_BIT)
+ goto upd_link;
+ }
+
+ ret = phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ return ret;
+
+upd_link:
+ // update link status
+ if (ret & NCN26000_BMSR_LINK_STATUS_BIT) {
+ phydev->link = 1;
+ phydev->pause = 0;
+ phydev->duplex = DUPLEX_HALF;
+ phydev->speed = SPEED_10;
+ } else {
+ phydev->link = 0;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->speed = SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ncn26000_handle_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ // read and acknowledge the IRQ status register
+ ret = phy_read(phydev, NCN26000_REG_IRQ_STATUS);
+
+ // check only link status changes
+ if (ret < 0 || (ret & NCN26000_IRQ_LINKST_BIT) == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+ return IRQ_HANDLED;
+}
+
+static int ncn26000_config_intr(struct phy_device *phydev)
+{
+ int ret;
+ u16 irqe;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ // acknowledge IRQs
+ ret = phy_read(phydev, NCN26000_REG_IRQ_STATUS);
+ if (ret < 0)
+ return ret;
+
+ // get link status notifications
+ irqe = NCN26000_IRQ_LINKST_BIT;
+ } else {
+ // disable all IRQs
+ irqe = 0;
+ }
+
+ ret = phy_write(phydev, NCN26000_REG_IRQ_CTL, irqe);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static struct phy_driver ncn26000_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_NCN26000),
+ .name = "NCN26000",
+ .features = PHY_BASIC_T1S_P2MP_FEATURES,
+ .config_init = ncn26000_config_init,
+ .config_intr = ncn26000_config_intr,
+ .config_aneg = ncn26000_config_aneg,
+ .read_status = ncn26000_read_status,
+ .handle_interrupt = ncn26000_handle_interrupt,
+ .get_plca_cfg = genphy_c45_plca_get_cfg,
+ .set_plca_cfg = genphy_c45_plca_set_cfg,
+ .get_plca_status = genphy_c45_plca_get_status,
+ .soft_reset = genphy_soft_reset,
+ },
+};
+
+module_phy_driver(ncn26000_driver);
+
+static struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_NCN26000) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ncn26000_tbl);
+
+MODULE_AUTHOR("Piergiorgio Beruto");
+MODULE_DESCRIPTION("onsemi 10BASE-T1S PHY driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index a87a4b3ffce4..f9b128cecc3f 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -8,6 +8,8 @@
#include <linux/mii.h>
#include <linux/phy.h>
+#include "mdio-open-alliance.h"
+
/**
* genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities
* @phydev: target phy_device struct
@@ -254,13 +256,17 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
*/
int genphy_c45_an_config_aneg(struct phy_device *phydev)
{
- int changed, ret;
+ int changed = 0, ret;
u32 adv;
linkmode_and(phydev->advertising, phydev->advertising,
phydev->supported);
- changed = genphy_config_eee_advert(phydev);
+ ret = genphy_c45_write_eee_adv(phydev, phydev->supported_eee);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ changed = true;
if (genphy_c45_baset1_able(phydev))
return genphy_c45_baset1_an_config_aneg(phydev);
@@ -660,6 +666,199 @@ int genphy_c45_read_mdix(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
/**
+ * genphy_c45_write_eee_adv - write advertised EEE link modes
+ * @phydev: target phy_device struct
+ * @adv: the linkmode advertisement settings
+ */
+int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv)
+{
+ int val, changed = 0;
+
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP1_FEATURES)) {
+ val = linkmode_to_mii_eee_cap1_t(adv);
+
+ /* eee_broken_modes stores raw MDIO_AN_EEE_ADV register values,
+ * so they can be masked off directly.
+ */
+ val &= ~phydev->eee_broken_modes;
+
+ /* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1
+ * (Register 7.60)
+ */
+ val = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV,
+ MDIO_EEE_100TX | MDIO_EEE_1000T |
+ MDIO_EEE_10GT | MDIO_EEE_1000KX |
+ MDIO_EEE_10GKX4 | MDIO_EEE_10GKR,
+ val);
+ if (val < 0)
+ return val;
+ if (val > 0)
+ changed = 1;
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported_eee)) {
+ val = linkmode_adv_to_mii_10base_t1_t(adv);
+ /* IEEE 802.3cg-2019 45.2.7.25 10BASE-T1 AN control register
+ * (Register 7.526)
+ */
+ val = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_10BT1_AN_CTRL,
+ MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L,
+ val);
+ if (val < 0)
+ return val;
+ if (val > 0)
+ changed = 1;
+ }
+
+ return changed;
+}
+
+/**
+ * genphy_c45_read_eee_adv - read advertised EEE link modes
+ * @phydev: target phy_device struct
+ * @adv: the linkmode advertisement status
+ */
+static int genphy_c45_read_eee_adv(struct phy_device *phydev,
+ unsigned long *adv)
+{
+ int val;
+
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP1_FEATURES)) {
+ /* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1
+ * (Register 7.60)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap1_mod_linkmode_t(adv, val);
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported_eee)) {
+ /* IEEE 802.3cg-2019 45.2.7.25 10BASE-T1 AN control register
+ * (Register 7.526)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10BT1_AN_CTRL);
+ if (val < 0)
+ return val;
+
+ mii_10base_t1_adv_mod_linkmode_t(adv, val);
+ }
+
+ return 0;
+}
+
+/**
+ * genphy_c45_read_eee_lpa - read advertised LP EEE link modes
+ * @phydev: target phy_device struct
+ * @lpa: the linkmode LP advertisement status
+ */
+static int genphy_c45_read_eee_lpa(struct phy_device *phydev,
+ unsigned long *lpa)
+{
+ int val;
+
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP1_FEATURES)) {
+ /* IEEE 802.3-2018 45.2.7.14 EEE link partner ability 1
+ * (Register 7.61)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap1_mod_linkmode_t(lpa, val);
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported_eee)) {
+ /* IEEE 802.3cg-2019 45.2.7.26 10BASE-T1 AN status register
+ * (Register 7.527)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10BT1_AN_STAT);
+ if (val < 0)
+ return val;
+
+ mii_10base_t1_adv_mod_linkmode_t(lpa, val);
+ }
+
+ return 0;
+}
+
+/**
+ * genphy_c45_read_eee_cap1 - read supported EEE link modes from register 3.20
+ * @phydev: target phy_device struct
+ */
+static int genphy_c45_read_eee_cap1(struct phy_device *phydev)
+{
+ int val;
+
+ /* IEEE 802.3-2018 45.2.3.10 EEE control and capability 1
+ * (Register 3.20)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+ if (val < 0)
+ return val;
+
+ /* The 802.3 2018 standard says the top 2 bits are reserved and should
+ * read as 0. Also, it seems unlikely anybody will build a PHY which
+ * supports 100GBASE-R deep sleep all the way down to 100BASE-TX EEE.
+ * If MDIO_PCS_EEE_ABLE is 0xffff assume EEE is not supported.
+ */
+ if (val == 0xffff)
+ return 0;
+
+ mii_eee_cap1_mod_linkmode_t(phydev->supported_eee, val);
+
+ /* Some buggy devices indicate EEE link modes in MDIO_PCS_EEE_ABLE
+ * which they don't support as indicated by BMSR, ESTATUS etc.
+ */
+ linkmode_and(phydev->supported_eee, phydev->supported_eee,
+ phydev->supported);
+
+ return 0;
+}
+
+/**
+ * genphy_c45_read_eee_abilities - read supported EEE link modes
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_read_eee_abilities(struct phy_device *phydev)
+{
+ int val;
+
+ /* There is no indicator of whether the optional register
+ * "EEE control and capability 1" (3.20) is supported. Read it only
+ * on devices with appropriate linkmodes.
+ */
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP1_FEATURES)) {
+ val = genphy_c45_read_eee_cap1(phydev);
+ if (val)
+ return val;
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported)) {
+ /* IEEE 802.3cg-2019 45.2.1.186b 10BASE-T1L PMA status register
+ * (Register 1.2295)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported_eee,
+ val & MDIO_PMA_10T1L_STAT_EEE);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_eee_abilities);
+
+/**
* genphy_c45_pma_read_abilities - read supported link modes from PMA
* @phydev: target phy_device struct
*
@@ -773,6 +972,11 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev)
}
}
+ /* This is optional functionality. If not supported, we may get an error
+ * which should be ignored.
+ */
+ genphy_c45_read_eee_abilities(phydev);
+
return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities);
@@ -931,6 +1135,312 @@ int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable)
}
EXPORT_SYMBOL_GPL(genphy_c45_fast_retrain);
+/**
+ * genphy_c45_plca_get_cfg - get PLCA configuration from standard registers
+ * @phydev: target phy_device struct
+ * @plca_cfg: output structure to store the PLCA configuration
+ *
+ * Description: if the PHY complies with the Open Alliance TC14 10BASE-T1S PLCA
+ * Management Registers specifications, this function can be used to retrieve
+ * the current PLCA configuration from the standard registers in MMD 31.
+ */
+int genphy_c45_plca_get_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_IDVER);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & MDIO_OATC14_PLCA_IDM) != OATC14_IDM)
+ return -ENODEV;
+
+ plca_cfg->version = ret & ~MDIO_OATC14_PLCA_IDM;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_CTRL0);
+ if (ret < 0)
+ return ret;
+
+ plca_cfg->enabled = !!(ret & MDIO_OATC14_PLCA_EN);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ plca_cfg->node_cnt = (ret & MDIO_OATC14_PLCA_NCNT) >> 8;
+ plca_cfg->node_id = (ret & MDIO_OATC14_PLCA_ID);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_TOTMR);
+ if (ret < 0)
+ return ret;
+
+ plca_cfg->to_tmr = ret & MDIO_OATC14_PLCA_TOT;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_BURST);
+ if (ret < 0)
+ return ret;
+
+ plca_cfg->burst_cnt = (ret & MDIO_OATC14_PLCA_MAXBC) >> 8;
+ plca_cfg->burst_tmr = (ret & MDIO_OATC14_PLCA_BTMR);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_plca_get_cfg);
+
+/**
+ * genphy_c45_plca_set_cfg - set PLCA configuration using standard registers
+ * @phydev: target phy_device struct
+ * @plca_cfg: structure containing the PLCA configuration. Fields set to -1 are
+ * not to be changed.
+ *
+ * Description: if the PHY complies with the Open Alliance TC14 10BASE-T1S PLCA
+ * Management Registers specifications, this function can be used to modify
+ * the PLCA configuration using the standard registers in MMD 31.
+ */
+int genphy_c45_plca_set_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg)
+{
+ u16 val = 0;
+ int ret;
+
+ // PLCA IDVER is read-only
+ if (plca_cfg->version >= 0)
+ return -EINVAL;
+
+ // first of all, disable PLCA if required
+ if (plca_cfg->enabled == 0) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_CTRL0,
+ MDIO_OATC14_PLCA_EN);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ // check if we need to set the PLCA node count, node ID, or both
+ if (plca_cfg->node_cnt >= 0 || plca_cfg->node_id >= 0) {
+ /* if either node count or node ID is -not- to be changed,
+ * read the register first so the unchanged field can be
+ * merged back into the new value
+ */
+ if (plca_cfg->node_cnt < 0 || plca_cfg->node_id < 0) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_CTRL1);
+
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ }
+
+ if (plca_cfg->node_cnt >= 0)
+ val = (val & ~MDIO_OATC14_PLCA_NCNT) |
+ (plca_cfg->node_cnt << 8);
+
+ if (plca_cfg->node_id >= 0)
+ val = (val & ~MDIO_OATC14_PLCA_ID) |
+ (plca_cfg->node_id);
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_CTRL1, val);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ if (plca_cfg->to_tmr >= 0) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_TOTMR,
+ plca_cfg->to_tmr);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ // check if we need to set the PLCA burst count, burst timer, or both
+ if (plca_cfg->burst_cnt >= 0 || plca_cfg->burst_tmr >= 0) {
+ /* if either burst count or burst timer is -not- to be changed,
+ * read the register first so the unchanged field can be
+ * merged back into the new value
+ */
+ if (plca_cfg->burst_cnt < 0 || plca_cfg->burst_tmr < 0) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_BURST);
+
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ }
+
+ if (plca_cfg->burst_cnt >= 0)
+ val = (val & ~MDIO_OATC14_PLCA_MAXBC) |
+ (plca_cfg->burst_cnt << 8);
+
+ if (plca_cfg->burst_tmr >= 0)
+ val = (val & ~MDIO_OATC14_PLCA_BTMR) |
+ (plca_cfg->burst_tmr);
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_BURST, val);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ // if we need to enable PLCA, do it at the end
+ if (plca_cfg->enabled > 0) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_CTRL0,
+ MDIO_OATC14_PLCA_EN);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_plca_set_cfg);
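Since fields set to -1 are left untouched, a caller can update a single parameter in isolation. A hedged usage sketch (kernel-context fragment, assuming a valid phydev) that changes only the node ID:

        struct phy_plca_cfg cfg = {
                .version = -1,  /* read-only, must stay -1 */
                .enabled = -1,
                .node_id = 5,
                .node_cnt = -1,
                .to_tmr = -1,
                .burst_cnt = -1,
                .burst_tmr = -1,
        };
        int err = genphy_c45_plca_set_cfg(phydev, &cfg);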
+
+/**
+ * genphy_c45_plca_get_status - get PLCA status from standard registers
+ * @phydev: target phy_device struct
+ * @plca_st: output structure to store the PLCA status
+ *
+ * Description: if the PHY complies with the Open Alliance TC14 10BASE-T1S PLCA
+ * Management Registers specifications, this function can be used to retrieve
+ * the current PLCA status information from the standard registers in MMD 31.
+ */
+int genphy_c45_plca_get_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_STATUS);
+ if (ret < 0)
+ return ret;
+
+ plca_st->pst = !!(ret & MDIO_OATC14_PLCA_PST);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status);
+
+/**
+ * genphy_c45_eee_is_active - get EEE status
+ * @phydev: target phy_device struct
+ * @adv: variable to store advertised linkmodes
+ * @lp: variable to store LP advertised linkmodes
+ * @is_enabled: variable to store EEE enabled/disabled configuration value
+ *
+ * Description: this function reads the local and link partner PHY
+ * advertisements, compares them and returns the current EEE state.
+ */
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
+ unsigned long *lp, bool *is_enabled)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ bool eee_enabled, eee_active;
+ int ret;
+
+ ret = genphy_c45_read_eee_adv(phydev, tmp_adv);
+ if (ret)
+ return ret;
+
+ ret = genphy_c45_read_eee_lpa(phydev, tmp_lp);
+ if (ret)
+ return ret;
+
+ eee_enabled = !linkmode_empty(tmp_adv);
+ linkmode_and(common, tmp_adv, tmp_lp);
+ if (eee_enabled && !linkmode_empty(common))
+ eee_active = phy_check_valid(phydev->speed, phydev->duplex,
+ common);
+ else
+ eee_active = false;
+
+ if (adv)
+ linkmode_copy(adv, tmp_adv);
+ if (lp)
+ linkmode_copy(lp, tmp_lp);
+ if (is_enabled)
+ *is_enabled = eee_enabled;
+
+ return eee_active;
+}
+EXPORT_SYMBOL(genphy_c45_eee_is_active);
+
+/**
+ * genphy_c45_ethtool_get_eee - get EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: it reports the Supported/Advertisement/LP Advertisement
+ * capabilities.
+ */
+int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
+ struct ethtool_eee *data)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = {};
+ bool overflow = false, is_enabled;
+ int ret;
+
+ ret = genphy_c45_eee_is_active(phydev, adv, lp, &is_enabled);
+ if (ret < 0)
+ return ret;
+
+ data->eee_enabled = is_enabled;
+ data->eee_active = ret;
+
+ if (!ethtool_convert_link_mode_to_legacy_u32(&data->supported,
+ phydev->supported_eee))
+ overflow = true;
+ if (!ethtool_convert_link_mode_to_legacy_u32(&data->advertised, adv))
+ overflow = true;
+ if (!ethtool_convert_link_mode_to_legacy_u32(&data->lp_advertised, lp))
+ overflow = true;
+
+ if (overflow)
+ phydev_warn(phydev, "Not all supported or advertised EEE link modes were passed to the user space\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
+
+/**
+ * genphy_c45_ethtool_set_eee - set EEE advertised settings
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: sets the advertised EEE link modes and restarts
+ * auto-negotiation if the advertisement has changed.
+ */
+int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
+ struct ethtool_eee *data)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
+ int ret;
+
+ if (data->eee_enabled) {
+ if (data->advertised)
+ adv[0] = data->advertised;
+ else
+ linkmode_copy(adv, phydev->supported_eee);
+ }
+
+ ret = genphy_c45_write_eee_adv(phydev, adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return phy_restart_aneg(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c45_ethtool_set_eee);
+
struct phy_driver genphy_c45_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
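Taken together, the new genphy_c45_plca_* helpers give a compliant 10BASE-T1S PHY driver its PLCA plumbing essentially for free: the driver only has to point the new phy_driver callbacks at them. A minimal sketch, with a hypothetical driver name and PHY ID (nothing below is part of this series):

static struct phy_driver foo_t1s_driver[] = { {
	PHY_ID_MATCH_MODEL(0x001cc800),		/* hypothetical PHY ID */
	.name		 = "FOO 10BASE-T1S",
	.get_plca_cfg	 = genphy_c45_plca_get_cfg,
	.set_plca_cfg	 = genphy_c45_plca_set_cfg,
	.get_plca_status = genphy_c45_plca_get_status,
} };

module_phy_driver(foo_t1s_driver);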
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 5d08c627a516..a64186dc53f8 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -13,7 +13,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 99,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 102,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -260,6 +260,9 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 10, FULL, 10baseT_Full ),
PHY_SETTING( 10, HALF, 10baseT_Half ),
PHY_SETTING( 10, FULL, 10baseT1L_Full ),
+ PHY_SETTING( 10, FULL, 10baseT1S_Full ),
+ PHY_SETTING( 10, HALF, 10baseT1S_Half ),
+ PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ),
};
#undef PHY_SETTING
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e5b6cb1a77f9..b33e55a7364e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -242,11 +242,11 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*
* Description: Returns true if there is a valid setting, false otherwise.
*/
-static inline bool phy_check_valid(int speed, int duplex,
- unsigned long *features)
+bool phy_check_valid(int speed, int duplex, unsigned long *features)
{
return !!phy_lookup_setting(speed, duplex, features, true);
}
+EXPORT_SYMBOL(phy_check_valid);
/**
* phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
@@ -544,6 +544,198 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
EXPORT_SYMBOL(phy_ethtool_get_stats);
/**
+ * phy_ethtool_get_plca_cfg - Get PLCA RS configuration
+ * @phydev: the phy_device struct
+ * @plca_cfg: where to store the retrieved configuration
+ *
+ * Retrieve the PLCA configuration from the PHY. Return 0 on success or a
+ * negative value if an error occurred.
+ */
+int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg)
+{
+ int ret;
+
+ if (!phydev->drv) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!phydev->drv->get_plca_cfg) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_plca_cfg(phydev, plca_cfg);
+
+ mutex_unlock(&phydev->lock);
+out:
+ return ret;
+}
+
+/**
+ * plca_check_valid - Check PLCA configuration before enabling
+ * @phydev: the phy_device struct
+ * @plca_cfg: current PLCA configuration
+ * @extack: extack for reporting useful error messages
+ *
+ * Checks whether the PLCA and PHY configurations are consistent and whether it
+ * is safe to enable PLCA. Returns 0 on success or a negative value if the PLCA
+ * or PHY configuration is not consistent.
+ */
+static int plca_check_valid(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack)
+{
+ int ret = 0;
+
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
+ phydev->advertising)) {
+ ret = -EOPNOTSUPP;
+ NL_SET_ERR_MSG(extack,
+ "Point to Multi-Point mode is not enabled");
+ } else if (plca_cfg->node_id >= 255) {
+ NL_SET_ERR_MSG(extack, "PLCA node ID is not set");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * phy_ethtool_set_plca_cfg - Set PLCA RS configuration
+ * @phydev: the phy_device struct
+ * @plca_cfg: new PLCA configuration to apply
+ * @extack: extack for reporting useful error messages
+ *
+ * Sets the PLCA configuration in the PHY. Return 0 on success or a
+ * negative value if an error occurred.
+ */
+int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct phy_plca_cfg *curr_plca_cfg;
+ int ret;
+
+ if (!phydev->drv) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!phydev->drv->set_plca_cfg ||
+ !phydev->drv->get_plca_cfg) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ curr_plca_cfg = kmalloc(sizeof(*curr_plca_cfg), GFP_KERNEL);
+ if (!curr_plca_cfg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&phydev->lock);
+
+ ret = phydev->drv->get_plca_cfg(phydev, curr_plca_cfg);
+ if (ret)
+ goto out_drv;
+
+ if (curr_plca_cfg->enabled < 0 && plca_cfg->enabled >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'enable' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->node_id < 0 && plca_cfg->node_id >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'local node ID' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->node_cnt < 0 && plca_cfg->node_cnt >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'node count' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->to_tmr < 0 && plca_cfg->to_tmr >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'TO timer' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->burst_cnt < 0 && plca_cfg->burst_cnt >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'burst count' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->burst_tmr < 0 && plca_cfg->burst_tmr >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'burst timer' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ // if enabling PLCA, perform a few sanity checks
+ if (plca_cfg->enabled > 0) {
+ // allow setting node_id concurrently with enabled
+ if (plca_cfg->node_id >= 0)
+ curr_plca_cfg->node_id = plca_cfg->node_id;
+
+ ret = plca_check_valid(phydev, curr_plca_cfg, extack);
+ if (ret)
+ goto out_drv;
+ }
+
+ ret = phydev->drv->set_plca_cfg(phydev, plca_cfg);
+
+out_drv:
+ kfree(curr_plca_cfg);
+ mutex_unlock(&phydev->lock);
+out:
+ return ret;
+}
+
+/**
+ * phy_ethtool_get_plca_status - Get PLCA RS status information
+ * @phydev: the phy_device struct
+ * @plca_st: where to store the retrieved status information
+ *
+ * Retrieve the PLCA status information from the PHY. Return 0 on success or a
+ * negative value if an error occurred.
+ */
+int phy_ethtool_get_plca_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st)
+{
+ int ret;
+
+ if (!phydev->drv) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!phydev->drv->get_plca_status) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_plca_status(phydev, plca_st);
+
+ mutex_unlock(&phydev->lock);
+out:
+ return ret;
+}
+
+/**
* phy_start_cable_test - Start a cable test
*
* @phydev: the phy_device struct
@@ -877,27 +1069,35 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set);
int phy_speed_down(struct phy_device *phydev, bool sync)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
- int ret;
+ int ret = 0;
+
+ mutex_lock(&phydev->lock);
if (phydev->autoneg != AUTONEG_ENABLE)
- return 0;
+ goto out;
linkmode_copy(adv_tmp, phydev->advertising);
ret = phy_speed_down_core(phydev);
if (ret)
- return ret;
+ goto out;
linkmode_copy(phydev->adv_old, adv_tmp);
- if (linkmode_equal(phydev->advertising, adv_tmp))
- return 0;
+ if (linkmode_equal(phydev->advertising, adv_tmp)) {
+ ret = 0;
+ goto out;
+ }
ret = phy_config_aneg(phydev);
if (ret)
- return ret;
+ goto out;
+
+ ret = sync ? phy_poll_aneg_done(phydev) : 0;
+out:
+ mutex_unlock(&phydev->lock);
- return sync ? phy_poll_aneg_done(phydev) : 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(phy_speed_down);
@@ -910,21 +1110,28 @@ EXPORT_SYMBOL_GPL(phy_speed_down);
int phy_speed_up(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
+ int ret = 0;
+
+ mutex_lock(&phydev->lock);
if (phydev->autoneg != AUTONEG_ENABLE)
- return 0;
+ goto out;
if (linkmode_empty(phydev->adv_old))
- return 0;
+ goto out;
linkmode_copy(adv_tmp, phydev->advertising);
linkmode_copy(phydev->advertising, phydev->adv_old);
linkmode_zero(phydev->adv_old);
if (linkmode_equal(phydev->advertising, adv_tmp))
- return 0;
+ goto out;
+
+ ret = phy_config_aneg(phydev);
+out:
+ mutex_unlock(&phydev->lock);
- return phy_config_aneg(phydev);
+ return ret;
}
EXPORT_SYMBOL_GPL(phy_speed_up);
@@ -1265,30 +1472,6 @@ void phy_mac_interrupt(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_mac_interrupt);
-static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv)
-{
- linkmode_zero(advertising);
-
- if (eee_adv & MDIO_EEE_100TX)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_1000T)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_10GT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_1000KX)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_10GKX4)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_10GKR)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- advertising);
-}
-
/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
@@ -1301,62 +1484,25 @@ static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv)
*/
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
+ int ret;
+
if (!phydev->drv)
return -EIO;
- /* According to 802.3az,the EEE is supported only in full duplex-mode.
- */
- if (phydev->duplex == DUPLEX_FULL) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(lp);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
- int eee_lp, eee_cap, eee_adv;
- int status;
- u32 cap;
-
- /* Read phy status to properly get the right settings */
- status = phy_read_status(phydev);
- if (status)
- return status;
-
- /* First check if the EEE ability is supported */
- eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- if (eee_cap <= 0)
- goto eee_exit_err;
-
- cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
- if (!cap)
- goto eee_exit_err;
-
- /* Check which link settings negotiated and verify it in
- * the EEE advertising registers.
- */
- eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- if (eee_lp <= 0)
- goto eee_exit_err;
-
- eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- if (eee_adv <= 0)
- goto eee_exit_err;
-
- mmd_eee_adv_to_linkmode(adv, eee_adv);
- mmd_eee_adv_to_linkmode(lp, eee_lp);
- linkmode_and(common, adv, lp);
-
- if (!phy_check_valid(phydev->speed, phydev->duplex, common))
- goto eee_exit_err;
+ ret = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -EPROTONOSUPPORT;
- if (clk_stop_enable)
- /* Configure the PHY to stop receiving xMII
- * clock while it is signaling LPI.
- */
- phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
- MDIO_PCS_CTRL1_CLKSTOP_EN);
+ if (clk_stop_enable)
+ /* Configure the PHY to stop receiving xMII
+ * clock while it is signaling LPI.
+ */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_CLKSTOP_EN);
- return 0; /* EEE supported */
- }
-eee_exit_err:
- return -EPROTONOSUPPORT;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(phy_init_eee);
@@ -1369,10 +1515,16 @@ EXPORT_SYMBOL(phy_init_eee);
*/
int phy_get_eee_err(struct phy_device *phydev)
{
+ int ret;
+
if (!phydev->drv)
return -EIO;
- return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
+ mutex_lock(&phydev->lock);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
}
EXPORT_SYMBOL(phy_get_eee_err);
@@ -1386,33 +1538,16 @@ EXPORT_SYMBOL(phy_get_eee_err);
*/
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int val;
+ int ret;
if (!phydev->drv)
return -EIO;
- /* Get Supported EEE */
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- if (val < 0)
- return val;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
-
- /* Get advertisement EEE */
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- if (val < 0)
- return val;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
- data->eee_enabled = !!data->advertised;
-
- /* Get LP advertisement EEE */
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- if (val < 0)
- return val;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
-
- data->eee_active = !!(data->advertised & data->lp_advertised);
+ mutex_lock(&phydev->lock);
+ ret = genphy_c45_ethtool_get_eee(phydev, data);
+ mutex_unlock(&phydev->lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);
@@ -1425,43 +1560,16 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
*/
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int cap, old_adv, adv = 0, ret;
+ int ret;
if (!phydev->drv)
return -EIO;
- /* Get Supported EEE */
- cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- if (cap < 0)
- return cap;
-
- old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- if (old_adv < 0)
- return old_adv;
-
- if (data->eee_enabled) {
- adv = !data->advertised ? cap :
- ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
- /* Mask prohibited EEE modes */
- adv &= ~phydev->eee_broken_modes;
- }
-
- if (old_adv != adv) {
- ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
- if (ret < 0)
- return ret;
-
- /* Restart autonegotiation so the new modes get sent to the
- * link partner.
- */
- if (phydev->autoneg == AUTONEG_ENABLE) {
- ret = phy_restart_aneg(phydev);
- if (ret < 0)
- return ret;
- }
- }
+ mutex_lock(&phydev->lock);
+ ret = genphy_c45_ethtool_set_eee(phydev, data);
+ mutex_unlock(&phydev->lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
@@ -1473,8 +1581,15 @@ EXPORT_SYMBOL(phy_ethtool_set_eee);
*/
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
- if (phydev->drv && phydev->drv->set_wol)
- return phydev->drv->set_wol(phydev, wol);
+ int ret;
+
+ if (phydev->drv && phydev->drv->set_wol) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->set_wol(phydev, wol);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+ }
return -EOPNOTSUPP;
}
@@ -1488,8 +1603,11 @@ EXPORT_SYMBOL(phy_ethtool_set_wol);
*/
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
- if (phydev->drv && phydev->drv->get_wol)
+ if (phydev->drv && phydev->drv->get_wol) {
+ mutex_lock(&phydev->lock);
phydev->drv->get_wol(phydev, wol);
+ mutex_unlock(&phydev->lock);
+ }
}
EXPORT_SYMBOL(phy_ethtool_get_wol);
@@ -1526,6 +1644,7 @@ EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
int phy_ethtool_nway_reset(struct net_device *ndev)
{
struct phy_device *phydev = ndev->phydev;
+ int ret;
if (!phydev)
return -ENODEV;
@@ -1533,6 +1652,10 @@ int phy_ethtool_nway_reset(struct net_device *ndev)
if (!phydev->drv)
return -EIO;
- return phy_restart_aneg(phydev);
+ mutex_lock(&phydev->lock);
+ ret = phy_restart_aneg(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);
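The -1 convention runs through the whole PLCA interface: any phy_plca_cfg field left at -1 means "do not touch", which is what lets phy_ethtool_set_plca_cfg() above diff the request against the driver-reported configuration before applying it. A hedged caller sketch (the field values are illustrative):

	struct phy_plca_cfg cfg = {
		.version	= -1,	/* IDVER is read-only, keep -1 */
		.enabled	= 1,	/* enable PLCA ... */
		.node_id	= 0,	/* ... as coordinator node 0 */
		.node_cnt	= 8,
		.to_tmr		= -1,	/* leave the timers alone */
		.burst_cnt	= -1,
		.burst_tmr	= -1,
	};

	err = phy_ethtool_set_plca_cfg(phydev, &cfg, extack);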
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 607aa786c8cb..71becceb8764 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -45,6 +45,9 @@ EXPORT_SYMBOL_GPL(phy_basic_features);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_t1_features);
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features);
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_gbit_features);
@@ -98,6 +101,12 @@ const int phy_basic_t1_features_array[3] = {
};
EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
+const int phy_basic_t1s_p2mp_features_array[2] = {
+ ETHTOOL_LINK_MODE_TP_BIT,
+ ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
+};
+EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features_array);
+
const int phy_gbit_features_array[2] = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
@@ -123,6 +132,18 @@ static const int phy_10gbit_full_features_array[] = {
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
};
+static const int phy_eee_cap1_features_array[] = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+};
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
+
static void features_init(void)
{
/* 10/100 half/full*/
@@ -138,6 +159,11 @@ static void features_init(void)
ARRAY_SIZE(phy_basic_t1_features_array),
phy_basic_t1_features);
+ /* 10 half, P2MP, TP */
+ linkmode_set_bit_array(phy_basic_t1s_p2mp_features_array,
+ ARRAY_SIZE(phy_basic_t1s_p2mp_features_array),
+ phy_basic_t1s_p2mp_features);
+
/* 10/100 half/full + 1000 half/full */
linkmode_set_bit_array(phy_basic_ports_array,
ARRAY_SIZE(phy_basic_ports_array),
@@ -199,6 +225,10 @@ static void features_init(void)
linkmode_set_bit_array(phy_10gbit_fec_features_array,
ARRAY_SIZE(phy_10gbit_fec_features_array),
phy_10gbit_fec_features);
+ linkmode_set_bit_array(phy_eee_cap1_features_array,
+ ARRAY_SIZE(phy_eee_cap1_features_array),
+ phy_eee_cap1_features);
+
}
void phy_device_free(struct phy_device *phydev)
@@ -932,7 +962,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
* probe with C45 to see if we're able to get a valid PHY ID in the C45
* space, if successful, create the C45 PHY device.
*/
- if (!is_c45 && phy_id == 0 && bus->probe_capabilities >= MDIOBUS_C45) {
+ if (!is_c45 && phy_id == 0 && bus->read_c45) {
r = get_phy_c45_ids(bus, addr, &c45_ids);
if (!r)
return phy_device_create(bus, addr, phy_id,
@@ -1487,6 +1517,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->interrupts = PHY_INTERRUPT_DISABLED;
+ /* PHYs can request to use poll mode even though they have an
+ * associated interrupt line. This could be the case if they
+ * detect broken interrupt handling.
+ */
+ if (phydev->dev_flags & PHY_F_NO_IRQ)
+ phydev->irq = PHY_POLL;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -2194,7 +2231,10 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
int err;
- if (genphy_config_eee_advert(phydev))
+ err = genphy_c45_write_eee_adv(phydev, phydev->supported_eee);
+ if (err < 0)
+ return err;
+ else if (err)
changed = true;
err = genphy_setup_master_slave(phydev);
@@ -2616,6 +2656,11 @@ int genphy_read_abilities(struct phy_device *phydev)
phydev->supported, val & ESTATUS_1000_XFULL);
}
+ /* This is optional functionality. If not supported, we may get an error
+ * which should be ignored.
+ */
+ genphy_c45_read_eee_abilities(phydev);
+
return 0;
}
EXPORT_SYMBOL(genphy_read_abilities);
@@ -3068,8 +3113,10 @@ static int phy_probe(struct device *dev)
* a controller will attach, and may modify one
* or both of these values
*/
- if (phydrv->features)
+ if (phydrv->features) {
linkmode_copy(phydev->supported, phydrv->features);
+ genphy_c45_read_eee_abilities(phydev);
+ }
else if (phydrv->get_features)
err = phydrv->get_features(phydev);
else if (phydev->is_c45)
@@ -3262,6 +3309,9 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = {
.get_sset_count = phy_ethtool_get_sset_count,
.get_strings = phy_ethtool_get_strings,
.get_stats = phy_ethtool_get_stats,
+ .get_plca_cfg = phy_ethtool_get_plca_cfg,
+ .set_plca_cfg = phy_ethtool_set_plca_cfg,
+ .get_plca_status = phy_ethtool_get_plca_status,
.start_cable_test = phy_start_cable_test,
.start_cable_test_tdr = phy_start_cable_test_tdr,
};
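PHY_F_NO_IRQ is a pre-existing dev_flags bit; the phy_attach_direct() hunk above is what makes it actually demote the PHY to polling. A driver that knows its interrupt line cannot be trusted could set it from probe, roughly like this (the quirk-detection helper is hypothetical):

static int foo_phy_probe(struct phy_device *phydev)
{
	if (foo_board_has_broken_phy_irq(phydev))	/* hypothetical */
		phydev->dev_flags |= PHY_F_NO_IRQ;

	return 0;
}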
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 4d2519cdb801..1a2f074685fa 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -241,12 +241,16 @@ void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps)
if (caps & MAC_ASYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes);
- if (caps & MAC_10HD)
+ if (caps & MAC_10HD) {
__set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Half_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, linkmodes);
+ }
if (caps & MAC_10FD) {
__set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
__set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Full_BIT, linkmodes);
}
if (caps & MAC_100HD) {
@@ -707,6 +711,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
struct fwnode_handle *fwnode)
{
struct fwnode_handle *fixed_node;
+ bool pause, asym_pause, autoneg;
const struct phy_setting *s;
struct gpio_desc *desc;
u32 speed;
@@ -779,13 +784,23 @@ static int phylink_parse_fixedlink(struct phylink *pl,
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
+ pause = phylink_test(pl->supported, Pause);
+ asym_pause = phylink_test(pl->supported, Asym_Pause);
+ autoneg = phylink_test(pl->supported, Autoneg);
s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
pl->supported, true);
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
- phylink_set(pl->supported, Pause);
- phylink_set(pl->supported, Asym_Pause);
- phylink_set(pl->supported, Autoneg);
+
+ if (pause)
+ phylink_set(pl->supported, Pause);
+
+ if (asym_pause)
+ phylink_set(pl->supported, Asym_Pause);
+
+ if (autoneg)
+ phylink_set(pl->supported, Autoneg);
+
if (s) {
__set_bit(s->bit, pl->supported);
__set_bit(s->bit, pl->link_config.lp_advertising);
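With the phylink_caps_to_linkmodes() change, a MAC that advertises plain half-duplex 10M capability now also picks up the new T1S half-duplex linkmodes automatically. In a MAC driver this is just the usual capability mask, e.g. (fragment, values illustrative):

	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
						MAC_10HD | MAC_10FD;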
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 83b99d95b278..c02cad6478a8 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,6 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/acpi.h>
-#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -144,7 +142,7 @@ static const char *sm_state_to_str(unsigned short sm_state)
return sm_state_strings[sm_state];
}
-static const char *gpio_of_names[] = {
+static const char *gpio_names[] = {
"mod-def0",
"los",
"tx-fault",
@@ -2563,7 +2561,7 @@ static void sfp_check_state(struct sfp *sfp)
for (i = 0; i < GPIO_MAX; i++)
if (changed & BIT(i))
- dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_of_names[i],
+ dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_names[i],
!!(sfp->state & BIT(i)), !!(state & BIT(i)));
state |= sfp->state & (SFP_F_TX_DISABLE | SFP_F_RATE_SELECT);
@@ -2644,10 +2642,8 @@ static void sfp_cleanup(void *data)
static int sfp_i2c_get(struct sfp *sfp)
{
- struct acpi_handle *acpi_handle;
struct fwnode_handle *h;
struct i2c_adapter *i2c;
- struct device_node *np;
int err;
h = fwnode_find_reference(dev_fwnode(sfp->dev), "i2c-bus", 0);
@@ -2656,16 +2652,7 @@ static int sfp_i2c_get(struct sfp *sfp)
return -ENODEV;
}
- if (is_acpi_device_node(h)) {
- acpi_handle = ACPI_HANDLE_FWNODE(h);
- i2c = i2c_acpi_find_adapter_by_handle(acpi_handle);
- } else if ((np = to_of_node(h)) != NULL) {
- i2c = of_find_i2c_adapter_by_node(np);
- } else {
- err = -EINVAL;
- goto put;
- }
-
+ i2c = i2c_get_adapter_by_fwnode(h);
if (!i2c) {
err = -EPROBE_DEFER;
goto put;
@@ -2696,19 +2683,11 @@ static int sfp_probe(struct platform_device *pdev)
if (err < 0)
return err;
- sff = sfp->type = &sfp_data;
-
- if (pdev->dev.of_node) {
- const struct of_device_id *id;
+ sff = device_get_match_data(sfp->dev);
+ if (!sff)
+ sff = &sfp_data;
- id = of_match_node(sfp_of_match, pdev->dev.of_node);
- if (WARN_ON(!id))
- return -EINVAL;
-
- sff = sfp->type = id->data;
- } else if (!has_acpi_companion(&pdev->dev)) {
- return -EINVAL;
- }
+ sfp->type = sff;
err = sfp_i2c_get(sfp);
if (err)
@@ -2717,7 +2696,7 @@ static int sfp_probe(struct platform_device *pdev)
for (i = 0; i < GPIO_MAX; i++)
if (sff->gpios & BIT(i)) {
sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev,
- gpio_of_names[i], gpio_flags[i]);
+ gpio_names[i], gpio_flags[i]);
if (IS_ERR(sfp->gpio[i]))
return PTR_ERR(sfp->gpio[i]);
}
@@ -2772,7 +2751,7 @@ static int sfp_probe(struct platform_device *pdev)
sfp_irq_name = devm_kasprintf(sfp->dev, GFP_KERNEL,
"%s-%s", dev_name(sfp->dev),
- gpio_of_names[i]);
+ gpio_names[i]);
if (!sfp_irq_name)
return -ENOMEM;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index a2be1994b389..8941aa199ea3 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -533,7 +533,7 @@ static int tap_open(struct inode *inode, struct file *file)
q->sock.state = SS_CONNECTED;
q->sock.file = file;
q->sock.ops = &tap_socket_ops;
- sock_init_data(&q->sock, &q->sk);
+ sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
q->sk.sk_write_space = tap_sock_write_space;
q->sk.sk_destruct = tap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
diff --git a/drivers/net/thunderbolt/Kconfig b/drivers/net/thunderbolt/Kconfig
new file mode 100644
index 000000000000..e127848c8cbd
--- /dev/null
+++ b/drivers/net/thunderbolt/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config USB4_NET
+ tristate "Networking over USB4 and Thunderbolt cables"
+ depends on USB4 && INET
+ help
+ Select this if you want to create a network between two computers
+ over USB4 and Thunderbolt cables. The driver supports the Apple
+ ThunderboltIP protocol and allows communication with any host
+ supporting the same protocol, including Windows and macOS.
+
+ To compile this driver as a module, choose M here. The module will
+ be called thunderbolt_net.
diff --git a/drivers/net/thunderbolt/Makefile b/drivers/net/thunderbolt/Makefile
new file mode 100644
index 000000000000..e81c2a4849f0
--- /dev/null
+++ b/drivers/net/thunderbolt/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_USB4_NET) := thunderbolt_net.o
+thunderbolt_net-objs := main.o trace.o
+
+# Tracepoints need to know where to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt/main.c
index 990484776f2d..26ef3706445e 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt/main.c
@@ -23,6 +23,8 @@
#include <net/ip6_checksum.h>
+#include "trace.h"
+
/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY 4500
#define TBNET_LOGIN_TIMEOUT 500
@@ -305,6 +307,8 @@ static int tbnet_logout_request(struct tbnet *net)
static void start_login(struct tbnet *net)
{
+ netdev_dbg(net->dev, "login started\n");
+
mutex_lock(&net->connection_lock);
net->login_sent = false;
net->login_received = false;
@@ -318,6 +322,8 @@ static void stop_login(struct tbnet *net)
{
cancel_delayed_work_sync(&net->login_work);
cancel_work_sync(&net->connected_work);
+
+ netdev_dbg(net->dev, "login stopped\n");
}
static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
@@ -349,6 +355,8 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
size = TBNET_RX_PAGE_SIZE;
}
+ trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
+
if (tf->frame.buffer_phy)
dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
dir);
@@ -374,6 +382,8 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
int ret, retries = TBNET_LOGOUT_RETRIES;
while (send_logout && retries-- > 0) {
+ netdev_dbg(net->dev, "sending logout request %u\n",
+ retries);
ret = tbnet_logout_request(net);
if (ret != -ETIMEDOUT)
break;
@@ -400,6 +410,8 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
net->login_sent = false;
net->login_received = false;
+ netdev_dbg(net->dev, "network traffic stopped\n");
+
mutex_unlock(&net->connection_lock);
}
@@ -431,12 +443,15 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
switch (pkg->hdr.type) {
case TBIP_LOGIN:
+ netdev_dbg(net->dev, "remote login request received\n");
if (!netif_running(net->dev))
break;
ret = tbnet_login_response(net, route, sequence,
pkg->hdr.command_id);
if (!ret) {
+ netdev_dbg(net->dev, "remote login response sent\n");
+
mutex_lock(&net->connection_lock);
net->login_received = true;
net->remote_transmit_path = pkg->transmit_path;
@@ -458,9 +473,12 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
break;
case TBIP_LOGOUT:
+ netdev_dbg(net->dev, "remote logout request received\n");
ret = tbnet_logout_response(net, route, sequence, command_id);
- if (!ret)
+ if (!ret) {
+ netdev_dbg(net->dev, "remote logout response sent\n");
queue_work(system_long_wq, &net->disconnect_work);
+ }
break;
default:
@@ -512,6 +530,9 @@ static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
tf->frame.buffer_phy = dma_addr;
tf->dev = net->dev;
+ trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
+ DMA_FROM_DEVICE);
+
tb_ring_rx(ring->ring, &tf->frame);
ring->prod++;
@@ -588,6 +609,8 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
tf->frame.callback = tbnet_tx_callback;
tf->frame.sof = TBIP_PDF_FRAME_START;
tf->frame.eof = TBIP_PDF_FRAME_END;
+
+ trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
}
ring->cons = 0;
@@ -612,6 +635,8 @@ static void tbnet_connected_work(struct work_struct *work)
if (!connected)
return;
+ netdev_dbg(net->dev, "login successful, enabling paths\n");
+
ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
if (ret != net->remote_transmit_path) {
netdev_err(net->dev, "failed to allocate Rx HopID\n");
@@ -647,6 +672,8 @@ static void tbnet_connected_work(struct work_struct *work)
netif_carrier_on(net->dev);
netif_start_queue(net->dev);
+
+ netdev_dbg(net->dev, "network traffic started\n");
return;
err_free_tx_buffers:
@@ -668,8 +695,13 @@ static void tbnet_login_work(struct work_struct *work)
if (netif_carrier_ok(net->dev))
return;
+ netdev_dbg(net->dev, "sending login request, retries=%u\n",
+ net->login_retries);
+
ret = tbnet_login_request(net, net->login_retries % 4);
if (ret) {
+ netdev_dbg(net->dev, "sending login request failed, ret=%d\n",
+ ret);
if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
queue_delayed_work(system_long_wq, &net->login_work,
delay);
@@ -677,6 +709,8 @@ static void tbnet_login_work(struct work_struct *work)
netdev_info(net->dev, "ThunderboltIP login timed out\n");
}
} else {
+ netdev_dbg(net->dev, "received login reply\n");
+
net->login_retries = 0;
mutex_lock(&net->connection_lock);
@@ -807,12 +841,16 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
hdr = page_address(page);
if (!tbnet_check_frame(net, tf, hdr)) {
+ trace_tbnet_invalid_rx_ip_frame(hdr->frame_size,
+ hdr->frame_id, hdr->frame_index, hdr->frame_count);
__free_pages(page, TBNET_RX_PAGE_ORDER);
dev_kfree_skb_any(net->skb);
net->skb = NULL;
continue;
}
+ trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
+ hdr->frame_index, hdr->frame_count);
frame_size = le32_to_cpu(hdr->frame_size);
skb = net->skb;
@@ -846,6 +884,7 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
if (last) {
skb->protocol = eth_type_trans(skb, net->dev);
+ trace_tbnet_rx_skb(skb);
napi_gro_receive(&net->napi, skb);
net->skb = NULL;
}
@@ -965,6 +1004,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
hdr->frame_count = cpu_to_le32(frame_count);
+ trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
+ hdr->frame_index, hdr->frame_count);
dma_sync_single_for_device(dma_dev,
frames[i]->frame.buffer_phy,
tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
@@ -1029,6 +1070,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
len = le32_to_cpu(hdr->frame_size) - offset;
wsum = csum_partial(dest, len, wsum);
hdr->frame_count = cpu_to_le32(frame_count);
+ trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
+ hdr->frame_index, hdr->frame_count);
offset = 0;
}
@@ -1071,6 +1114,8 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
bool unmap = false;
void *dest;
+ trace_tbnet_tx_skb(skb);
+
nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
if (tbnet_available_buffers(&net->tx_ring) < nframes) {
netif_stop_queue(net->dev);
@@ -1177,6 +1222,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
+ trace_tbnet_consume_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/thunderbolt/trace.c b/drivers/net/thunderbolt/trace.c
new file mode 100644
index 000000000000..1b1499520a44
--- /dev/null
+++ b/drivers/net/thunderbolt/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoints for Thunderbolt/USB4 networking driver
+ *
+ * Copyright (C) 2023, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/thunderbolt/trace.h b/drivers/net/thunderbolt/trace.h
new file mode 100644
index 000000000000..9626eadaebb9
--- /dev/null
+++ b/drivers/net/thunderbolt/trace.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoints for Thunderbolt/USB4 networking driver
+ *
+ * Copyright (C) 2023, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt_net
+
+#if !defined(__TRACE_THUNDERBOLT_NET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_THUNDERBOLT_NET_H
+
+#include <linux/dma-direction.h>
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+
+#define DMA_DATA_DIRECTION_NAMES \
+ { DMA_BIDIRECTIONAL, "DMA_BIDIRECTIONAL" }, \
+ { DMA_TO_DEVICE, "DMA_TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "DMA_FROM_DEVICE" }, \
+ { DMA_NONE, "DMA_NONE" }
+
+DECLARE_EVENT_CLASS(tbnet_frame,
+ TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+ enum dma_data_direction dir),
+ TP_ARGS(index, page, phys, dir),
+ TP_STRUCT__entry(
+ __field(unsigned int, index)
+ __field(const void *, page)
+ __field(dma_addr_t, phys)
+ __field(enum dma_data_direction, dir)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->page = page;
+ __entry->phys = phys;
+ __entry->dir = dir;
+ ),
+ TP_printk("index=%u page=%p phys=%pad dir=%s",
+ __entry->index, __entry->page, &__entry->phys,
+ __print_symbolic(__entry->dir, DMA_DATA_DIRECTION_NAMES))
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_alloc_rx_frame,
+ TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+ enum dma_data_direction dir),
+ TP_ARGS(index, page, phys, dir)
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_alloc_tx_frame,
+ TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+ enum dma_data_direction dir),
+ TP_ARGS(index, page, phys, dir)
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_free_frame,
+ TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+ enum dma_data_direction dir),
+ TP_ARGS(index, page, phys, dir)
+);
+
+DECLARE_EVENT_CLASS(tbnet_ip_frame,
+ TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+ TP_ARGS(size, id, index, count),
+ TP_STRUCT__entry(
+ __field(u32, size)
+ __field(u16, id)
+ __field(u16, index)
+ __field(u32, count)
+ ),
+ TP_fast_assign(
+ __entry->size = le32_to_cpu(size);
+ __entry->id = le16_to_cpu(id);
+ __entry->index = le16_to_cpu(index);
+ __entry->count = le32_to_cpu(count);
+ ),
+ TP_printk("id=%u size=%u index=%u count=%u",
+ __entry->id, __entry->size, __entry->index, __entry->count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_rx_ip_frame,
+ TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+ TP_ARGS(size, id, index, count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_invalid_rx_ip_frame,
+ TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+ TP_ARGS(size, id, index, count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_tx_ip_frame,
+ TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+ TP_ARGS(size, id, index, count)
+);
+
+DECLARE_EVENT_CLASS(tbnet_skb,
+ TP_PROTO(const struct sk_buff *skb),
+ TP_ARGS(skb),
+ TP_STRUCT__entry(
+ __field(const void *, addr)
+ __field(unsigned int, len)
+ __field(unsigned int, data_len)
+ __field(unsigned int, nr_frags)
+ ),
+ TP_fast_assign(
+ __entry->addr = skb;
+ __entry->len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ ),
+ TP_printk("skb=%p len=%u data_len=%u nr_frags=%u",
+ __entry->addr, __entry->len, __entry->data_len,
+ __entry->nr_frags)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_rx_skb,
+ TP_PROTO(const struct sk_buff *skb),
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_tx_skb,
+ TP_PROTO(const struct sk_buff *skb),
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_consume_skb,
+ TP_PROTO(const struct sk_buff *skb),
+ TP_ARGS(skb)
+);
+
+#endif /* _TRACE_THUNDERBOLT_NET_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
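Usage note: once built in, these events should appear under /sys/kernel/tracing/events/thunderbolt_net/ like any other tracefs group, so they can be enabled per event or wholesale without further driver code.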
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a7d17c680f4a..ad653b32b2f0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1401,6 +1401,11 @@ static void tun_net_initialize(struct net_device *dev)
eth_hw_addr_random(dev);
+ /* Currently tun does not support XDP, only tap does. */
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
break;
}
@@ -3448,7 +3453,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
- sock_init_data(&tfile->socket, &tfile->sk);
+ sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;
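Userspace can read these bits back without attaching a program; libbpf surfaces them through bpf_xdp_query() (the feature_flags field is the library counterpart of this kernel-side work, so treat the minimum libbpf version as an assumption). A small sketch:

#include <bpf/libbpf.h>
#include <net/if.h>
#include <stdio.h>

int main(void)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int ifindex = if_nametoindex("tap0");	/* illustrative ifname */

	if (ifindex && !bpf_xdp_query(ifindex, 0, &opts))
		printf("xdp features: 0x%llx\n",
		       (unsigned long long)opts.feature_flags);
	return 0;
}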
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c140edb4b648..80849d115e5d 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -747,13 +747,6 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
@@ -761,71 +754,6 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* Samsung USB Ethernet Adapters */
-{
- USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-#if IS_ENABLED(CONFIG_USB_RTL8152)
-/* Linksys USB3GIGV1 Ethernet Adapter */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-#endif
-
-/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* ThinkPad Thunderbolt 3 Dock (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3069, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* ThinkPad Thunderbolt 3 Dock Gen 2 (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3082, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Lenovo USB C to Ethernet Adapter (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x720c, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Lenovo USB-C Travel Hub (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7214, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
@@ -833,48 +761,6 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
/* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */
{
USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 23da1d9dafd1..decb5ba56a25 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -8232,43 +8232,6 @@ static bool rtl_check_vendor_ok(struct usb_interface *intf)
return true;
}
-static bool rtl_vendor_mode(struct usb_interface *intf)
-{
- struct usb_host_interface *alt = intf->cur_altsetting;
- struct usb_device *udev;
- struct usb_host_config *c;
- int i, num_configs;
-
- if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
- return rtl_check_vendor_ok(intf);
-
- /* The vendor mode is not always config #1, so to find it out. */
- udev = interface_to_usbdev(intf);
- c = udev->config;
- num_configs = udev->descriptor.bNumConfigurations;
- if (num_configs < 2)
- return false;
-
- for (i = 0; i < num_configs; (i++, c++)) {
- struct usb_interface_descriptor *desc = NULL;
-
- if (c->desc.bNumInterfaces > 0)
- desc = &c->intf_cache[0]->altsetting->desc;
- else
- continue;
-
- if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
- usb_driver_set_configuration(udev, c->desc.bConfigurationValue);
- break;
- }
- }
-
- if (i == num_configs)
- dev_err(&intf->dev, "Unexpected Device\n");
-
- return false;
-}
-
static int rtl8152_pre_reset(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
@@ -9500,9 +9463,8 @@ static int rtl_fw_init(struct r8152 *tp)
return 0;
}
-u8 rtl8152_get_version(struct usb_interface *intf)
+static u8 __rtl_get_hw_ver(struct usb_device *udev)
{
- struct usb_device *udev = interface_to_usbdev(intf);
u32 ocp_data = 0;
__le32 *tmp;
u8 version;
@@ -9571,10 +9533,19 @@ u8 rtl8152_get_version(struct usb_interface *intf)
break;
default:
version = RTL_VER_UNKNOWN;
- dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data);
+ dev_info(&udev->dev, "Unknown version 0x%04x\n", ocp_data);
break;
}
+ return version;
+}
+
+u8 rtl8152_get_version(struct usb_interface *intf)
+{
+ u8 version;
+
+ version = __rtl_get_hw_ver(interface_to_usbdev(intf));
+
dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);
return version;
@@ -9610,15 +9581,19 @@ static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- u8 version = rtl8152_get_version(intf);
struct r8152 *tp;
struct net_device *netdev;
+ u8 version;
int ret;
- if (version == RTL_VER_UNKNOWN)
+ if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
return -ENODEV;
- if (!rtl_vendor_mode(intf))
+ if (!rtl_check_vendor_ok(intf))
+ return -ENODEV;
+
+ version = rtl8152_get_version(intf);
+ if (version == RTL_VER_UNKNOWN)
return -ENODEV;
usb_reset_device(udev);
@@ -9814,43 +9789,35 @@ static void rtl8152_disconnect(struct usb_interface *intf)
}
}
-#define REALTEK_USB_DEVICE(vend, prod) { \
- USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC), \
-}, \
-{ \
- USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), \
-}
-
/* table of devices that work with this driver */
static const struct usb_device_id rtl8152_table[] = {
/* Realtek */
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156),
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8050) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8053) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8152) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8153) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8155) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8156) },
/* Microsoft */
- REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
- REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
- REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
- REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
- REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387),
- REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041),
- REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff),
- REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601),
+ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab) },
+ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6) },
+ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) },
+ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) },
+ { USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) },
+ { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
+ { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
+ { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
{}
};
@@ -9870,7 +9837,67 @@ static struct usb_driver rtl8152_driver = {
.disable_hub_initiated_lpm = 1,
};
-module_usb_driver(rtl8152_driver);
+static int rtl8152_cfgselector_probe(struct usb_device *udev)
+{
+ struct usb_host_config *c;
+ int i, num_configs;
+
+ /* Switch the device to vendor mode, if and only if the vendor mode
+ * driver supports it.
+ */
+ if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
+ return 0;
+
+ /* The vendor mode is not always config #1, so we need to find it out. */
+ c = udev->config;
+ num_configs = udev->descriptor.bNumConfigurations;
+ for (i = 0; i < num_configs; (i++, c++)) {
+ struct usb_interface_descriptor *desc = NULL;
+
+ if (!c->desc.bNumInterfaces)
+ continue;
+ desc = &c->intf_cache[0]->altsetting->desc;
+ if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC)
+ break;
+ }
+
+ if (i == num_configs)
+ return -ENODEV;
+
+ if (usb_set_configuration(udev, c->desc.bConfigurationValue)) {
+ dev_err(&udev->dev, "Failed to set configuration %d\n",
+ c->desc.bConfigurationValue);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct usb_device_driver rtl8152_cfgselector_driver = {
+ .name = MODULENAME "-cfgselector",
+ .probe = rtl8152_cfgselector_probe,
+ .id_table = rtl8152_table,
+ .generic_subclass = 1,
+};
+
+static int __init rtl8152_driver_init(void)
+{
+ int ret;
+
+ ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
+ if (ret)
+ return ret;
+ return usb_register(&rtl8152_driver);
+}
+
+static void __exit rtl8152_driver_exit(void)
+{
+ usb_deregister(&rtl8152_driver);
+ usb_deregister_device_driver(&rtl8152_cfgselector_driver);
+}
+
+module_init(rtl8152_driver_init);
+module_exit(rtl8152_driver_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 64a9a80b2309..283ffddda821 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -555,32 +555,30 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
/*-------------------------------------------------------------------------*/
-static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
+static inline int rx_process(struct usbnet *dev, struct sk_buff *skb)
{
if (dev->driver_info->rx_fixup &&
!dev->driver_info->rx_fixup (dev, skb)) {
/* With RX_ASSEMBLE, rx_fixup() must update counters */
if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
dev->net->stats.rx_errors++;
- goto done;
+ return -EPROTO;
}
// else network stack removes extra byte if we forced a short packet
/* all data was already cloned from skb inside the driver */
if (dev->driver_info->flags & FLAG_MULTI_PACKET)
- goto done;
+ return -EALREADY;
if (skb->len < ETH_HLEN) {
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
- } else {
- usbnet_skb_return(dev, skb);
- return;
+ return -EPROTO;
}
-done:
- skb_queue_tail(&dev->done, skb);
+ usbnet_skb_return(dev, skb);
+ return 0;
}
/*-------------------------------------------------------------------------*/
@@ -1514,6 +1512,14 @@ err:
return ret;
}
+static inline void usb_free_skb(struct sk_buff *skb)
+{
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+
+ usb_free_urb(entry->urb);
+ dev_kfree_skb(skb);
+}
+
/*-------------------------------------------------------------------------*/
// tasklet (work deferred from completions, in_irq) or timer
@@ -1528,15 +1534,14 @@ static void usbnet_bh (struct timer_list *t)
entry = (struct skb_data *) skb->cb;
switch (entry->state) {
case rx_done:
- entry->state = rx_cleanup;
- rx_process (dev, skb);
+ if (rx_process(dev, skb))
+ usb_free_skb(skb);
continue;
case tx_done:
kfree(entry->urb->sg);
fallthrough;
case rx_cleanup:
- usb_free_urb (entry->urb);
- dev_kfree_skb (skb);
+ usb_free_skb(skb);
continue;
default:
netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index dfc7d87fad59..1bb54de7124d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -116,6 +116,11 @@ static struct {
{ "peer_ifindex" },
};
+struct veth_xdp_buff {
+ struct xdp_buff xdp;
+ struct sk_buff *skb;
+};
+
static int veth_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -592,23 +597,25 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (likely(xdp_prog)) {
- struct xdp_buff xdp;
+ struct veth_xdp_buff vxbuf;
+ struct xdp_buff *xdp = &vxbuf.xdp;
u32 act;
- xdp_convert_frame_to_buff(frame, &xdp);
- xdp.rxq = &rq->xdp_rxq;
+ xdp_convert_frame_to_buff(frame, xdp);
+ xdp->rxq = &rq->xdp_rxq;
+ vxbuf.skb = NULL;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- if (xdp_update_frame_from_buff(&xdp, frame))
+ if (xdp_update_frame_from_buff(xdp, frame))
goto err_xdp;
break;
case XDP_TX:
orig_frame = *frame;
- xdp.rxq->mem = frame->mem;
- if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
+ xdp->rxq->mem = frame->mem;
+ if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
stats->rx_drops++;
@@ -619,8 +626,8 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp.rxq->mem = frame->mem;
- if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ xdp->rxq->mem = frame->mem;
+ if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
frame = &orig_frame;
stats->rx_drops++;
goto err_xdp;
@@ -801,7 +808,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
{
void *orig_data, *orig_data_end;
struct bpf_prog *xdp_prog;
- struct xdp_buff xdp;
+ struct veth_xdp_buff vxbuf;
+ struct xdp_buff *xdp = &vxbuf.xdp;
u32 act, metalen;
int off;
@@ -815,22 +823,23 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
}
__skb_push(skb, skb->data - skb_mac_header(skb));
- if (veth_convert_skb_to_xdp_buff(rq, &xdp, &skb))
+ if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
goto drop;
+ vxbuf.skb = skb;
- orig_data = xdp.data;
- orig_data_end = xdp.data_end;
+ orig_data = xdp->data;
+ orig_data_end = xdp->data_end;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- veth_xdp_get(&xdp);
+ veth_xdp_get(xdp);
consume_skb(skb);
- xdp.rxq->mem = rq->xdp_mem;
- if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
+ xdp->rxq->mem = rq->xdp_mem;
+ if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
stats->rx_drops++;
goto err_xdp;
@@ -839,10 +848,10 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
- veth_xdp_get(&xdp);
+ veth_xdp_get(xdp);
consume_skb(skb);
- xdp.rxq->mem = rq->xdp_mem;
- if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ xdp->rxq->mem = rq->xdp_mem;
+ if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
stats->rx_drops++;
goto err_xdp;
}
@@ -862,7 +871,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
/* check if bpf_xdp_adjust_head was used */
- off = orig_data - xdp.data;
+ off = orig_data - xdp->data;
if (off > 0)
__skb_push(skb, off);
else if (off < 0)
@@ -871,21 +880,21 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
skb_reset_mac_header(skb);
/* check if bpf_xdp_adjust_tail was used */
- off = xdp.data_end - orig_data_end;
+ off = xdp->data_end - orig_data_end;
if (off != 0)
__skb_put(skb, off); /* positive on grow, negative on shrink */
/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
* (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
*/
- if (xdp_buff_has_frags(&xdp))
+ if (xdp_buff_has_frags(xdp))
skb->data_len = skb_shinfo(skb)->xdp_frags_size;
else
skb->data_len = 0;
skb->protocol = eth_type_trans(skb, rq->dev);
- metalen = xdp.data - xdp.data_meta;
+ metalen = xdp->data - xdp->data_meta;
if (metalen)
skb_metadata_set(skb, metalen);
out:
@@ -898,7 +907,7 @@ xdp_drop:
return NULL;
err_xdp:
rcu_read_unlock();
- xdp_return_buff(&xdp);
+ xdp_return_buff(xdp);
xdp_xmit:
return NULL;
}
@@ -1596,6 +1605,28 @@ static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ struct veth_xdp_buff *_ctx = (void *)ctx;
+
+ if (!_ctx->skb)
+ return -EOPNOTSUPP;
+
+ *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
+ return 0;
+}
+
+static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ struct veth_xdp_buff *_ctx = (void *)ctx;
+
+ if (!_ctx->skb)
+ return -EOPNOTSUPP;
+
+ *hash = skb_get_hash(_ctx->skb);
+ return 0;
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -1617,6 +1648,11 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_get_peer_dev = veth_peer_dev,
};
+static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
+ .xmo_rx_timestamp = veth_xdp_rx_timestamp,
+ .xmo_rx_hash = veth_xdp_rx_hash,
+};
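The (void *) cast from the xdp_md context back to veth_xdp_buff in the two kfuncs above is only sound because the xdp_buff is the first member of the wrapper, so both share one address. A standalone sketch of that layout assumption (illustrative types, not the kernel's):

#include <assert.h>
#include <stddef.h>

struct xdp_buff_like { void *data, *data_end; };

struct veth_xdp_buff_like {
	struct xdp_buff_like xdp;	/* must stay the first member */
	void *skb;
};

int main(void)
{
	struct veth_xdp_buff_like vxbuf = { .skb = &vxbuf };
	void *ctx = &vxbuf.xdp;			/* what a metadata kfunc receives */
	struct veth_xdp_buff_like *back = ctx;	/* valid only at offset 0 */

	_Static_assert(offsetof(struct veth_xdp_buff_like, xdp) == 0,
		       "xdp must be the first member");
	assert(back->skb == (void *)&vxbuf);
	return 0;
}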
+
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
@@ -1633,6 +1669,7 @@ static void veth_setup(struct net_device *dev)
dev->priv_flags |= IFF_PHONY_HEADROOM;
dev->netdev_ops = &veth_netdev_ops;
+ dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
@@ -1649,6 +1686,10 @@ static void veth_setup(struct net_device *dev)
dev->hw_enc_features = VETH_FEATURES;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 61e33e4dd0cd..fb5e68ed3ec2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -134,7 +134,7 @@ struct send_queue {
struct scatterlist sg[MAX_SKB_FRAGS + 2];
/* Name of the send queue: output.$index */
- char name[40];
+ char name[16];
struct virtnet_sq_stats stats;
@@ -171,7 +171,7 @@ struct receive_queue {
unsigned int min_buf_len;
/* Name of this receive queue: input.$index */
- char name[40];
+ char name[16];
struct xdp_rxq_info xdp_rxq;
};
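Sixteen bytes is still enough for the worst-case queue name, since a virtio queue index fits in a u16; a quick plain-C check of the bound:

#include <stdio.h>

int main(void)
{
	char name[16];
	/* worst case: "output." plus the largest possible u16 index */
	int n = snprintf(name, sizeof(name), "output.%u", 65535);

	printf("\"%s\" needs %d bytes including NUL\n", name, n + 1); /* 13 <= 16 */
	return 0;
}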
@@ -446,9 +446,7 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
- unsigned int len, unsigned int truesize,
- bool hdr_valid, unsigned int metasize,
- unsigned int headroom)
+ unsigned int len, unsigned int truesize)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -466,21 +464,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- /* If headroom is not 0, there is an offset between the beginning of the
- * data and the allocated space, otherwise the data and the allocated
- * space are aligned.
- *
- * Buffers with headroom use PAGE_SIZE as alloc size, see
- * add_recvbuf_mergeable() + get_mergeable_buf_len()
- */
- truesize = headroom ? PAGE_SIZE : truesize;
- tailroom = truesize - headroom;
- buf = p - headroom;
-
+ buf = p;
len -= hdr_len;
offset += hdr_padded_len;
p += hdr_padded_len;
- tailroom -= hdr_padded_len + len;
+ tailroom = truesize - hdr_padded_len - len;
shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -510,7 +498,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
if (len <= skb_tailroom(skb))
copy = len;
else
- copy = ETH_HLEN + metasize;
+ copy = ETH_HLEN;
skb_put_data(skb, p, copy);
len -= copy;
@@ -549,19 +537,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
give_pages(rq, page);
ok:
- /* hdr_valid means no XDP, so we can copy the vnet header */
- if (hdr_valid) {
- hdr = skb_vnet_hdr(skb);
- memcpy(hdr, hdr_p, hdr_len);
- }
+ hdr = skb_vnet_hdr(skb);
+ memcpy(hdr, hdr_p, hdr_len);
if (page_to_free)
put_page(page_to_free);
- if (metasize) {
- __skb_pull(skb, metasize);
- skb_metadata_set(skb, metasize);
- }
-
return skb;
}
@@ -570,22 +550,43 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct xdp_frame *xdpf)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
- int err;
+ struct skb_shared_info *shinfo;
+ u8 nr_frags = 0;
+ int err, i;
if (unlikely(xdpf->headroom < vi->hdr_len))
return -EOVERFLOW;
- /* Make room for virtqueue hdr (also change xdpf->headroom?) */
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ shinfo = xdp_get_shared_info_from_frame(xdpf);
+ nr_frags = shinfo->nr_frags;
+ }
+
+ /* The wrapper virtnet_xdp_xmit() frees the pending old buffers
+ * and needs to locate skb_shared_info via xdp_get_frame_len()
+ * and xdp_return_frame(), both of which depend on xdpf->data
+ * and xdpf->headroom. Therefore, the headroom must be updated
+ * here, in sync with the data pointer below.
+ */
+ xdpf->headroom -= vi->hdr_len;
xdpf->data -= vi->hdr_len;
/* Zero header and leave csum up to XDP layers */
hdr = xdpf->data;
memset(hdr, 0, vi->hdr_len);
xdpf->len += vi->hdr_len;
- sg_init_one(sq->sg, xdpf->data, xdpf->len);
+ sg_init_table(sq->sg, nr_frags + 1);
+ sg_set_buf(sq->sg, xdpf->data, xdpf->len);
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &shinfo->frags[i];
- err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
- GFP_ATOMIC);
+ sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
+ skb_frag_size(frag), skb_frag_off(frag));
+ }
+
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
+ xdp_to_ptr(xdpf), GFP_ATOMIC);
if (unlikely(err))
return -ENOSPC; /* Caller handle free/refcnt */
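Counting bytes with xdp_get_frame_len() instead of frame->len matters once a frame can carry fragments: the total is the linear length plus every frag. A userspace sketch of that sum (illustrative structs, not the kernel's):

#include <stdio.h>

struct frag { unsigned int size; };

struct frame {
	unsigned int len;		/* linear part only */
	unsigned int nr_frags;
	struct frag frags[4];
};

static unsigned int frame_total_len(const struct frame *f)
{
	unsigned int total = f->len;

	for (unsigned int i = 0; i < f->nr_frags; i++)
		total += f->frags[i].size;
	return total;
}

int main(void)
{
	struct frame f = { .len = 1500, .nr_frags = 2,
			   .frags = { { 4096 }, { 2048 } } };

	printf("%u\n", frame_total_len(&f));	/* 7644, not 1500 */
	return 0;
}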
@@ -665,7 +666,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
if (likely(is_xdp_frame(ptr))) {
struct xdp_frame *frame = ptr_to_xdp(ptr);
- bytes += frame->len;
+ bytes += xdp_get_frame_len(frame);
xdp_return_frame(frame);
} else {
struct sk_buff *skb = ptr;
@@ -722,7 +723,7 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
* have enough headroom.
*/
static struct page *xdp_linearize_page(struct receive_queue *rq,
- u16 *num_buf,
+ int *num_buf,
struct page *p,
int offset,
int page_off,
@@ -822,7 +823,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
int offset = buf - page_address(page) + header_offset;
unsigned int tlen = len + vi->hdr_len;
- u16 num_buf = 1;
+ int num_buf = 1;
xdp_headroom = virtnet_get_headroom(vi);
header_offset = VIRTNET_RX_PAD + xdp_headroom;
@@ -924,7 +925,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
{
struct page *page = buf;
struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -938,6 +939,143 @@ err:
return NULL;
}
+/* Why not use xdp_build_skb_from_frame()?
+ * The XDP core assumes that xdp frags are PAGE_SIZE in length, while
+ * virtio-net differs from its requirements on two points:
+ * 1. The size of the prefilled buffer is not fixed before xdp is set.
+ * 2. xdp_build_skb_from_frame() performs checks we don't need, such as
+ * eth_type_trans() (which virtio-net does in receive_buf()).
+ */
+static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
+ struct virtnet_info *vi,
+ struct xdp_buff *xdp,
+ unsigned int xdp_frags_truesz)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ unsigned int headroom, data_len;
+ struct sk_buff *skb;
+ int metasize;
+ u8 nr_frags;
+
+ if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
+ pr_debug("Error building skb as missing reserved tailroom for xdp\n");
+ return NULL;
+ }
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ nr_frags = sinfo->nr_frags;
+
+ skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
+ if (unlikely(!skb))
+ return NULL;
+
+ headroom = xdp->data - xdp->data_hard_start;
+ data_len = xdp->data_end - xdp->data;
+ skb_reserve(skb, headroom);
+ __skb_put(skb, data_len);
+
+ metasize = xdp->data - xdp->data_meta;
+ metasize = metasize > 0 ? metasize : 0;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ xdp_frags_truesz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+
+ return skb;
+}
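build_skb_from_xdp_buff() derives every skb parameter from the xdp_buff pointers: headroom is data - data_hard_start, the linear length is data_end - data, and metadata sits immediately before data. A small pointer-arithmetic sketch under those definitions:

#include <assert.h>

struct xb {
	char *data_hard_start, *data, *data_end, *data_meta;
};

int main(void)
{
	char page[4096];
	struct xb x = {
		.data_hard_start = page,
		.data = page + 256,		/* headroom reserved for XDP */
		.data_end = page + 256 + 1400,	/* 1400 bytes of payload */
		.data_meta = page + 248,	/* 8 bytes of metadata */
	};
	long headroom = x.data - x.data_hard_start;
	long data_len = x.data_end - x.data;
	long metasize = x.data - x.data_meta;

	assert(headroom == 256 && data_len == 1400 && metasize == 8);
	return 0;
}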
+
+/* TODO: build xdp in big mode */
+static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
+ struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct xdp_buff *xdp,
+ void *buf,
+ unsigned int len,
+ unsigned int frame_sz,
+ int *num_buf,
+ unsigned int *xdp_frags_truesize,
+ struct virtnet_rq_stats *stats)
+{
+ struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+ unsigned int headroom, tailroom, room;
+ unsigned int truesize, cur_frag_size;
+ struct skb_shared_info *shinfo;
+ unsigned int xdp_frags_truesz = 0;
+ struct page *page;
+ skb_frag_t *frag;
+ int offset;
+ void *ctx;
+
+ xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
+ VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
+
+ if (!*num_buf)
+ return 0;
+
+ if (*num_buf > 1) {
+ /* To build a multi-buffer xdp_buff, the flags of the
+ * xdp_buff must have the XDP_FLAGS_HAS_FRAG bit set.
+ */
+ if (!xdp_buff_has_frags(xdp))
+ xdp_buff_set_frags_flag(xdp);
+
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+ shinfo->nr_frags = 0;
+ shinfo->xdp_frags_size = 0;
+ }
+
+ if (*num_buf > MAX_SKB_FRAGS + 1)
+ return -EINVAL;
+
+ while (--*num_buf > 0) {
+ buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers out of %d missing\n",
+ dev->name, *num_buf,
+ virtio16_to_cpu(vi->vdev, hdr->num_buffers));
+ dev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+
+ stats->bytes += len;
+ page = virt_to_head_page(buf);
+ offset = buf - page_address(page);
+
+ truesize = mergeable_ctx_to_truesize(ctx);
+ headroom = mergeable_ctx_to_headroom(ctx);
+ tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+ room = SKB_DATA_ALIGN(headroom + tailroom);
+
+ cur_frag_size = truesize;
+ xdp_frags_truesz += cur_frag_size;
+ if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
+ put_page(page);
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)(truesize - room));
+ dev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+
+ frag = &shinfo->frags[shinfo->nr_frags++];
+ __skb_frag_set_page(frag, page);
+ skb_frag_off_set(frag, offset);
+ skb_frag_size_set(frag, len);
+ if (page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ shinfo->xdp_frags_size += len;
+ }
+
+ *xdp_frags_truesize = xdp_frags_truesz;
+ return 0;
+}
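The per-fragment validation compares len against truesize minus "room", the aligned headroom-plus-tailroom reserve encoded in the buffer context. A sketch of that bound, assuming 64-byte SKB_DATA_ALIGN and an illustrative skb_shared_info size:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int truesize = 4096;
	unsigned int headroom = 256;	/* VIRTIO_XDP_HEADROOM, assumed */
	unsigned int tailroom = 320;	/* sizeof(struct skb_shared_info), assumed */
	unsigned int room = ALIGN_UP(headroom + tailroom, 64);

	printf("max frag len = %u\n", truesize - room);	/* 3520 here */
	return 0;
}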
+
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
@@ -948,23 +1086,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
- u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+ int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
struct sk_buff *head_skb, *curr_skb;
struct bpf_prog *xdp_prog;
unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- unsigned int metasize = 0;
- unsigned int frame_sz;
+ unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+ unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
+ unsigned int frame_sz, xdp_room;
int err;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
- if (unlikely(len > truesize)) {
+ if (unlikely(len > truesize - room)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)ctx);
+ dev->name, len, (unsigned long)(truesize - room));
dev->stats.rx_length_errors++;
goto err_skb;
}
@@ -977,11 +1116,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
+ unsigned int xdp_frags_truesz = 0;
+ struct skb_shared_info *shinfo;
struct xdp_frame *xdpf;
struct page *xdp_page;
struct xdp_buff xdp;
void *data;
u32 act;
+ int i;
/* Transient failure which in theory could occur if
* in-flight packets from before XDP was enabled reach
@@ -990,19 +1132,23 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
- /* Buffers with headroom use PAGE_SIZE as alloc size,
- * see add_recvbuf_mergeable() + get_mergeable_buf_len()
+ /* The XDP core now assumes frag size is PAGE_SIZE, but buffers
+ * with headroom leave a hole in truesize, which can make
+ * their length exceed PAGE_SIZE. So the hole mechanism is
+ * disabled for xdp. See add_recvbuf_mergeable().
+ */
- frame_sz = headroom ? PAGE_SIZE : truesize;
-
- /* This happens when rx buffer size is underestimated
- * or headroom is not enough because of the buffer
- * was refilled before XDP is set. This should only
- * happen for the first several packets, so we don't
- * care much about its performance.
+ frame_sz = truesize;
+
+ /* This happens when headroom is not enough because
+ * the buffer was prefilled before XDP was set.
+ * It should only happen for the first several packets.
+ * In fact, vq reset could be used here to clean up
+ * the prefilled buffers, but many existing devices do not
+ * support it, and we don't want to bother users who are
+ * using xdp normally.
+ */
- if (unlikely(num_buf > 1 ||
- headroom < virtnet_get_headroom(vi))) {
+ if (!xdp_prog->aux->xdp_has_frags &&
+ (num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
/* linearize data for XDP */
xdp_page = xdp_linearize_page(rq, &num_buf,
page, offset,
@@ -1013,82 +1159,53 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (!xdp_page)
goto err_xdp;
offset = VIRTIO_XDP_HEADROOM;
+ } else if (unlikely(headroom < virtnet_get_headroom(vi))) {
+ xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+ sizeof(struct skb_shared_info));
+ if (len + xdp_room > PAGE_SIZE)
+ goto err_xdp;
+
+ xdp_page = alloc_page(GFP_ATOMIC);
+ if (!xdp_page)
+ goto err_xdp;
+
+ memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
+ page_address(page) + offset, len);
+ frame_sz = PAGE_SIZE;
+ offset = VIRTIO_XDP_HEADROOM;
} else {
xdp_page = page;
}
- /* Allow consuming headroom but reserve enough space to push
- * the descriptor on if we get an XDP_TX return code.
- */
data = page_address(xdp_page) + offset;
- xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
- xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
- VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
+ err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
+ &num_buf, &xdp_frags_truesz, stats);
+ if (unlikely(err))
+ goto err_xdp_frags;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
switch (act) {
case XDP_PASS:
- metasize = xdp.data - xdp.data_meta;
-
- /* recalculate offset to account for any header
- * adjustments and minus the metasize to copy the
- * metadata in page_to_skb(). Note other cases do not
- * build an skb and avoid using offset
- */
- offset = xdp.data - page_address(xdp_page) -
- vi->hdr_len - metasize;
-
- /* recalculate len if xdp.data, xdp.data_end or
- * xdp.data_meta were adjusted
- */
- len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
-
- /* recalculate headroom if xdp.data or xdp_data_meta
- * were adjusted, note that offset should always point
- * to the start of the reserved bytes for virtio_net
- * header which are followed by xdp.data, that means
- * that offset is equal to the headroom (when buf is
- * starting at the beginning of the page, otherwise
- * there is a base offset inside the page) but it's used
- * with a different starting point (buf start) than
- * xdp.data (buf start + vnet hdr size). If xdp.data or
- * data_meta were adjusted by the xdp prog then the
- * headroom size has changed and so has the offset, we
- * can use data_hard_start, which points at buf start +
- * vnet hdr size, to calculate the new headroom and use
- * it later to compute buf start in page_to_skb()
- */
- headroom = xdp.data - xdp.data_hard_start - metasize;
-
- /* We can only create skb based on xdp_page. */
- if (unlikely(xdp_page != page)) {
- rcu_read_unlock();
+ if (unlikely(xdp_page != page))
put_page(page);
- head_skb = page_to_skb(vi, rq, xdp_page, offset,
- len, PAGE_SIZE, false,
- metasize,
- headroom);
- return head_skb;
- }
- break;
+ head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
+ rcu_read_unlock();
+ return head_skb;
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf)) {
- if (unlikely(xdp_page != page))
- put_page(xdp_page);
- goto err_xdp;
+ netdev_dbg(dev, "convert buff to frame failed for xdp\n");
+ goto err_xdp_frags;
}
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
} else if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
- if (unlikely(xdp_page != page))
- put_page(xdp_page);
- goto err_xdp;
+ goto err_xdp_frags;
}
*xdp_xmit |= VIRTIO_XDP_TX;
if (unlikely(xdp_page != page))
@@ -1098,11 +1215,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
case XDP_REDIRECT:
stats->xdp_redirects++;
err = xdp_do_redirect(dev, &xdp, xdp_prog);
- if (err) {
- if (unlikely(xdp_page != page))
- put_page(xdp_page);
- goto err_xdp;
- }
+ if (err)
+ goto err_xdp_frags;
*xdp_xmit |= VIRTIO_XDP_REDIR;
if (unlikely(xdp_page != page))
put_page(page);
@@ -1115,16 +1229,26 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
trace_xdp_exception(vi->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- if (unlikely(xdp_page != page))
- __free_pages(xdp_page, 0);
- goto err_xdp;
+ goto err_xdp_frags;
}
+err_xdp_frags:
+ if (unlikely(xdp_page != page))
+ __free_pages(xdp_page, 0);
+
+ if (xdp_buff_has_frags(&xdp)) {
+ shinfo = xdp_get_shared_info_from_buff(&xdp);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ xdp_page = skb_frag_page(&shinfo->frags[i]);
+ put_page(xdp_page);
+ }
+ }
+
+ goto err_xdp;
}
rcu_read_unlock();
skip_xdp:
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
- metasize, headroom);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -1146,9 +1270,12 @@ skip_xdp:
page = virt_to_head_page(buf);
truesize = mergeable_ctx_to_truesize(ctx);
- if (unlikely(len > truesize)) {
+ headroom = mergeable_ctx_to_headroom(ctx);
+ tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+ room = SKB_DATA_ALIGN(headroom + tailroom);
+ if (unlikely(len > truesize - room)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)ctx);
+ dev->name, len, (unsigned long)(truesize - room));
dev->stats.rx_length_errors++;
goto err_skb;
}
@@ -1251,13 +1378,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(rq, buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
+ virtnet_rq_free_unused_buf(rq->vq, buf);
return;
}
@@ -1426,13 +1547,16 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
/* To avoid internal fragmentation, if there is very likely not
* enough space for another buffer, add the remaining space to
* the current buffer.
+ * The XDP core assumes that the frame_sz of the xdp_buff and the
+ * length of its frags are PAGE_SIZE, so we disable the hole mechanism.
*/
- len += hole;
+ if (!headroom)
+ len += hole;
alloc_frag->offset += hole;
}
sg_init_one(rq->sg, buf, len);
- ctx = mergeable_len_to_ctx(len, headroom);
+ ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
@@ -1608,7 +1732,7 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
} else {
struct xdp_frame *frame = ptr_to_xdp(ptr);
- bytes += frame->len;
+ bytes += xdp_get_frame_len(frame);
xdp_return_frame(frame);
}
packets++;
@@ -2158,9 +2282,9 @@ static int virtnet_close(struct net_device *dev)
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtnet_napi_tx_disable(&vi->sq[i].napi);
napi_disable(&vi->rq[i].napi);
xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
}
return 0;
@@ -3080,7 +3204,9 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
+ unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+ sizeof(struct skb_shared_info));
+ unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
struct virtnet_info *vi = netdev_priv(dev);
struct bpf_prog *old_prog;
u16 xdp_qp = 0, curr_qp;
@@ -3103,9 +3229,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
return -EINVAL;
}
- if (dev->mtu > max_sz) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
- netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
+ if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
+ netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
return -EINVAL;
}
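For single-buffer XDP programs the MTU ceiling is now PAGE_SIZE minus the aligned XDP reserve minus the Ethernet header. The arithmetic, sketched with assumed values for VIRTIO_XDP_HEADROOM and sizeof(struct skb_shared_info):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int headroom = 256;	/* VIRTIO_XDP_HEADROOM, assumed */
	unsigned int shinfo = 320;	/* sizeof(struct skb_shared_info), assumed */
	unsigned int room = ALIGN_UP(headroom + shinfo, 64);
	unsigned int eth_hlen = 14;	/* ETH_HLEN */

	printf("single-buffer XDP MTU limit: %u\n", page_size - room - eth_hlen);
	return 0;	/* 3506 with these assumptions */
}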
@@ -3157,7 +3283,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
}
+ if (!old_prog)
+ xdp_features_set_redirect_target(dev, true);
} else {
+ xdp_features_clear_redirect_target(dev);
vi->xdp_enabled = false;
}
@@ -3690,6 +3819,12 @@ static int virtnet_validate(struct virtio_device *vdev)
__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
}
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
+ !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
+ dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
+ __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
+ }
+
return 0;
}
@@ -3787,6 +3922,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
/* MTU range: 68 - 65535 */
dev->min_mtu = MIN_MTU;
@@ -3802,6 +3938,8 @@ static int virtnet_probe(struct virtio_device *vdev)
eth_hw_addr_set(dev, addr);
} else {
eth_hw_addr_random(dev);
+ dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
+ dev->dev_addr);
}
/* Set up our device-specific information */
@@ -3813,8 +3951,10 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
spin_lock_init(&vi->refill_lock);
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
vi->mergeable_rx_bufs = true;
+ dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
+ }
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
vi->rx_usecs = 0;
@@ -3929,6 +4069,24 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ /* A random MAC address has been assigned; notify the device.
+ * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent,
+ * because many devices work fine without being told the MAC explicitly.
+ */
+ if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ struct scatterlist sg;
+
+ sg_init_one(&sg, dev->dev_addr, dev->addr_len);
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
+ pr_debug("virtio_net: setting MAC address failed\n");
+ rtnl_unlock();
+ err = -EINVAL;
+ goto free_unregister_netdev;
+ }
+ }
+
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d88edbf1bea3..910c10028b14 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -63,5 +63,6 @@ source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
source "drivers/net/wireless/ath/wcn36xx/Kconfig"
source "drivers/net/wireless/ath/ath11k/Kconfig"
+source "drivers/net/wireless/ath/ath12k/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 8e4ae9de5ae4..8d6e6e218d24 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_WCN36XX) += wcn36xx/
obj-$(CONFIG_ATH11K) += ath11k/
+obj-$(CONFIG_ATH12K) += ath12k/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 59926227bd49..c2f3bd35c392 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -208,14 +208,6 @@ ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
}
-static inline void
-ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
- struct ath10k_ce_pipe *ce_state,
- unsigned int value)
-{
- ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
-}
-
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
u32 ce_id,
u64 addr)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index d34a4d6325b2..cd48eca494ed 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -32,6 +32,9 @@ static const struct of_device_id ath11k_ahb_of_match[] = {
{ .compatible = "qcom,wcn6750-wifi",
.data = (void *)ATH11K_HW_WCN6750_HW10,
},
+ { .compatible = "qcom,ipq5018-wifi",
+ .data = (void *)ATH11K_HW_IPQ5018_HW10,
+ },
{ }
};
@@ -267,30 +270,42 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
- ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+ ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
if (ce_attr->dest_nentries) {
- ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
- CE_HOST_IE_3_ADDRESS);
+ ie3_reg_addr);
}
}
static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
- ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+ ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
if (ce_attr->dest_nentries) {
- ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
- CE_HOST_IE_3_ADDRESS);
+ ie3_reg_addr);
}
}
@@ -1150,6 +1165,22 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
if (ret)
goto err_core_free;
+ ab->mem_ce = ab->mem;
+
+ if (ab->hw_params.ce_remap) {
+ const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
+ /* Unlike ipq8074 and ipq6018, the CE register space sits outside
+ * WCSS and is not contiguous with it, hence remap the CE registers
+ * into a new window before accessing them.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ ret = -ENOMEM;
+ goto err_core_free;
+ }
+ }
+
ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
goto err_core_free;
@@ -1236,6 +1267,10 @@ static void ath11k_ahb_free_resources(struct ath11k_base *ab)
ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
+
+ if (ab->hw_params.ce_remap)
+ iounmap(ab->mem_ce);
+
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 9644ff909502..1fc6360e7f01 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -49,6 +49,11 @@ void ath11k_ce_byte_swap(void *mem, u32 len);
#define CE_HOST_IE_2_ADDRESS 0x00A18040
#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+/* CE IE registers are different for IPQ5018 */
+#define CE_HOST_IPQ5018_IE_ADDRESS 0x0841804C
+#define CE_HOST_IPQ5018_IE_2_ADDRESS 0x08418050
+#define CE_HOST_IPQ5018_IE_3_ADDRESS CE_HOST_IPQ5018_IE_ADDRESS
+
#define CE_HOST_IE_3_SHIFT 0xC
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
@@ -84,6 +89,17 @@ struct ce_pipe_config {
__le32 reserved;
};
+struct ce_ie_addr {
+ u32 ie1_reg_addr;
+ u32 ie2_reg_addr;
+ u32 ie3_reg_addr;
+};
+
+struct ce_remap {
+ u32 base;
+ u32 size;
+};
+
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index edf78df9b12f..75fdbe4ef83a 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -54,6 +54,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
.svc_to_ce_map_len = 21,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -115,6 +116,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .ftm_responder = true,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -137,6 +139,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
.svc_to_ce_map_len = 19,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -196,6 +199,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .ftm_responder = true,
},
{
.name = "qca6390 hw2.0",
@@ -218,6 +222,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -279,6 +284,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .ftm_responder = false,
},
{
.name = "qcn9074 hw1.0",
@@ -301,6 +307,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
.svc_to_ce_map_len = 18,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
@@ -359,6 +366,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .ftm_responder = true,
},
{
.name = "wcn6855 hw2.0",
@@ -381,6 +389,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -442,6 +451,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .ftm_responder = false,
},
{
.name = "wcn6855 hw2.1",
@@ -524,6 +534,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .ftm_responder = false,
},
{
.name = "wcn6750 hw1.0",
@@ -546,6 +557,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 1,
@@ -603,6 +615,87 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
+ .ftm_responder = false,
+ },
+ {
+ .hw_rev = ATH11K_HW_IPQ5018_HW10,
+ .name = "ipq5018 hw1.0",
+ .fw = {
+ .dir = "IPQ5018/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = MAX_RADIOS_5018,
+ .bdf_addr = 0x4BA00000,
+ /* hal_desc_sz and hw ops are similar to qcn9074 */
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .credit_flow = false,
+ .max_tx_ring = 1,
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ },
+ .internal_sleep_clock = false,
+ .regs = &ipq5018_regs,
+ .hw_ops = &ipq5018_ops,
+ .host_ce_config = ath11k_host_ce_config_qcn9074,
+ .ce_count = CE_CNT_5018,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq5018,
+ .target_ce_count = TARGET_CE_CNT_5018,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq5018,
+ .svc_to_ce_map_len = SVC_CE_MAP_LEN_5018,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq5018,
+ .ce_remap = &ath11k_ce_remap_ipq5018,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = RXDMA_PER_PDEV_5018,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = false,
+ .supports_sta_ps = false,
+ .supports_shadow_regs = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_regdb = false,
+ .idle_ps = false,
+ .supports_suspend = false,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .single_pdev_only = false,
+ .cold_boot_calib = true,
+ .fix_l1ss = true,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
+ .ftm_responder = true,
},
};
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 22460b0abf03..0830276e5028 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -142,6 +142,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6855_HW20,
ATH11K_HW_WCN6855_HW21,
ATH11K_HW_WCN6750_HW10,
+ ATH11K_HW_IPQ5018_HW10,
};
enum ath11k_firmware_mode {
@@ -230,6 +231,13 @@ struct ath11k_he {
#define MAX_RADIOS 3
+/* ipq5018 hw param macros */
+#define MAX_RADIOS_5018 1
+#define CE_CNT_5018 6
+#define TARGET_CE_CNT_5018 9
+#define SVC_CE_MAP_LEN_5018 17
+#define RXDMA_PER_PDEV_5018 1
+
enum {
WMI_HOST_TP_SCALE_MAX = 0,
WMI_HOST_TP_SCALE_50 = 1,
@@ -338,6 +346,7 @@ struct ath11k_vif {
bool is_started;
bool is_up;
+ bool ftm_responder;
bool spectral_enabled;
bool ps;
u32 aid;
@@ -512,8 +521,8 @@ struct ath11k_sta {
#define ATH11K_MIN_5G_FREQ 4150
#define ATH11K_MIN_6G_FREQ 5925
#define ATH11K_MAX_6G_FREQ 7115
-#define ATH11K_NUM_CHANS 101
-#define ATH11K_MAX_5G_CHAN 173
+#define ATH11K_NUM_CHANS 102
+#define ATH11K_MAX_5G_CHAN 177
enum ath11k_state {
ATH11K_STATE_OFF,
@@ -843,6 +852,7 @@ struct ath11k_base {
struct ath11k_dp dp;
void __iomem *mem;
+ void __iomem *mem_ce;
unsigned long mem_len;
struct {
@@ -912,7 +922,6 @@ struct ath11k_base {
enum ath11k_dfs_region dfs_region;
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_soc;
- struct dentry *debugfs_ath11k;
#endif
struct ath11k_soc_dp_stats soc_stats;
@@ -1138,6 +1147,9 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[];
+
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[];
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index ccdf3d5ba1ab..5bb6fd17fdf6 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -976,10 +976,6 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
- ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
- if (IS_ERR(ab->debugfs_soc))
- return PTR_ERR(ab->debugfs_soc);
-
debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
&fops_simulate_fw_crash);
@@ -1001,15 +997,51 @@ void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
- ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
+ struct dentry *root;
+ bool dput_needed;
+ char name[64];
+ int ret;
+
+ root = debugfs_lookup("ath11k", NULL);
+ if (!root) {
+ root = debugfs_create_dir("ath11k", NULL);
+ if (IS_ERR_OR_NULL(root))
+ return PTR_ERR(root);
+
+ dput_needed = false;
+ } else {
+ /* a dentry from lookup() needs dput() once we are done with it */
+ dput_needed = true;
+ }
+
+ scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(name, root);
+ if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
+ ret = PTR_ERR(ab->debugfs_soc);
+ goto out;
+ }
+
+ ret = 0;
- return PTR_ERR_OR_ZERO(ab->debugfs_ath11k);
+out:
+ if (dput_needed)
+ dput(root);
+
+ return ret;
}
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
{
- debugfs_remove_recursive(ab->debugfs_ath11k);
- ab->debugfs_ath11k = NULL;
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+
+ /* We deliberately do not remove the ath11k directory, even if
+ * it would be empty. This simplifies the directory handling, and
+ * leaving an empty ath11k directory in debugfs is only a minor
+ * cosmetic issue.
+ */
}
EXPORT_SYMBOL(ath11k_debugfs_soc_destroy);
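Each device now gets its own directory under a shared "ath11k" debugfs root, named from the bus and the device. The naming scheme, sketched with plain snprintf and an example PCI device name:

#include <stdio.h>

int main(void)
{
	char name[64];

	/* mirrors the scnprintf() above: "<bus>-<device name>" */
	snprintf(name, sizeof(name), "%s-%s", "pci", "0000:06:00.0");
	printf("debugfs dir: /sys/kernel/debug/ath11k/%s\n", name);
	return 0;
}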
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index c5a4c34d7749..b65a84a88264 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1535,13 +1535,12 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
{
struct htt_ppdu_stats_info *ppdu_info;
- spin_lock_bh(&ar->data_lock);
+ lockdep_assert_held(&ar->data_lock);
+
if (!list_empty(&ar->ppdu_stats_info)) {
list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
- if (ppdu_info->ppdu_id == ppdu_id) {
- spin_unlock_bh(&ar->data_lock);
+ if (ppdu_info->ppdu_id == ppdu_id)
return ppdu_info;
- }
}
if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
@@ -1553,16 +1552,13 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
kfree(ppdu_info);
}
}
- spin_unlock_bh(&ar->data_lock);
ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
if (!ppdu_info)
return NULL;
- spin_lock_bh(&ar->data_lock);
list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
ar->ppdu_stat_list_depth++;
- spin_unlock_bh(&ar->data_lock);
return ppdu_info;
}
@@ -1586,16 +1582,17 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
ret = -EINVAL;
- goto exit;
+ goto out;
}
if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
+ spin_lock_bh(&ar->data_lock);
ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
if (!ppdu_info) {
ret = -EINVAL;
- goto exit;
+ goto out_unlock_data;
}
ppdu_info->ppdu_id = ppdu_id;
@@ -1604,10 +1601,13 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
(void *)ppdu_info);
if (ret) {
ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
- goto exit;
+ goto out_unlock_data;
}
-exit:
+out_unlock_data:
+ spin_unlock_bh(&ar->data_lock);
+
+out:
rcu_read_unlock();
return ret;
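The descriptor helper no longer takes ar->data_lock itself; the caller holds it across lookup, list insertion, and TLV parsing, and the helper only asserts the lock. A pthreads sketch of the same caller-holds-the-lock convention (illustrative, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static int list_depth;

/* Caller must hold data_lock: lookup-or-insert stays atomic with
 * respect to everything else done under the same lock. */
static int get_or_add_desc(void)
{
	return ++list_depth;
}

int main(void)
{
	pthread_mutex_lock(&data_lock);
	printf("depth=%d\n", get_or_add_desc());
	pthread_mutex_unlock(&data_lock);
	return 0;
}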
@@ -3126,6 +3126,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
if (!peer) {
ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
return -ENOENT;
}
@@ -5022,6 +5023,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
} else {
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 2fd224480d45..22422237500c 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1220,16 +1220,20 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
s = &hal->srng_config[HAL_CE_SRC];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
+ ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s = &hal->srng_config[HAL_CE_DST];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
+ ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
@@ -1237,8 +1241,9 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s = &hal->srng_config[HAL_CE_DST_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
- HAL_CE_DST_STATUS_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+ HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
+ ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 6a1f78ee6eb6..1942d41d6de5 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -321,6 +321,10 @@ struct ath11k_base;
#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+/* IPQ5018 ce registers */
+#define HAL_IPQ5018_CE_WFSS_REG_BASE 0x08400000
+#define HAL_IPQ5018_CE_SIZE 0x200000
+
/* Add any other errors here and return them in
* ath11k_hal_rx_desc_get_err().
*/
@@ -519,6 +523,7 @@ enum hal_srng_dir {
#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
#define HAL_SRNG_FLAGS_CACHED 0x20000000
#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+#define HAL_SRNG_FLAGS_REMAP_CE_RING 0x10000000
#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index dbcc0c4035b6..ab8f0ccacc6b 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -791,6 +791,49 @@ static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
ring_hash_map);
}
+static void ath11k_hw_ipq5018_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+ /* Each hash entry uses three bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
+
static u16 ath11k_hw_ipq8074_mpdu_info_get_peerid(u8 *tlv_data)
{
u16 peer_id = 0;
@@ -1084,6 +1127,47 @@ const struct ath11k_hw_ops wcn6750_ops = {
.get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
};
+/* IPQ5018 hw ops are similar to QCN9074's except for the dest ring remap */
+const struct ath11k_hw_ops ipq5018_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .reo_setup = ath11k_hw_ipq5018_reo_setup,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+};
+
#define ATH11K_TX_RING_MASK_0 BIT(0)
#define ATH11K_TX_RING_MASK_1 BIT(1)
#define ATH11K_TX_RING_MASK_2 BIT(2)
@@ -1972,6 +2056,214 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
},
};
+/* Target firmware's Copy Engine configuration for IPQ5018 */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0x2000),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+/* Map from service/endpoint to Copy Engine for IPQ5018.
+ * This table is derived from the CE TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
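
The all-zero terminator entry above lets consumers walk the map without a separate length. A minimal standalone sketch of such a lookup (a hypothetical helper, not the driver's actual code; the __le32 fields are treated as host-endian for brevity):

#include <stdbool.h>
#include <stdint.h>

#define PIPEDIR_IN  1	/* target -> host (DL) */
#define PIPEDIR_OUT 2	/* host -> target (UL) */

struct svc_map_entry {		/* mirrors struct service_to_pipe */
	uint32_t service_id;
	uint32_t pipedir;
	uint32_t pipenum;
};

/* Walk the table until the all-zero terminator, collecting the UL and
 * DL pipe numbers registered for the given service id.
 */
static bool map_service_to_pipes(const struct svc_map_entry *map,
				 uint32_t service_id,
				 uint32_t *ul_pipe, uint32_t *dl_pipe)
{
	bool ul = false, dl = false;

	for (; map->service_id || map->pipedir || map->pipenum; map++) {
		if (map->service_id != service_id)
			continue;
		if (map->pipedir == PIPEDIR_OUT) {
			*ul_pipe = map->pipenum;
			ul = true;
		} else if (map->pipedir == PIPEDIR_IN) {
			*dl_pipe = map->pipenum;
			dl = true;
		}
	}

	return ul && dl;
}
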
+
+const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074 = {
+ .ie1_reg_addr = CE_HOST_IE_ADDRESS,
+ .ie2_reg_addr = CE_HOST_IE_2_ADDRESS,
+ .ie3_reg_addr = CE_HOST_IE_3_ADDRESS,
+};
+
+const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018 = {
+ .ie1_reg_addr = CE_HOST_IPQ5018_IE_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .ie2_reg_addr = CE_HOST_IPQ5018_IE_2_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .ie3_reg_addr = CE_HOST_IPQ5018_IE_3_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+};
+
+const struct ce_remap ath11k_ce_remap_ipq5018 = {
+ .base = HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .size = HAL_IPQ5018_CE_SIZE,
+};
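
On IPQ5018 the CE registers live in a separate WFSS window, which is why the interrupt-enable offsets above are stored relative to HAL_IPQ5018_CE_WFSS_REG_BASE. A hedged sketch of how such a remap window could be mapped (hypothetical helper, error handling trimmed; not the driver's probe path):

#include <linux/io.h>

struct ce_remap {	/* as declared by this patch */
	u32 base;
	u32 size;
};

/* Map the CE window so that offsets computed as
 * "absolute CE register address - HAL_IPQ5018_CE_WFSS_REG_BASE"
 * resolve against the returned base, e.g.
 * readl(mem_ce + ie_addr->ie1_reg_addr).
 */
static void __iomem *map_ce_window(const struct ce_remap *remap)
{
	return ioremap(remap->base, remap->size);
}
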
+
const struct ath11k_hw_regs ipq8074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000510,
@@ -2437,6 +2729,85 @@ static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[
},
};
+const struct ath11k_hw_regs ipq5018_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000694,
+ .hal_tcl1_ring_base_msb = 0x00000698,
+ .hal_tcl1_ring_id = 0x0000069c,
+ .hal_tcl1_ring_misc = 0x000006a4,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+ .hal_tcl1_ring_msi1_data = 0x000006e4,
+ .hal_tcl2_ring_base_lsb = 0x000006ec,
+ .hal_tcl_ring_base_lsb = 0x0000079c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x000001ec,
+ .hal_reo1_ring_base_msb = 0x000001f0,
+ .hal_reo1_ring_id = 0x000001f4,
+ .hal_reo1_ring_misc = 0x000001fc,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+ .hal_reo1_ring_hp_addr_msb = 0x00000204,
+ .hal_reo1_ring_producer_int_setup = 0x00000210,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+ .hal_reo1_ring_msi1_base_msb = 0x00000238,
+ .hal_reo1_ring_msi1_data = 0x0000023c,
+ .hal_reo2_ring_base_lsb = 0x00000244,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003028,
+ .hal_reo1_ring_tp = 0x0000302c,
+ .hal_reo2_ring_hp = 0x00003030,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x0000013c,
+ .hal_sw2reo_ring_hp = 0x00003018,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+ .hal_reo_cmd_ring_hp = 0x00003010,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x08400000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x08401000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x08402000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x08403000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+};
+
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 0c5ef8a526d8..0be4e1232384 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -80,6 +80,8 @@
#define ATH11K_M3_FILE "m3.bin"
#define ATH11K_REGDB_FILE_NAME "regdb.bin"
+#define ATH11K_CE_OFFSET(ab) (ab->mem_ce - ab->mem)
+
enum ath11k_hw_rate_cck {
ATH11K_HW_RATE_CCK_LP_11M = 0,
ATH11K_HW_RATE_CCK_LP_5_5M,
@@ -158,6 +160,8 @@ struct ath11k_hw_params {
u32 target_ce_count;
const struct service_to_pipe *svc_to_ce_map;
u32 svc_to_ce_map_len;
+ const struct ce_ie_addr *ce_ie_addr;
+ const struct ce_remap *ce_remap;
bool single_pdev_only;
@@ -220,6 +224,7 @@ struct ath11k_hw_params {
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
+ bool ftm_responder;
};
struct ath11k_hw_ops {
@@ -271,12 +276,18 @@ extern const struct ath11k_hw_ops qca6390_ops;
extern const struct ath11k_hw_ops qcn9074_ops;
extern const struct ath11k_hw_ops wcn6855_ops;
extern const struct ath11k_hw_ops wcn6750_ops;
+extern const struct ath11k_hw_ops ipq5018_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
+extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074;
+extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018;
+
+extern const struct ce_remap ath11k_ce_remap_ipq5018;
+
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
@@ -406,6 +417,7 @@ extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs;
extern const struct ath11k_hw_regs wcn6855_regs;
extern const struct ath11k_hw_regs wcn6750_regs;
+extern const struct ath11k_hw_regs ipq5018_regs;
static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
{
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 9e923ecb0891..110a38cce0a7 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -96,6 +96,7 @@ static const struct ieee80211_channel ath11k_5ghz_channels[] = {
CHAN5G(165, 5825, 0),
CHAN5G(169, 5845, 0),
CHAN5G(173, 5865, 0),
+ CHAN5G(177, 5885, 0),
};
static const struct ieee80211_channel ath11k_6ghz_channels[] = {
@@ -3110,7 +3111,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
u16 bitrate;
int ret = 0;
u8 rateidx;
- u32 rate;
+ u32 rate, param;
u32 ipv4_cnt;
mutex_lock(&ar->conf_mutex);
@@ -3412,6 +3413,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_FTM_RESPONDER &&
+ arvif->ftm_responder != info->ftm_responder &&
+ ar->ab->hw_params.ftm_responder &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ arvif->ftm_responder = info->ftm_responder;
+ param = WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ arvif->ftm_responder);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set ftm responder %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
if (changed & BSS_CHANGED_FILS_DISCOVERY ||
changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
ath11k_mac_fils_discovery(arvif, info);
@@ -3612,7 +3627,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
- struct scan_req_params arg;
+ struct scan_req_params *arg = NULL;
int ret = 0;
int i;
u32 scan_timeout;
@@ -3640,72 +3655,78 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
- memset(&arg, 0, sizeof(arg));
- ath11k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH11K_SCAN_ID;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath11k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH11K_SCAN_ID;
if (req->ie_len) {
- arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
- if (!arg.extraie.ptr) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
- arg.extraie.len = req->ie_len;
+ arg->extraie.len = req->ie_len;
}
if (req->n_ssids) {
- arg.num_ssids = req->n_ssids;
- for (i = 0; i < arg.num_ssids; i++) {
- arg.ssid[i].length = req->ssids[i].ssid_len;
- memcpy(&arg.ssid[i].ssid, req->ssids[i].ssid,
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++) {
+ arg->ssid[i].length = req->ssids[i].ssid_len;
+ memcpy(&arg->ssid[i].ssid, req->ssids[i].ssid,
req->ssids[i].ssid_len);
}
} else {
- arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->n_channels) {
- arg.num_chan = req->n_channels;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
- if (!arg.chan_list) {
+ if (!arg->chan_list) {
ret = -ENOMEM;
goto exit;
}
- for (i = 0; i < arg.num_chan; i++)
- arg.chan_list[i] = req->channels[i]->center_freq;
+ for (i = 0; i < arg->num_chan; i++)
+ arg->chan_list[i] = req->channels[i]->center_freq;
}
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- arg.scan_f_add_spoofed_mac_in_probe = 1;
- ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
- ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
+ arg->scan_f_add_spoofed_mac_in_probe = 1;
+ ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
}
/* if duration is set, default dwell times will be overwritten */
if (req->duration) {
- arg.dwell_time_active = req->duration;
- arg.dwell_time_active_2g = req->duration;
- arg.dwell_time_active_6g = req->duration;
- arg.dwell_time_passive = req->duration;
- arg.dwell_time_passive_6g = req->duration;
- arg.burst_duration = req->duration;
-
- scan_timeout = min_t(u32, arg.max_rest_time *
- (arg.num_chan - 1) + (req->duration +
+ arg->dwell_time_active = req->duration;
+ arg->dwell_time_active_2g = req->duration;
+ arg->dwell_time_active_6g = req->duration;
+ arg->dwell_time_passive = req->duration;
+ arg->dwell_time_passive_6g = req->duration;
+ arg->burst_duration = req->duration;
+
+ scan_timeout = min_t(u32, arg->max_rest_time *
+ (arg->num_chan - 1) + (req->duration +
ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
- arg.num_chan, arg.max_scan_time);
+ arg->num_chan, arg->max_scan_time);
} else {
- scan_timeout = arg.max_scan_time;
+ scan_timeout = arg->max_scan_time;
}
/* Add a margin to account for event/command processing */
scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;
- ret = ath11k_start_scan(ar, &arg);
+ ret = ath11k_start_scan(ar, arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -3717,10 +3738,11 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
msecs_to_jiffies(scan_timeout));
exit:
- kfree(arg.chan_list);
-
- if (req->ie_len)
- kfree(arg.extraie.ptr);
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
mutex_unlock(&ar->conf_mutex);
@@ -9106,6 +9128,10 @@ static int __ath11k_mac_register(struct ath11k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ if (ab->hw_params.ftm_responder)
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+
ath11k_reg_init(ar);
if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 99cf3357c66e..776362d151cb 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -543,6 +543,8 @@ static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
goto clear_master;
}
+ ab->mem_ce = ab->mem;
+
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 2a8a3e3dcff6..b3a7d7bfe17c 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -3853,8 +3853,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
- GFP_KERNEL);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ GFP_KERNEL);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 8f2c07d70a4a..0a045af5419b 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -1073,6 +1073,7 @@ enum wmi_tlv_vdev_param {
WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
WMI_VDEV_PARAM_HE_LTF = 0x74,
+ WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE = 0x7d,
WMI_VDEV_PARAM_BA_MODE = 0x7e,
WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
new file mode 100644
index 000000000000..4f9c514c13e7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH12K
+ tristate "Qualcomm Technologies Wi-Fi 7 support (ath12k)"
+ depends on MAC80211 && HAS_DMA && PCI
+ depends on CRYPTO_MICHAEL_MIC
+ select QCOM_QMI_HELPERS
+ select MHI_BUS
+ select QRTR
+ select QRTR_MHI
+ help
+ Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
+ 802.11be) family of chipsets, for example WCN7850 and
+ QCN9274.
+
+ If you choose to build a module, it will be called ath12k.
+
+config ATH12K_DEBUG
+ bool "ath12k debugging"
+ depends on ATH12K
+ help
+ Enable debug support, for example debug messages which must
+ be enabled separately using the debug_mask module parameter.
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance, choose N.
+
+config ATH12K_TRACING
+ bool "ath12k tracing support"
+ depends on ATH12K && EVENT_TRACING
+ help
+ Enable ath12k tracing infrastructure.
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance, choose N.
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
new file mode 100644
index 000000000000..62c52e733b5e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH12K) += ath12k.o
+ath12k-y += core.o \
+ hal.o \
+ hal_tx.o \
+ hal_rx.o \
+ wmi.o \
+ mac.o \
+ reg.o \
+ htc.o \
+ qmi.o \
+ dp.o \
+ dp_tx.o \
+ dp_rx.o \
+ debug.o \
+ ce.o \
+ peer.o \
+ dbring.o \
+ hw.o \
+ mhi.o \
+ pci.o \
+ dp_mon.o
+
+ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
new file mode 100644
index 000000000000..aed6987804bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -0,0 +1,964 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+
+const struct ce_attr ath12k_host_ce_config_qcn9274[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE11: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE12: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE13: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE14: target->host dbg log */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE15: reserved for future use */
+ {
+ .flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 64,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
+ struct sk_buff *skb, dma_addr_t paddr)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct ath12k_ce_ring *ring = pipe->dest_ring;
+ struct hal_srng *srng;
+ unsigned int write_index;
+ unsigned int nentries_mask = ring->nentries_mask;
+ struct hal_ce_srng_dest_desc *desc;
+ int ret;
+
+ lockdep_assert_held(&ab->ce.ce_lock);
+
+ write_index = ring->write_index;
+
+ srng = &ab->hal.srng_list[ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ ath12k_hal_ce_dst_set_desc(desc, paddr);
+
+ ring->skb[write_index] = skb;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ring->write_index = write_index;
+
+ pipe->rx_buf_needed--;
+
+ ret = 0;
+exit:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ if (!(pipe->dest_ring || pipe->status_ring))
+ return 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+ while (pipe->rx_buf_needed) {
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr))) {
+ ath12k_warn(ab, "failed to dma map ce rx buf\n");
+ dev_kfree_skb_any(skb);
+ ret = -EIO;
+ goto exit;
+ }
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
+ if (ret) {
+ ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ dma_unmap_single(ab->dev, paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ }
+
+exit:
+ spin_unlock_bh(&ab->ce.ce_lock);
+ return ret;
+}
+
+static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
+ struct sk_buff **skb, int *nbytes)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct hal_ce_srng_dst_status_desc *desc;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->dest_ring->sw_index;
+ nentries_mask = pipe->dest_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *nbytes = ath12k_hal_ce_dst_status_get_length(desc);
+ if (*nbytes == 0) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *skb = pipe->dest_ring->skb[sw_index];
+ pipe->dest_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->dest_ring->sw_index = sw_index;
+
+ pipe->rx_buf_needed++;
+err:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ unsigned int nbytes, max_nbytes;
+ int ret;
+
+ __skb_queue_head_init(&list);
+ while (ath12k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->recv_cb(ab, skb);
+ }
+
+ ret = ath12k_ce_rx_post_pipe(pipe);
+ if (ret && ret != -ENOSPC) {
+ ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ pipe->pipe_num, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
+ }
+}
+
+static struct sk_buff *ath12k_ce_completed_send_next(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct hal_ce_srng_src_desc *desc;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->src_ring->sw_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ desc = ath12k_hal_srng_src_reap_next(ab, srng);
+ if (!desc) {
+ skb = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+
+ skb = pipe->src_ring->skb[sw_index];
+
+ pipe->src_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return skb;
+}
+
+static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+
+ while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath12k_ce_srng_msi_ring_params_setup(struct ath12k_base *ab, u32 ce_id,
+ struct hal_srng_params *ring_params)
+{
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ u32 addr_lo;
+ u32 addr_hi;
+ int ret;
+
+ ret = ath12k_hif_get_user_msi_vector(ab, "CE",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+
+ if (ret)
+ return;
+
+ ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
+ ath12k_hif_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
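
A small standalone illustration of the round-robin MSI assignment performed above (assumed example values; not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr_lo = 0xfee00000, addr_hi = 0;
	uint64_t msi_addr = (uint64_t)addr_lo | ((uint64_t)addr_hi << 32);
	uint32_t msi_data_start = 1, msi_data_count = 5;

	/* msi_data_idx values wrap onto the available user vectors */
	assert((0 % msi_data_count) + msi_data_start == 1);
	assert((4 % msi_data_count) + msi_data_start == 5);
	assert((5 % msi_data_count) + msi_data_start == 1); /* wraps */

	/* 64-bit doorbell address assembled from its 32-bit halves */
	assert(msi_addr == 0xfee00000ULL);
	return 0;
}
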
+
+static int ath12k_ce_init_ring(struct ath12k_base *ab,
+ struct ath12k_ce_ring *ce_ring,
+ int ce_id, enum hal_ring_type type)
+{
+ struct hal_srng_params params = { 0 };
+ int ret;
+
+ params.ring_base_paddr = ce_ring->base_addr_ce_space;
+ params.ring_base_vaddr = ce_ring->base_addr_owner_space;
+ params.num_entries = ce_ring->nentries;
+
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
+ ath12k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
+
+ switch (type) {
+ case HAL_CE_SRC:
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
+ params.intr_batch_cntr_thres_entries = 1;
+ break;
+ case HAL_CE_DST:
+ params.max_buffer_len = ab->hw_params->host_ce_config[ce_id].src_sz_max;
+ if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_timer_thres_us = 1024;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.low_threshold = ce_ring->nentries - 3;
+ }
+ break;
+ case HAL_CE_DST_STATUS:
+ if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = 0x1000;
+ }
+ break;
+ default:
+ ath12k_warn(ab, "Invalid CE ring type %d\n", type);
+ return -EINVAL;
+ }
+
+ /* TODO: Init other params needed by HAL to init the ring */
+
+ ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, &params);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ce_id);
+ return ret;
+ }
+
+ ce_ring->hal_ring_id = ret;
+
+ return 0;
+}
+
+static struct ath12k_ce_ring *
+ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz)
+{
+ struct ath12k_ce_ring *ce_ring;
+ dma_addr_t base_addr;
+
+ ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+ if (!ce_ring)
+ return ERR_PTR(-ENOMEM);
+
+ ce_ring->nentries = nentries;
+ ce_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ ce_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ab->dev,
+ nentries * desc_sz + CE_DESC_RING_ALIGN,
+ &base_addr, GFP_KERNEL);
+ if (!ce_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ce_ring->base_addr_ce_space_unaligned = base_addr;
+
+ ce_ring->base_addr_owner_space =
+ PTR_ALIGN(ce_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ ce_ring->base_addr_ce_space = ALIGN(ce_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return ce_ring;
+}
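
The ring is over-allocated by CE_DESC_RING_ALIGN bytes and both the CPU and CE views of the base are rounded up to the same boundary, so descriptors stay aligned in both address spaces. A standalone numeric sketch of that arithmetic (example addresses made up):

#include <assert.h>
#include <stdint.h>

#define CE_DESC_RING_ALIGN 8

static uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	/* example dma_alloc_coherent() result, 4 bytes off alignment */
	uintptr_t unaligned = 0x1004;
	uintptr_t aligned = align_up(unaligned, CE_DESC_RING_ALIGN);

	assert(aligned == 0x1008);
	/* at most CE_DESC_RING_ALIGN - 1 padding bytes go unused */
	assert(aligned - unaligned < CE_DESC_RING_ALIGN);
	return 0;
}
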
+
+static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
+{
+ struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
+ struct ath12k_ce_ring *ring;
+ int nentries;
+ int desc_sz;
+
+ pipe->attr_flags = attr->flags;
+
+ if (attr->src_nentries) {
+ pipe->send_cb = ath12k_ce_send_done_cb;
+ nentries = roundup_pow_of_two(attr->src_nentries);
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->src_ring = ring;
+ }
+
+ if (attr->dest_nentries) {
+ pipe->recv_cb = attr->recv_cb;
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->dest_ring = ring;
+
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->status_ring = ring;
+ }
+
+ return 0;
+}
+
+void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id)
+{
+ struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+
+ if (pipe->send_cb)
+ pipe->send_cb(pipe);
+
+ if (pipe->recv_cb)
+ ath12k_ce_recv_process_cb(pipe);
+}
+
+void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id)
+{
+ struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+
+ if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
+ pipe->send_cb(pipe);
+}
+
+int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id)
+{
+ struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ struct hal_ce_srng_src_desc *desc;
+ struct hal_srng *srng;
+ unsigned int write_index, sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+ u8 byte_swap_data = 0;
+ int num_used;
+
+ /* Check if some entries could be regained by handling tx completion if
+ * the CE has interrupts disabled and the number of used entries exceeds
+ * the defined usage threshold.
+ */
+ if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+ spin_lock_bh(&ab->ce.ce_lock);
+ write_index = pipe->src_ring->write_index;
+
+ sw_index = pipe->src_ring->sw_index;
+
+ if (write_index >= sw_index)
+ num_used = write_index - sw_index;
+ else
+ num_used = pipe->src_ring->nentries - sw_index +
+ write_index;
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ if (num_used > ATH12K_CE_USAGE_THRESHOLD)
+ ath12k_ce_poll_send_completed(ab, pipe->pipe_num);
+ }
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ write_index = pipe->src_ring->write_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ath12k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto unlock;
+ }
+
+ desc = ath12k_hal_srng_src_get_next_reaped(ab, srng);
+ if (!desc) {
+ ath12k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto unlock;
+ }
+
+ if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+ byte_swap_data = 1;
+
+ ath12k_hal_ce_src_set_desc(desc, ATH12K_SKB_CB(skb)->paddr,
+ skb->len, transfer_id, byte_swap_data);
+
+ pipe->src_ring->skb[write_index] = skb;
+ pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+ write_index);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
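
The occupancy computation at the top of ath12k_ce_send() covers both the linear and the wrapped state of the circular ring, and CE_RING_IDX_INCR() relies on nentries being a power of two. A standalone check of both cases (illustrative only):

#include <assert.h>

static unsigned int ring_used(unsigned int nentries,
			      unsigned int write_index,
			      unsigned int sw_index)
{
	if (write_index >= sw_index)
		return write_index - sw_index;
	return nentries - sw_index + write_index;
}

int main(void)
{
	unsigned int mask = 8 - 1;	/* nentries_mask of an 8-entry ring */

	assert(ring_used(8, 6, 2) == 4);	/* linear case */
	assert(ring_used(8, 2, 6) == 4);	/* wrapped case */
	assert(((7 + 1) & mask) == 0);		/* CE_RING_IDX_INCR wrap */
	return 0;
}
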
+
+static void ath12k_ce_rx_pipe_cleanup(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct ath12k_ce_ring *ring = pipe->dest_ring;
+ struct sk_buff *skb;
+ int i;
+
+ if (!(ring && pipe->buf_sz))
+ return;
+
+ for (i = 0; i < ring->nentries; i++) {
+ skb = ring->skb[i];
+ if (!skb)
+ continue;
+
+ ring->skb[i] = NULL;
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+void ath12k_ce_cleanup_pipes(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < ab->hw_params->ce_count; pipe_num++) {
+ pipe = &ab->ce.ce_pipe[pipe_num];
+ ath12k_ce_rx_pipe_cleanup(pipe);
+
+ /* Cleanup any src CE's which have interrupts disabled */
+ ath12k_ce_poll_send_completed(ab, pipe_num);
+
+ /* NOTE: Should we also clean up tx buffers in all pipes? */
+ }
+}
+
+void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+ ret = ath12k_ce_rx_post_pipe(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ continue;
+
+ ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ i, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
+
+ return;
+ }
+ }
+}
+
+void ath12k_ce_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry);
+
+ ath12k_ce_rx_post_buf(ab);
+}
+
+static void ath12k_ce_shadow_config(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ab->hw_params->host_ce_config[i].src_nentries)
+ ath12k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i);
+
+ if (ab->hw_params->host_ce_config[i].dest_nentries) {
+ ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i);
+ ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i);
+ }
+ }
+}
+
+void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len)
+{
+ if (!ab->hw_params->supports_shadow_regs)
+ return;
+
+ ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+
+ /* shadow is already configured */
+ if (*shadow_cfg_len)
+ return;
+
+ /* shadow isn't configured yet, configure now.
+ * non-CE srngs are configured first, then
+ * all CE srngs.
+ */
+ ath12k_hal_srng_shadow_config(ab);
+ ath12k_ce_shadow_config(ab);
+
+ /* get the shadow configuration */
+ ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+}
+
+int ath12k_ce_init_pipes(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
+ &ab->qmi.ce_cfg.shadow_reg_v3_len);
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ ret = ath12k_ce_init_ring(ab, pipe->src_ring, i,
+ HAL_CE_SRC);
+ if (ret) {
+ ath12k_warn(ab, "failed to init src ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->src_ring->write_index = 0;
+ pipe->src_ring->sw_index = 0;
+ }
+
+ if (pipe->dest_ring) {
+ ret = ath12k_ce_init_ring(ab, pipe->dest_ring, i,
+ HAL_CE_DST);
+ if (ret) {
+ ath12k_warn(ab, "failed to init dest ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->rx_buf_needed = pipe->dest_ring->nentries ?
+ pipe->dest_ring->nentries - 2 : 0;
+
+ pipe->dest_ring->write_index = 0;
+ pipe->dest_ring->sw_index = 0;
+ }
+
+ if (pipe->status_ring) {
+ ret = ath12k_ce_init_ring(ab, pipe->status_ring, i,
+ HAL_CE_DST_STATUS);
+ if (ret) {
+ ath12k_warn(ab, "failed to init dest status ing: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->status_ring->write_index = 0;
+ pipe->status_ring->sw_index = 0;
+ }
+ }
+
+ return 0;
+}
+
+void ath12k_ce_free_pipes(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int desc_sz;
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ dma_free_coherent(ab->dev,
+ pipe->src_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->src_ring->base_addr_owner_space,
+ pipe->src_ring->base_addr_ce_space);
+ kfree(pipe->src_ring);
+ pipe->src_ring = NULL;
+ }
+
+ if (pipe->dest_ring) {
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ dma_free_coherent(ab->dev,
+ pipe->dest_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->dest_ring->base_addr_owner_space,
+ pipe->dest_ring->base_addr_ce_space);
+ kfree(pipe->dest_ring);
+ pipe->dest_ring = NULL;
+ }
+
+ if (pipe->status_ring) {
+ desc_sz =
+ ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ dma_free_coherent(ab->dev,
+ pipe->status_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->status_ring->base_addr_owner_space,
+ pipe->status_ring->base_addr_ce_space);
+ kfree(pipe->status_ring);
+ pipe->status_ring = NULL;
+ }
+ }
+}
+
+int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int i;
+ int ret;
+ const struct ce_attr *attr;
+
+ spin_lock_init(&ab->ce.ce_lock);
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ attr = &ab->hw_params->host_ce_config[i];
+ pipe = &ab->ce.ce_pipe[i];
+ pipe->pipe_num = i;
+ pipe->ab = ab;
+ pipe->buf_sz = attr->src_sz_max;
+
+ ret = ath12k_ce_alloc_pipe(ab, i);
+ if (ret) {
+ /* Free any partially successful allocation */
+ ath12k_ce_free_pipes(ab);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id)
+{
+ if (ce_id >= ab->hw_params->ce_count)
+ return -EINVAL;
+
+ return ab->hw_params->host_ce_config[ce_id].flags;
+}
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
new file mode 100644
index 000000000000..17cf16235e0b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_CE_H
+#define ATH12K_CE_H
+
+#define CE_COUNT_MAX 16
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/* Threshold to poll for tx completion in case of interrupt-disabled CEs */
+#define ATH12K_CE_USAGE_THRESHOLD 32
+
+/* Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
+
+/* CE address/mask */
+#define CE_HOST_IE_ADDRESS 0x00A1803C
+#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+
+#define CE_HOST_IE_3_SHIFT 0xC
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+
+#define ATH12K_CE_RX_POST_RETRY_JIFFIES 50
+
+struct ath12k_base;
+
+/* Establish a mapping between a service/direction and a pipe.
+ * Configuration information for a Copy Engine pipe and services.
+ * Passed from Host to Target through QMI message and must be in
+ * little endian format.
+ */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
+
+/* Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target through QMI message during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /* Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
+};
+
+#define CE_DESC_RING_ALIGN 8
+
+struct ath12k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /* For dest ring, this is the next index to be processed
+ * by software after it was/is received into.
+ *
+ * For src ring, this is the last descriptor that was sent
+ * and completion processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+
+ /* keep last */
+ struct sk_buff *skb[];
+};
+
+struct ath12k_ce_pipe {
+ struct ath12k_base *ab;
+ u16 pipe_num;
+ unsigned int attr_flags;
+ unsigned int buf_sz;
+ unsigned int rx_buf_needed;
+
+ void (*send_cb)(struct ath12k_ce_pipe *pipe);
+ void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
+
+ struct tasklet_struct intr_tq;
+ struct ath12k_ce_ring *src_ring;
+ struct ath12k_ce_ring *dest_ring;
+ struct ath12k_ce_ring *status_ring;
+ u64 timestamp;
+};
+
+struct ath12k_ce {
+ struct ath12k_ce_pipe ce_pipe[CE_COUNT_MAX];
+ /* Protects rings of all ce pipes */
+ spinlock_t ce_lock;
+ struct ath12k_hp_update_timer hp_timer[CE_COUNT_MAX];
+};
+
+extern const struct ce_attr ath12k_host_ce_config_qcn9274[];
+extern const struct ce_attr ath12k_host_ce_config_wcn7850[];
+
+void ath12k_ce_cleanup_pipes(struct ath12k_base *ab);
+void ath12k_ce_rx_replenish_retry(struct timer_list *t);
+void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id);
+int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id);
+void ath12k_ce_rx_post_buf(struct ath12k_base *ab);
+int ath12k_ce_init_pipes(struct ath12k_base *ab);
+int ath12k_ce_alloc_pipes(struct ath12k_base *ab);
+void ath12k_ce_free_pipes(struct ath12k_base *ab);
+int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id);
+void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id);
+int ath12k_ce_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+int ath12k_ce_attr_attach(struct ath12k_base *ab);
+void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
new file mode 100644
index 000000000000..a89e66653f04
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+
+unsigned int ath12k_debug_mask;
+module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+int ath12k_core_suspend(struct ath12k_base *ab)
+{
+ int ret;
+
+ if (!ab->hw_params->supports_suspend)
+ return -EOPNOTSUPP;
+
+ /* TODO: there can be frames in queues, so for now add a delay as a hack.
+ * Need to implement proper handling and then remove this delay.
+ */
+ msleep(500);
+
+ ret = ath12k_dp_rx_pktlog_stop(ab, true);
+ if (ret) {
+ ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath12k_dp_rx_pktlog_stop(ab, false);
+ if (ret) {
+ ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath12k_hif_irq_disable(ab);
+ ath12k_hif_ce_irq_disable(ab);
+
+ ret = ath12k_hif_suspend(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to suspend hif: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_core_resume(struct ath12k_base *ab)
+{
+ int ret;
+
+ if (!ab->hw_params->supports_suspend)
+ return -EOPNOTSUPP;
+
+ ret = ath12k_hif_resume(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_hif_ce_irq_enable(ab);
+ ath12k_hif_irq_enable(ab);
+
+ ret = ath12k_dp_rx_pktlog_start(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
+ char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+
+ if (ab->qmi.target.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ab->qmi.target.bdf_ext);
+
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
+
+ return 0;
+}
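
For illustration, the format string above produces names like the following; the values here are hypothetical, and without a bdf_ext the ",variant=..." suffix is simply absent:

#include <stdio.h>

int main(void)
{
	char variant[32] = ",variant=abc";	/* hypothetical bdf_ext */
	char name[100];

	snprintf(name, sizeof(name),
		 "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
		 "pci", 2, 255, variant);

	/* prints: bus=pci,qmi-chip-id=2,qmi-board-id=255,variant=abc */
	puts(name);
	return 0;
}
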
+
+const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
+ const char *file)
+{
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+ if (!file)
+ return ERR_PTR(-ENOENT);
+
+ ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
+
+ ret = firmware_request_nowarn(&fw, path, ab->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
+ path, fw->size);
+
+ return fw;
+}
+
+void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+ if (!IS_ERR(bd->fw))
+ release_firmware(bd->fw);
+
+ memset(bd, 0, sizeof(*bd));
+}
+
+static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
+ struct ath12k_board_data *bd,
+ const void *buf, size_t buf_len,
+ const char *boardname,
+ int bd_ie_type)
+{
+ const struct ath12k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH12K_BD_IE_BOARD_ elements */
+ while (buf_len > sizeof(struct ath12k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (board_ie_id) {
+ case ATH12K_BD_IE_BOARD_NAME:
+ ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ break;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ break;
+
+ name_match_found = true;
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot found match for name '%s'",
+ boardname);
+ break;
+ case ATH12K_BD_IE_BOARD_DATA:
+ if (!name_match_found)
+ /* no match found */
+ break;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot found board data for '%s'", boardname);
+
+ bd->data = board_ie_data;
+ bd->len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ default:
+ ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
+ board_ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
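
The walk above follows a simple TLV layout. The ath12k_fw_ie definition is not part of this hunk, so the standalone sketch below assumes its shape from usage: a little-endian id and length followed by the payload, each element padded to a 4-byte boundary (endianness conversion omitted for brevity):

#include <stddef.h>
#include <stdint.h>

struct fw_ie {			/* assumed layout of ath12k_fw_ie */
	uint32_t id;		/* __le32 in the driver */
	uint32_t len;		/* __le32 in the driver */
	uint8_t data[];
};

#define ALIGN4(x) (((x) + 3) & ~(size_t)3)

/* Return a pointer to the payload of the first element matching
 * wanted_id, or NULL if the buffer ends or an element is truncated.
 */
static const uint8_t *find_ie(const uint8_t *buf, size_t buf_len,
			      uint32_t wanted_id, size_t *ie_len)
{
	while (buf_len > sizeof(struct fw_ie)) {
		const struct fw_ie *hdr = (const void *)buf;
		size_t len = hdr->len;

		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		if (buf_len < ALIGN4(len))
			return NULL;	/* truncated element */

		if (hdr->id == wanted_id) {
			*ie_len = len;
			return buf;
		}

		/* jump over the payload and its padding */
		buf += ALIGN4(len);
		buf_len -= ALIGN4(len);
	}

	return NULL;
}
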
+
+static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
+ struct ath12k_board_data *bd,
+ const char *boardname)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ char *filename, filepath[100];
+ size_t ie_len;
+ struct ath12k_fw_ie *hdr;
+ int ret, ie_id;
+
+ filename = ATH12K_BOARD_API2_FILE;
+
+ if (!bd->fw)
+ bd->fw = ath12k_core_firmware_request(ab, filename);
+
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ data = bd->fw->data;
+ len = bd->fw->size;
+
+ ath12k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+
+ /* magic has extra null byte padded */
+ magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
+ filepath, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
+ ath12k_err(ab, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
+ filepath, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ while (len > sizeof(struct ath12k_fw_ie)) {
+ hdr = (struct ath12k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH12K_BD_IE_BOARD:
+ ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
+ ie_len,
+ boardname,
+ ATH12K_BD_IE_BOARD);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+ else if (ret)
+ /* there was an error, bail out */
+ goto err;
+ /* either found or error, so stop searching */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ if (!bd->data || !bd->len) {
+ ath12k_err(ab,
+ "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ ret = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath12k_core_free_bdf(ab, bd);
+ return ret;
+}
+
+int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
+ struct ath12k_board_data *bd,
+ char *filename)
+{
+ bd->fw = ath12k_core_firmware_request(ab, filename);
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ bd->data = bd->fw->data;
+ bd->len = bd->fw->size;
+
+ return 0;
+}
+
+#define BOARD_NAME_SIZE 100
+int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_err(ab, "failed to create board name: %d", ret);
+ return ret;
+ }
+
+ ab->bd_api = 2;
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
+ if (!ret)
+ goto success;
+
+ ab->bd_api = 1;
+ ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
+ if (ret) {
+ ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ab->hw_params->fw.dir);
+ return ret;
+ }
+
+success:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+ return 0;
+}
+
+static void ath12k_core_stop(struct ath12k_base *ab)
+{
+ if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath12k_qmi_firmware_stop(ab);
+
+ ath12k_hif_stop(ab);
+ ath12k_wmi_detach(ab);
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
+
+ /* De-Init of components as needed */
+}
+
+static int ath12k_core_soc_create(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_qmi_init_service(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_hif_power_up(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to power up :%d\n", ret);
+ goto err_qmi_deinit;
+ }
+
+ return 0;
+
+err_qmi_deinit:
+ ath12k_qmi_deinit_service(ab);
+ return ret;
+}
+
+static void ath12k_core_soc_destroy(struct ath12k_base *ab)
+{
+ ath12k_dp_free(ab);
+ ath12k_reg_free(ab);
+ ath12k_qmi_deinit_service(ab);
+}
+
+static int ath12k_core_pdev_create(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_mac_register(ab);
+ if (ret) {
+ ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_dp_pdev_alloc(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
+ goto err_mac_unregister;
+ }
+
+ return 0;
+
+err_mac_unregister:
+ ath12k_mac_unregister(ab);
+
+ return ret;
+}
+
+static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
+{
+ ath12k_mac_unregister(ab);
+ ath12k_hif_irq_disable(ab);
+ ath12k_dp_pdev_free(ab);
+}
+
+static int ath12k_core_start(struct ath12k_base *ab,
+ enum ath12k_firmware_mode mode)
+{
+ int ret;
+
+ ret = ath12k_wmi_attach(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to attach wmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_htc_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init htc: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath12k_hif_start(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to start HIF: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath12k_htc_wait_target(&ab->htc);
+ if (ret) {
+ ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_dp_htt_connect(&ab->dp);
+ if (ret) {
+ ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_wmi_connect(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to connect wmi: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_htc_start(&ab->htc);
+ if (ret) {
+ ath12k_err(ab, "failed to start HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_wmi_wait_for_service_ready(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_mac_allocate(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ath12k_dp_cc_config(ab);
+
+ ath12k_dp_pdev_pre_alloc(ab);
+
+ ret = ath12k_dp_rx_pdev_reo_setup(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
+ goto err_mac_destroy;
+ }
+
+ ret = ath12k_wmi_cmd_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath12k_wmi_wait_for_unified_ready(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ /* put hardware into DBS mode */
+ if (ab->hw_params->single_pdev_only) {
+ ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
+ if (ret) {
+ ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to send htt version request message: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
+err_mac_destroy:
+ ath12k_mac_destroy(ab);
+err_hif_stop:
+ ath12k_hif_stop(ab);
+err_wmi_detach:
+ ath12k_wmi_detach(ab);
+ return ret;
+}
+
+static int ath12k_core_start_firmware(struct ath12k_base *ab,
+ enum ath12k_firmware_mode mode)
+{
+ int ret;
+
+ ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
+ &ab->qmi.ce_cfg.shadow_reg_v3_len);
+
+ ret = ath12k_qmi_firmware_start(ab, mode);
+ if (ret) {
+ ath12k_err(ab, "failed to send firmware start: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath12k_err(ab, "failed to start firmware: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_ce_init_pipes(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to initialize CE: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ ret = ath12k_dp_alloc(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init DP: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ mutex_lock(&ab->core_lock);
+ ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath12k_err(ab, "failed to start core: %d\n", ret);
+ goto err_dp_free;
+ }
+
+ ret = ath12k_core_pdev_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create pdev core: %d\n", ret);
+ goto err_core_stop;
+ }
+ ath12k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
+
+ return 0;
+
+err_core_stop:
+ ath12k_core_stop(ab);
+ ath12k_mac_destroy(ab);
+err_dp_free:
+ ath12k_dp_free(ab);
+ mutex_unlock(&ab->core_lock);
+err_firmware_stop:
+ ath12k_qmi_firmware_stop(ab);
+
+ return ret;
+}
+
+static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_hif_irq_disable(ab);
+ ath12k_dp_pdev_free(ab);
+ ath12k_hif_stop(ab);
+ ath12k_wmi_detach(ab);
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
+ mutex_unlock(&ab->core_lock);
+
+ ath12k_dp_free(ab);
+ ath12k_hal_srng_deinit(ab);
+
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ ret = ath12k_hal_srng_init(ab);
+ if (ret)
+ return ret;
+
+ clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath12k_core_qmi_firmware_ready(ab);
+ if (ret)
+ goto err_hal_srng_deinit;
+
+ clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+
+ return 0;
+
+err_hal_srng_deinit:
+ ath12k_hal_srng_deinit(ab);
+ return ret;
+}
+
+void ath12k_core_halt(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->num_created_vdevs = 0;
+ ar->allocated_vdev_map = 0;
+
+ ath12k_mac_scan_finish(ar);
+ ath12k_mac_peer_cleanup_all(ar);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&ar->arvifs);
+ idr_init(&ar->txmgmt_idr);
+}
+
+static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ spin_lock_bh(&ab->base_lock);
+ ab->stats.fw_crash_counter++;
+ spin_unlock_bh(&ab->base_lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH12K_STATE_OFF)
+ continue;
+
+ ieee80211_stop_queues(ar->hw);
+ ath12k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->peer_delete_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath12k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ }
+
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+ wake_up(&ab->peer_mapping_wq);
+}
+
+static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH12K_STATE_OFF)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH12K_STATE_ON:
+ ar->state = ATH12K_STATE_RESTARTING;
+ ath12k_core_halt(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH12K_STATE_OFF:
+ ath12k_warn(ab,
+ "cannot restart radio %d that hasn't been started\n",
+ i);
+ break;
+ case ATH12K_STATE_RESTARTING:
+ break;
+ case ATH12K_STATE_RESTARTED:
+ ar->state = ATH12K_STATE_WEDGED;
+ fallthrough;
+ case ATH12K_STATE_WEDGED:
+ ath12k_warn(ab,
+ "device is wedged, will not restart radio %d\n", i);
+ break;
+ }
+ mutex_unlock(&ar->conf_mutex);
+ }
+ complete(&ab->driver_recovery);
+}
+
+static void ath12k_core_restart(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
+ int ret;
+
+ if (!ab->is_reset)
+ ath12k_core_pre_reconfigure_recovery(ab);
+
+ ret = ath12k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ if (ab->is_reset)
+ complete_all(&ab->reconfigure_complete);
+
+ if (!ab->is_reset)
+ ath12k_core_post_reconfigure_recovery(ab);
+}
+
+static void ath12k_core_reset(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
+ int reset_count, fail_cont_count;
+ long time_left;
+
+ if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+ ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
+ return;
+ }
+
+ /* Sometimes recovery fails and then every subsequent attempt fails as
+ * well; bail out after too many consecutive failures to avoid an
+ * infinite recovery loop that can never succeed.
+ */
+ fail_cont_count = atomic_read(&ab->fail_cont_count);
+
+ if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
+ return;
+
+ if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
+ time_before(jiffies, ab->reset_fail_timeout))
+ return;
+
+ reset_count = atomic_inc_return(&ab->reset_count);
+
+ if (reset_count > 1) {
+ /* Another reset worker may be scheduled before the previous one
+ * completes; without this check the second worker would destroy the
+ * state the first one is still using.
+ */
+ ath12k_warn(ab, "already resetting count %d\n", reset_count);
+
+ reinit_completion(&ab->reset_complete);
+ time_left = wait_for_completion_timeout(&ab->reset_complete,
+ ATH12K_RESET_TIMEOUT_HZ);
+ if (time_left) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
+ atomic_dec(&ab->reset_count);
+ return;
+ }
+
+ ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
+ /* Record the consecutive recovery failure count when recovery fails */
+ fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
+
+ ab->is_reset = true;
+ atomic_set(&ab->recovery_count, 0);
+
+ ath12k_core_pre_reconfigure_recovery(ab);
+
+ reinit_completion(&ab->reconfigure_complete);
+ ath12k_core_post_reconfigure_recovery(ab);
+
+ reinit_completion(&ab->recovery_start);
+ atomic_set(&ab->recovery_start_count, 0);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
+
+ time_left = wait_for_completion_timeout(&ab->recovery_start,
+ ATH12K_RECOVER_START_TIMEOUT_HZ);
+
+ ath12k_hif_power_down(ab);
+ ath12k_hif_power_up(ab);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+}
+
+int ath12k_core_pre_init(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_hw_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init hw params: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_core_init(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_core_soc_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath12k_core_deinit(struct ath12k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath12k_core_pdev_destroy(ab);
+ ath12k_core_stop(ab);
+
+ mutex_unlock(&ab->core_lock);
+
+ ath12k_hif_power_down(ab);
+ ath12k_mac_destroy(ab);
+ ath12k_core_soc_destroy(ab);
+}
+
+void ath12k_core_free(struct ath12k_base *ab)
+{
+ destroy_workqueue(ab->workqueue_aux);
+ destroy_workqueue(ab->workqueue);
+ kfree(ab);
+}
+
+struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath12k_bus bus)
+{
+ struct ath12k_base *ab;
+
+ ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
+ if (!ab)
+ return NULL;
+
+ init_completion(&ab->driver_recovery);
+
+ ab->workqueue = create_singlethread_workqueue("ath12k_wq");
+ if (!ab->workqueue)
+ goto err_sc_free;
+
+ ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
+ if (!ab->workqueue_aux)
+ goto err_free_wq;
+
+ mutex_init(&ab->core_lock);
+ spin_lock_init(&ab->base_lock);
+ init_completion(&ab->reset_complete);
+ init_completion(&ab->reconfigure_complete);
+ init_completion(&ab->recovery_start);
+
+ INIT_LIST_HEAD(&ab->peers);
+ init_waitqueue_head(&ab->peer_mapping_wq);
+ init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
+ INIT_WORK(&ab->restart_work, ath12k_core_restart);
+ INIT_WORK(&ab->reset_work, ath12k_core_reset);
+ timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
+ init_completion(&ab->htc_suspend);
+
+ ab->dev = dev;
+ ab->hif.bus = bus;
+
+ return ab;
+
+err_free_wq:
+ destroy_workqueue(ab->workqueue);
+err_sc_free:
+ kfree(ab);
+ return NULL;
+}
+
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
new file mode 100644
index 000000000000..a54ae74543c1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -0,0 +1,822 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_CORE_H
+#define ATH12K_CORE_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitfield.h>
+#include "qmi.h"
+#include "htc.h"
+#include "wmi.h"
+#include "hal.h"
+#include "dp.h"
+#include "ce.h"
+#include "mac.h"
+#include "hw.h"
+#include "hal_rx.h"
+#include "reg.h"
+#include "dbring.h"
+
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+
+#define ATH12K_TX_MGMT_NUM_PENDING_MAX 512
+
+#define ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
+
+/* Pending management packets threshold for dropping probe responses */
+#define ATH12K_PRB_RSP_DROP_THRESHOLD ((ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
+
+#define ATH12K_INVALID_HW_MAC_ID 0xFF
+#define ATH12K_RX_RATE_TABLE_NUM 320
+#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
+
+#define ATH12K_MON_TIMER_INTERVAL 10
+#define ATH12K_RESET_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_RESET_MAX_FAIL_COUNT_FIRST 3
+#define ATH12K_RESET_MAX_FAIL_COUNT_FINAL 5
+#define ATH12K_RESET_FAIL_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
+#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+
+enum wme_ac {
+ WME_AC_BE,
+ WME_AC_BK,
+ WME_AC_VI,
+ WME_AC_VO,
+ WME_NUM_AC
+};
+
+#define ATH12K_HT_MCS_MAX 7
+#define ATH12K_VHT_MCS_MAX 9
+#define ATH12K_HE_MCS_MAX 11
+
+enum ath12k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH12K_CRYPT_MODE_HW,
+ /* Only use software crypto */
+ ATH12K_CRYPT_MODE_SW,
+};
+
+static inline enum wme_ac ath12k_tid_to_ac(u32 tid)
+{
+ return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
+ ((tid == 1) || (tid == 2)) ? WME_AC_BK :
+ ((tid == 4) || (tid == 5)) ? WME_AC_VI :
+ WME_AC_VO);
+}
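
The inline above folds the eight 802.11 QoS TIDs into the four WMM access categories. A usage sketch; extracting the TID from skb->priority is illustrative, not a path taken by this patch:

static enum wme_ac example_skb_to_ac(struct sk_buff *skb)
{
	/* TIDs 0/3 -> BE, 1/2 -> BK, 4/5 -> VI, 6/7 -> VO */
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;

	return ath12k_tid_to_ac(tid);
}
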
+
+enum ath12k_skb_flags {
+ ATH12K_SKB_HW_80211_ENCAP = BIT(0),
+ ATH12K_SKB_CIPHER_SET = BIT(1),
+};
+
+struct ath12k_skb_cb {
+ dma_addr_t paddr;
+ struct ath12k *ar;
+ struct ieee80211_vif *vif;
+ dma_addr_t paddr_ext_desc;
+ u32 cipher;
+ u8 flags;
+};
+
+struct ath12k_skb_rxcb {
+ dma_addr_t paddr;
+ bool is_first_msdu;
+ bool is_last_msdu;
+ bool is_continuation;
+ bool is_mcbc;
+ bool is_eapol;
+ struct hal_rx_desc *rx_desc;
+ u8 err_rel_src;
+ u8 err_code;
+ u8 mac_id;
+ u8 unmapped;
+ u8 is_frag;
+ u8 tid;
+ u16 peer_id;
+};
+
+enum ath12k_hw_rev {
+ ATH12K_HW_QCN9274_HW10,
+ ATH12K_HW_QCN9274_HW20,
+ ATH12K_HW_WCN7850_HW20
+};
+
+enum ath12k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH12K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH12K_FIRMWARE_MODE_FTM,
+};
+
+#define ATH12K_IRQ_NUM_MAX 57
+#define ATH12K_EXT_IRQ_NUM_MAX 16
+
+struct ath12k_ext_irq_grp {
+ struct ath12k_base *ab;
+ u32 irqs[ATH12K_EXT_IRQ_NUM_MAX];
+ u32 num_irq;
+ u32 grp_id;
+ u64 timestamp;
+ struct napi_struct napi;
+ struct net_device napi_ndev;
+};
+
+#define HEHANDLE_CAP_PHYINFO_SIZE 3
+#define HECAP_PHYINFO_SIZE 9
+#define HECAP_MACINFO_SIZE 5
+#define HECAP_TXRX_MCS_NSS_SIZE 2
+#define HECAP_PPET16_PPET8_MAX_SIZE 25
+
+#define HE_PPET16_PPET8_SIZE 8
+
+/* 802.11ax PPE (PPDU Packet Extension) threshold */
+struct he_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_mask;
+ u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE];
+};
+
+struct ath12k_he {
+ u8 hecap_macinfo[HECAP_MACINFO_SIZE];
+ u32 hecap_rxmcsnssmap;
+ u32 hecap_txmcsnssmap;
+ u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE];
+ struct he_ppe_threshold hecap_ppet;
+ u32 heop_param;
+};
+
+#define MAX_RADIOS 3
+
+enum {
+ WMI_HOST_TP_SCALE_MAX = 0,
+ WMI_HOST_TP_SCALE_50 = 1,
+ WMI_HOST_TP_SCALE_25 = 2,
+ WMI_HOST_TP_SCALE_12 = 3,
+ WMI_HOST_TP_SCALE_MIN = 4,
+ WMI_HOST_TP_SCALE_SIZE = 5,
+};
+
+enum ath12k_scan_state {
+ ATH12K_SCAN_IDLE,
+ ATH12K_SCAN_STARTING,
+ ATH12K_SCAN_RUNNING,
+ ATH12K_SCAN_ABORTING,
+};
+
+enum ath12k_dev_flags {
+ ATH12K_CAC_RUNNING,
+ ATH12K_FLAG_CRASH_FLUSH,
+ ATH12K_FLAG_RAW_MODE,
+ ATH12K_FLAG_HW_CRYPTO_DISABLED,
+ ATH12K_FLAG_RECOVERY,
+ ATH12K_FLAG_UNREGISTERING,
+ ATH12K_FLAG_REGISTERED,
+ ATH12K_FLAG_QMI_FAIL,
+ ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
+};
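
These flags index into the unsigned long ab->dev_flags bitmap and are manipulated with the standard atomic bitops, as the core code above does. A one-liner sketch (the helper name is illustrative):

static bool example_is_recovering(struct ath12k_base *ab)
{
	/* set_bit()/clear_bit()/test_bit() keep the bitmap updates atomic */
	return test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
}
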
+
+enum ath12k_monitor_flags {
+ ATH12K_FLAG_MONITOR_ENABLED,
+};
+
+struct ath12k_vif {
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u16 ast_hash;
+ u16 ast_idx;
+ u16 tcl_metadata;
+ u8 hal_addr_search_flags;
+ u8 search_type;
+
+ struct ath12k *ar;
+ struct ieee80211_vif *vif;
+
+ int bank_id;
+ u8 vdev_id_check_en;
+
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct list_head list;
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool is_started;
+ bool is_up;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+ bool rsnie_present;
+ bool wpaie_present;
+ struct ieee80211_chanctx_conf chanctx;
+ u32 key_cipher;
+ u8 tx_encap_type;
+ u8 vdev_stats_id;
+};
+
+struct ath12k_vif_iter {
+ u32 vdev_id;
+ struct ath12k_vif *arvif;
+};
+
+#define HAL_AST_IDX_INVALID 0xFFFF
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_MCS_HT 31
+#define HAL_RX_MAX_MCS_VHT 9
+#define HAL_RX_MAX_MCS_HE 11
+#define HAL_RX_MAX_NSS 8
+#define HAL_RX_MAX_NUM_LEGACY_RATES 12
+
+struct ath12k_rx_peer_rate_stats {
+ u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
+ u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
+ u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
+ u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM];
+};
+
+struct ath12k_rx_peer_stats {
+ u64 num_msdu;
+ u64 num_mpdu_fcs_ok;
+ u64 num_mpdu_fcs_err;
+ u64 tcp_msdu_count;
+ u64 udp_msdu_count;
+ u64 other_msdu_count;
+ u64 ampdu_msdu_count;
+ u64 non_ampdu_msdu_count;
+ u64 stbc_count;
+ u64 beamformed_count;
+ u64 mcs_count[HAL_RX_MAX_MCS + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
+ u64 tid_count[IEEE80211_NUM_TIDS + 1];
+ u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
+ u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
+ u64 rx_duration;
+ u64 dcm_count;
+ u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX];
+ struct ath12k_rx_peer_rate_stats pkt_stats;
+ struct ath12k_rx_peer_rate_stats byte_stats;
+};
+
+#define ATH12K_HE_MCS_NUM 12
+#define ATH12K_VHT_MCS_NUM 10
+#define ATH12K_BW_NUM 5
+#define ATH12K_NSS_NUM 4
+#define ATH12K_LEGACY_NUM 12
+#define ATH12K_GI_NUM 4
+#define ATH12K_HT_MCS_NUM 32
+
+enum ath12k_pkt_rx_err {
+ ATH12K_PKT_RX_ERR_FCS,
+ ATH12K_PKT_RX_ERR_TKIP,
+ ATH12K_PKT_RX_ERR_CRYPT,
+ ATH12K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH12K_PKT_RX_ERR_MAX,
+};
+
+enum ath12k_ampdu_subfrm_num {
+ ATH12K_AMPDU_SUBFRM_NUM_10,
+ ATH12K_AMPDU_SUBFRM_NUM_20,
+ ATH12K_AMPDU_SUBFRM_NUM_30,
+ ATH12K_AMPDU_SUBFRM_NUM_40,
+ ATH12K_AMPDU_SUBFRM_NUM_50,
+ ATH12K_AMPDU_SUBFRM_NUM_60,
+ ATH12K_AMPDU_SUBFRM_NUM_MORE,
+ ATH12K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath12k_amsdu_subfrm_num {
+ ATH12K_AMSDU_SUBFRM_NUM_1,
+ ATH12K_AMSDU_SUBFRM_NUM_2,
+ ATH12K_AMSDU_SUBFRM_NUM_3,
+ ATH12K_AMSDU_SUBFRM_NUM_4,
+ ATH12K_AMSDU_SUBFRM_NUM_MORE,
+ ATH12K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+enum ath12k_counter_type {
+ ATH12K_COUNTER_TYPE_BYTES,
+ ATH12K_COUNTER_TYPE_PKTS,
+ ATH12K_COUNTER_TYPE_MAX,
+};
+
+enum ath12k_stats_type {
+ ATH12K_STATS_TYPE_SUCC,
+ ATH12K_STATS_TYPE_FAIL,
+ ATH12K_STATS_TYPE_RETRY,
+ ATH12K_STATS_TYPE_AMPDU,
+ ATH12K_STATS_TYPE_MAX,
+};
+
+struct ath12k_htt_data_stats {
+ u64 legacy[ATH12K_COUNTER_TYPE_MAX][ATH12K_LEGACY_NUM];
+ u64 ht[ATH12K_COUNTER_TYPE_MAX][ATH12K_HT_MCS_NUM];
+ u64 vht[ATH12K_COUNTER_TYPE_MAX][ATH12K_VHT_MCS_NUM];
+ u64 he[ATH12K_COUNTER_TYPE_MAX][ATH12K_HE_MCS_NUM];
+ u64 bw[ATH12K_COUNTER_TYPE_MAX][ATH12K_BW_NUM];
+ u64 nss[ATH12K_COUNTER_TYPE_MAX][ATH12K_NSS_NUM];
+ u64 gi[ATH12K_COUNTER_TYPE_MAX][ATH12K_GI_NUM];
+ u64 transmit_type[ATH12K_COUNTER_TYPE_MAX][HAL_RX_RECEPTION_TYPE_MAX];
+ u64 ru_loc[ATH12K_COUNTER_TYPE_MAX][HAL_RX_RU_ALLOC_TYPE_MAX];
+};
+
+struct ath12k_htt_tx_stats {
+ struct ath12k_htt_data_stats stats[ATH12K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+ u16 ru_start;
+ u16 ru_tones;
+ u32 mu_group[MAX_MU_GROUP_ID];
+};
+
+struct ath12k_per_ppdu_tx_stats {
+ u16 succ_pkts;
+ u16 failed_pkts;
+ u16 retry_pkts;
+ u32 succ_bytes;
+ u32 failed_bytes;
+ u32 retry_bytes;
+};
+
+struct ath12k_wbm_tx_stats {
+ u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
+};
+
+struct ath12k_sta {
+ struct ath12k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+ enum hal_pn_type pn_type;
+
+ struct work_struct update_wk;
+ struct rate_info txrate;
+ struct rate_info last_txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_wbm_tx_stats *wbm_tx_stats;
+};
+
+#define ATH12K_MIN_5G_FREQ 4150
+#define ATH12K_MIN_6G_FREQ 5945
+#define ATH12K_MAX_6G_FREQ 7115
+#define ATH12K_NUM_CHANS 100
+#define ATH12K_MAX_5G_CHAN 173
+
+enum ath12k_state {
+ ATH12K_STATE_OFF,
+ ATH12K_STATE_ON,
+ ATH12K_STATE_RESTARTING,
+ ATH12K_STATE_RESTARTED,
+ ATH12K_STATE_WEDGED,
+ /* Add other states as required */
+};
+
+/* Antenna noise floor */
+#define ATH12K_DEFAULT_NOISE_FLOOR -95
+
+struct ath12k_fw_stats {
+ u32 pdev_id;
+ u32 stats_id;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head bcn;
+};
+
+struct ath12k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u32 duration;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u16 ru_start;
+ u16 ru_tones;
+ u8 ba_fails;
+ u8 ppdu_type;
+ u32 mu_grpid;
+ u32 mu_pos;
+ bool is_ampdu;
+};
+
+#define ATH12K_FLUSH_TIMEOUT (5 * HZ)
+#define ATH12K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
+
+struct ath12k {
+ struct ath12k_base *ab;
+ struct ath12k_pdev *pdev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct ath12k_wmi_pdev *wmi;
+ struct ath12k_pdev_dp dp;
+ u8 mac_addr[ETH_ALEN];
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ struct ath12k_he ar_he;
+ enum ath12k_state state;
+ bool supports_6ghz;
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath12k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ struct ieee80211_sband_iftype_data
+ iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+ } mac;
+
+ unsigned long dev_flags;
+ unsigned int filter_flags;
+ unsigned long monitor_flags;
+ u32 min_tx_power;
+ u32 max_tx_power;
+ u32 txpower_limit_2g;
+ u32 txpower_limit_5g;
+ u32 txpower_scale;
+ u32 power_scale;
+ u32 chan_tx_pwr;
+ u32 num_stations;
+ u32 max_num_stations;
+ bool monitor_present;
+ /* To synchronize concurrent synchronous mac80211 callback operations,
+ * concurrent debugfs configuration and concurrent FW statistics events.
+ */
+ struct mutex conf_mutex;
+ /* protects the radio specific data like debug stats, ppdu_stats_info stats,
+ * vdev_stop_status info, scan data, ath12k_sta info, ath12k_vif info,
+ * channel context data, survey info, test mode data.
+ */
+ spinlock_t data_lock;
+
+ struct list_head arvifs;
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+ u8 num_rx_chains;
+ u8 num_tx_chains;
+ /* pdev_idx starts from 0, whereas pdev->pdev_id starts from 1 */
+ u8 pdev_idx;
+ u8 lmac_id;
+
+ struct completion peer_assoc_done;
+ struct completion peer_delete_done;
+
+ int install_key_status;
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct completion vdev_setup_done;
+ struct completion vdev_delete_done;
+
+ int num_peers;
+ int max_num_peers;
+ u32 num_started_vdevs;
+ u32 num_created_vdevs;
+ unsigned long long allocated_vdev_map;
+
+ struct idr txmgmt_idr;
+ /* protects txmgmt_idr data */
+ spinlock_t txmgmt_idr_lock;
+ atomic_t num_pending_mgmt_tx;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However, there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct survey_info survey[ATH12K_NUM_CHANS];
+ struct completion bss_survey_done;
+
+ struct work_struct regd_update_work;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ struct ath12k_per_peer_tx_stats peer_tx_stats;
+ struct list_head ppdu_stats_info;
+ u32 ppdu_stat_list_depth;
+
+ struct ath12k_per_peer_tx_stats cached_stats;
+ u32 last_ppdu_id;
+ u32 cached_ppdu_id;
+
+ bool dfs_block_radar_events;
+ bool monitor_conf_enabled;
+ bool monitor_vdev_created;
+ bool monitor_started;
+ int monitor_vdev_id;
+};
+
+struct ath12k_band_cap {
+ u32 phy_id;
+ u32 max_bw_supported;
+ u32 ht_cap_info;
+ u32 he_cap_info[2];
+ u32 he_mcs;
+ u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+ struct ath12k_wmi_ppe_threshold_arg he_ppet;
+ u16 he_6ghz_capa;
+};
+
+struct ath12k_pdev_cap {
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 vht_cap;
+ u32 vht_mcs;
+ u32 he_mcs;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 tx_chain_mask_shift;
+ u32 rx_chain_mask_shift;
+ struct ath12k_band_cap band[NUM_NL80211_BANDS];
+};
+
+struct mlo_timestamp {
+ u32 info;
+ u32 sync_timestamp_lo_us;
+ u32 sync_timestamp_hi_us;
+ u32 mlo_offset_lo;
+ u32 mlo_offset_hi;
+ u32 mlo_offset_clks;
+ u32 mlo_comp_clks;
+ u32 mlo_comp_timer;
+};
+
+struct ath12k_pdev {
+ struct ath12k *ar;
+ u32 pdev_id;
+ struct ath12k_pdev_cap cap;
+ u8 mac_addr[ETH_ALEN];
+ struct mlo_timestamp timestamp;
+};
+
+struct ath12k_board_data {
+ const struct firmware *fw;
+ const void *data;
+ size_t len;
+};
+
+struct ath12k_soc_dp_tx_err_stats {
+ /* TCL Ring Descriptor unavailable */
+ u32 desc_na[DP_TCL_NUM_RING_MAX];
+ /* Other failures during dp_tx due to mem allocation failure
+ * idr unavailable etc.
+ */
+ atomic_t misc_fail;
+};
+
+struct ath12k_soc_dp_stats {
+ u32 err_ring_pkts;
+ u32 invalid_rbm;
+ u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+ u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+ u32 hal_reo_error[DP_REO_DST_RING_MAX];
+ struct ath12k_soc_dp_tx_err_stats tx_err;
+};
+
+/* Master structure to hold the hw data which may be used in core module */
+struct ath12k_base {
+ enum ath12k_hw_rev hw_rev;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct ath12k_qmi qmi;
+ struct ath12k_wmi_base wmi_ab;
+ struct completion fw_ready;
+ int num_radios;
+ /* HW channel counters frequency value in hertz common to all MACs */
+ u32 cc_freq_hz;
+
+ struct ath12k_htc htc;
+
+ struct ath12k_dp dp;
+
+ void __iomem *mem;
+ unsigned long mem_len;
+
+ struct {
+ enum ath12k_bus bus;
+ const struct ath12k_hif_ops *ops;
+ } hif;
+
+ struct ath12k_ce ce;
+ struct timer_list rx_replenish_retry;
+ struct ath12k_hal hal;
+ /* To synchronize core_start/core_stop */
+ struct mutex core_lock;
+ /* Protects data like peers */
+ spinlock_t base_lock;
+ struct ath12k_pdev pdevs[MAX_RADIOS];
+ struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
+ unsigned long long free_vdev_map;
+ unsigned long long free_vdev_stats_id_map;
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+ u8 mac_addr[ETH_ALEN];
+ bool wmi_ready;
+ u32 wlan_init_status;
+ int irq_num[ATH12K_IRQ_NUM_MAX];
+ struct ath12k_ext_irq_grp ext_irq_grp[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ struct napi_struct *napi;
+ struct ath12k_wmi_target_cap_arg target_caps;
+ u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
+ bool pdevs_macaddr_valid;
+ int bd_api;
+
+ const struct ath12k_hw_params *hw_params;
+
+ const struct firmware *cal_file;
+
+ /* The regds below are protected by ab->data_lock */
+ /* This is the regd set for every radio
+ * by the firmware during initialization
+ */
+ struct ieee80211_regdomain *default_regd[MAX_RADIOS];
+ /* This regd is set during dynamic country setting.
+ * It may or may not be used during runtime.
+ */
+ struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+
+ /* Current DFS Regulatory */
+ enum ath12k_dfs_region dfs_region;
+ struct ath12k_soc_dp_stats soc_stats;
+
+ unsigned long dev_flags;
+ struct completion driver_recovery;
+ struct workqueue_struct *workqueue;
+ struct work_struct restart_work;
+ struct workqueue_struct *workqueue_aux;
+ struct work_struct reset_work;
+ atomic_t reset_count;
+ atomic_t recovery_count;
+ atomic_t recovery_start_count;
+ bool is_reset;
+ struct completion reset_complete;
+ struct completion reconfigure_complete;
+ struct completion recovery_start;
+ /* continuous recovery fail count */
+ atomic_t fail_cont_count;
+ unsigned long reset_fail_timeout;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ } stats;
+ u32 pktlog_defs_checksum;
+
+ struct ath12k_dbring_cap *db_caps;
+ u32 num_db_cap;
+
+ struct timer_list mon_reap_timer;
+
+ struct completion htc_suspend;
+
+ u64 fw_soc_drop_count;
+ bool static_window_map;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
+int ath12k_core_pre_init(struct ath12k_base *ab);
+int ath12k_core_init(struct ath12k_base *ath12k);
+void ath12k_core_deinit(struct ath12k_base *ath12k);
+struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath12k_bus bus);
+void ath12k_core_free(struct ath12k_base *ath12k);
+int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
+ struct ath12k_board_data *bd,
+ char *filename);
+int ath12k_core_fetch_bdf(struct ath12k_base *ath12k,
+ struct ath12k_board_data *bd);
+void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd);
+int ath12k_core_check_dt(struct ath12k_base *ath12k);
+
+void ath12k_core_halt(struct ath12k *ar);
+int ath12k_core_resume(struct ath12k_base *ab);
+int ath12k_core_suspend(struct ath12k_base *ab);
+
+const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
+ const char *filename);
+
+static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
+{
+ switch (state) {
+ case ATH12K_SCAN_IDLE:
+ return "idle";
+ case ATH12K_SCAN_STARTING:
+ return "starting";
+ case ATH12K_SCAN_RUNNING:
+ return "running";
+ case ATH12K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
+static inline struct ath12k_skb_cb *ATH12K_SKB_CB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath12k_skb_cb) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath12k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath12k_skb_rxcb *ATH12K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath12k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath12k_skb_rxcb *)skb->cb;
+}
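
Both helpers overlay per-packet driver state on the skb control block, with the BUILD_BUG_ON()s guarding against overflowing the available cb space. A sketch of the tx-side pattern (the function is illustrative; the fields are the ones defined above):

static void example_tag_tx_skb(struct ath12k *ar, struct ieee80211_vif *vif,
			       struct sk_buff *skb)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);

	skb_cb->ar = ar;
	skb_cb->vif = vif;
	skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
}
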
+
+static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath12k_vif *)vif->drv_priv;
+}
+
+static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab,
+ int mac_id)
+{
+ return ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
+}
+
+static inline void ath12k_core_create_firmware_path(struct ath12k_base *ab,
+ const char *filename,
+ void *buf, size_t buf_len)
+{
+ snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, filename);
+}
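
The helper composes "<ATH12K_FW_DIR>/<hw-specific fw dir>/<file>". A sketch, where the concrete directory name is illustrative:

static void example_fw_path(struct ath12k_base *ab)
{
	char path[100];

	/* with hw_params->fw.dir == "QCN9274/hw2.0" (illustrative), path
	 * becomes "<ATH12K_FW_DIR>/QCN9274/hw2.0/board-2.bin"
	 */
	ath12k_core_create_firmware_path(ab, "board-2.bin", path, sizeof(path));
}
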
+
+static inline const char *ath12k_bus_str(enum ath12k_bus bus)
+{
+ switch (bus) {
+ case ATH12K_BUS_PCI:
+ return "pci";
+ }
+
+ return "unknown";
+}
+
+#endif /* ATH12K_CORE_H */
diff --git a/drivers/net/wireless/ath/ath12k/dbring.c b/drivers/net/wireless/ath/ath12k/dbring.c
new file mode 100644
index 000000000000..8fbf868e6f7e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dbring.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "debug.h"
+
+static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ struct ath12k_dbring_element *buff,
+ gfp_t gfp)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ void *ptr_aligned, *ptr_unaligned, *desc;
+ int ret;
+ int buf_id;
+ u32 cookie;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+
+ lockdep_assert_held(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ ptr_unaligned = buff->payload;
+ ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
+ paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
+ DMA_FROM_DEVICE);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret)
+ goto err;
+
+ spin_lock_bh(&ring->idr_lock);
+ buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
+ spin_unlock_bh(&ring->idr_lock);
+ if (buf_id < 0) {
+ ret = -ENOBUFS;
+ goto err_dma_unmap;
+ }
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOENT;
+ goto err_idr_remove;
+ }
+
+ buff->paddr = paddr;
+
+ cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+ u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ return 0;
+
+err_idr_remove:
+ spin_lock_bh(&ring->idr_lock);
+ idr_remove(&ring->bufs_idr, buf_id);
+ spin_unlock_bh(&ring->idr_lock);
+err_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, ring->buf_sz,
+ DMA_FROM_DEVICE);
+err:
+ ath12k_hal_srng_access_end(ab, srng);
+ return ret;
+}
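
The cookie written into the ring descriptor packs (pdev_idx, buf_id) so the release path can look the buffer up again in the IDR. A decode sketch mirroring the u32_encode_bits() packing above (the helper itself is illustrative; the release event handler below does the same inline):

static void example_decode_cookie(u32 cookie, u8 *pdev_idx, int *buf_id)
{
	*pdev_idx = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_PDEV_ID);
	*buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
}
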
+
+static int ath12k_dbring_fill_bufs(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ gfp_t gfp)
+{
+ struct ath12k_dbring_element *buff;
+ struct hal_srng *srng;
+ struct ath12k_base *ab = ar->ab;
+ int num_remain, req_entries, num_free;
+ u32 align;
+ int size, ret;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
+ req_entries = min(num_free, ring->bufs_max);
+ num_remain = req_entries;
+ align = ring->buf_align;
+ size = sizeof(*buff) + ring->buf_sz + align - 1;
+
+ while (num_remain > 0) {
+ buff = kzalloc(size, gfp);
+ if (!buff)
+ break;
+
+ ret = ath12k_dbring_bufs_replenish(ar, ring, buff, gfp);
+ if (ret) {
+ ath12k_warn(ab, "failed to replenish db ring num_remain %d req_ent %d\n",
+ num_remain, req_entries);
+ kfree(buff);
+ break;
+ }
+ num_remain--;
+ }
+
+ spin_unlock_bh(&srng->lock);
+
+ return num_remain;
+}
+
+int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ enum wmi_direct_buffer_module id)
+{
+ struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0};
+ int ret;
+
+ if (id >= WMI_DIRECT_BUF_MAX)
+ return -EINVAL;
+
+ arg.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
+ arg.module_id = id;
+ arg.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
+ arg.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
+ arg.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
+ arg.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
+ arg.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
+ arg.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
+ arg.num_elems = ring->bufs_max;
+ arg.buf_size = ring->buf_sz;
+ arg.num_resp_per_event = ring->num_resp_per_event;
+ arg.event_timeout_ms = ring->event_timeout_ms;
+
+ ret = ath12k_wmi_pdev_dma_ring_cfg(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to setup db ring cfg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring,
+ u32 num_resp_per_event, u32 event_timeout_ms,
+ int (*handler)(struct ath12k *,
+ struct ath12k_dbring_data *))
+{
+ if (WARN_ON(!ring))
+ return -EINVAL;
+
+ ring->num_resp_per_event = num_resp_per_event;
+ ring->event_timeout_ms = event_timeout_ms;
+ ring->handler = handler;
+
+ return 0;
+}
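
A consumer (e.g. a future spectral scan module) registers its per-buffer callback this way before the WMI ring configuration is sent. A sketch with a hypothetical handler; the function names and the num_resp/timeout values are illustrative:

static int example_dbring_handler(struct ath12k *ar,
				  struct ath12k_dbring_data *data)
{
	/* consume data->data (data->data_sz bytes) plus data->meta here */
	return 0;
}

static int example_dbring_config(struct ath12k *ar, struct ath12k_dbring *ring)
{
	/* one response per event, no event timeout */
	return ath12k_dbring_set_cfg(ar, ring, 1, 0, example_dbring_handler);
}
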
+
+int ath12k_dbring_buf_setup(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ struct ath12k_dbring_cap *db_cap)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_srng *srng;
+ int ret;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+ ring->bufs_max = ring->refill_srng.size /
+ ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
+
+ ring->buf_sz = db_cap->min_buf_sz;
+ ring->buf_align = db_cap->min_buf_align;
+ ring->pdev_id = db_cap->pdev_id;
+ ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
+ ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
+
+ return ret;
+}
+
+int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring,
+ int ring_num, int num_entries)
+{
+ int ret;
+
+ ret = ath12k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
+ ring_num, ar->pdev_idx, num_entries);
+ if (ret < 0) {
+ ath12k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ goto err;
+ }
+
+ return 0;
+err:
+ ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+ return ret;
+}
+
+int ath12k_dbring_get_cap(struct ath12k_base *ab,
+ u8 pdev_idx,
+ enum wmi_direct_buffer_module id,
+ struct ath12k_dbring_cap *db_cap)
+{
+ int i;
+
+ if (!ab->num_db_cap || !ab->db_caps)
+ return -ENOENT;
+
+ if (id >= WMI_DIRECT_BUF_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < ab->num_db_cap; i++) {
+ if (pdev_idx == ab->db_caps[i].pdev_id &&
+ id == ab->db_caps[i].id) {
+ *db_cap = ab->db_caps[i];
+
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
+ struct ath12k_dbring_buf_release_event *ev)
+{
+ struct ath12k_dbring *ring = NULL;
+ struct hal_srng *srng;
+ struct ath12k *ar;
+ struct ath12k_dbring_element *buff;
+ struct ath12k_dbring_data handler_data;
+ struct ath12k_buffer_addr desc;
+ u8 *vaddr_unalign;
+ u32 num_entry, num_buff_reaped;
+ u8 pdev_idx, rbm;
+ u32 cookie;
+ int buf_id;
+ int size;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ pdev_idx = le32_to_cpu(ev->fixed.pdev_id);
+
+ if (pdev_idx >= ab->num_radios) {
+ ath12k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
+ return -EINVAL;
+ }
+
+ if (ev->fixed.num_buf_release_entry !=
+ ev->fixed.num_meta_data_entry) {
+ ath12k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
+ ev->fixed.num_buf_release_entry,
+ ev->fixed.num_meta_data_entry);
+ return -EINVAL;
+ }
+
+ ar = ab->pdevs[pdev_idx].ar;
+
+ rcu_read_lock();
+ if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
+ ret = -EINVAL;
+ goto rcu_unlock;
+ }
+
+ switch (le32_to_cpu(ev->fixed.module_id)) {
+ case WMI_DIRECT_BUF_SPECTRAL:
+ break;
+ default:
+ ring = NULL;
+ ath12k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
+ ev->fixed.module_id);
+ break;
+ }
+
+ if (!ring) {
+ ret = -EINVAL;
+ goto rcu_unlock;
+ }
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+ num_entry = le32_to_cpu(ev->fixed.num_buf_release_entry);
+ size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
+ num_buff_reaped = 0;
+
+ spin_lock_bh(&srng->lock);
+
+ while (num_buff_reaped < num_entry) {
+ desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
+ desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
+ handler_data.meta = ev->meta_data[num_buff_reaped];
+
+ num_buff_reaped++;
+
+ ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
+
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&ring->idr_lock);
+ buff = idr_find(&ring->bufs_idr, buf_id);
+ if (!buff) {
+ spin_unlock_bh(&ring->idr_lock);
+ continue;
+ }
+ idr_remove(&ring->bufs_idr, buf_id);
+ spin_unlock_bh(&ring->idr_lock);
+
+ dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
+ DMA_FROM_DEVICE);
+
+ if (ring->handler) {
+ vaddr_unalign = buff->payload;
+ handler_data.data = PTR_ALIGN(vaddr_unalign,
+ ring->buf_align);
+ handler_data.data_sz = ring->buf_sz;
+
+ ring->handler(ar, &handler_data);
+ }
+
+ memset(buff, 0, size);
+ ath12k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
+ }
+
+ spin_unlock_bh(&srng->lock);
+
+rcu_unlock:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
+{
+ ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+}
+
+void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
+{
+ struct ath12k_dbring_element *buff;
+ int buf_id;
+
+ spin_lock_bh(&ring->idr_lock);
+ idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
+ idr_remove(&ring->bufs_idr, buf_id);
+ dma_unmap_single(ar->ab->dev, buff->paddr,
+ ring->buf_sz, DMA_FROM_DEVICE);
+ kfree(buff);
+ }
+
+ idr_destroy(&ring->bufs_idr);
+ spin_unlock_bh(&ring->idr_lock);
+}
diff --git a/drivers/net/wireless/ath/ath12k/dbring.h b/drivers/net/wireless/ath/ath12k/dbring.h
new file mode 100644
index 000000000000..e1c0eba774ec
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dbring.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_DBRING_H
+#define ATH12K_DBRING_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include "dp.h"
+
+struct ath12k_dbring_element {
+ dma_addr_t paddr;
+ u8 payload[];
+};
+
+struct ath12k_dbring_data {
+ void *data;
+ u32 data_sz;
+ struct ath12k_wmi_dma_buf_release_meta_data_params meta;
+};
+
+struct ath12k_dbring_buf_release_event {
+ struct ath12k_wmi_dma_buf_release_fixed_params fixed;
+ const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
+ const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
+ u32 num_buf_entry;
+ u32 num_meta;
+};
+
+struct ath12k_dbring_cap {
+ u32 pdev_id;
+ enum wmi_direct_buffer_module id;
+ u32 min_elem;
+ u32 min_buf_sz;
+ u32 min_buf_align;
+};
+
+struct ath12k_dbring {
+ struct dp_srng refill_srng;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ dma_addr_t tp_addr;
+ dma_addr_t hp_addr;
+ int bufs_max;
+ u32 pdev_id;
+ u32 buf_sz;
+ u32 buf_align;
+ u32 num_resp_per_event;
+ u32 event_timeout_ms;
+ int (*handler)(struct ath12k *ar, struct ath12k_dbring_data *data);
+};
+
+int ath12k_dbring_set_cfg(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ u32 num_resp_per_event,
+ u32 event_timeout_ms,
+ int (*handler)(struct ath12k *,
+ struct ath12k_dbring_data *));
+int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ enum wmi_direct_buffer_module id);
+int ath12k_dbring_buf_setup(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ struct ath12k_dbring_cap *db_cap);
+int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring,
+ int ring_num, int num_entries);
+int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
+ struct ath12k_dbring_buf_release_event *ev);
+int ath12k_dbring_get_cap(struct ath12k_base *ab,
+ u8 pdev_idx,
+ enum wmi_direct_buffer_module id,
+ struct ath12k_dbring_cap *db_cap);
+void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring);
+void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring);
+#endif /* ATH12K_DBRING_H */
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
new file mode 100644
index 000000000000..67893923e010
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+
+void ath12k_info(struct ath12k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+#ifdef CONFIG_ATH12K_DEBUG
+
+void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath12k_debug_mask & mask)
+ dev_dbg(ab->dev, "%pV", &vaf);
+
+ /* TODO: trace log */
+
+ va_end(args);
+}
+
+void ath12k_dbg_dump(struct ath12k_base *ab,
+ enum ath12k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath12k_debug_mask & mask) {
+ if (msg)
+ __ath12k_dbg(ab, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_dbg(ab->dev, "%s\n", linebuf);
+ }
+ }
+}
+
+#endif /* CONFIG_ATH12K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
new file mode 100644
index 000000000000..aa685295f8a4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUG_H_
+#define _ATH12K_DEBUG_H_
+
+#include "trace.h"
+
+enum ath12k_debug_mask {
+ ATH12K_DBG_AHB = 0x00000001,
+ ATH12K_DBG_WMI = 0x00000002,
+ ATH12K_DBG_HTC = 0x00000004,
+ ATH12K_DBG_DP_HTT = 0x00000008,
+ ATH12K_DBG_MAC = 0x00000010,
+ ATH12K_DBG_BOOT = 0x00000020,
+ ATH12K_DBG_QMI = 0x00000040,
+ ATH12K_DBG_DATA = 0x00000080,
+ ATH12K_DBG_MGMT = 0x00000100,
+ ATH12K_DBG_REG = 0x00000200,
+ ATH12K_DBG_TESTMODE = 0x00000400,
+ ATH12K_DBG_HAL = 0x00000800,
+ ATH12K_DBG_PCI = 0x00001000,
+ ATH12K_DBG_DP_TX = 0x00002000,
+ ATH12K_DBG_DP_RX = 0x00004000,
+ ATH12K_DBG_ANY = 0xffffffff,
+};
+
+__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
+
+extern unsigned int ath12k_debug_mask;
+
+#ifdef CONFIG_ATH12K_DEBUG
+__printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab,
+ enum ath12k_debug_mask mask,
+ const char *fmt, ...);
+void ath12k_dbg_dump(struct ath12k_base *ab,
+ enum ath12k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH12K_DEBUG */
+static inline void __ath12k_dbg(struct ath12k_base *ab,
+ enum ath12k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+}
+
+static inline void ath12k_dbg_dump(struct ath12k_base *ab,
+ enum ath12k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH12K_DEBUG */
+
+#define ath12k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ typeof(dbg_mask) mask = (dbg_mask); \
+ if (ath12k_debug_mask & mask) \
+ __ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \
+} while (0)
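
ath12k_debug_mask is a module-wide bitmask (declared extern above), so each call site is gated on its category bit. A sketch of typical call sites, assuming a WMI event path; the function and the dump length are illustrative:

static void example_debug_logging(struct ath12k_base *ab, struct sk_buff *skb)
{
	/* emitted only when ATH12K_DBG_WMI is set in ath12k_debug_mask */
	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi event len %u\n", skb->len);

	/* hex dump gated on the same mask; a NULL prefix prints none */
	ath12k_dbg_dump(ab, ATH12K_DBG_WMI, "wmi event", NULL,
			skb->data, min_t(size_t, skb->len, 64));
}
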
+
+#endif /* _ATH12K_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
new file mode 100644
index 000000000000..eb0261500ab0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -0,0 +1,1580 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <crypto/hash.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "hal_tx.h"
+#include "hif.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "peer.h"
+#include "dp_mon.h"
+
+static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+
+ /* TODO: Any other peer specific DP cleanup */
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+ addr, vdev_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
+ ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ crypto_free_shash(peer->tfm_mmic);
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ u32 reo_dest;
+ int ret = 0, tid;
+
+ /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+ reo_dest = ar->dp.mac_id + 1;
+ ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
+ WMI_PEER_SET_DEFAULT_ROUTING,
+ DP_RX_HASH_ENABLE | (reo_dest << 1));
+
+ if (ret) {
+ ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
+ ret, addr, vdev_id);
+ return ret;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
+ HAL_PN_TYPE_NONE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
+ tid, ret);
+ goto peer_clean;
+ }
+ }
+
+ ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rx defrag context\n");
+ goto peer_clean;
+ }
+
+ /* TODO: Setup other peer specific resource used in data path */
+
+ return 0;
+
+peer_clean:
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath12k_warn(ab, "failed to find the peer to del rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (; tid >= 0; tid--)
+ ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
+}
+
+void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
+{
+ if (!ring->vaddr_unaligned)
+ return;
+
+ dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned);
+
+ ring->vaddr_unaligned = NULL;
+}
+
+static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
+{
+ int ext_group_num;
+ u8 mask = 1 << ring_num;
+
+ for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
+ ext_group_num++) {
+ if (mask & grp_mask[ext_group_num])
+ return ext_group_num;
+ }
+
+ return -ENOENT;
+}
+
+static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
+ enum hal_ring_type type, int ring_num)
+{
+ const u8 *grp_mask;
+
+ switch (type) {
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
+ grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
+ ring_num = 0;
+ } else {
+ grp_mask = &ab->hw_params->ring_mask->tx[0];
+ }
+ break;
+ case HAL_REO_EXCEPTION:
+ grp_mask = &ab->hw_params->ring_mask->rx_err[0];
+ break;
+ case HAL_REO_DST:
+ grp_mask = &ab->hw_params->ring_mask->rx[0];
+ break;
+ case HAL_REO_STATUS:
+ grp_mask = &ab->hw_params->ring_mask->reo_status[0];
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ case HAL_RXDMA_MONITOR_DST:
+ grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
+ break;
+ case HAL_TX_MONITOR_DST:
+ grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
+ break;
+ case HAL_RXDMA_BUF:
+ grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_REO_CMD:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_TCL_STATUS:
+ case HAL_REO_REINJECT:
+ case HAL_CE_SRC:
+ case HAL_CE_DST:
+ case HAL_CE_DST_STATUS:
+ default:
+ return -ENOENT;
+ }
+
+ return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
+}
+
+static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
+ struct hal_srng_params *ring_params,
+ enum hal_ring_type type, int ring_num)
+{
+ int msi_group_number, msi_data_count;
+ u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
+ int ret;
+
+ ret = ath12k_hif_get_user_msi_vector(ab, "DP",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+ if (ret)
+ return;
+
+ msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
+ ring_num);
+ if (msi_group_number < 0) {
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "ring not part of an ext_group; ring_type: %d,ring_num %d",
+ type, ring_num);
+ ring_params->msi_addr = 0;
+ ring_params->msi_data = 0;
+ return;
+ }
+
+ if (msi_group_number > msi_data_count) {
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "multiple msi_groups share one msi, msi_group_num %d",
+ msi_group_number);
+ }
+
+ ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_group_number % msi_data_count)
+ + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries)
+{
+ struct hal_srng_params params = { 0 };
+ int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
+ int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
+ int ret;
+
+ if (max_entries < 0 || entry_sz < 0)
+ return -EINVAL;
+
+ if (num_entries > max_entries)
+ num_entries = max_entries;
+
+ ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+ ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ GFP_KERNEL);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
+ ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
+ (unsigned long)ring->vaddr_unaligned);
+
+ params.ring_base_vaddr = ring->vaddr;
+ params.ring_base_paddr = ring->paddr;
+ params.num_entries = num_entries;
+ ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
+
+ switch (type) {
+ case HAL_REO_DST:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_RXDMA_BUF:
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_TX_MONITOR_DST:
+ params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_WBM2SW_RELEASE:
+ if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+ params.intr_timer_thres_us =
+ HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+ break;
+ }
+ /* fall through for rings that are not tx completion rings */
+ fallthrough;
+ case HAL_REO_EXCEPTION:
+ case HAL_REO_REINJECT:
+ case HAL_REO_CMD:
+ case HAL_REO_STATUS:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_TCL_STATUS:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_RXDMA_DST:
+ case HAL_RXDMA_MONITOR_DST:
+ case HAL_RXDMA_MONITOR_DESC:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+ break;
+ case HAL_RXDMA_DIR_BUF:
+ break;
+ default:
+ ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
+ return -EINVAL;
+ }
+
+ ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ return ret;
+ }
+
+ ring->ring_id = ret;
+
+ return 0;
+}
+
+static
+u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
+{
+ u32 bank_config = 0;
+
+ /* Only valid for raw frames with HW crypto enabled.
+ * With SW crypto, mac80211 sets key per packet
+ */
+ if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
+ bank_config |=
+ u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
+ HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
+
+ bank_config |= u32_encode_bits(arvif->tx_encap_type,
+ HAL_TX_BANK_CONFIG_ENCAP_TYPE);
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
+ u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
+ u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
+
+ /* only valid if idx_lookup_override is not set in tcl_data_cmd */
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+
+ bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
+ HAL_TX_BANK_CONFIG_ADDRX_EN) |
+ u32_encode_bits(!!(arvif->hal_addr_search_flags &
+ HAL_TX_ADDRY_EN),
+ HAL_TX_BANK_CONFIG_ADDRY_EN);
+
+ bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
+ HAL_TX_BANK_CONFIG_MESH_EN) |
+ u32_encode_bits(arvif->vdev_id_check_en,
+ HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
+
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);
+
+ return bank_config;
+}
+
+static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
+ struct ath12k_dp *dp)
+{
+ int bank_id = DP_INVALID_BANK_ID;
+ int i;
+ u32 bank_config;
+ bool configure_register = false;
+
+ /* convert vdev params into hal_tx_bank_config */
+ bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);
+
+ spin_lock_bh(&dp->tx_bank_lock);
+	/* TODO: implement using idr kernel framework */
+ for (i = 0; i < dp->num_bank_profiles; i++) {
+ if (dp->bank_profiles[i].is_configured &&
+ (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
+ bank_id = i;
+ goto inc_ref_and_return;
+ }
+ if (!dp->bank_profiles[i].is_configured ||
+ !dp->bank_profiles[i].num_users) {
+ bank_id = i;
+ goto configure_and_return;
+ }
+ }
+
+ if (bank_id == DP_INVALID_BANK_ID) {
+ spin_unlock_bh(&dp->tx_bank_lock);
+ ath12k_err(ab, "unable to find TX bank!");
+ return bank_id;
+ }
+
+configure_and_return:
+ dp->bank_profiles[bank_id].is_configured = true;
+ dp->bank_profiles[bank_id].bank_config = bank_config;
+ configure_register = true;
+inc_ref_and_return:
+ dp->bank_profiles[bank_id].num_users++;
+ spin_unlock_bh(&dp->tx_bank_lock);
+
+ if (configure_register)
+ ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
+ bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
+ dp->bank_profiles[bank_id].num_users);
+
+ return bank_id;
+}
+
+void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
+{
+ spin_lock_bh(&dp->tx_bank_lock);
+ dp->bank_profiles[bank_id].num_users--;
+ spin_unlock_bh(&dp->tx_bank_lock);
+}
+
+static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+
+ kfree(dp->bank_profiles);
+ dp->bank_profiles = NULL;
+}
+
+static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
+ int i;
+
+ dp->num_bank_profiles = num_tcl_banks;
+ dp->bank_profiles = kmalloc_array(num_tcl_banks,
+ sizeof(struct ath12k_dp_tx_bank_profile),
+ GFP_KERNEL);
+ if (!dp->bank_profiles)
+ return -ENOMEM;
+
+ spin_lock_init(&dp->tx_bank_lock);
+
+ for (i = 0; i < num_tcl_banks; i++) {
+ dp->bank_profiles[i].is_configured = false;
+ dp->bank_profiles[i].num_users = 0;
+ }
+
+ return 0;
+}
+
+static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+
+ ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
+ }
+ ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
+}
+
+static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
+ struct hal_srng *srng;
+ int i, ret, tx_comp_ring_num;
+ u32 ring_hash_map;
+
+ ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
+ HAL_SW2WBM_RELEASE, 0, 0,
+ DP_WBM_RELEASE_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
+ DP_TCL_CMD_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
+ 0, 0, DP_TCL_STATUS_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ tx_comp_ring_num = map[i].wbm_ring_num;
+
+ ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
+ HAL_TCL_DATA, i, 0,
+ DP_TCL_DATA_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
+ HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
+ DP_TX_COMP_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
+ tx_comp_ring_num, ret);
+ goto err;
+ }
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
+ 0, 0, DP_REO_REINJECT_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
+ HAL_WBM2SW_REL_ERR_RING_NUM, 0,
+ DP_RX_RELEASE_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
+ 0, 0, DP_REO_EXCEPTION_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
+ 0, 0, DP_REO_CMD_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ ath12k_hal_reo_init_cmd_ring(ab, srng);
+
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
+ 0, 0, DP_REO_STATUS_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
+ goto err;
+ }
+
+	/* When hash based routing of rx packets is enabled, 32 entries that
+	 * map hash values to rings are configured. Each hash entry uses
+	 * four bits to select a particular ring. The ring mapping is
+	 * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW, 7:SW5,
+	 * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
+ */
+ ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ ath12k_hal_reo_hw_setup(ab, ring_hash_map);
+
+ return 0;
+
+err:
+ ath12k_dp_srng_common_cleanup(ab);
+
+ return ret;
+}
+
+static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ int i;
+
+ for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
+ if (!slist[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ slist[i].vaddr, slist[i].paddr);
+ slist[i].vaddr = NULL;
+ }
+}
+
+static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
+ int size,
+ u32 n_link_desc_bank,
+ u32 n_link_desc,
+ u32 last_bank_sz)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ u32 n_entries_per_buf;
+ int num_scatter_buf, scatter_idx;
+ struct hal_wbm_link_desc *scatter_buf;
+ int align_bytes, n_entries;
+ dma_addr_t paddr;
+ int rem_entries;
+ int i;
+ int ret = 0;
+ u32 end_offset, cookie;
+
+ n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
+ ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
+ num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
+
+ if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < num_scatter_buf; i++) {
+ slist[i].vaddr = dma_alloc_coherent(ab->dev,
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ &slist[i].paddr, GFP_KERNEL);
+ if (!slist[i].vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ scatter_idx = 0;
+ scatter_buf = slist[scatter_idx].vaddr;
+ rem_entries = n_entries_per_buf;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries) {
+ cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
+ ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ if (rem_entries) {
+ rem_entries--;
+ scatter_buf++;
+ continue;
+ }
+
+ rem_entries = n_entries_per_buf;
+ scatter_idx++;
+ scatter_buf = slist[scatter_idx].vaddr;
+ }
+ }
+
+ end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
+ sizeof(struct hal_wbm_link_desc);
+ ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
+ n_link_desc, end_offset);
+
+ return 0;
+
+err:
+ ath12k_dp_scatter_idle_link_desc_cleanup(ab);
+
+ return ret;
+}
+
+static void
+ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks)
+{
+ int i;
+
+ for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
+ if (link_desc_banks[i].vaddr_unaligned) {
+ dma_free_coherent(ab->dev,
+ link_desc_banks[i].size,
+ link_desc_banks[i].vaddr_unaligned,
+ link_desc_banks[i].paddr_unaligned);
+ link_desc_banks[i].vaddr_unaligned = NULL;
+ }
+ }
+}
+
+static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ int n_link_desc_bank,
+ int last_bank_sz)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+ int ret = 0;
+ int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ if (i == (n_link_desc_bank - 1) && last_bank_sz)
+ desc_sz = last_bank_sz;
+
+ desc_bank[i].vaddr_unaligned =
+ dma_alloc_coherent(ab->dev, desc_sz,
+ &desc_bank[i].paddr_unaligned,
+ GFP_KERNEL);
+ if (!desc_bank[i].vaddr_unaligned) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
+ HAL_LINK_DESC_ALIGN);
+ desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
+ ((unsigned long)desc_bank[i].vaddr -
+ (unsigned long)desc_bank[i].vaddr_unaligned);
+ desc_bank[i].size = desc_sz;
+ }
+
+ return 0;
+
+err:
+ ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
+
+ return ret;
+}
+
+void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring)
+{
+ ath12k_dp_link_desc_bank_free(ab, desc_bank);
+
+ if (ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ath12k_dp_srng_cleanup(ab, ring);
+ ath12k_dp_scatter_idle_link_desc_cleanup(ab);
+ }
+}
+
+static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ u32 n_mpdu_link_desc, n_mpdu_queue_desc;
+ u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
+ int ret = 0;
+
+ n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
+ HAL_NUM_MPDUS_PER_LINK_DESC;
+
+ n_mpdu_queue_desc = n_mpdu_link_desc /
+ HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
+
+ n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
+ DP_AVG_MSDUS_PER_FLOW) /
+ HAL_NUM_TX_MSDUS_PER_LINK_DESC;
+
+ n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
+ DP_AVG_MSDUS_PER_MPDU) /
+ HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
+ n_tx_msdu_link_desc + n_rx_msdu_link_desc;
+
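+	/* if not already a power of two, round *n_link_desc up to the
+	 * next power of two, e.g. 300 -> 512
+	 */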
+ if (*n_link_desc & (*n_link_desc - 1))
+ *n_link_desc = 1 << fls(*n_link_desc);
+
+ ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
+ HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc)
+{
+ u32 tot_mem_sz;
+ u32 n_link_desc_bank, last_bank_sz;
+ u32 entry_sz, align_bytes, n_entries;
+ struct hal_wbm_link_desc *desc;
+ u32 paddr;
+ int i, ret;
+ u32 cookie;
+
+ tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
+ tot_mem_sz += HAL_LINK_DESC_ALIGN;
+
+ if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
+ n_link_desc_bank = 1;
+ last_bank_sz = tot_mem_sz;
+ } else {
+ n_link_desc_bank = tot_mem_sz /
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+ last_bank_sz = tot_mem_sz %
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+
+ if (last_bank_sz)
+ n_link_desc_bank += 1;
+ }
+
+ if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
+ return -EINVAL;
+
+ ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
+ n_link_desc_bank, last_bank_sz);
+ if (ret)
+ return ret;
+
+ /* Setup link desc idle list for HW internal usage */
+ entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
+ tot_mem_sz = entry_sz * n_link_desc;
+
+ /* Setup scatter desc list when the total memory requirement is more */
+ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
+ ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
+ n_link_desc_bank,
+ n_link_desc,
+ last_bank_sz);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
+ ret);
+ goto fail_desc_bank_free;
+ }
+
+ return 0;
+ }
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (link_desc_banks[i].size - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries &&
+ (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
+ cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
+ ath12k_hal_set_link_desc_addr(desc,
+ cookie, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ }
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+fail_desc_bank_free:
+ ath12k_dp_link_desc_bank_free(ab, link_desc_banks);
+
+ return ret;
+}
+
+int ath12k_dp_service_srng(struct ath12k_base *ab,
+ struct ath12k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ struct napi_struct *napi = &irq_grp->napi;
+ int grp_id = irq_grp->grp_id;
+ int work_done = 0;
+ int i = 0, j;
+ int tot_work_done = 0;
+ enum dp_monitor_mode monitor_mode;
+ u8 ring_mask;
+
+ while (i < ab->hw_params->max_tx_ring) {
+ if (ab->hw_params->ring_mask->tx[grp_id] &
+ BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
+ ath12k_dp_tx_completion_handler(ab, i);
+ i++;
+ }
+
+ if (ab->hw_params->ring_mask->rx_err[grp_id]) {
+ work_done = ath12k_dp_rx_process_err(ab, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
+ work_done = ath12k_dp_rx_process_wbm_err(ab,
+ napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params->ring_mask->rx[grp_id]) {
+ i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
+ work_done = ath12k_dp_rx_process(ab, i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
+ monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
+ ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ monitor_mode);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
+ monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
+ ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ monitor_mode);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (ab->hw_params->ring_mask->reo_status[grp_id])
+ ath12k_dp_rx_process_reo_status(ab);
+
+ if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, 0,
+ ab->hw_params->hal_params->rx_buf_rbm,
+ true);
+ }
+
+ /* TODO: Implement handler for other interrupts */
+
+done:
+ return tot_work_done;
+}
+
+void ath12k_dp_pdev_free(struct ath12k_base *ab)
+{
+ int i;
+
+ del_timer_sync(&ab->mon_reap_timer);
+
+ for (i = 0; i < ab->num_radios; i++)
+ ath12k_dp_rx_pdev_free(ab, i);
+}
+
+void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev_dp *dp;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ dp->mac_id = i;
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+
+ /* TODO: Add any RXDMA setup required per pdev */
+ }
+}
+
+static void ath12k_dp_service_mon_ring(struct timer_list *t)
+{
+ struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
+ int i;
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
+ ATH12K_DP_RX_MONITOR_MODE);
+
+ mod_timer(&ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
+}
+
+static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
+{
+ if (ab->hw_params->rxdma1_enable)
+ return;
+
+ timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
+}
+
+int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ int ret;
+ int i;
+
+ ret = ath12k_dp_rx_htt_setup(ab);
+ if (ret)
+ goto out;
+
+ ath12k_dp_mon_reap_timer_init(ab);
+
+ /* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ret = ath12k_dp_rx_pdev_alloc(ab, i);
+ if (ret) {
+ ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
+ i);
+ goto err;
+ }
+ ret = ath12k_dp_rx_pdev_mon_attach(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ ath12k_dp_pdev_free(ab);
+out:
+ return ret;
+}
+
+int ath12k_dp_htt_connect(struct ath12k_dp *dp)
+{
+ struct ath12k_htc_svc_conn_req conn_req = {0};
+ struct ath12k_htc_svc_conn_resp conn_resp = {0};
+ int status;
+
+ conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ dp->eid = conn_resp.eid;
+
+ return 0;
+}
+
+static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
+{
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ /* TODO: Verify the search type and flags since ast hash
+ * is not part of peer mapv3
+ */
+ arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_AP:
+ case WMI_VDEV_TYPE_IBSS:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ default:
+ return;
+ }
+}
+
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
+{
+ struct ath12k_base *ab = ar->ab;
+
+ arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
+ u32_encode_bits(arvif->vdev_id,
+ HTT_TCL_META_DATA_VDEV_ID) |
+ u32_encode_bits(ar->pdev->pdev_id,
+ HTT_TCL_META_DATA_PDEV_ID);
+
+ /* set HTT extension valid bit to 0 by default */
+ arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+ ath12k_dp_update_vdev_search(arvif);
+ arvif->vdev_id_check_en = true;
+ arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);
+
+ /* TODO: error path for bank id failure */
+ if (arvif->bank_id == DP_INVALID_BANK_ID) {
+ ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
+ return;
+ }
+}
+
+static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_rx_desc_info *desc_info, *tmp;
+ struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ int i;
+
+ if (!dp->spt_info)
+ return;
+
+ /* RX Descriptor cleanup */
+ spin_lock_bh(&dp->rx_desc_lock);
+
+ list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
+ list_del(&desc_info->list);
+ skb = desc_info->skb;
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ /* TX Descriptor cleanup */
+ for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
+ spin_lock_bh(&dp->tx_desc_lock[i]);
+
+ list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
+ list) {
+ list_del(&tx_desc_info->list);
+ skb = tx_desc_info->skb;
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ spin_unlock_bh(&dp->tx_desc_lock[i]);
+ }
+
+ /* unmap SPT pages */
+ for (i = 0; i < dp->num_spt_pages; i++) {
+ if (!dp->spt_info[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
+ dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
+ dp->spt_info[i].vaddr = NULL;
+ }
+
+ kfree(dp->spt_info);
+}
+
+static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ if (!dp->reoq_lut.vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+}
+
+void ath12k_dp_free(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+
+ ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ ath12k_dp_cc_cleanup(ab);
+ ath12k_dp_reoq_lut_cleanup(ab);
+ ath12k_dp_deinit_bank_profiles(ab);
+ ath12k_dp_srng_common_cleanup(ab);
+
+ ath12k_dp_rx_reo_cmd_list_cleanup(ab);
+
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++)
+ kfree(dp->tx_ring[i].tx_status);
+
+ ath12k_dp_rx_free(ab);
+ /* Deinit any SOC level resource */
+}
+
+void ath12k_dp_cc_config(struct ath12k_base *ab)
+{
+ u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
+ u32 val = 0;
+
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
+
+ val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
+ HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
+ u32_encode_bits(ATH12K_CC_PPT_MSB,
+ HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
+ u32_encode_bits(ATH12K_CC_SPT_MSB,
+ HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);
+
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);
+
+ /* Enable HW CC for WBM */
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);
+
+ val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
+ HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
+ u32_encode_bits(ATH12K_CC_PPT_MSB,
+ HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
+ u32_encode_bits(ATH12K_CC_SPT_MSB,
+ HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);
+
+ /* Enable conversion complete indication */
+ val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
+ val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);
+
+ /* Enable Cookie conversion for WBM2SW Rings */
+ val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
+ val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
+ ab->hw_params->hal_params->wbm2sw_cc_enable;
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
+}
+
+static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
+{
+ return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
+}
+
+static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
+ u16 ppt_idx, u16 spt_idx)
+{
+ struct ath12k_dp *dp = &ab->dp;
+
+ return dp->spt_info[ppt_idx].vaddr + spt_idx;
+}
+
+struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
+ u32 cookie)
+{
+ struct ath12k_rx_desc_info **desc_addr_ptr;
+ u16 ppt_idx, spt_idx;
+
+ ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
+	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
+
+ if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
+ spt_idx > ATH12K_MAX_SPT_ENTRIES)
+ return NULL;
+
+ desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
+
+ return *desc_addr_ptr;
+}
+
+struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
+ u32 cookie)
+{
+ struct ath12k_tx_desc_info **desc_addr_ptr;
+ u16 ppt_idx, spt_idx;
+
+ ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
+	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
+
+ if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
+ ppt_idx > ab->dp.num_spt_pages ||
+ spt_idx > ATH12K_MAX_SPT_ENTRIES)
+ return NULL;
+
+ desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
+
+ return *desc_addr_ptr;
+}
+
+static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
+ struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
+ u32 i, j, pool_id, tx_spt_page;
+ u32 ppt_idx;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+
+ /* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
+ for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+ rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
+ GFP_ATOMIC);
+
+ if (!rx_descs) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
+ rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+ list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
+
+ /* Update descriptor VA in SPT */
+ rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
+ *rx_desc_addr = &rx_descs[j];
+ }
+ }
+
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+ spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+ tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
+ GFP_ATOMIC);
+
+ if (!tx_descs) {
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+ /* Caller takes care of TX pending and RX desc cleanup */
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+ ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
+ tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
+ tx_descs[j].pool_id = pool_id;
+ list_add_tail(&tx_descs[j].list,
+ &dp->tx_desc_free_list[pool_id]);
+
+ /* Update descriptor VA in SPT */
+ tx_desc_addr =
+ ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
+ *tx_desc_addr = &tx_descs[j];
+ }
+ }
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+ }
+ return 0;
+}
+
+static int ath12k_dp_cc_init(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i, ret = 0;
+ u32 cmem_base;
+
+ INIT_LIST_HEAD(&dp->rx_desc_free_list);
+ INIT_LIST_HEAD(&dp->rx_desc_used_list);
+ spin_lock_init(&dp->rx_desc_lock);
+
+ for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
+ INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
+ INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
+ spin_lock_init(&dp->tx_desc_lock[i]);
+ }
+
+ dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
+ if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
+ dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
+
+ dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
+ GFP_KERNEL);
+
+ if (!dp->spt_info) {
+ ath12k_warn(ab, "SPT page allocation failure");
+ return -ENOMEM;
+ }
+
+ cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+
+ for (i = 0; i < dp->num_spt_pages; i++) {
+ dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
+ ATH12K_PAGE_SIZE,
+ &dp->spt_info[i].paddr,
+ GFP_KERNEL);
+
+ if (!dp->spt_info[i].vaddr) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
+ ath12k_warn(ab, "SPT allocated memoty is not 4K aligned");
+ ret = -EINVAL;
+ goto free;
+ }
+
+ /* Write to PPT in CMEM */
+ ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+ dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+ }
+
+ ret = ath12k_dp_cc_desc_init(ab);
+ if (ret) {
+ ath12k_warn(ab, "HW CC desc init failed %d", ret);
+ goto free;
+ }
+
+ return 0;
+free:
+ ath12k_dp_cc_cleanup(ab);
+ return ret;
+}
+
+static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return 0;
+
+ dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+ DP_REOQ_LUT_SIZE,
+ &dp->reoq_lut.paddr,
+ GFP_KERNEL);
+
+ if (!dp->reoq_lut.vaddr) {
+ ath12k_warn(ab, "failed to allocate memory for reoq table");
+ return -ENOMEM;
+ }
+
+ memset(dp->reoq_lut.vaddr, 0, DP_REOQ_LUT_SIZE);
+
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
+ dp->reoq_lut.paddr);
+ return 0;
+}
+
+int ath12k_dp_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_srng *srng = NULL;
+ size_t size = 0;
+ u32 n_link_desc = 0;
+ int ret;
+ int i;
+
+ dp->ab = ab;
+
+ INIT_LIST_HEAD(&dp->reo_cmd_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ spin_lock_init(&dp->reo_cmd_lock);
+
+ dp->reo_cmd_cache_flush_count = 0;
+
+ ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+
+ srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+ ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, srng, n_link_desc);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_dp_cc_init(ab);
+
+ if (ret) {
+ ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
+ goto fail_link_desc_cleanup;
+ }
+ ret = ath12k_dp_init_bank_profiles(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
+ goto fail_hw_cc_cleanup;
+ }
+
+ ret = ath12k_dp_srng_common_setup(ab);
+ if (ret)
+ goto fail_dp_bank_profiles_cleanup;
+
+ size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
+
+ ret = ath12k_dp_reoq_lut_setup(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
+ goto fail_cmn_srng_cleanup;
+ }
+
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ dp->tx_ring[i].tcl_data_ring_id = i;
+
+ dp->tx_ring[i].tx_status_head = 0;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+ if (!dp->tx_ring[i].tx_status) {
+ ret = -ENOMEM;
+ /* FIXME: The allocated tx status is not freed
+ * properly here
+ */
+ goto fail_cmn_reoq_cleanup;
+ }
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
+ ath12k_hal_tx_set_dscp_tid_map(ab, i);
+
+ ret = ath12k_dp_rx_alloc(ab);
+ if (ret)
+ goto fail_dp_rx_free;
+
+ /* Init any SOC level resource for DP */
+
+ return 0;
+
+fail_dp_rx_free:
+ ath12k_dp_rx_free(ab);
+
+fail_cmn_reoq_cleanup:
+ ath12k_dp_reoq_lut_cleanup(ab);
+
+fail_cmn_srng_cleanup:
+ ath12k_dp_srng_common_cleanup(ab);
+
+fail_dp_bank_profiles_cleanup:
+ ath12k_dp_deinit_bank_profiles(ab);
+
+fail_hw_cc_cleanup:
+ ath12k_dp_cc_cleanup(ab);
+
+fail_link_desc_cleanup:
+ ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
new file mode 100644
index 000000000000..36a876d7f61d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -0,0 +1,1816 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_DP_H
+#define ATH12K_DP_H
+
+#include "hal_rx.h"
+#include "hw.h"
+
+#define MAX_RXDMA_PER_PDEV 2
+
+struct ath12k_base;
+struct ath12k_peer;
+struct ath12k_dp;
+struct ath12k_vif;
+struct hal_tcl_status_ring;
+struct ath12k_ext_irq_grp;
+
+#define DP_MON_PURGE_TIMEOUT_MS 100
+#define DP_MON_SERVICE_BUDGET 128
+
+struct dp_srng {
+ u32 *vaddr_unaligned;
+ u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ int size;
+ u32 ring_id;
+};
+
+struct dp_rxdma_ring {
+ struct dp_srng refill_buf_ring;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ int bufs_max;
+};
+
+#define ATH12K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
+
+struct dp_tx_ring {
+ u8 tcl_data_ring_id;
+ struct dp_srng tcl_data_ring;
+ struct dp_srng tcl_comp_ring;
+ struct hal_wbm_completion_ring_tx *tx_status;
+ int tx_status_head;
+ int tx_status_tail;
+};
+
+struct ath12k_pdev_mon_stats {
+ u32 status_ppdu_state;
+ u32 status_ppdu_start;
+ u32 status_ppdu_end;
+ u32 status_ppdu_compl;
+ u32 status_ppdu_start_mis;
+ u32 status_ppdu_end_mis;
+ u32 status_ppdu_done;
+ u32 dest_ppdu_done;
+ u32 dest_mpdu_done;
+ u32 dest_mpdu_drop;
+ u32 dup_mon_linkdesc_cnt;
+ u32 dup_mon_buf_cnt;
+};
+
+struct dp_link_desc_bank {
+ void *vaddr_unaligned;
+ void *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+/* Size to enforce scatter idle list mode */
+#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
+#define DP_LINK_DESC_BANKS_MAX 8
+
+#define DP_LINK_DESC_START 0x4000
+#define DP_LINK_DESC_SHIFT 3
+
+#define DP_LINK_DESC_COOKIE_SET(id, page) \
+ ((((id) + DP_LINK_DESC_START) << DP_LINK_DESC_SHIFT) | (page))
+
+#define DP_LINK_DESC_BANK_MASK GENMASK(2, 0)
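+
+/* Illustrative example (not part of the driver): for link descriptor id 5
+ * in bank page 2, DP_LINK_DESC_COOKIE_SET(5, 2) yields
+ * ((5 + 0x4000) << 3) | 2 = 0x2002a; the bank page is recovered with
+ * u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK).
+ */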
+
+#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff
+#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000
+#define DP_RX_DESC_COOKIE_MAX \
+ (DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX)
+#define DP_NOT_PPDU_ID_WRAP_AROUND 20000
+
+enum ath12k_dp_ppdu_state {
+ DP_PPDU_STATUS_START,
+ DP_PPDU_STATUS_DONE,
+};
+
+struct dp_mon_mpdu {
+ struct list_head list;
+ struct sk_buff *head;
+ struct sk_buff *tail;
+};
+
+#define DP_MON_MAX_STATUS_BUF 32
+
+struct ath12k_mon_data {
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct hal_rx_mon_ppdu_info mon_ppdu_info;
+
+ u32 mon_ppdu_status;
+ u32 mon_last_buf_cookie;
+ u64 mon_last_linkdesc_paddr;
+ u16 chan_noise_floor;
+
+ struct ath12k_pdev_mon_stats rx_mon_stats;
+ /* lock for monitor data */
+ spinlock_t mon_lock;
+ struct sk_buff_head rx_status_q;
+ struct dp_mon_mpdu *mon_mpdu;
+ struct list_head dp_rx_mon_mpdu_list;
+ struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF];
+ struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info;
+ struct dp_mon_tx_ppdu_info *tx_data_ppdu_info;
+};
+
+struct ath12k_pdev_dp {
+ u32 mac_id;
+ atomic_t num_tx_pending;
+ wait_queue_head_t tx_empty_waitq;
+ struct dp_srng rxdma_mon_dst_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng tx_mon_dst_ring[MAX_RXDMA_PER_PDEV];
+
+ struct ieee80211_rx_status rx_status;
+ struct ath12k_mon_data mon_data;
+};
+
+#define DP_NUM_CLIENTS_MAX 64
+#define DP_AVG_TIDS_PER_CLIENT 2
+#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
+#define DP_AVG_MSDUS_PER_FLOW 128
+#define DP_AVG_FLOWS_PER_TID 2
+#define DP_AVG_MPDUS_PER_TID_MAX 128
+#define DP_AVG_MSDUS_PER_MPDU 4
+
+#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
+
+#define DP_BA_WIN_SZ_MAX 256
+
+#define DP_TCL_NUM_RING_MAX 4
+
+#define DP_IDLE_SCATTER_BUFS_MAX 16
+
+#define DP_WBM_RELEASE_RING_SIZE 64
+#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TX_COMP_RING_SIZE 32768
+#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
+#define DP_TCL_CMD_RING_SIZE 32
+#define DP_TCL_STATUS_RING_SIZE 32
+#define DP_REO_DST_RING_MAX 8
+#define DP_REO_DST_RING_SIZE 2048
+#define DP_REO_REINJECT_RING_SIZE 32
+#define DP_RX_RELEASE_RING_SIZE 1024
+#define DP_REO_EXCEPTION_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 128
+#define DP_REO_STATUS_RING_SIZE 2048
+#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RXDMA_REFILL_RING_SIZE 2048
+#define DP_RXDMA_ERR_DST_RING_SIZE 1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+#define DP_TX_MONITOR_BUF_RING_SIZE 4096
+#define DP_TX_MONITOR_DEST_RING_SIZE 2048
+
+#define DP_TX_MONITOR_BUF_SIZE 2048
+#define DP_TX_MONITOR_BUF_SIZE_MIN 48
+#define DP_TX_MONITOR_BUF_SIZE_MAX 8192
+
+#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_SIZE_LITE 1024
+#define DP_RX_BUFFER_ALIGN_SIZE 128
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(19, 18)
+
+#define DP_HW2SW_MACID(mac_id) ({ typeof(mac_id) x = (mac_id); x ? x - 1 : 0; })
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
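+/* illustrative: DP_SW2HW_MACID(0) == 1 and DP_HW2SW_MACID(1) == 0 */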
+
+#define DP_TX_DESC_ID_MAC_ID GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
+
+#define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
+#define ATH12K_SHADOW_CTRL_TIMER_INTERVAL 10
+
+#define ATH12K_NUM_POOL_TX_DESC 32768
+
+/* TODO: revisit this count during testing */
+#define ATH12K_RX_DESC_COUNT (12288)
+
+#define ATH12K_PAGE_SIZE PAGE_SIZE
+
+/* Total 1024 entries in PPT, i.e. 4K/4, considering 4K aligned
+ * SPT pages which makes the lower 12 bits 0
+ */
+#define ATH12K_MAX_PPT_ENTRIES 1024
+
+/* Total 512 entries in an SPT, i.e. 4K page/8 */
+#define ATH12K_MAX_SPT_ENTRIES 512
+
+#define ATH12K_NUM_RX_SPT_PAGES ((ATH12K_RX_DESC_COUNT) / ATH12K_MAX_SPT_ENTRIES)
+
+#define ATH12K_TX_SPT_PAGES_PER_POOL (ATH12K_NUM_POOL_TX_DESC / \
+ ATH12K_MAX_SPT_ENTRIES)
+#define ATH12K_NUM_TX_SPT_PAGES (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
+#define ATH12K_NUM_SPT_PAGES (ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
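+
+/* Illustrative arithmetic: with ATH12K_RX_DESC_COUNT = 12288 and 512
+ * entries per SPT page, ATH12K_NUM_RX_SPT_PAGES = 12288 / 512 = 24, and
+ * each TX pool spans ATH12K_TX_SPT_PAGES_PER_POOL = 32768 / 512 = 64 pages.
+ */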
+
+/* The SPT pages are divided for RX and TX, first block for RX
+ * and remaining for TX
+ */
+#define ATH12K_NUM_TX_SPT_PAGE_START ATH12K_NUM_RX_SPT_PAGES
+
+#define ATH12K_DP_RX_DESC_MAGIC 0xBABABABA
+
+/* A 4K aligned address has its last 12 bits set to 0; this check is done
+ * so that two SPT page addresses can be stored per 8 bytes
+ * of CMEM (PPT)
+ */
+#define ATH12K_SPT_4K_ALIGN_CHECK 0xFFF
+#define ATH12K_SPT_4K_ALIGN_OFFSET 12
+#define ATH12K_PPT_ADDR_OFFSET(ppt_index) (4 * (ppt_index))
+
+/* To indicate the CMEM address to HW, b0-31 are the CMEM base received via QMI */
+#define ATH12K_CMEM_ADDR_MSB 0x10
+
+/* Of the 20-bit cookie, b0-b8 indicate the SPT offset and b9-b19 the PPT index */
+#define ATH12K_CC_SPT_MSB 8
+#define ATH12K_CC_PPT_MSB 19
+#define ATH12K_CC_PPT_SHIFT 9
+#define ATH12K_DP_CC_COOKIE_SPT	GENMASK(8, 0)
+#define ATH12K_DP_CC_COOKIE_PPT GENMASK(19, 9)
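+
+/* Illustrative example (not part of the driver): the cookie for SPT page
+ * (PPT index) 3, entry 10 is (3 << ATH12K_CC_PPT_SHIFT) | 10 = 0x60a;
+ * u32_get_bits(0x60a, ATH12K_DP_CC_COOKIE_PPT) returns 3 and
+ * u32_get_bits(0x60a, ATH12K_DP_CC_COOKIE_SPT) returns 10.
+ */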
+
+#define DP_REO_QREF_NUM GENMASK(31, 16)
+#define DP_MAX_PEER_ID 2047
+
+/* Total size of the LUT is based on 2K peers, each having a reference
+ * for 17 TIDs; note each entry is of type ath12k_reo_queue_ref,
+ * hence the total size is 2048 * 17 * 8 = 278528
+ */
+#define DP_REOQ_LUT_SIZE 278528
+
+/* Invalid TX Bank ID value */
+#define DP_INVALID_BANK_ID -1
+
+struct ath12k_dp_tx_bank_profile {
+ u8 is_configured;
+ u32 num_users;
+ u32 bank_config;
+};
+
+struct ath12k_hp_update_timer {
+ struct timer_list timer;
+ bool started;
+ bool init;
+ u32 tx_num;
+ u32 timer_tx_num;
+ u32 ring_id;
+ u32 interval;
+ struct ath12k_base *ab;
+};
+
+struct ath12k_rx_desc_info {
+ struct list_head list;
+ struct sk_buff *skb;
+ u32 cookie;
+ u32 magic;
+};
+
+struct ath12k_tx_desc_info {
+ struct list_head list;
+ struct sk_buff *skb;
+ u32 desc_id; /* Cookie */
+ u8 mac_id;
+ u8 pool_id;
+};
+
+struct ath12k_spt_info {
+ dma_addr_t paddr;
+ u64 *vaddr;
+};
+
+struct ath12k_reo_queue_ref {
+ u32 info0;
+ u32 info1;
+} __packed;
+
+struct ath12k_reo_q_addr_lut {
+ dma_addr_t paddr;
+ u32 *vaddr;
+};
+
+struct ath12k_dp {
+ struct ath12k_base *ab;
+ u8 num_bank_profiles;
+ /* protects the access and update of bank_profiles */
+ spinlock_t tx_bank_lock;
+ struct ath12k_dp_tx_bank_profile *bank_profiles;
+ enum ath12k_htc_ep_id eid;
+ struct completion htt_tgt_version_received;
+ u8 htt_tgt_ver_major;
+ u8 htt_tgt_ver_minor;
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct dp_srng wbm_idle_ring;
+ struct dp_srng wbm_desc_rel_ring;
+ struct dp_srng tcl_cmd_ring;
+ struct dp_srng tcl_status_ring;
+ struct dp_srng reo_reinject_ring;
+ struct dp_srng rx_rel_ring;
+ struct dp_srng reo_except_ring;
+ struct dp_srng reo_cmd_ring;
+ struct dp_srng reo_status_ring;
+ struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
+ struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
+ struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
+ struct list_head reo_cmd_list;
+ struct list_head reo_cmd_cache_flush_list;
+ u32 reo_cmd_cache_flush_count;
+
+ /* protects access to below fields,
+ * - reo_cmd_list
+ * - reo_cmd_cache_flush_list
+ * - reo_cmd_cache_flush_count
+ */
+ spinlock_t reo_cmd_lock;
+ struct ath12k_hp_update_timer reo_cmd_timer;
+ struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
+ struct ath12k_spt_info *spt_info;
+ u32 num_spt_pages;
+ struct list_head rx_desc_free_list;
+ struct list_head rx_desc_used_list;
+ /* protects the free and used desc list */
+ spinlock_t rx_desc_lock;
+
+ struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES];
+ struct list_head tx_desc_used_list[ATH12K_HW_MAX_QUEUES];
+ /* protects the free and used desc lists */
+ spinlock_t tx_desc_lock[ATH12K_HW_MAX_QUEUES];
+
+ struct dp_rxdma_ring rx_refill_buf_ring;
+ struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_rxdma_ring rxdma_mon_buf_ring;
+ struct dp_rxdma_ring tx_mon_buf_ring;
+ struct ath12k_reo_q_addr_lut reoq_lut;
+};
+
+/* HTT definitions */
+
+#define HTT_TCL_META_DATA_TYPE BIT(0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+
+#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+
+/* HTT tx completion is overlayed in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
+#define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0)
+#define HTT_TX_WBM_COMP_INFO1_EXCEPTION_FRAME BIT(4)
+
+#define HTT_TX_WBM_COMP_INFO2_ACK_RSSI GENMASK(31, 24)
+
+struct htt_tx_wbm_completion {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 rsvd1;
+} __packed;
+
+enum htt_h2t_msg_type {
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+ HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+ HTT_H2T_MSG_TYPE_VDEV_TXRX_STATS_CFG = 0x1a,
+ HTT_H2T_MSG_TYPE_TX_MONITOR_CFG = 0x1b,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+
+struct htt_ver_req_cmd {
+ __le32 ver_reg_info;
+} __packed;
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING,
+ HTT_RXDMA_MONITOR_STATUS_RING,
+ HTT_RXDMA_MONITOR_BUF_RING,
+ HTT_RXDMA_MONITOR_DESC_RING,
+ HTT_RXDMA_MONITOR_DEST_RING,
+ HTT_HOST1_TO_FW_RXBUF_RING,
+ HTT_HOST2_TO_FW_RXBUF_RING,
+ HTT_RXDMA_NON_MONITOR_DEST_RING,
+ HTT_TX_MON_HOST2MON_BUF_RING,
+ HTT_TX_MON_MON2HOST_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After target is booted up, Host can send SRING setup message for
+ * each host facing LMAC SRING. Target sets up HW registers based
+ * on the setup message and confirms back to Host if response_required is
+ * set. Host should wait for the confirmation message before sending a new
+ * SRING setup message.
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |--------------- +-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identify which ring is to setup,
+ * more details can be got from enum htt_srng_ring_id
+ * b'24:31 - ring_type: identify type of host rings,
+ * more details can be got from enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in units of 4-byte words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ *                    (see SRING spec for detailed description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by Consumer ring to generate ring_sw_int_p.
+ * Ring entries low threshold water mark, that is used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ *                    Others: Reserved
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG GENMASK(18, 16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+ __le32 info0;
+ __le32 ring_base_addr_lo;
+ __le32 ring_base_addr_hi;
+ __le32 info1;
+ __le32 ring_head_off32_remote_addr_lo;
+ __le32 ring_head_off32_remote_addr_hi;
+ __le32 ring_tail_off32_remote_addr_lo;
+ __le32 ring_tail_off32_remote_addr_hi;
+ __le32 ring_msi_addr_lo;
+ __le32 ring_msi_addr_hi;
+ __le32 msi_data;
+ __le32 intr_info;
+ __le32 info2;
+} __packed;
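+
+/* Illustrative example (not part of the driver): packing dword0 of the
+ * SRING setup command with the masks above, for a hypothetical LMAC
+ * buffer ring on mac_id 0:
+ *
+ *	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
+ *				      HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE) |
+ *		     le32_encode_bits(DP_SW2HW_MACID(0),
+ *				      HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID) |
+ *		     le32_encode_bits(HTT_RXDMA_HOST_BUF_RING,
+ *				      HTT_SRNG_SETUP_CMD_INFO0_RING_ID) |
+ *		     le32_encode_bits(HTT_HW_TO_SW_RING,
+ *				      HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
+ */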
+
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW for PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a req to configure ppdu_stats_ind from target
+ * Value: 0x11
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies which pdevs this PPDU stats configuration applies to
+ *    Value: This is an overloaded field, refer to usage and interpretation of
+ *           PDEV in the interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - REQ_TLV_BIT_MASK
+ * Bits 16:31
+ * Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ * needs to be included in the target's PPDU_STATS_IND messages.
+ *    Value: refer to htt_ppdu_stats_tlv_tag_t
+ *
+ */
+
+struct htt_ppdu_stats_cfg_cmd {
+ __le32 msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
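+
+/* Illustrative example (not part of the driver): requesting the default
+ * TLV set for pdev mask 0x2 packs the single-dword command as
+ *
+ *	cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
+ *				    HTT_PPDU_STATS_CFG_MSG_TYPE) |
+ *		   le32_encode_bits(0x2, HTT_PPDU_STATS_CFG_PDEV_ID) |
+ *		   le32_encode_bits(HTT_PPDU_STATS_TAG_DEFAULT,
+ *				    HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
+ */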
+
+enum htt_ppdu_stats_tag_type {
+ HTT_PPDU_STATS_TAG_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMMON,
+ HTT_PPDU_STATS_TAG_USR_RATE,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+ HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+ HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+ HTT_PPDU_STATS_TAG_INFO,
+ HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+ /* New TLV's are added above to this line */
+ HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+ | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_INFO) | \
+ BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+ HTT_PPDU_STATS_TAG_DEFAULT)
+
+enum htt_stats_internal_ppdu_frametype {
+ HTT_STATS_PPDU_FTYPE_CTRL,
+ HTT_STATS_PPDU_FTYPE_DATA,
+ HTT_STATS_PPDU_FTYPE_BAR,
+ HTT_STATS_PPDU_FTYPE_MAX
+};
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ * configure RXDMA rings.
+ * The configuration is per-ring and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 26|25|24|23 16|15 8|7 0|
+ * |-----------------+----------------+----------------+---------------|
+ * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id : Identify the ring to configure.
+ *                    More details can be found in enum htt_srng_ring_id
+ * b'24 - status_swap: 1 is to swap status TLV
+ * b'25 - pkt_swap: 1 is to swap packet TLV
+ * b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by the
+ *                   rx ring, in byte units.
+ * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID BIT(26)
+
+#define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
+
+enum htt_rx_filter_tlv_flags {
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
+};
+
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
+};
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+ (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+ (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+/* msdu start, mpdu end, attention, rx hdr TLVs are not subscribed */
+#define HTT_RX_TLV_FLAGS_RXDMA_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+
+struct htt_rx_ring_selection_cfg_cmd {
+ __le32 info0;
+ __le32 info1;
+ __le32 pkt_type_en_flags0;
+ __le32 pkt_type_en_flags1;
+ __le32 pkt_type_en_flags2;
+ __le32 pkt_type_en_flags3;
+ __le32 rx_filter_tlv;
+ __le32 rx_packet_offset;
+ __le32 rx_mpdu_offset;
+ __le32 rx_msdu_offset;
+ __le32 rx_attn_offset;
+} __packed;
+
+struct htt_rx_ring_tlv_filter {
+ u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+ u32 pkt_filter_flags0; /* MGMT */
+ u32 pkt_filter_flags1; /* MGMT */
+ u32 pkt_filter_flags2; /* CTRL */
+ u32 pkt_filter_flags3; /* DATA */
+ bool offset_valid;
+ u16 rx_packet_offset;
+ u16 rx_header_offset;
+ u16 rx_mpdu_end_offset;
+ u16 rx_mpdu_start_offset;
+ u16 rx_msdu_end_offset;
+ u16 rx_msdu_start_offset;
+ u16 rx_attn_offset;
+};
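+
+/* Illustrative sketch, not part of the original patch: one plausible way a
+ * host driver could translate the host-side struct htt_rx_ring_tlv_filter
+ * into the command words described above, using the bitfield definitions in
+ * this file. The helper name is hypothetical; the message-type enum value is
+ * defined earlier in this header.
+ */
+static inline void
+ath12k_dp_sketch_fill_rx_ring_cfg(struct htt_rx_ring_selection_cfg_cmd *cmd,
+				  u8 pdev_id, u8 ring_id,
+				  const struct htt_rx_ring_tlv_filter *filter)
+{
+	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
+				      HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
+	cmd->info0 |= le32_encode_bits(pdev_id,
+				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+	cmd->info0 |= le32_encode_bits(ring_id,
+				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
+	/* packet subtype filters (FP/MD/MO bits) and the TLV subscription mask */
+	cmd->pkt_type_en_flags0 = cpu_to_le32(filter->pkt_filter_flags0);
+	cmd->pkt_type_en_flags1 = cpu_to_le32(filter->pkt_filter_flags1);
+	cmd->pkt_type_en_flags2 = cpu_to_le32(filter->pkt_filter_flags2);
+	cmd->pkt_type_en_flags3 = cpu_to_le32(filter->pkt_filter_flags3);
+	cmd->rx_filter_tlv = cpu_to_le32(filter->rx_filter);
+}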
+
+#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
+#define HTT_STATS_FRAME_CTRL_TYPE_CTRL 0x1
+#define HTT_STATS_FRAME_CTRL_TYPE_DATA 0x2
+#define HTT_STATS_FRAME_CTRL_TYPE_RESV 0x3
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE GENMASK(15, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE GENMASK(18, 16)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(21, 19)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(24, 22)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(27, 25)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG GENMASK(2, 0)
+
+struct htt_tx_ring_selection_cfg_cmd {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 tlv_filter_mask_in0;
+ __le32 tlv_filter_mask_in1;
+ __le32 tlv_filter_mask_in2;
+ __le32 tlv_filter_mask_in3;
+	__le32 reserved[3];
+} __packed;
+
+#define HTT_TX_RING_TLV_FILTER_MGMT_DMA_LEN GENMASK(3, 0)
+#define HTT_TX_RING_TLV_FILTER_CTRL_DMA_LEN GENMASK(7, 4)
+#define HTT_TX_RING_TLV_FILTER_DATA_DMA_LEN GENMASK(11, 8)
+
+#define HTT_TX_MON_FILTER_HYBRID_MODE \
+ (HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS | \
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE | \
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO | \
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2)
+
+struct htt_tx_ring_tlv_filter {
+ u32 tx_mon_downstream_tlv_flags;
+ u32 tx_mon_upstream_tlv_flags0;
+ u32 tx_mon_upstream_tlv_flags1;
+ u32 tx_mon_upstream_tlv_flags2;
+ bool tx_mon_mgmt_filter;
+ bool tx_mon_data_filter;
+ bool tx_mon_ctrl_filter;
+ u16 tx_mon_pkt_dma_len;
+} __packed;
+
+enum htt_tx_mon_upstream_tlv_flags0 {
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS = BIT(1),
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS = BIT(2),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START = BIT(3),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END = BIT(4),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU = BIT(5),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU = BIT(6),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA = BIT(7),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA = BIT(8),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT = BIT(9),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT = BIT(10),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE = BIT(11),
+ HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_BITMAP_ACK = BIT(12),
+ HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_1K_BITMAP_ACK = BIT(13),
+ HTT_TX_FILTER_TLV_FLAGS0_COEX_TX_STATUS = BIT(14),
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO = BIT(15),
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2 = BIT(16),
+};
+
+#define HTT_TX_FILTER_TLV_FLAGS2_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 BIT(11)
+
+/* HTT message target->host */
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f,
+ HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+ HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+ HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
+ HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND = 0x28,
+ HTT_T2H_MSG_TYPE_PEER_MAP3 = 0x2b,
+ HTT_T2H_MSG_TYPE_VDEV_TXRX_STATS_PERIODIC_IND = 0x2c,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+ __le32 version;
+} __packed;
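+
+/* Illustrative sketch, not part of the original patch: every T2H message
+ * carries its type in the low byte, so a handler can check it and, for
+ * VERSION_CONF, unpack the major/minor fields. The helper name is
+ * hypothetical.
+ */
+static inline void
+ath12k_dp_sketch_read_version_conf(const struct htt_t2h_version_conf_msg *msg,
+				   u8 *major, u8 *minor)
+{
+	if (le32_get_bits(msg->version, HTT_T2H_MSG_TYPE) !=
+	    HTT_T2H_MSG_TYPE_VERSION_CONF)
+		return;
+
+	*major = le32_get_bits(msg->version, HTT_T2H_VERSION_CONF_MAJOR);
+	*minor = le32_get_bits(msg->version, HTT_T2H_VERSION_CONF_MINOR);
+}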
+
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
+
+struct htt_t2h_peer_map_event {
+ __le32 info;
+ __le32 mac_addr_l32;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+ __le32 info;
+ __le32 mac_addr_l32;
+ __le32 info1;
+} __packed;
+
+struct htt_resp_msg {
+ union {
+ struct htt_t2h_version_conf_msg version_msg;
+ struct htt_t2h_peer_map_event peer_map_ev;
+ struct htt_t2h_peer_unmap_event peer_unmap_ev;
+ };
+} __packed;
+
+#define HTT_VDEV_GET_STATS_U64(msg_l32, msg_u32)\
+ (((u64)__le32_to_cpu(msg_u32) << 32) | (__le32_to_cpu(msg_l32)))
+#define HTT_T2H_VDEV_STATS_PERIODIC_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VDEV_STATS_PERIODIC_PDEV_ID GENMASK(15, 8)
+#define HTT_T2H_VDEV_STATS_PERIODIC_NUM_VDEV GENMASK(23, 16)
+#define HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES GENMASK(15, 0)
+#define HTT_VDEV_TXRX_STATS_COMMON_TLV 0
+#define HTT_VDEV_TXRX_STATS_HW_STATS_TLV 1
+
+struct htt_t2h_vdev_txrx_stats_ind {
+ __le32 vdev_id;
+ __le32 rx_msdu_byte_cnt_lo;
+ __le32 rx_msdu_byte_cnt_hi;
+ __le32 rx_msdu_cnt_lo;
+ __le32 rx_msdu_cnt_hi;
+ __le32 tx_msdu_byte_cnt_lo;
+ __le32 tx_msdu_byte_cnt_hi;
+ __le32 tx_msdu_cnt_lo;
+ __le32 tx_msdu_cnt_hi;
+ __le32 tx_retry_cnt_lo;
+ __le32 tx_retry_cnt_hi;
+ __le32 tx_retry_byte_cnt_lo;
+ __le32 tx_retry_byte_cnt_hi;
+ __le32 tx_drop_cnt_lo;
+ __le32 tx_drop_cnt_hi;
+ __le32 tx_drop_byte_cnt_lo;
+ __le32 tx_drop_byte_cnt_hi;
+ __le32 msdu_ttl_cnt_lo;
+ __le32 msdu_ttl_cnt_hi;
+ __le32 msdu_ttl_byte_cnt_lo;
+ __le32 msdu_ttl_byte_cnt_hi;
+} __packed;
+
+struct htt_t2h_vdev_common_stats_tlv {
+ __le32 soc_drop_count_lo;
+ __le32 soc_drop_count_hi;
+} __packed;
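+
+/* Illustrative sketch, not part of the original patch: each 64-bit vdev
+ * counter arrives as _lo/_hi halves which HTT_VDEV_GET_STATS_U64
+ * reassembles. The helper name is hypothetical.
+ */
+static inline u64
+ath12k_dp_sketch_vdev_rx_msdu_cnt(const struct htt_t2h_vdev_txrx_stats_ind *ind)
+{
+	return HTT_VDEV_GET_STATS_U64(ind->rx_msdu_cnt_lo, ind->rx_msdu_cnt_hi);
+}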
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31 16|15 12|11 10|9 8|7 0 |
+ * |----------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id | msg type |
+ * |----------------------------------------------------------------------|
+ * | ppdu_id |
+ * |----------------------------------------------------------------------|
+ * | Timestamp in us |
+ * |----------------------------------------------------------------------|
+ * | reserved |
+ * |----------------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_ppdu_stats.h) |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this is a PPDU STATS indication
+ * message.
+ * Value: 0x1d
+ * - mac_id
+ * Bits 9:8
+ * Purpose: mac_id of this ppdu_id
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id of this ppdu_id
+ * Value: 0-3
+ *            0 (for rings at SOC level),
+ *            values 1/2/3 map to PDEV 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: total tlv size
+ * Value: payload_size in bytes
+ */
+
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath12k_htt_ppdu_stats_msg {
+ __le32 info;
+ __le32 ppdu_id;
+ __le32 timestamp;
+ __le32 rsvd;
+ u8 data[];
+} __packed;
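+
+/* Illustrative sketch, not part of the original patch: unpacking the
+ * indication header before walking the TLVs in msg->data. The helper name
+ * is hypothetical.
+ */
+static inline u16
+ath12k_dp_sketch_ppdu_stats_len(const struct ath12k_htt_ppdu_stats_msg *msg,
+				u8 *pdev_id)
+{
+	*pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+
+	return le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
+}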
+
+struct htt_tlv {
+ __le32 header;
+ u8 value[];
+} __packed;
+
+#define HTT_TLV_TAG GENMASK(11, 0)
+#define HTT_TLV_LEN GENMASK(23, 12)
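+
+/* Illustrative sketch, not part of the original patch: a minimal walk over a
+ * TLV stream such as ath12k_htt_ppdu_stats_msg::data. That consecutive TLVs
+ * are packed back to back with no extra padding is an assumption here; the
+ * helper name is hypothetical.
+ */
+static inline void ath12k_dp_sketch_walk_tlvs(const u8 *data, u16 total_len)
+{
+	while (total_len >= sizeof(struct htt_tlv)) {
+		const struct htt_tlv *tlv = (const struct htt_tlv *)data;
+		u16 tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
+		u16 len = le32_get_bits(tlv->header, HTT_TLV_LEN);
+
+		if (sizeof(*tlv) + len > total_len)
+			break; /* truncated TLV, stop parsing */
+
+		(void)tag; /* dispatch on tag, e.g. HTT_PPDU_STATS_TAG_COMMON */
+
+		data += sizeof(*tlv) + len;
+		total_len -= sizeof(*tlv) + len;
+	}
+}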
+
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
+
+struct htt_ppdu_stats_common {
+ __le32 ppdu_id;
+ __le16 sched_cmdid;
+ u8 ring_id;
+ u8 num_users;
+	__le32 flags; /* %HTT_PPDU_STATS_CMN_FLAGS_ */
+ __le32 chain_mask;
+ __le32 fes_duration_us; /* frame exchange sequence */
+ __le32 ppdu_sch_eval_start_tstmp_us;
+ __le32 ppdu_sch_end_tstmp_us;
+ __le32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ __le16 phy_mode;
+ __le16 bw_mhz;
+} __packed;
+
+enum htt_ppdu_stats_gi {
+ HTT_PPDU_STATS_SGI_0_8_US,
+ HTT_PPDU_STATS_SGI_0_4_US,
+ HTT_PPDU_STATS_SGI_1_6_US,
+ HTT_PPDU_STATS_SGI_3_2_US,
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+enum HTT_PPDU_STATS_PPDU_TYPE {
+ HTT_PPDU_STATS_PPDU_TYPE_SU,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_TRIG,
+ HTT_PPDU_STATS_PPDU_TYPE_BURST_BCN,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_RESP,
+ HTT_PPDU_STATS_PPDU_TYPE_MAX
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PREAMBLE(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M)
+#define HTT_USR_RATE_BW(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M)
+#define HTT_USR_RATE_NSS(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M)
+#define HTT_USR_RATE_MCS(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M)
+#define HTT_USR_RATE_GI(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M)
+#define HTT_USR_RATE_DCM(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+ u8 tid_num;
+ u8 reserved0;
+ __le16 sw_peer_id;
+ __le32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+ __le16 ru_end;
+ __le16 ru_start;
+ __le16 resp_ru_end;
+ __le16 resp_ru_start;
+ __le32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+ __le32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+	/* Note: resp_rate_flags is only valid if resp_type is UL */
+ __le32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
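+
+/* Illustrative sketch, not part of the original patch: the HTT_USR_RATE_*
+ * accessors above unpack the rate_flags word of htt_ppdu_stats_user_rate.
+ * The helper name is hypothetical.
+ */
+static inline void
+ath12k_dp_sketch_user_rate(const struct htt_ppdu_stats_user_rate *rate,
+			   u8 *mcs, u8 *nss, u8 *bw, u8 *gi)
+{
+	*mcs = HTT_USR_RATE_MCS(rate->rate_flags);
+	*nss = HTT_USR_RATE_NSS(rate->rate_flags);
+	*bw = HTT_USR_RATE_BW(rate->rate_flags);	/* enum HTT_PPDU_STATS_BW */
+	*gi = HTT_USR_RATE_GI(rate->rate_flags);	/* enum htt_ppdu_stats_gi */
+}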
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M)
+#define HTT_TX_INFO_RATECODE(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M)
+#define HTT_TX_INFO_PEERID(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
+
+struct htt_tx_ppdu_stats_info {
+ struct htt_tlv tlv_hdr;
+ __le32 tx_success_bytes;
+ __le32 tx_retry_bytes;
+ __le32 tx_failed_bytes;
+ __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
+ __le16 tx_success_msdus;
+ __le16 tx_retry_msdus;
+ __le16 tx_failed_msdus;
+	__le16 tx_duration; /* units in us */
+} __packed;
+
+enum htt_ppdu_stats_usr_compln_status {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+ u8 status;
+ u8 tid_num;
+ __le16 sw_peer_id;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ __le32 ack_rssi;
+ __le16 mpdu_tried;
+ __le16 mpdu_success;
+	__le32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+ __le32 ppdu_id;
+ __le16 sw_peer_id;
+ __le16 reserved0;
+	__le32 info; /* %HTT_PPDU_STATS_ACK_BA_INFO_ */
+ __le16 current_seq;
+ __le16 start_seq;
+ __le32 success_bytes;
+} __packed;
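+
+/* Illustrative sketch, not part of the original patch: unpacking the ack/BA
+ * info word; a TID of HTT_PPDU_STATS_NON_QOS_TID (16) marks non-QoS traffic.
+ * The helper name is hypothetical.
+ */
+static inline bool
+ath12k_dp_sketch_ack_ba_is_qos(const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba)
+{
+	return le32_get_bits(ba->info, HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM) !=
+	       HTT_PPDU_STATS_NON_QOS_TID;
+}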
+
+struct htt_ppdu_user_stats {
+ u16 peer_id;
+ u16 delay_ba;
+ u32 tlv_flags;
+ bool is_valid_peer_id;
+ struct htt_ppdu_stats_user_rate rate;
+ struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+ struct htt_ppdu_stats_common common;
+ struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+ u32 tlv_bitmap;
+ u32 ppdu_id;
+ u32 frame_type;
+ u32 frame_ctrl;
+ u32 delay_ba;
+ u32 bar_num_users;
+ struct htt_ppdu_stats ppdu_stats;
+ struct list_head list;
+};
+
+/* @brief target -> host MLO offset indication message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host mlo offset indication message.
+ *
+ *
+ * |31 29|28                13|12    10|9    8|7       0|
+ * |---------------------------------------------------------------------|
+ * | rsvd1 | mac_freq |chip_id |pdev_id|msgtype|
+ * |---------------------------------------------------------------------|
+ * | sync_timestamp_lo_us |
+ * |---------------------------------------------------------------------|
+ * | sync_timestamp_hi_us |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_lo |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_hi |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_clcks |
+ * |---------------------------------------------------------------------|
+ * | rsvd2 | mlo_comp_clks |mlo_comp_us |
+ * |---------------------------------------------------------------------|
+ * | rsvd3 |mlo_comp_timer |
+ * |---------------------------------------------------------------------|
+ * Header fields
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this is a MLO offset indication msg
+ * - PDEV_ID
+ * Bits 9:8
+ * Purpose: Pdev of this MLO offset
+ * - CHIP_ID
+ * Bits 12:10
+ * Purpose: chip_id of this MLO offset
+ * - MAC_FREQ
+ *   Bits 28:13
+ *   Purpose: clock frequency of the mac HW block in MHz
+ * - SYNC_TIMESTAMP_LO_US
+ *   Bits: 31:0
+ * Purpose: lower 32 bits of the WLAN global time stamp at which
+ * last sync interrupt was received
+ * - SYNC_TIMESTAMP_HI_US
+ * Bits: 31:0
+ * Purpose: upper 32 bits of WLAN global time stamp at which
+ * last sync interrupt was received
+ * - MLO_OFFSET_LO
+ * Bits: 31:0
+ * Purpose: lower 32 bits of the MLO offset in us
+ * - MLO_OFFSET_HI
+ * Bits: 31:0
+ * Purpose: upper 32 bits of the MLO offset in us
+ * - MLO_COMP_US
+ * Bits: 15:0
+ * Purpose: MLO time stamp compensation applied in us
+ * - MLO_COMP_CLCKS
+ * Bits: 25:16
+ * Purpose: MLO time stamp compensation applied in clock ticks
+ * - MLO_COMP_TIMER
+ * Bits: 21:0
+ * Purpose: Periodic timer at which compensation is applied
+ */
+
+#define HTT_T2H_MLO_OFFSET_INFO_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_MLO_OFFSET_INFO_PDEV_ID GENMASK(9, 8)
+
+struct ath12k_htt_mlo_offset_msg {
+ __le32 info;
+ __le32 sync_timestamp_lo_us;
+ __le32 sync_timestamp_hi_us;
+ __le32 mlo_offset_hi;
+ __le32 mlo_offset_lo;
+ __le32 mlo_offset_clks;
+ __le32 mlo_comp_clks;
+ __le32 mlo_comp_timer;
+} __packed;
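+
+/* Illustrative sketch, not part of the original patch: the 64-bit MLO offset
+ * in microseconds is carried as hi/lo halves and can be reassembled the same
+ * way as the vdev stats counters. The helper name is hypothetical.
+ */
+static inline u64
+ath12k_dp_sketch_mlo_offset_us(const struct ath12k_htt_mlo_offset_msg *msg)
+{
+	return ((u64)__le32_to_cpu(msg->mlo_offset_hi) << 32) |
+	       __le32_to_cpu(msg->mlo_offset_lo);
+}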
+
+/* @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | reserved | stats type | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * | config param [0] |
+ * |-----------------------------------------------------------|
+ * | config param [1] |
+ * |-----------------------------------------------------------|
+ * | config param [2] |
+ * |-----------------------------------------------------------|
+ * | config param [3] |
+ * |-----------------------------------------------------------|
+ * | reserved |
+ * |-----------------------------------------------------------|
+ * | cookie LSBs |
+ * |-----------------------------------------------------------|
+ * | cookie MSBs |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ *    Purpose: identifies this is an extended stats upload request message
+ * Value: 0x10
+ * - PDEV_MASK
+ *    Bits 15:8
+ * Purpose: identifies the mask of PDEVs to retrieve stats from
+ *    Value: This is an overloaded field; refer to usage and interpretation of
+ *           PDEV in the interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - STATS_TYPE
+ * Bits 23:16
+ * Purpose: identifies which FW statistics to upload
+ * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ * - Reserved
+ * Bits 31:24
+ * - CONFIG_PARAM [0]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [1]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [2]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [3]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - Reserved [31:0] for future use.
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+ u8 msg_type;
+ u8 pdev_mask;
+ u8 stats_type;
+ u8 reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+ struct htt_ext_stats_cfg_hdr hdr;
+ __le32 cfg_param0;
+ __le32 cfg_param1;
+ __le32 cfg_param2;
+ __le32 cfg_param3;
+ __le32 reserved;
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+} __packed;
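+
+/* Illustrative sketch, not part of the original patch: filling the extended
+ * stats request described above. The msg_type value (0x10 per the layout
+ * comment) and the cookie come from the caller; the helper name is
+ * hypothetical.
+ */
+static inline void
+ath12k_dp_sketch_fill_ext_stats_cmd(struct htt_ext_stats_cfg_cmd *cmd,
+				    u8 msg_type, u8 pdev_mask, u8 stats_type,
+				    u64 cookie)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.msg_type = msg_type;
+	cmd->hdr.pdev_mask = pdev_mask;
+	cmd->hdr.stats_type = stats_type;
+	cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
+	cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
+}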
+
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
+ * [Bit15 : Bit 1] htt_peer_stats_req_mode_t
+ * [Bit31 : Bit16] sw_peer_id
+ * @config_param1:
+ * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ * 0 bit htt_peer_stats_cmn_tlv
+ * 1 bit htt_peer_details_tlv
+ * 2 bit htt_tx_peer_rate_stats_tlv
+ * 3 bit htt_rx_peer_rate_stats_tlv
+ * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ * 5 bit htt_rx_tid_stats_tlv
+ * 6 bit htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit31 : Bit0] mac_addr31to0
+ * @config_param3: [Bit15 : Bit0] mac_addr47to32
+ * [Bit31 : Bit16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to set different configs to the specified stats type. */
+struct htt_ext_stats_cfg_params {
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg2;
+ u32 cfg3;
+};
+
+enum vdev_stats_offload_timer_duration {
+ ATH12K_STATS_TIMER_DUR_500MS = 1,
+ ATH12K_STATS_TIMER_DUR_1SEC = 2,
+ ATH12K_STATS_TIMER_DUR_2SEC = 3,
+};
+
+static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
+{
+ memcpy(addr, &addr_l32, 4);
+ memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
+}
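+
+/* Illustrative sketch, not part of the original patch: recovering the peer
+ * MAC address from a peer map event with the helper above; the low 32 bits
+ * arrive in mac_addr_l32 and the high 16 bits inside info1. The helper name
+ * is hypothetical.
+ */
+static inline void
+ath12k_dp_sketch_peer_map_addr(const struct htt_t2h_peer_map_event *ev, u8 *addr)
+{
+	u16 addr_h16 = le32_get_bits(ev->info1,
+				     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+
+	ath12k_dp_get_mac_addr(le32_to_cpu(ev->mac_addr_l32), addr_h16, addr);
+}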
+
+int ath12k_dp_service_srng(struct ath12k_base *ab,
+ struct ath12k_ext_irq_grp *irq_grp,
+ int budget);
+int ath12k_dp_htt_connect(struct ath12k_dp *dp);
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif);
+void ath12k_dp_free(struct ath12k_base *ab);
+int ath12k_dp_alloc(struct ath12k_base *ab);
+void ath12k_dp_cc_config(struct ath12k_base *ab);
+int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_free(struct ath12k_base *ab);
+int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type);
+int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr);
+void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr);
+void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring);
+int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries);
+void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring);
+int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc);
+struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
+ u32 cookie);
+struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
+ u32 desc_id);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
new file mode 100644
index 000000000000..a214797c96a2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -0,0 +1,2596 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dp_mon.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+
+static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->ul_ofdma_user_v0_word0 =
+ __le32_to_cpu(ppdu_end_user->usr_resp_ref);
+ rx_user_status->ul_ofdma_user_v0_word1 =
+ __le32_to_cpu(ppdu_end_user->usr_resp_ref_ext);
+}
+
+static void
+ath12k_dp_mon_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+ u32 mpdu_ok_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_ok_cnt);
+ u32 mpdu_err_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_err_cnt);
+
+ rx_user_status->mpdu_ok_byte_count =
+ u32_get_bits(mpdu_ok_byte_count,
+ HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT);
+ rx_user_status->mpdu_err_byte_count =
+ u32_get_bits(mpdu_err_byte_count,
+ HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT);
+}
+
+static void
+ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ast_index = ppdu_info->ast_index;
+ rx_user_status->tid = ppdu_info->tid;
+ rx_user_status->tcp_ack_msdu_count =
+ ppdu_info->tcp_ack_msdu_count;
+ rx_user_status->tcp_msdu_count =
+ ppdu_info->tcp_msdu_count;
+ rx_user_status->udp_msdu_count =
+ ppdu_info->udp_msdu_count;
+ rx_user_status->other_msdu_count =
+ ppdu_info->other_msdu_count;
+ rx_user_status->frame_control = ppdu_info->frame_control;
+ rx_user_status->frame_control_info_valid =
+ ppdu_info->frame_control_info_valid;
+ rx_user_status->data_sequence_control_info_valid =
+ ppdu_info->data_sequence_control_info_valid;
+ rx_user_status->first_data_seq_ctrl =
+ ppdu_info->first_data_seq_ctrl;
+ rx_user_status->preamble_type = ppdu_info->preamble_type;
+ rx_user_status->ht_flags = ppdu_info->ht_flags;
+ rx_user_status->vht_flags = ppdu_info->vht_flags;
+ rx_user_status->he_flags = ppdu_info->he_flags;
+ rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+ rx_user_status->mpdu_cnt_fcs_ok =
+ ppdu_info->num_mpdu_fcs_ok;
+ rx_user_status->mpdu_cnt_fcs_err =
+ ppdu_info->num_mpdu_fcs_err;
+ memcpy(&rx_user_status->mpdu_fcs_ok_bitmap[0], &ppdu_info->mpdu_fcs_ok_bitmap[0],
+ HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
+ sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
+
+ ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
+static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_vht_sig_a_info *vht_sig =
+ (struct hal_rx_vht_sig_a_info *)tlv_data;
+ u32 nsts, group_id, info0, info1;
+ u8 gi_setting;
+
+ info0 = __le32_to_cpu(vht_sig->info0);
+ info1 = __le32_to_cpu(vht_sig->info1);
+
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
+ ppdu_info->mcs = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_MCS);
+ gi_setting = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING);
+ switch (gi_setting) {
+ case HAL_RX_VHT_SIG_A_NORMAL_GI:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case HAL_RX_VHT_SIG_A_SHORT_GI:
+ case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
+ ppdu_info->gi = HAL_RX_GI_0_4_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_STBC);
+ nsts = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS);
+ if (ppdu_info->is_stbc && nsts > 0)
+ nsts = ((nsts + 1) >> 1) - 1;
+
+ ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK);
+ ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
+ ppdu_info->beamformed = u32_get_bits(info1,
+ HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
+ group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
+ if (group_id == 0 || group_id == 63)
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ else
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+ ppdu_info->nss);
+ ppdu_info->vht_flag_values2 = ppdu_info->bw;
+ ppdu_info->vht_flag_values4 =
+ u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
+}
+
+static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_ht_sig_info *ht_sig =
+ (struct hal_rx_ht_sig_info *)tlv_data;
+ u32 info0 = __le32_to_cpu(ht_sig->info0);
+ u32 info1 = __le32_to_cpu(ht_sig->info1);
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_MCS);
+ ppdu_info->bw = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_BW);
+ ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC);
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
+ ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
+ ppdu_info->nss = (ppdu_info->mcs >> 3);
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_lsig_b_info *lsigb =
+ (struct hal_rx_lsig_b_info *)tlv_data;
+ u32 info0 = __le32_to_cpu(lsigb->info0);
+ u8 rate;
+
+ rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE);
+ switch (rate) {
+ case 1:
+ rate = HAL_RX_LEGACY_RATE_1_MBPS;
+ break;
+ case 2:
+ case 5:
+ rate = HAL_RX_LEGACY_RATE_2_MBPS;
+ break;
+ case 3:
+ case 6:
+ rate = HAL_RX_LEGACY_RATE_5_5_MBPS;
+ break;
+ case 4:
+ case 7:
+ rate = HAL_RX_LEGACY_RATE_11_MBPS;
+ break;
+ default:
+ rate = HAL_RX_LEGACY_RATE_INVALID;
+ }
+
+ ppdu_info->rate = rate;
+ ppdu_info->cck_flag = 1;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_lsig_a_info *lsiga =
+ (struct hal_rx_lsig_a_info *)tlv_data;
+ u32 info0 = __le32_to_cpu(lsiga->info0);
+ u8 rate;
+
+ rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE);
+ switch (rate) {
+ case 8:
+ rate = HAL_RX_LEGACY_RATE_48_MBPS;
+ break;
+ case 9:
+ rate = HAL_RX_LEGACY_RATE_24_MBPS;
+ break;
+ case 10:
+ rate = HAL_RX_LEGACY_RATE_12_MBPS;
+ break;
+ case 11:
+ rate = HAL_RX_LEGACY_RATE_6_MBPS;
+ break;
+ case 12:
+ rate = HAL_RX_LEGACY_RATE_54_MBPS;
+ break;
+ case 13:
+ rate = HAL_RX_LEGACY_RATE_36_MBPS;
+ break;
+ case 14:
+ rate = HAL_RX_LEGACY_RATE_18_MBPS;
+ break;
+ case 15:
+ rate = HAL_RX_LEGACY_RATE_9_MBPS;
+ break;
+ default:
+ rate = HAL_RX_LEGACY_RATE_INVALID;
+ }
+
+ ppdu_info->rate = rate;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
+ (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
+ u32 info0, value;
+
+ info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+
+ ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
+
+ /* HE-data2 */
+ ppdu_info->he_data2 |= HE_TXBF_KNOWN;
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS);
+ value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM);
+ value = value << HE_DCM_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING);
+ ppdu_info->ldpc = value;
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* HE-data4 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID);
+ value = value << HE_STA_ID_SHIFT;
+ ppdu_info->he_data4 |= value;
+
+ ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS);
+ ppdu_info->beamformed = u32_get_bits(info0,
+ HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+}
+
+static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
+ (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
+ u32 info0, value;
+
+ info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+
+ ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_CODING_KNOWN;
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS);
+ value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING);
+ ppdu_info->ldpc = value;
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID);
+ value = value << HE_STA_ID_SHIFT;
+ ppdu_info->he_data4 |= value;
+
+ ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS);
+}
+
+static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
+ (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
+ u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
+ u16 ru_tones;
+
+ ru_tones = u32_get_bits(info0,
+ HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
+ ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ ppdu_info->he_RU[0] = ru_tones;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+}
+
+static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
+ (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
+ u32 info0, info1, value;
+ u16 he_gi = 0, he_ltf = 0;
+
+ info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
+ info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
+
+ ppdu_info->he_mu_flags = 1;
+
+ ppdu_info->he_data1 = HE_MU_FORMAT_TYPE;
+ ppdu_info->he_data1 |=
+ HE_BSS_COLOR_KNOWN |
+ HE_DL_UL_KNOWN |
+ HE_LDPC_EXTRA_SYMBOL_KNOWN |
+ HE_STBC_KNOWN |
+ HE_DATA_BW_RU_KNOWN |
+ HE_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 =
+ HE_GI_KNOWN |
+ HE_LTF_SYMBOLS_KNOWN |
+ HE_PRE_FEC_PADDING_KNOWN |
+ HE_PE_DISAMBIGUITY_KNOWN |
+ HE_TXOP_KNOWN |
+ HE_MIDABLE_PERIODICITY_KNOWN;
+
+ /* data3 */
+ ppdu_info->he_data3 = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR);
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG);
+ value = value << HE_DL_UL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA);
+ value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC);
+ value = value << HE_STBC_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* data4 */
+ ppdu_info->he_data4 = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE);
+
+ /* data5 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
+ ppdu_info->he_data5 = value;
+ ppdu_info->bw = value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ break;
+ }
+
+ ppdu_info->gi = he_gi;
+ value = he_gi << HE_GI_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = he_ltf << HE_LTF_SIZE_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB);
+ value = (value << HE_LTF_SYM_SHIFT);
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR);
+ value = value << HE_PRE_FEC_PAD_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM);
+ value = value << HE_PE_DISAMBIGUITY_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ /*data6*/
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION);
+ value = value << HE_DOPPLER_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION);
+ value = value << HE_TXOP_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ /* HE-MU Flags */
+ /* HE-MU-flags1 */
+ ppdu_info->he_flags1 =
+ HE_SIG_B_MCS_KNOWN |
+ HE_SIG_B_DCM_KNOWN |
+ HE_SIG_B_COMPRESSION_FLAG_1_KNOWN |
+ HE_SIG_B_SYM_NUM_KNOWN |
+ HE_RU_0_KNOWN;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB);
+ ppdu_info->he_flags1 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB);
+ value = value << HE_DCM_FLAG_1_SHIFT;
+ ppdu_info->he_flags1 |= value;
+
+ /* HE-MU-flags2 */
+ ppdu_info->he_flags2 = HE_BW_KNOWN;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
+ ppdu_info->he_flags2 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB);
+ value = value << HE_SIG_B_COMPRESSION_FLAG_2_SHIFT;
+ ppdu_info->he_flags2 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB);
+ value = value - 1;
+ value = value << HE_NUM_SIG_B_SYMBOLS_SHIFT;
+ ppdu_info->he_flags2 |= value;
+
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+}
+
+static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_a_su_info *he_sig_a =
+ (struct hal_rx_he_sig_a_su_info *)tlv_data;
+ u32 info0, info1, value;
+ u32 dcm;
+ u8 he_dcm = 0, he_stbc = 0;
+ u16 he_gi = 0, he_ltf = 0;
+
+ ppdu_info->he_flags = 1;
+
+ info0 = __le32_to_cpu(he_sig_a->info0);
+ info1 = __le32_to_cpu(he_sig_a->info1);
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND);
+ if (value == 0)
+ ppdu_info->he_data1 = HE_TRIG_FORMAT_TYPE;
+ else
+ ppdu_info->he_data1 = HE_SU_FORMAT_TYPE;
+
+ ppdu_info->he_data1 |=
+ HE_BSS_COLOR_KNOWN |
+ HE_BEAM_CHANGE_KNOWN |
+ HE_DL_UL_KNOWN |
+ HE_MCS_KNOWN |
+ HE_DCM_KNOWN |
+ HE_CODING_KNOWN |
+ HE_LDPC_EXTRA_SYMBOL_KNOWN |
+ HE_STBC_KNOWN |
+ HE_DATA_BW_RU_KNOWN |
+ HE_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 |=
+ HE_GI_KNOWN |
+ HE_TXBF_KNOWN |
+ HE_PE_DISAMBIGUITY_KNOWN |
+ HE_TXOP_KNOWN |
+ HE_LTF_SYMBOLS_KNOWN |
+ HE_PRE_FEC_PADDING_KNOWN |
+ HE_MIDABLE_PERIODICITY_KNOWN;
+
+ ppdu_info->he_data3 = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR);
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE);
+ value = value << HE_BEAM_CHANGE_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG);
+ value = value << HE_DL_UL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
+ ppdu_info->mcs = value;
+ value = value << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
+ he_dcm = value;
+ value = value << HE_DCM_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA);
+ value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
+ he_stbc = value;
+ value = value << HE_STBC_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* data4 */
+ ppdu_info->he_data4 = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE);
+
+ /* data5 */
+ value = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
+ ppdu_info->he_data5 = value;
+ ppdu_info->bw = value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_1_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ if (he_dcm && he_stbc) {
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ } else {
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ }
+ break;
+ }
+ ppdu_info->gi = he_gi;
+ value = he_gi << HE_GI_SHIFT;
+ ppdu_info->he_data5 |= value;
+ value = he_ltf << HE_LTF_SIZE_SHIFT;
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+ value = (value << HE_LTF_SYM_SHIFT);
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR);
+ value = value << HE_PRE_FEC_PAD_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
+ value = value << HE_TXBF_SHIFT;
+ ppdu_info->he_data5 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM);
+ value = value << HE_PE_DISAMBIGUITY_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ /* data6 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+ value++;
+ ppdu_info->he_data6 = value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND);
+ value = value << HE_DOPPLER_SHIFT;
+ ppdu_info->he_data6 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION);
+ value = value << HE_TXOP_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ ppdu_info->mcs =
+ u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
+ ppdu_info->bw =
+ u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
+ ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
+ ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
+ dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
+ ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+ ppdu_info->dcm = dcm;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
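+/* Parse a single RX monitor status TLV. Returns BUF_DONE for a dummy
+ * TLV, PPDU_DONE once the PPDU end status has been seen, and
+ * PPDU_NOT_DONE otherwise so the caller keeps walking the buffer.
+ */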
+static enum hal_rx_mon_status
+ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
+ struct ath12k_mon_data *pmon,
+ u32 tlv_tag, u8 *tlv_data, u32 userid)
+{
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ u32 info[7];
+
+ switch (tlv_tag) {
+ case HAL_RX_PPDU_START: {
+ struct hal_rx_ppdu_start *ppdu_start =
+ (struct hal_rx_ppdu_start *)tlv_data;
+
+ info[0] = __le32_to_cpu(ppdu_start->info0);
+
+ ppdu_info->ppdu_id =
+ u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID);
+ ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
+ ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+
+ if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) {
+ ppdu_info->last_ppdu_id = ppdu_info->ppdu_id;
+ ppdu_info->num_users = 0;
+ memset(&ppdu_info->mpdu_fcs_ok_bitmap, 0,
+ HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
+ sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
+ }
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS: {
+ struct hal_rx_ppdu_end_user_stats *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+
+ info[0] = __le32_to_cpu(eu_stats->info0);
+ info[1] = __le32_to_cpu(eu_stats->info1);
+ info[2] = __le32_to_cpu(eu_stats->info2);
+ info[4] = __le32_to_cpu(eu_stats->info4);
+ info[5] = __le32_to_cpu(eu_stats->info5);
+ info[6] = __le32_to_cpu(eu_stats->info6);
+
+ ppdu_info->ast_index =
+ u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
+ ppdu_info->fc_valid =
+ u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
+ ppdu_info->tid =
+ ffs(u32_get_bits(info[6],
+ HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP)) - 1;
+ ppdu_info->tcp_msdu_count =
+ u32_get_bits(info[4],
+ HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
+ ppdu_info->udp_msdu_count =
+ u32_get_bits(info[4],
+ HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT);
+ ppdu_info->other_msdu_count =
+ u32_get_bits(info[5],
+ HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT);
+ ppdu_info->tcp_ack_msdu_count =
+ u32_get_bits(info[5],
+ HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT);
+ ppdu_info->preamble_type =
+ u32_get_bits(info[1],
+ HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE);
+ ppdu_info->num_mpdu_fcs_ok =
+ u32_get_bits(info[1],
+ HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK);
+ ppdu_info->num_mpdu_fcs_err =
+ u32_get_bits(info[0],
+ HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
+ switch (ppdu_info->preamble_type) {
+ case HAL_RX_PREAMBLE_11N:
+ ppdu_info->ht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AC:
+ ppdu_info->vht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AX:
+ ppdu_info->he_flags = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ struct hal_rx_user_status *rxuser_stats =
+ &ppdu_info->userstats[userid];
+ ppdu_info->num_users += 1;
+
+ ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+ ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ rxuser_stats);
+ }
+ ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
+ ppdu_info->mpdu_fcs_ok_bitmap[1] = __le32_to_cpu(eu_stats->rsvd1[1]);
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS_EXT: {
+ struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
+ ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
+ ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
+ ppdu_info->mpdu_fcs_ok_bitmap[5] = __le32_to_cpu(eu_stats->info4);
+ ppdu_info->mpdu_fcs_ok_bitmap[6] = __le32_to_cpu(eu_stats->info5);
+ ppdu_info->mpdu_fcs_ok_bitmap[7] = __le32_to_cpu(eu_stats->info6);
+ break;
+ }
+ case HAL_PHYRX_HT_SIG:
+ ath12k_dp_mon_parse_ht_sig(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_L_SIG_B:
+ ath12k_dp_mon_parse_l_sig_b(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_L_SIG_A:
+ ath12k_dp_mon_parse_l_sig_a(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_VHT_SIG_A:
+ ath12k_dp_mon_parse_vht_sig_a(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_A_SU:
+ ath12k_dp_mon_parse_he_sig_su(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_A_MU_DL:
+ ath12k_dp_mon_parse_he_sig_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B1_MU:
+ ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B2_MU:
+ ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B2_OFDMA:
+ ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_RSSI_LEGACY: {
+ struct hal_rx_phyrx_rssi_legacy_info *rssi =
+ (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+ u32 reception_type = 0;
+ u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]);
+
+ info[0] = __le32_to_cpu(rssi->info0);
+
+ /* TODO: The combined RSSI will not be accurate in the MU case;
+ * the RSSI in MU needs to be retrieved from the
+ * PHYRX_OTHER_RECEIVE_INFO TLV.
+ */
+ ppdu_info->rssi_comb =
+ u32_get_bits(info[0],
+ HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB);
+ reception_type =
+ u32_get_bits(rssi_legacy_info,
+ HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION);
+
+ switch (reception_type) {
+ case HAL_RECEPTION_TYPE_ULOFMDA:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ case HAL_RECEPTION_TYPE_ULMIMO:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ default:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ break;
+ }
+ case HAL_RXPCU_PPDU_END_INFO: {
+ struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
+ (struct hal_rx_ppdu_end_duration *)tlv_data;
+
+ info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
+ ppdu_info->rx_duration =
+ u32_get_bits(info[0], HAL_RX_PPDU_END_DURATION);
+ ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+ ppdu_info->tsft = (ppdu_info->tsft << 32) |
+ __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
+ break;
+ }
+ case HAL_RX_MPDU_START: {
+ struct hal_rx_mpdu_start *mpdu_start =
+ (struct hal_rx_mpdu_start *)tlv_data;
+ struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ u16 peer_id;
+
+ info[1] = __le32_to_cpu(mpdu_start->info1);
+ peer_id = u32_get_bits(info[1], HAL_RX_MPDU_START_INFO1_PEERID);
+ if (peer_id)
+ ppdu_info->peer_id = peer_id;
+
+ ppdu_info->mpdu_len += u32_get_bits(info[1],
+ HAL_RX_MPDU_START_INFO2_MPDU_LEN);
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ info[0] = __le32_to_cpu(mpdu_start->info0);
+ ppdu_info->userid = userid;
+ ppdu_info->ampdu_id[userid] =
+ u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
+ }
+
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+
+ pmon->mon_mpdu = mon_mpdu;
+
+ break;
+ }
+ case HAL_RX_MSDU_START:
+ /* TODO: add msdu start parsing logic */
+ break;
+ case HAL_MON_BUF_ADDR: {
+ struct dp_rxdma_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
+ struct dp_mon_packet_info *packet_info =
+ (struct dp_mon_packet_info *)tlv_data;
+ int buf_id = u32_get_bits(packet_info->cookie,
+ DP_RXDMA_BUF_COOKIE_BUF_ID);
+ struct sk_buff *msdu;
+ struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ struct ath12k_skb_rxcb *rxcb;
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+ buf_id);
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (mon_mpdu->tail)
+ mon_mpdu->tail->next = msdu;
+ else
+ mon_mpdu->tail = msdu;
+
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+
+ break;
+ }
+ case HAL_RX_MSDU_END: {
+ struct rx_msdu_end_qcn9274 *msdu_end =
+ (struct rx_msdu_end_qcn9274 *)tlv_data;
+ bool is_first_msdu_in_mpdu;
+ u16 msdu_end_info;
+
+ msdu_end_info = __le16_to_cpu(msdu_end->info5);
+ is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+ if (is_first_msdu_in_mpdu) {
+ pmon->mon_mpdu->head = pmon->mon_mpdu->tail;
+ pmon->mon_mpdu->tail = NULL;
+ }
+ break;
+ }
+ case HAL_RX_MPDU_END:
+ list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+ break;
+ case HAL_DUMMY:
+ return HAL_RX_MON_STATUS_BUF_DONE;
+ case HAL_RX_PPDU_END_STATUS_DONE:
+ case 0:
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+ default:
+ break;
+ }
+
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu)
+{
+ u32 rx_pkt_offset, l2_hdr_offset;
+
+ rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
+ l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
+ (struct hal_rx_desc *)msdu->data);
+ skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+}
+
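+/* Merge the MSDUs of one MPDU into a frame that can be handed to
+ * mac80211: strip the HAL RX descriptor and L2 padding from each
+ * buffer and, for native wifi decap, re-add header bytes taken from
+ * the RX descriptor. Returns the head MSDU on success, NULL on error.
+ */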
+static struct sk_buff *
+ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
+ u32 mac_id, struct sk_buff *head_msdu,
+ struct ieee80211_rx_status *rxs, bool *fcs_err)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct sk_buff *msdu, *mpdu_buf, *prev_buf;
+ struct hal_rx_desc *rx_desc;
+ u8 *hdr_desc, *dest, decap_format;
+ struct ieee80211_hdr_3addr *wh;
+ u32 err_bitmap;
+
+ mpdu_buf = NULL;
+
+ if (!head_msdu)
+ goto err_merge_fail;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+
+ if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
+ *fcs_err = true;
+
+ decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+
+ ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+
+ if (decap_format == DP_RX_DECAP_TYPE_RAW) {
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu);
+
+ prev_buf = head_msdu;
+ msdu = head_msdu->next;
+
+ while (msdu) {
+ ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
+
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+
+ prev_buf->next = NULL;
+
+ skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
+ u8 qos_pkt = 0;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
+
+ /* Base size */
+ wh = (struct ieee80211_hdr_3addr *)hdr_desc;
+
+ if (ieee80211_is_data_qos(wh->frame_control))
+ qos_pkt = 1;
+
+ msdu = head_msdu;
+
+ while (msdu) {
+ ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
+ if (qos_pkt) {
+ dest = skb_push(msdu, sizeof(__le16));
+ if (!dest)
+ goto err_merge_fail;
+ memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
+ }
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+ dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
+ if (!dest)
+ goto err_merge_fail;
+
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "mpdu_buf %pK mpdu_buf->len %u",
+ prev_buf, prev_buf->len);
+ } else {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "decap format %d is not supported!\n",
+ decap_format);
+ goto err_merge_fail;
+ }
+
+ return head_msdu;
+
+err_merge_fail:
+ if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "err_merge_fail mpdu_buf %pK", mpdu_buf);
+ /* Free the head buffer */
+ dev_kfree_skb_any(mpdu_buf);
+ }
+ return NULL;
+}
+
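+/* The layout written here must match struct ieee80211_radiotap_he:
+ * six little-endian u16 fields (data1..data6).
+ */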
+static void
+ath12k_dp_mon_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath12k_dp_mon_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[0];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[1];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[2];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
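+/* Fill the mac80211 RX status from the parsed PPDU info and push the
+ * radiotap HE / HE-MU payload onto the monitor skb where applicable.
+ */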
+static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ u8 *ptr = NULL;
+ u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
+
+ rxs->flag |= RX_FLAG_MACTIME_START;
+ rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+ rxs->nss = ppduinfo->nss + 1;
+
+ if (ampdu_id) {
+ rxs->flag |= RX_FLAG_AMPDU_DETAILS;
+ rxs->ampdu_reference = ampdu_id;
+ }
+
+ if (ppduinfo->he_mu_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+ ath12k_dp_mon_rx_update_radiotap_he_mu(ppduinfo, ptr);
+ } else if (ppduinfo->he_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+ ath12k_dp_mon_rx_update_radiotap_he(ppduinfo, ptr);
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->vht_flags) {
+ rxs->encoding = RX_ENC_VHT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->ht_flags) {
+ rxs->encoding = RX_ENC_HT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else {
+ rxs->encoding = RX_ENC_LEGACY;
+ sband = &ar->mac.sbands[rxs->band];
+ rxs->rate_idx = ath12k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+ ppduinfo->cck_flag);
+ }
+
+ rxs->mactime = ppduinfo->tsft;
+}
+
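+/* Deliver one monitor MSDU to mac80211. For HE frames that were not
+ * tagged by the radiotap update path, a minimal radiotap HE header
+ * with only MCS/BW/GI marked as known is synthesized.
+ */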
+static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_sta *pubsta = NULL;
+ struct ath12k_peer *peer;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol_tkip = rxcb->is_eapol;
+
+ if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer && peer->sta)
+ pubsta = peer->sta;
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ (is_mcbc) ? "mcast" : "ucast",
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ msdu->data, msdu->len);
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
+ /* TODO: trace rx packet */
+
+ /* The PN for multicast packets is not validated in HW,
+ * so skip the 802.3 rx path.
+ * Also, fast_rx expects the STA to be authorized, hence
+ * EAPOL packets are sent via the slow path.
+ */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol_tkip &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+}
+
+static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
+ struct sk_buff *head_msdu,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct napi_struct *napi)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *mon_skb, *skb_next, *header;
+ struct ieee80211_rx_status *rxs = &dp->rx_status;
+ bool fcs_err = false;
+
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu,
+ rxs, &fcs_err);
+ if (!mon_skb)
+ goto mon_deliver_fail;
+
+ header = mon_skb;
+ rxs->flag = 0;
+
+ if (fcs_err)
+ rxs->flag = RX_FLAG_FAILED_FCS_CRC;
+
+ do {
+ skb_next = mon_skb->next;
+ if (!skb_next)
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (mon_skb == header) {
+ header = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
+ ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+ mon_skb = skb_next;
+ } while (mon_skb);
+ rxs->flag = 0;
+
+ return 0;
+
+mon_deliver_fail:
+ mon_skb = head_msdu;
+ while (mon_skb) {
+ skb_next = mon_skb->next;
+ dev_kfree_skb_any(mon_skb);
+ mon_skb = skb_next;
+ }
+ return -EINVAL;
+}
+
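+/* Walk all TLVs in a monitor status buffer, feeding each one to
+ * ath12k_dp_mon_rx_parse_status_tlv() until the PPDU is complete or
+ * the end of the buffer is reached.
+ */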
+static enum hal_rx_mon_status
+ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon,
+ struct sk_buff *skb)
+{
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ struct hal_tlv_hdr *tlv;
+ enum hal_rx_mon_status hal_status;
+ u32 tlv_userid = 0;
+ u16 tlv_tag, tlv_len;
+ u8 *ptr = skb->data;
+
+ memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
+ tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
+ tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+ ptr += sizeof(*tlv);
+
+ /* The actual length of PPDU_END is the combined length of many PHY
+ * TLVs that follow. Skip the TLV header and the
+ * rx_rxpcu_classification_overview that follows the header to get to
+ * the next TLV.
+ */
+
+ if (tlv_tag == HAL_RX_PPDU_END)
+ tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+
+ hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
+ tlv_tag, ptr, tlv_userid);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+
+ if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ break;
+
+ } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+
+ return hal_status;
+}
+
+enum hal_rx_mon_status
+ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ int mac_id,
+ struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ struct dp_mon_mpdu *tmp;
+ struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ struct sk_buff *head_msdu, *tail_msdu;
+ enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+
+ ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
+
+ if (head_msdu && tail_msdu) {
+ ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
+ ppdu_info, napi);
+ }
+
+ kfree(mon_mpdu);
+ }
+ return hal_status;
+}
+
+int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_ring *buf_ring,
+ int req_entries)
+{
+ struct hal_mon_buf_ring *mon_buf;
+ struct sk_buff *skb;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ u32 cookie, buf_id;
+
+ srng = &ab->hal.srng_list[buf_ring->refill_buf_ring.ring_id];
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (req_entries > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE);
+ if (unlikely(!skb))
+ goto fail_alloc_skb;
+
+ if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ buf_id = idr_alloc(&buf_ring->bufs_idr, skb, 0,
+ buf_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(buf_id < 0))
+ goto fail_dma_unmap;
+
+ mon_buf = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (unlikely(!mon_buf))
+ goto fail_idr_remove;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ mon_buf->paddr_lo = cpu_to_le32(lower_32_bits(paddr));
+ mon_buf->paddr_hi = cpu_to_le32(upper_32_bits(paddr));
+ mon_buf->cookie = cpu_to_le64(cookie);
+
+ req_entries--;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ return 0;
+
+fail_idr_remove:
+ spin_lock_bh(&buf_ring->idr_lock);
+ idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ return -ENOMEM;
+}
+
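+/* Return the TX PPDU info of the requested type, reusing an existing
+ * entry if it has not been used yet, otherwise freeing it and
+ * allocating a fresh one.
+ */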
+static struct dp_mon_tx_ppdu_info *
+ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
+ unsigned int ppdu_id,
+ enum dp_mon_tx_ppdu_info_type type)
+{
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info;
+
+ if (type == DP_MON_TX_PROT_PPDU_INFO) {
+ tx_ppdu_info = pmon->tx_prot_ppdu_info;
+
+ if (tx_ppdu_info && !tx_ppdu_info->is_used)
+ return tx_ppdu_info;
+ kfree(tx_ppdu_info);
+ } else {
+ tx_ppdu_info = pmon->tx_data_ppdu_info;
+
+ if (tx_ppdu_info && !tx_ppdu_info->is_used)
+ return tx_ppdu_info;
+ kfree(tx_ppdu_info);
+ }
+
+ /* allocate new tx_ppdu_info */
+ tx_ppdu_info = kzalloc(sizeof(*tx_ppdu_info), GFP_ATOMIC);
+ if (!tx_ppdu_info)
+ return NULL;
+
+ tx_ppdu_info->is_used = 0;
+ tx_ppdu_info->ppdu_id = ppdu_id;
+
+ if (type == DP_MON_TX_PROT_PPDU_INFO)
+ pmon->tx_prot_ppdu_info = tx_ppdu_info;
+ else
+ pmon->tx_data_ppdu_info = tx_ppdu_info;
+
+ return tx_ppdu_info;
+}
+
+static struct dp_mon_tx_ppdu_info *
+ath12k_dp_mon_hal_tx_ppdu_info(struct ath12k_mon_data *pmon,
+ u16 tlv_tag)
+{
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP:
+ case HAL_TX_FLUSH:
+ case HAL_PCU_PPDU_SETUP_INIT:
+ case HAL_TX_PEER_ENTRY:
+ case HAL_TX_QUEUE_EXTENSION:
+ case HAL_TX_MPDU_START:
+ case HAL_TX_MSDU_START:
+ case HAL_TX_DATA:
+ case HAL_MON_BUF_ADDR:
+ case HAL_TX_MPDU_END:
+ case HAL_TX_LAST_MPDU_FETCHED:
+ case HAL_TX_LAST_MPDU_END:
+ case HAL_COEX_TX_REQ:
+ case HAL_TX_RAW_OR_NATIVE_FRAME_SETUP:
+ case HAL_SCH_CRITICAL_TLV_REFERENCE:
+ case HAL_TX_FES_SETUP_COMPLETE:
+ case HAL_TQM_MPDU_GLOBAL_START:
+ case HAL_SCHEDULER_END:
+ case HAL_TX_FES_STATUS_USER_PPDU:
+ break;
+ case HAL_TX_FES_STATUS_PROT: {
+ if (!pmon->tx_prot_ppdu_info->is_used)
+ pmon->tx_prot_ppdu_info->is_used = true;
+
+ return pmon->tx_prot_ppdu_info;
+ }
+ }
+
+ if (!pmon->tx_data_ppdu_info->is_used)
+ pmon->tx_data_ppdu_info->is_used = true;
+
+ return pmon->tx_data_ppdu_info;
+}
+
+#define MAX_MONITOR_HEADER 512
+#define MAX_DUMMY_FRM_BODY 128
+
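+/* Allocate an skb big enough for a generated (dummy) frame body plus
+ * the monitor header, with the data pointer 4-byte aligned.
+ */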
+struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MAX_MONITOR_HEADER + MAX_DUMMY_FRM_BODY);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MAX_MONITOR_HEADER);
+
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ skb_pull(skb, PTR_ALIGN(skb->data, 4) - skb->data);
+
+ return skb;
+}
+
+static int
+ath12k_dp_mon_tx_gen_cts2self_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_cts *cts;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ cts = (struct ieee80211_cts *)skb->data;
+ memset(cts, 0, MAX_DUMMY_FRM_BODY);
+ cts->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
+ cts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(cts->ra, tx_ppdu_info->rx_status.addr1, sizeof(cts->ra));
+
+ skb_put(skb, sizeof(*cts));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_rts_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_rts *rts;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ rts = (struct ieee80211_rts *)skb->data;
+ memset(rts, 0, MAX_DUMMY_FRM_BODY);
+ rts->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+ rts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(rts->ra, tx_ppdu_info->rx_status.addr1, sizeof(rts->ra));
+ memcpy(rts->ta, tx_ppdu_info->rx_status.addr2, sizeof(rts->ta));
+
+ skb_put(skb, sizeof(*rts));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_3addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_qos_hdr *qhdr;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ qhdr = (struct ieee80211_qos_hdr *)skb->data;
+ memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
+ qhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+ qhdr->duration_id = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+ memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
+ memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
+
+ skb_put(skb, sizeof(*qhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_4addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct dp_mon_qosframe_addr4 *qhdr;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ qhdr = (struct dp_mon_qosframe_addr4 *)skb->data;
+ memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
+ qhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+ qhdr->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+ memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
+ memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
+ memcpy(qhdr->addr4, tx_ppdu_info->rx_status.addr4, ETH_ALEN);
+
+ skb_put(skb, sizeof(*qhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_ack_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct dp_mon_frame_min_one *fbmhdr;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ fbmhdr = (struct dp_mon_frame_min_one *)skb->data;
+ memset(fbmhdr, 0, MAX_DUMMY_FRM_BODY);
+ fbmhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_CFACK);
+ memcpy(fbmhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+
+ /* set duration zero for ack frame */
+ fbmhdr->duration = 0;
+
+ skb_put(skb, sizeof(*fbmhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ int ret = 0;
+
+ switch (tx_ppdu_info->rx_status.medium_prot_type) {
+ case DP_MON_TX_MEDIUM_RTS_LEGACY:
+ case DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW:
+ case DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW:
+ ret = ath12k_dp_mon_tx_gen_rts_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_CTS2SELF:
+ ret = ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
+ ret = ath12k_dp_mon_tx_gen_3addr_qos_null_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
+ ret = ath12k_dp_mon_tx_gen_4addr_qos_null_frame(tx_ppdu_info);
+ break;
+ }
+
+ return ret;
+}
+
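+/* Parse one TX monitor status TLV into the PPDU info selected by
+ * ath12k_dp_mon_hal_tx_ppdu_info(). Frames the hardware does not
+ * capture directly (RTS, CTS-to-self, QoS-NULL, ACK) are generated
+ * from the status fields so the monitor interface still sees them.
+ */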
+static enum dp_mon_tx_tlv_status
+ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
+ struct ath12k_mon_data *pmon,
+ u16 tlv_tag, u8 *tlv_data, u32 userid)
+{
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info;
+ enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ u32 info[7];
+
+ tx_ppdu_info = ath12k_dp_mon_hal_tx_ppdu_info(pmon, tlv_tag);
+
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP: {
+ struct hal_tx_fes_setup *tx_fes_setup =
+ (struct hal_tx_fes_setup *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_setup->info0);
+ tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
+ tx_ppdu_info->num_users =
+ u32_get_bits(info[0], HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
+ status = DP_MON_TX_FES_SETUP;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_END: {
+ struct hal_tx_fes_status_end *tx_fes_status_end =
+ (struct hal_tx_fes_status_end *)tlv_data;
+ u32 tst_15_0, tst_31_16;
+
+ info[0] = __le32_to_cpu(tx_fes_status_end->info0);
+ tst_15_0 =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0);
+ tst_31_16 =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16);
+
+ tx_ppdu_info->rx_status.ppdu_ts = (tst_15_0 | (tst_31_16 << 16));
+ status = DP_MON_TX_FES_STATUS_END;
+ break;
+ }
+
+ case HAL_RX_RESPONSE_REQUIRED_INFO: {
+ struct hal_rx_resp_req_info *rx_resp_req_info =
+ (struct hal_rx_resp_req_info *)tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(rx_resp_req_info->info0);
+ info[1] = __le32_to_cpu(rx_resp_req_info->info1);
+ info[2] = __le32_to_cpu(rx_resp_req_info->info2);
+ info[3] = __le32_to_cpu(rx_resp_req_info->info3);
+ info[4] = __le32_to_cpu(rx_resp_req_info->info4);
+ info[5] = __le32_to_cpu(rx_resp_req_info->info5);
+
+ tx_ppdu_info->rx_status.ppdu_id =
+ u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_PPDU_ID);
+ tx_ppdu_info->rx_status.reception_type =
+ u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE);
+ tx_ppdu_info->rx_status.rx_duration =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_DURATION);
+ tx_ppdu_info->rx_status.mcs =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_RATE_MCS);
+ tx_ppdu_info->rx_status.sgi =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_SGI);
+ tx_ppdu_info->rx_status.is_stbc =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_STBC);
+ tx_ppdu_info->rx_status.ldpc =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_LDPC);
+ tx_ppdu_info->rx_status.is_ampdu =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_IS_AMPDU);
+ tx_ppdu_info->rx_status.num_users =
+ u32_get_bits(info[2], HAL_RX_RESP_REQ_INFO2_NUM_USER);
+
+ addr_32 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO3_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO4_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ addr_16 = u32_get_bits(info[4], HAL_RX_RESP_REQ_INFO4_ADDR1_15_0);
+ addr_32 = u32_get_bits(info[5], HAL_RX_RESP_REQ_INFO5_ADDR1_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
+
+ if (tx_ppdu_info->rx_status.reception_type == 0)
+ ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
+ status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
+ break;
+ }
+
+ case HAL_PCU_PPDU_SETUP_INIT: {
+ struct hal_tx_pcu_ppdu_setup_init *ppdu_setup =
+ (struct hal_tx_pcu_ppdu_setup_init *)tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(ppdu_setup->info0);
+ info[1] = __le32_to_cpu(ppdu_setup->info1);
+ info[2] = __le32_to_cpu(ppdu_setup->info2);
+ info[3] = __le32_to_cpu(ppdu_setup->info3);
+ info[4] = __le32_to_cpu(ppdu_setup->info4);
+ info[5] = __le32_to_cpu(ppdu_setup->info5);
+ info[6] = __le32_to_cpu(ppdu_setup->info6);
+
+ /* protection frame address 1 */
+ addr_32 = u32_get_bits(info[1],
+ HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[2],
+ HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ /* protection frame address 2 */
+ addr_16 = u32_get_bits(info[2],
+ HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0);
+ addr_32 = u32_get_bits(info[3],
+ HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
+
+ /* protection frame address 3 */
+ addr_32 = u32_get_bits(info[4],
+ HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0);
+ addr_16 = u32_get_bits(info[5],
+ HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr3);
+
+ /* protection frame address 4 */
+ addr_16 = u32_get_bits(info[5],
+ HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0);
+ addr_32 = u32_get_bits(info[6],
+ HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr4);
+
+ status = u32_get_bits(info[0],
+ HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE);
+ break;
+ }
+
+ case HAL_TX_QUEUE_EXTENSION: {
+ struct hal_tx_queue_exten *tx_q_exten =
+ (struct hal_tx_queue_exten *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_q_exten->info0);
+
+ tx_ppdu_info->rx_status.frame_control =
+ u32_get_bits(info[0],
+ HAL_TX_Q_EXT_INFO0_FRAME_CTRL);
+ tx_ppdu_info->rx_status.fc_valid = true;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_START: {
+ struct hal_tx_fes_status_start *tx_fes_start =
+ (struct hal_tx_fes_status_start *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_start->info0);
+
+ tx_ppdu_info->rx_status.medium_prot_type =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE);
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_PROT: {
+ struct hal_tx_fes_status_prot *tx_fes_status =
+ (struct hal_tx_fes_status_prot *)tlv_data;
+ u32 start_timestamp;
+ u32 end_timestamp;
+
+ info[0] = __le32_to_cpu(tx_fes_status->info0);
+ info[1] = __le32_to_cpu(tx_fes_status->info1);
+
+ start_timestamp =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0);
+ start_timestamp |=
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16) << 16;
+ end_timestamp =
+ u32_get_bits(info[1],
+ HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0);
+ end_timestamp |=
+ u32_get_bits(info[1],
+ HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16) << 16;
+ tx_ppdu_info->rx_status.rx_duration = end_timestamp - start_timestamp;
+
+ ath12k_dp_mon_tx_gen_prot_frame(tx_ppdu_info);
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_START_PPDU:
+ case HAL_TX_FES_STATUS_START_PROT: {
+ struct hal_tx_fes_status_start_prot *tx_fes_stat_start =
+ (struct hal_tx_fes_status_start_prot *)tlv_data;
+ u64 ppdu_ts;
+
+ info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
+ info[1] = __le32_to_cpu(tx_fes_stat_start->info1);
+
+ tx_ppdu_info->rx_status.ppdu_ts =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32);
+ ppdu_ts = (u32_get_bits(info[1],
+ HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32));
+ tx_ppdu_info->rx_status.ppdu_ts |= ppdu_ts << 32;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_USER_PPDU: {
+ struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu =
+ (struct hal_tx_fes_status_user_ppdu *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
+
+ tx_ppdu_info->rx_status.rx_duration =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION);
+ break;
+ }
+
+ case HAL_MACTX_HE_SIG_A_SU:
+ ath12k_dp_mon_parse_he_sig_su(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_A_MU_DL:
+ ath12k_dp_mon_parse_he_sig_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B1_MU:
+ ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B2_MU:
+ ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B2_OFDMA:
+ ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_VHT_SIG_A:
+ ath12k_dp_mon_parse_vht_sig_a(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_L_SIG_A:
+ ath12k_dp_mon_parse_l_sig_a(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_L_SIG_B:
+ ath12k_dp_mon_parse_l_sig_b(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_RX_FRAME_BITMAP_ACK: {
+ struct hal_rx_frame_bitmap_ack *fbm_ack =
+ (struct hal_rx_frame_bitmap_ack *)tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(fbm_ack->info0);
+ info[1] = __le32_to_cpu(fbm_ack->info1);
+
+ addr_32 = u32_get_bits(info[0],
+ HAL_RX_FBM_ACK_INFO0_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[1],
+ HAL_RX_FBM_ACK_INFO1_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ ath12k_dp_mon_tx_gen_ack_frame(tx_ppdu_info);
+ break;
+ }
+
+ case HAL_MACTX_PHY_DESC: {
+ struct hal_tx_phy_desc *tx_phy_desc =
+ (struct hal_tx_phy_desc *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_phy_desc->info0);
+ info[1] = __le32_to_cpu(tx_phy_desc->info1);
+ info[2] = __le32_to_cpu(tx_phy_desc->info2);
+ info[3] = __le32_to_cpu(tx_phy_desc->info3);
+
+ tx_ppdu_info->rx_status.beamformed =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_BF_TYPE);
+ tx_ppdu_info->rx_status.preamble_type =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B);
+ tx_ppdu_info->rx_status.mcs =
+ u32_get_bits(info[1],
+ HAL_TX_PHY_DESC_INFO1_MCS);
+ tx_ppdu_info->rx_status.ltf_size =
+ u32_get_bits(info[3],
+ HAL_TX_PHY_DESC_INFO3_LTF_SIZE);
+ tx_ppdu_info->rx_status.nss =
+ u32_get_bits(info[2],
+ HAL_TX_PHY_DESC_INFO2_NSS);
+ tx_ppdu_info->rx_status.chan_num =
+ u32_get_bits(info[3],
+ HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL);
+ tx_ppdu_info->rx_status.bw =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_BANDWIDTH);
+ break;
+ }
+
+ case HAL_TX_MPDU_START: {
+ struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
+
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return DP_MON_TX_STATUS_PPDU_NOT_DONE;
+
+ tx_ppdu_info->tx_mon_mpdu = mon_mpdu;
+ status = DP_MON_TX_MPDU_START;
+ break;
+ }
+
+ case HAL_MON_BUF_ADDR: {
+ struct dp_rxdma_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
+ struct dp_mon_packet_info *packet_info =
+ (struct dp_mon_packet_info *)tlv_data;
+ int buf_id = u32_get_bits(packet_info->cookie,
+ DP_RXDMA_BUF_COOKIE_BUF_ID);
+ struct sk_buff *msdu;
+ struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
+ struct ath12k_skb_rxcb *rxcb;
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+ buf_id);
+ return DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (!mon_mpdu->head)
+ mon_mpdu->head = msdu;
+ else if (mon_mpdu->tail)
+ mon_mpdu->tail->next = msdu;
+
+ mon_mpdu->tail = msdu;
+
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ status = DP_MON_TX_BUFFER_ADDR;
+ break;
+ }
+
+ case HAL_TX_MPDU_END:
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+ break;
+ }
+
+ return status;
+}
+
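+/* Peek at the first TLV of a TX status buffer to learn the number of
+ * users before the buffer is parsed in full.
+ */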
+enum dp_mon_tx_tlv_status
+ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
+ struct hal_tlv_hdr *tx_tlv,
+ u8 *num_users)
+{
+ u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ u32 info0;
+
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP: {
+ struct hal_tx_fes_setup *tx_fes_setup =
+ (struct hal_tx_fes_setup *)tx_tlv;
+
+ info0 = __le32_to_cpu(tx_fes_setup->info0);
+
+ *num_users = u32_get_bits(info0, HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
+ tlv_status = DP_MON_TX_FES_SETUP;
+ break;
+ }
+
+ case HAL_RX_RESPONSE_REQUIRED_INFO: {
+ /* TODO: need to update *num_users */
+ tlv_status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
+ break;
+ }
+ }
+
+ return tlv_status;
+}
+
+static void
+ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi,
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct dp_mon_mpdu *tmp, *mon_mpdu;
+ struct sk_buff *head_msdu;
+
+ list_for_each_entry_safe(mon_mpdu, tmp,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+ head_msdu = mon_mpdu->head;
+
+ if (head_msdu)
+ ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
+ &tx_ppdu_info->rx_status, napi);
+
+ kfree(mon_mpdu);
+ }
+}
+
+enum hal_rx_mon_status
+ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ int mac_id,
+ struct sk_buff *skb,
+ struct napi_struct *napi,
+ u32 ppdu_id)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info, *tx_data_ppdu_info;
+ struct hal_tlv_hdr *tlv;
+ u8 *ptr = skb->data;
+ u16 tlv_tag;
+ u16 tlv_len;
+ u32 tlv_userid = 0;
+ u8 num_user;
+ u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+
+ tx_prot_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
+ DP_MON_TX_PROT_PPDU_INFO);
+ if (!tx_prot_ppdu_info)
+ return -ENOMEM;
+
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
+
+ tlv_status = ath12k_dp_mon_tx_status_get_num_user(tlv_tag, tlv, &num_user);
+ if (tlv_status == DP_MON_TX_STATUS_PPDU_NOT_DONE || !num_user)
+ return -EINVAL;
+
+ tx_data_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
+ DP_MON_TX_DATA_PPDU_INFO);
+ if (!tx_data_ppdu_info)
+ return -ENOMEM;
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
+ tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
+ tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+
+ tlv_status = ath12k_dp_mon_tx_parse_status_tlv(ab, pmon,
+ tlv_tag, ptr,
+ tlv_userid);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+ if ((ptr - skb->data) >= DP_TX_MONITOR_BUF_SIZE)
+ break;
+ } while (tlv_status != DP_MON_TX_FES_STATUS_END);
+
+ ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info);
+ ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info);
+
+ return tlv_status;
+}
+
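+/* Reap completed entries from a monitor destination ring, queue up
+ * the status skbs belonging to one PPDU and parse them once the
+ * end-of-PPDU marker is seen. Returns the number of buffers reaped.
+ */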
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
+ enum dp_monitor_mode monitor_mode,
+ struct napi_struct *napi)
+{
+ struct hal_mon_dest_desc *mon_dst_desc;
+ struct ath12k_pdev_dp *pdev_dp = &ar->dp;
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb;
+ struct dp_srng *mon_dst_ring;
+ struct hal_srng *srng;
+ struct dp_rxdma_ring *buf_ring;
+ u64 cookie;
+ u32 ppdu_id;
+ int num_buffs_reaped = 0, srng_id, buf_id;
+ u8 dest_idx = 0, i;
+ bool end_of_ppdu;
+ struct hal_rx_mon_ppdu_info *ppdu_info;
+ struct ath12k_peer *peer = NULL;
+
+ ppdu_info = &pmon->mon_ppdu_info;
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
+ mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
+ buf_ring = &dp->rxdma_mon_buf_ring;
+ } else {
+ mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
+ buf_ring = &dp->tx_mon_buf_ring;
+ }
+
+ srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (likely(*budget)) {
+ *budget -= 1;
+ mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
+ if (unlikely(!mon_dst_desc))
+ break;
+
+ cookie = le32_to_cpu(mon_dst_desc->cookie);
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ skb = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!skb)) {
+ ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+ buf_id);
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ pmon->dest_skb_q[dest_idx] = skb;
+ dest_idx++;
+ ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
+ end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
+ HAL_MON_DEST_INFO0_END_OF_PPDU);
+ if (!end_of_ppdu)
+ continue;
+
+ for (i = 0; i < dest_idx; i++) {
+ skb = pmon->dest_skb_q[i];
+
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
+ ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
+ skb, napi);
+ else
+ ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
+ skb, napi, ppdu_id);
+
+ peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ dev_kfree_skb_any(skb);
+ pmon->dest_skb_q[i] = NULL;
+ }
+
+ dest_idx = 0;
+move_next:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
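+/* Index into the flattened per-rate table. For HT/VHT the layout is
+ * [nss][mcs][bw][gi] with 10 MCS entries and 4 BW x 2 GI slots per
+ * MCS (stride 8 per MCS, 80 per NSS); for HE there are 12 MCS
+ * entries with 4 BW x 3 GI slots (stride 12 per MCS, 144 per NSS).
+ */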
+static void
+ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *user_stats,
+ u32 num_msdu)
+{
+ u32 rate_idx = 0;
+ u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
+ u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
+ u32 bw_idx = ppdu_info->bw;
+ u32 gi_idx = ppdu_info->gi;
+
+ if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) ||
+ (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) {
+ return;
+ }
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) {
+ rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx;
+ rate_idx += bw_idx * 2 + gi_idx;
+ } else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) {
+ gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
+ rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx;
+ rate_idx += bw_idx * 3 + gi_idx;
+ } else {
+ return;
+ }
+
+ rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu;
+ if (user_stats)
+ rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count;
+ else
+ rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len;
+}
+
+static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
+ struct ath12k_sta *arsta,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ u32 num_msdu;
+
+ if (!rx_stats)
+ return;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+
+ num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
+ ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
+ ppdu_info->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
+ rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
+ ppdu_info->nss = 1;
+ ppdu_info->mcs = HAL_RX_MAX_MCS;
+ ppdu_info->tid = IEEE80211_NUM_TIDS;
+ }
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[ppdu_info->tid] += num_msdu;
+
+ if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+ rx_stats->dcm_count += ppdu_info->dcm;
+
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+
+ if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) {
+ rx_stats->pkt_stats.nss_count[ppdu_info->nss - 1] += num_msdu;
+ rx_stats->byte_stats.nss_count[ppdu_info->nss - 1] += ppdu_info->mpdu_len;
+ }
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N &&
+ ppdu_info->mcs <= HAL_RX_MAX_MCS_HT) {
+ rx_stats->pkt_stats.ht_mcs_count[ppdu_info->mcs] += num_msdu;
+ rx_stats->byte_stats.ht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+ /* To fit into rate table for HT packets */
+ ppdu_info->mcs = ppdu_info->mcs % 8;
+ }
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC &&
+ ppdu_info->mcs <= HAL_RX_MAX_MCS_VHT) {
+ rx_stats->pkt_stats.vht_mcs_count[ppdu_info->mcs] += num_msdu;
+ rx_stats->byte_stats.vht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+ }
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX &&
+ ppdu_info->mcs <= HAL_RX_MAX_MCS_HE) {
+ rx_stats->pkt_stats.he_mcs_count[ppdu_info->mcs] += num_msdu;
+ rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+ }
+
+ if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
+ ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
+ rx_stats->pkt_stats.legacy_count[ppdu_info->rate] += num_msdu;
+ rx_stats->byte_stats.legacy_count[ppdu_info->rate] += ppdu_info->mpdu_len;
+ }
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX) {
+ rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu;
+ rx_stats->byte_stats.gi_count[ppdu_info->gi] += ppdu_info->mpdu_len;
+ }
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX) {
+ rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu;
+ rx_stats->byte_stats.bw_count[ppdu_info->bw] += ppdu_info->mpdu_len;
+ }
+
+ ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
+ NULL, num_msdu);
+}
+
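+/* For MU receptions, extract per-user MCS/NSS/RU information from
+ * the version-0 UL OFDMA user info words.
+ */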
+void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_user_status *rx_user_status;
+ u32 num_users, i, mu_ul_user_v0_word0, mu_ul_user_v0_word1, ru_size;
+
+ if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
+ return;
+
+ num_users = ppdu_info->num_users;
+ if (num_users > HAL_MAX_UL_MU_USERS)
+ num_users = HAL_MAX_UL_MU_USERS;
+
+ for (i = 0; i < num_users; i++) {
+ rx_user_status = &ppdu_info->userstats[i];
+ mu_ul_user_v0_word0 =
+ rx_user_status->ul_ofdma_user_v0_word0;
+ mu_ul_user_v0_word1 =
+ rx_user_status->ul_ofdma_user_v0_word1;
+
+ if (u32_get_bits(mu_ul_user_v0_word0,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID) &&
+ !u32_get_bits(mu_ul_user_v0_word0,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER)) {
+ rx_user_status->mcs =
+ u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS);
+ rx_user_status->nss =
+ u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS) + 1;
+
+ rx_user_status->ofdma_info_valid = 1;
+ rx_user_status->ul_ofdma_ru_start_index =
+ u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START);
+
+ ru_size = u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE);
+ rx_user_status->ul_ofdma_ru_width = ru_size;
+ rx_user_status->ul_ofdma_ru_size = ru_size;
+ }
+ rx_user_status->ldpc = u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC);
+ }
+ ppdu_info->ldpc = 1;
+}
+
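+/* Update per-station RX statistics for one MU user; the station is
+ * looked up via the AST index reported in the user status.
+ */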
+static void
+ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ u32 uid)
+{
+ struct ath12k_sta *arsta = NULL;
+ struct ath12k_rx_peer_stats *rx_stats = NULL;
+ struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
+ struct ath12k_peer *peer;
+ u32 num_msdu;
+
+ if (user_stats->ast_index == 0 || user_stats->ast_index == 0xFFFF)
+ return;
+
+ peer = ath12k_peer_find_by_ast(ar->ab, user_stats->ast_index);
+
+ if (!peer) {
+ ath12k_warn(ar->ab, "peer ast idx %d can't be found\n",
+ user_stats->ast_index);
+ return;
+ }
+
+ arsta = (struct ath12k_sta *)peer->sta->drv_priv;
+ rx_stats = arsta->rx_stats;
+
+ if (!rx_stats)
+ return;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+
+ num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
+ user_stats->udp_msdu_count + user_stats->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += user_stats->tcp_msdu_count +
+ user_stats->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += user_stats->udp_msdu_count;
+ rx_stats->other_msdu_count += user_stats->other_msdu_count;
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (user_stats->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[user_stats->tid] += num_msdu;
+
+ if (user_stats->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[user_stats->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (user_stats->mpdu_cnt_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += user_stats->mpdu_cnt_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += user_stats->mpdu_cnt_fcs_err;
+ rx_stats->dcm_count += ppdu_info->dcm;
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)
+ rx_stats->ru_alloc_cnt[user_stats->ul_ofdma_ru_size] += num_msdu;
+
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+
+ if (user_stats->nss > 0 && user_stats->nss <= HAL_RX_MAX_NSS) {
+ rx_stats->pkt_stats.nss_count[user_stats->nss - 1] += num_msdu;
+ rx_stats->byte_stats.nss_count[user_stats->nss - 1] +=
+ user_stats->mpdu_ok_byte_count;
+ }
+
+ if (user_stats->preamble_type == HAL_RX_PREAMBLE_11AX &&
+ user_stats->mcs <= HAL_RX_MAX_MCS_HE) {
+ rx_stats->pkt_stats.he_mcs_count[user_stats->mcs] += num_msdu;
+ rx_stats->byte_stats.he_mcs_count[user_stats->mcs] +=
+ user_stats->mpdu_ok_byte_count;
+ }
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX) {
+ rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu;
+ rx_stats->byte_stats.gi_count[ppdu_info->gi] +=
+ user_stats->mpdu_ok_byte_count;
+ }
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX) {
+ rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu;
+ rx_stats->byte_stats.bw_count[ppdu_info->bw] +=
+ user_stats->mpdu_ok_byte_count;
+ }
+
+ ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
+ user_stats, num_msdu);
+}
+
+static void
+ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 num_users, i;
+
+ num_users = ppdu_info->num_users;
+ if (num_users > HAL_MAX_UL_MU_USERS)
+ num_users = HAL_MAX_UL_MU_USERS;
+
+ for (i = 0; i < num_users; i++)
+ ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
+}
+
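+/* Reap the monitor destination ring. Buffers belonging to one PPDU are
+ * queued until the END_OF_PPDU descriptor is seen; each queued buffer is
+ * then parsed for status TLVs and the decoded ppdu_info is used to update
+ * SU or MU peer statistics.
+ */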
+int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi, int *budget)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev_dp *pdev_dp = &ar->dp;
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_mon_dest_desc *mon_dst_desc;
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb;
+ struct dp_srng *mon_dst_ring;
+ struct hal_srng *srng;
+ struct dp_rxdma_ring *buf_ring;
+ struct ath12k_sta *arsta = NULL;
+ struct ath12k_peer *peer;
+ u64 cookie;
+ int num_buffs_reaped = 0, srng_id, buf_id;
+ u8 dest_idx = 0, i;
+ bool end_of_ppdu;
+ u32 hal_status;
+
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+ mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
+ buf_ring = &dp->rxdma_mon_buf_ring;
+
+ srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (likely(*budget)) {
+ *budget -= 1;
+ mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
+ if (unlikely(!mon_dst_desc))
+ break;
+ cookie = le32_to_cpu(mon_dst_desc->cookie);
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ skb = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!skb)) {
+ ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+ buf_id);
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ pmon->dest_skb_q[dest_idx] = skb;
+ dest_idx++;
+ end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
+ HAL_MON_DEST_INFO0_END_OF_PPDU);
+ if (!end_of_ppdu)
+ continue;
+
+ for (i = 0; i < dest_idx; i++) {
+ skb = pmon->dest_skb_q[i];
+ hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+ arsta = (struct ath12k_sta *)peer->sta->drv_priv;
+ ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
+ ppdu_info);
+ } else if ((ppdu_info->fc_valid) &&
+ (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
+ ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
+ ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+ }
+
+ dest_idx = 0;
+move_next:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ return num_buffs_reaped;
+}
+
+int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget,
+ enum dp_monitor_mode monitor_mode)
+{
+ struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
+ int num_buffs_reaped = 0;
+
+ if (!ar->monitor_started)
+ ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
+ else
+ num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
+ monitor_mode, napi);
+
+ return num_buffs_reaped;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
new file mode 100644
index 000000000000..c18c385798a1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_DP_MON_H
+#define ATH12K_DP_MON_H
+
+#include "core.h"
+
+enum dp_monitor_mode {
+ ATH12K_DP_TX_MONITOR_MODE,
+ ATH12K_DP_RX_MONITOR_MODE
+};
+
+enum dp_mon_tx_ppdu_info_type {
+ DP_MON_TX_PROT_PPDU_INFO,
+ DP_MON_TX_DATA_PPDU_INFO
+};
+
+enum dp_mon_tx_tlv_status {
+ DP_MON_TX_FES_SETUP,
+ DP_MON_TX_FES_STATUS_END,
+ DP_MON_RX_RESPONSE_REQUIRED_INFO,
+ DP_MON_RESPONSE_END_STATUS_INFO,
+ DP_MON_TX_MPDU_START,
+ DP_MON_TX_MSDU_START,
+ DP_MON_TX_BUFFER_ADDR,
+ DP_MON_TX_DATA,
+ DP_MON_TX_STATUS_PPDU_NOT_DONE,
+};
+
+enum dp_mon_tx_medium_protection_type {
+ DP_MON_TX_MEDIUM_NO_PROTECTION,
+ DP_MON_TX_MEDIUM_RTS_LEGACY,
+ DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW,
+ DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW,
+ DP_MON_TX_MEDIUM_CTS2SELF,
+ DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR,
+ DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR
+};
+
+struct dp_mon_qosframe_addr4 {
+ __le16 frame_control;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed;
+
+struct dp_mon_frame_min_one {
+ __le16 frame_control;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+} __packed;
+
+struct dp_mon_packet_info {
+ u64 cookie;
+ u16 dma_length;
+ bool msdu_continuation;
+ bool truncated;
+};
+
+struct dp_mon_tx_ppdu_info {
+ u32 ppdu_id;
+ u8 num_users;
+ bool is_used;
+ struct hal_rx_mon_ppdu_info rx_status;
+ struct list_head dp_tx_mon_mpdu_list;
+ struct dp_mon_mpdu *tx_mon_mpdu;
+};
+
+enum hal_rx_mon_status
+ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ int mac_id, struct sk_buff *skb,
+ struct napi_struct *napi);
+int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_ring *buf_ring,
+ int req_entries);
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id,
+ int *budget, enum dp_monitor_mode monitor_mode,
+ struct napi_struct *napi);
+int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget,
+ enum dp_monitor_mode monitor_mode);
+struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void);
+enum dp_mon_tx_tlv_status
+ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
+ struct hal_tlv_hdr *tx_tlv,
+ u8 *num_users);
+enum hal_rx_mon_status
+ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ int mac_id,
+ struct sk_buff *skb,
+ struct napi_struct *napi,
+ u32 ppdu_id);
+void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info);
+int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi, int *budget);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
new file mode 100644
index 000000000000..83a43ad48c51
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -0,0 +1,4234 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/ieee80211.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <crypto/hash.h>
+#include "core.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hw.h"
+#include "dp_rx.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+#include "dp_mon.h"
+
+#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+
+static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
+}
+
+u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
+}
+
+static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
+}
+
+static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
+}
+
+static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
+}
+
+static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ return ieee80211_has_morefrags(hdr->frame_control);
+}
+
+static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+}
+
+static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
+}
+
+static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
+}
+
+static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
+}
+
+static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
+}
+
+static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
+}
+
+u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
+}
+
+static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
+}
+
+static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
+}
+
+static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
+}
+
+static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
+}
+
+static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
+}
+
+static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
+}
+
+static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
+}
+
+static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
+}
+
+static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
+}
+
+u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
+}
+
+static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
+}
+
+static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
+}
+
+static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
+ struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
+}
+
+static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
+ struct hal_rx_desc *desc,
+ u16 len)
+{
+ ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
+}
+
+static bool ath12k_dp_rx_h_is_mcbc(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_is_mcbc(desc);
+}
+
+static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
+}
+
+static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
+}
+
+static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
+ struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
+}
+
+static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
+ struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+}
+
+static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
+}
+
+static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
+{
+ int i, reaped = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
+
+ do {
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
+ DP_MON_SERVICE_BUDGET,
+ ATH12K_DP_RX_MONITOR_MODE);
+
+ /* nothing more to reap */
+ if (reaped < DP_MON_SERVICE_BUDGET)
+ return 0;
+
+ } while (time_before(jiffies, timeout));
+
+ ath12k_warn(ab, "dp mon ring purge timeout");
+
+ return -ETIMEDOUT;
+}
+
+/* Returns number of Rx buffers replenished */
+int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ bool hw_cc)
+{
+ struct ath12k_buffer_addr *desc;
+ struct hal_srng *srng;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_rx_desc_info *rx_desc;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
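+ /* The cookie written to the ring entry either references a
+ * HW cookie-converted rx descriptor (hw_cc) or encodes a
+ * software IDR id together with the pdev id.
+ */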
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ if (hw_cc) {
+ spin_lock_bh(&dp->rx_desc_lock);
+
+ /* Get desc from free list and store in used list
+ * for cleanup purposes
+ *
+ * TODO: pass the removed descs rather than
+ * add/read to optimize
+ */
+ rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+ struct ath12k_rx_desc_info,
+ list);
+ if (!rx_desc) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ goto fail_dma_unmap;
+ }
+
+ rx_desc->skb = skb;
+ cookie = rx_desc->cookie;
+ list_del(&rx_desc->list);
+ list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
+
+ spin_unlock_bh(&dp->rx_desc_lock);
+ } else {
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+ cookie = u32_encode_bits(mac_id,
+ DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+ u32_encode_bits(buf_id,
+ DP_RXDMA_BUF_COOKIE_BUF_ID);
+ }
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_buf_unassign;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ num_remain--;
+
+ ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_buf_unassign:
+ if (hw_cc) {
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_del(&rx_desc->list);
+ list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
+ rx_desc->skb = NULL;
+ spin_unlock_bh(&dp->rx_desc_lock);
+ } else {
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ }
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct sk_buff *skb;
+ int buf_id;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ /* TODO: Understand where internal driver does this dma_unmap
+ * of rxdma_buffer.
+ */
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ return 0;
+}
+
+static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+
+ rx_ring = &dp->tx_mon_buf_ring;
+ ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+
+ return 0;
+}
+
+static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
+ struct dp_rxdma_ring *rx_ring,
+ u32 ringtype)
+{
+ int num_entries;
+
+ num_entries = rx_ring->refill_buf_ring.size /
+ ath12k_hal_srng_get_entrysize(ab, ringtype);
+
+ rx_ring->bufs_max = num_entries;
+ if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
+ ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
+ else
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,
+ ab->hw_params->hal_params->rx_buf_rbm,
+ ringtype == HAL_RXDMA_BUF);
+ return 0;
+}
+
+static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int ret;
+
+ ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
+ HAL_RXDMA_BUF);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_RXDMA_BUF\n");
+ return ret;
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
+ HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ rx_ring = &dp->tx_mon_buf_ring;
+ ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
+ HAL_TX_MONITOR_BUF);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_TX_MONITOR_BUF\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct ath12k_base *ab = ar->ab;
+ int i;
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
+ ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
+ }
+}
+
+void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
+}
+
+int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int ret;
+ int i;
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
+ HAL_REO_DST, i, 0,
+ DP_REO_DST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup reo_dst_ring\n");
+ goto err_reo_cleanup;
+ }
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
+
+ return ret;
+}
+
+static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct ath12k_base *ab = ar->ab;
+ int i;
+ int ret;
+ u32 mac_id = dp->mac_id;
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ret = ath12k_dp_srng_setup(ar->ab,
+ &dp->rxdma_mon_dst_ring[i],
+ HAL_RXDMA_MONITOR_DST,
+ 0, mac_id + i,
+ DP_RXDMA_MONITOR_DST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DST\n");
+ return ret;
+ }
+
+ ret = ath12k_dp_srng_setup(ar->ab,
+ &dp->tx_mon_dst_ring[i],
+ HAL_TX_MONITOR_DST,
+ 0, mac_id + i,
+ DP_TX_MONITOR_DEST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to setup HAL_TX_MONITOR_DST\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
+ struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ dma_unmap_single(ab->dev, cmd->data.paddr,
+ cmd->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.vaddr);
+ kfree(cmd);
+ }
+
+ list_for_each_entry_safe(cmd_cache, tmp_cache,
+ &dp->reo_cmd_cache_flush_list, list) {
+ list_del(&cmd_cache->list);
+ dp->reo_cmd_cache_flush_count--;
+ dma_unmap_single(ab->dev, cmd_cache->data.paddr,
+ cmd_cache->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.vaddr);
+ kfree(cmd_cache);
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath12k_dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ rx_tid->tid, status);
+
+ dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+}
+
+static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status))
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp_rx_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+ /* cmd_num should start from 1; on failure the error code is returned */
+ if (cmd_num < 0)
+ return cmd_num;
+
+ /* reo cmd ring descriptors have cmd_num starting from 1 */
+ if (cmd_num == 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+ /* Can this be optimized so that we keep the pending command list only
+ * for tid delete command to free up the resource on the command status
+ * indication?
+ */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
+
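+/* Flush an rx tid's REO queue descriptor out of the HW cache one
+ * qdesc-sized chunk at a time; the final flush command carries a status
+ * callback (ath12k_dp_reo_cmd_free) which unmaps and frees the host memory.
+ */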
+static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid *rx_tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {0};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd,
+ NULL);
+ if (ret)
+ ath12k_warn(ab,
+ "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath12k_dp_reo_cmd_free);
+ if (ret) {
+ ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+}
+
+static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_dp_rx_tid *rx_tid = ctx;
+ struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
+ rx_tid->tid, status);
+ return;
+ }
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ goto free_desc;
+
+ elem->ts = jiffies;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ dp->reo_cmd_cache_flush_count++;
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
+ list) {
+ if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
+ time_after(jiffies, elem->ts +
+ msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
+ list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
+
+ /* Unlock the reo_cmd_lock before calling ath12k_dp_reo_cmd_send()
+ * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
+ * is used in only two contexts: this function, called from napi,
+ * and ath12k_dp_free during core destroy. Before dp_free, irqs
+ * are disabled and synchronized, so there cannot be any race
+ * against an add or delete to this list; hence unlock-lock is
+ * safe here.
+ */
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ ath12k_dp_reo_cache_flush(ab, &elem->data);
+ kfree(elem);
+ spin_lock_bh(&dp->reo_cmd_lock);
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return;
+free_desc:
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+}
+
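+/* The REO queue LUT is a flat array of queue references indexed by
+ * peer_id * (IEEE80211_NUM_TIDS + 1) + tid; each entry holds the DMA
+ * address of that tid's hardware queue descriptor.
+ */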
+static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
+ dma_addr_t paddr)
+{
+ struct ath12k_reo_queue_ref *qref;
+ struct ath12k_dp *dp = &ab->dp;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ /* TODO: based on ML peer or not, select the LUT. below assumes non
+ * ML peer
+ */
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+ qref->info0 = u32_encode_bits(lower_32_bits(paddr),
+ BUFFER_ADDR_INFO0_ADDR);
+ qref->info1 = u32_encode_bits(upper_32_bits(paddr),
+ BUFFER_ADDR_INFO1_ADDR) |
+ u32_encode_bits(tid, DP_REO_QREF_NUM);
+}
+
+static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
+{
+ struct ath12k_reo_queue_ref *qref;
+ struct ath12k_dp *dp = &ab->dp;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ /* TODO: based on ML peer or not, select the LUT. below assumes non
+ * ML peer
+ */
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+ qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
+ qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
+ u32_encode_bits(tid, DP_REO_QREF_NUM);
+}
+
+void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
+ struct ath12k_peer *peer, u8 tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {0};
+ struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+ int ret;
+
+ if (!rx_tid->active)
+ return;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
+ ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath12k_dp_rx_tid_del_func);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
+ dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+
+ rx_tid->active = false;
+}
+
+/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
+ * to struct hal_wbm_release_ring; I couldn't figure out the logic behind
+ * that.
+ */
+static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
+ struct hal_reo_dest_ring *ring,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
+ struct hal_wbm_release_ring *desc;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
+
+exit:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
+ bool rel_link_desc)
+{
+ struct ath12k_base *ab = rx_tid->ab;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rx_tid->dst_ring_desc) {
+ if (rel_link_desc)
+ ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ kfree(rx_tid->dst_ring_desc);
+ rx_tid->dst_ring_desc = NULL;
+ }
+
+ rx_tid->cur_sn = 0;
+ rx_tid->last_frag_no = 0;
+ rx_tid->rx_frag_bitmap = 0;
+ __skb_queue_purge(&rx_tid->rx_frags);
+}
+
+void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
+{
+ struct ath12k_dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ ath12k_dp_rx_peer_tid_delete(ar, peer, i);
+ ath12k_dp_rx_frags_cleanup(rx_tid, true);
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+ }
+}
+
+static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
+ struct ath12k_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ struct ath12k_hal_reo_cmd cmd = {0};
+ int ret;
+
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
+ }
+
+ ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ NULL);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ return ret;
+ }
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
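+/* Allocate and DMA-map a HAL_LINK_DESC_ALIGN-aligned REO queue descriptor
+ * for the peer/tid and publish it to the hardware, either through the REO
+ * qref LUT (when reoq_lut_support is set) or via the WMI reorder queue
+ * setup command.
+ */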
+int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_rx_reo_queue *addr_aligned;
+ struct ath12k_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ u32 hw_desc_sz;
+ void *vaddr;
+ dma_addr_t paddr;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
+ return -ENOENT;
+ }
+
+ if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "reo qref table is not setup\n");
+ return -EINVAL;
+ }
+
+ if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
+ ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
+ peer->peer_id, tid);
+ spin_unlock_bh(&ab->base_lock);
+ return -EINVAL;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ /* Update the tid queue if it is already setup */
+ if (rx_tid->active) {
+ paddr = rx_tid->paddr;
+ ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
+ ba_win_sz, ssn, true);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
+ return ret;
+ }
+
+ return ret;
+ }
+
+ rx_tid->tid = tid;
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+ if (!vaddr) {
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOMEM;
+ }
+
+ addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
+
+ paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret) {
+ spin_unlock_bh(&ab->base_lock);
+ goto err_mem_free;
+ }
+
+ rx_tid->vaddr = vaddr;
+ rx_tid->paddr = paddr;
+ rx_tid->size = hw_desc_sz;
+ rx_tid->active = true;
+
+ if (ab->hw_params->reoq_lut_support) {
+ /* Update the REO queue LUT at the corresponding peer id
+ * and tid with qaddr.
+ */
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ spin_unlock_bh(&ab->base_lock);
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
+ paddr, tid, 1, ba_win_sz);
+ }
+
+ return ret;
+
+err_mem_free:
+ kfree(vaddr);
+
+ return ret;
+}
+
+int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ int ret;
+
+ ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
+ params->tid, params->buf_size,
+ params->ssn, arsta->pn_type);
+ if (ret)
+ ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
+
+ return ret;
+}
+
+int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ bool active;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
+ return -ENOENT;
+ }
+
+ active = peer->rx_tid[params->tid].active;
+
+ if (!active) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
+ params->tid, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_hal_reo_cmd cmd = {0};
+ struct ath12k_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ u8 tid;
+ int ret = 0;
+
+ /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
+ * We use mac80211 PN/TSC replay check functionality for bcast/mcast
+ * for now.
+ */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return 0;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_PN |
+ HAL_REO_CMD_UPD0_PN_SIZE |
+ HAL_REO_CMD_UPD0_PN_VALID |
+ HAL_REO_CMD_UPD0_PN_CHECK |
+ HAL_REO_CMD_UPD0_SVLD;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (key_cmd == SET_KEY) {
+ cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
+ cmd.pn_size = 48;
+ }
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
+ peer_addr);
+ return -ENOENT;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ continue;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, NULL);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
+ tid, peer_addr, ret);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
+}
+
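+/* Return the user_stats slot for a peer id: an existing entry with a
+ * matching peer id is reused, otherwise the first unused slot is taken.
+ */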
+static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
+{
+ int i;
+
+ for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
+ if (ppdu_stats->user_stats[i].is_valid_peer_id) {
+ if (peer_id == ppdu_stats->user_stats[i].peer_id)
+ return i;
+ } else {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len, const void *ptr,
+ void *data)
+{
+ const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
+ const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
+ const struct htt_ppdu_stats_user_rate *user_rate;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct htt_ppdu_user_stats *user_stats;
+ int cur_user;
+ u16 peer_id;
+
+ ppdu_info = data;
+
+ switch (tag) {
+ case HTT_PPDU_STATS_TAG_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_common)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ memcpy(&ppdu_info->ppdu_stats.common, ptr,
+ sizeof(struct htt_ppdu_stats_common));
+ break;
+ case HTT_PPDU_STATS_TAG_USR_RATE:
+ if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ user_rate = ptr;
+ peer_id = le16_to_cpu(user_rate->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->rate, ptr,
+ sizeof(struct htt_ppdu_stats_user_rate));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ cmplt_cmn = ptr;
+ peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->cmpltn_cmn, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
+ if (len <
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ba_status = ptr;
+ peer_id = le16_to_cpu(ba_status->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->ack_ba, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ }
+ return 0;
+}
+
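+/* Walk a buffer of HTT TLVs, each a 32-bit header carrying tag and length
+ * fields followed by the payload, and hand every payload to the iterator
+ * callback. Truncated headers or oversized lengths abort the walk.
+ */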
+static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const struct htt_tlv *tlv;
+ const void *begin = ptr;
+ u16 tlv_tag, tlv_len;
+ int ret = -EINVAL;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+ tlv = (struct htt_tlv *)ptr;
+ tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
+ tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret == -ENOMEM)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+ return 0;
+}
+
+static void
+ath12k_update_per_peer_tx_stats(struct ath12k *ar,
+ struct htt_ppdu_stats *ppdu_stats, u8 user)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *arsta;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
+ struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
+ struct htt_ppdu_stats_common *common = &ppdu_stats->common;
+ int ret;
+ u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
+ u32 v, succ_bytes = 0;
+ u16 tones, rate = 0, succ_pkts = 0;
+ u32 tx_duration = 0;
+ u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
+ bool is_ampdu = false;
+
+ if (!usr_stats)
+ return;
+
+ if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+ return;
+
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+ is_ampdu =
+ HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+
+ if (usr_stats->tlv_flags &
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+ succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
+ succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
+ HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
+ tid = le32_get_bits(usr_stats->ack_ba.info,
+ HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
+ }
+
+ if (common->fes_duration_us)
+ tx_duration = le32_to_cpu(common->fes_duration_us);
+
+ user_rate = &usr_stats->rate;
+ flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
+ bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
+ nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
+ mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
+
+ /* Note: if the host configured fixed rates, and in some other special
+ * cases, broadcast/management frames are sent at different rates.
+ * Should firmware rate control be skipped for these?
+ */
+
+ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
+ ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
+ mcs, nss);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+ ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
+ flags,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ return;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
+
+ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath12k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
+ switch (flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ arsta->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ arsta->txrate.mcs = mcs + 8 * (nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_HE:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_dcm = dcm;
+ arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ tones = le16_to_cpu(user_rate->ru_end) -
+ le16_to_cpu(user_rate->ru_start) + 1;
+ v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
+ arsta->txrate.he_ru_alloc = v;
+ break;
+ }
+
+ arsta->txrate.nss = nss;
+ arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ arsta->tx_duration += tx_duration;
+ memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
+
+ /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
+ * So skip peer stats update for mgmt packets.
+ */
+ if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
+ memset(peer_stats, 0, sizeof(*peer_stats));
+ peer_stats->succ_pkts = succ_pkts;
+ peer_stats->succ_bytes = succ_bytes;
+ peer_stats->is_ampdu = is_ampdu;
+ peer_stats->duration = tx_duration;
+ peer_stats->ba_fails =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
+ struct htt_ppdu_stats *ppdu_stats)
+{
+ u8 user;
+
+ for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
+ ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
+}
+
+static
+struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
+ u32 ppdu_id)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+
+ lockdep_assert_held(&ar->data_lock);
+ if (!list_empty(&ar->ppdu_stats_info)) {
+ list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
+ if (ppdu_info->ppdu_id == ppdu_id)
+ return ppdu_info;
+ }
+
+ if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
+ ppdu_info = list_first_entry(&ar->ppdu_stats_info,
+ typeof(*ppdu_info), list);
+ list_del(&ppdu_info->list);
+ ar->ppdu_stat_list_depth--;
+ ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
+ kfree(ppdu_info);
+ }
+ }
+
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
+ if (!ppdu_info)
+ return NULL;
+
+ list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
+ ar->ppdu_stat_list_depth++;
+
+ return ppdu_info;
+}
+
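+/* For delayed block ack, cache the peer's last data-PPDU rate TLV so that
+ * it can later be copied back into the user stats of a BAR PPDU (see
+ * ath12k_copy_to_bar()), which carries no valid rate info of its own.
+ */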
+static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
+ struct htt_ppdu_user_stats *usr_stats)
+{
+ peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
+ peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
+ peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
+ peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
+ peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
+ peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
+ peer->ppdu_stats_delayba.resp_rate_flags =
+ le32_to_cpu(usr_stats->rate.resp_rate_flags);
+
+ peer->delayba_flag = true;
+}
+
+static void ath12k_copy_to_bar(struct ath12k_peer *peer,
+ struct htt_ppdu_user_stats *usr_stats)
+{
+ usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
+ usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
+ usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
+ usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
+ usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
+ usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
+ usr_stats->rate.resp_rate_flags =
+ cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
+
+ peer->delayba_flag = false;
+}
+
+static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_ppdu_stats_msg *msg;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct ath12k_peer *peer = NULL;
+ struct htt_ppdu_user_stats *usr_stats = NULL;
+ u32 peer_id = 0;
+ struct ath12k *ar;
+ int ret, i;
+ u8 pdev_id;
+ u32 ppdu_id, len;
+
+ msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
+ len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
+ pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+ ppdu_id = le32_to_cpu(msg->ppdu_id);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
+ if (!ppdu_info) {
+ spin_unlock_bh(&ar->data_lock);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ppdu_info->ppdu_id = ppdu_id;
+ ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath12k_htt_tlv_ppdu_stats_parse,
+ (void *)ppdu_info);
+ if (ret) {
+ spin_unlock_bh(&ar->data_lock);
+ ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
+ goto exit;
+ }
+
+ /* back up data rate tlv for all peers */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
+ ppdu_info->delay_ba) {
+ for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
+ peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ continue;
+ }
+
+ usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
+ if (usr_stats->delay_ba)
+ ath12k_copy_to_delay_stats(peer, usr_stats);
+ spin_unlock_bh(&ab->base_lock);
+ }
+ }
+
+ /* restore all peers' data rate tlv to mu-bar tlv */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
+ for (i = 0; i < ppdu_info->bar_num_users; i++) {
+ peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ continue;
+ }
+
+ usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
+ if (peer->delayba_flag)
+ ath12k_copy_to_bar(peer, usr_stats);
+ spin_unlock_bh(&ab->base_lock);
+ }
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+exit:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_mlo_offset_msg *msg;
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar;
+ u8 pdev_id;
+
+ msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
+ pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
+ HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = ar->pdev;
+
+ pdev->timestamp.info = __le32_to_cpu(msg->info);
+ pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
+ pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
+ pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
+ pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
+ pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
+ pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
+ pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
+ enum htt_t2h_msg_type type;
+ u16 peer_id;
+ u8 vdev_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 peer_mac_h16;
+ u16 ast_hash = 0;
+ u16 hw_peer_id;
+
+ type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type: 0x%0x\n", type);
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF:
+ dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
+ HTT_T2H_VERSION_CONF_MAJOR);
+ dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
+ HTT_T2H_VERSION_CONF_MINOR);
+ complete(&dp->htt_tgt_version_received);
+ break;
+ /* TODO: remove unused peer map versions after testing */
+ case HTT_T2H_MSG_TYPE_PEER_MAP:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP2:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ast_hash = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
+ hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
+ ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ hw_peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP3:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
+ peer_id = le32_get_bits(resp->peer_unmap_ev.info,
+ HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
+ ath12k_peer_unmap_event(ab, peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+ ath12k_htt_pull_ppdu_stats(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ break;
+ case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
+ ath12k_htt_mlo_offset_event_handler(ab, skb);
+ break;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
+ type);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+}
+
+static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff *first, struct sk_buff *last,
+ u8 l3pad_bytes, int msdu_len)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
+ int buf_first_hdr_len, buf_first_len;
+ struct hal_rx_desc *ldesc;
+ int space_extra, rem_len, buf_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ /* As the msdu is spread across multiple rx buffers,
+ * find the offset to the start of the msdu for computing
+ * the length of the msdu in the first buffer.
+ */
+ buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
+ buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+ if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+ skb_put(first, buf_first_hdr_len + msdu_len);
+ skb_pull(first, buf_first_hdr_len);
+ return 0;
+ }
+
+ ldesc = (struct hal_rx_desc *)last->data;
+ rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
+ rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
+
+ /* The MSDU spans multiple buffers because the length of the MSDU
+ * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE, so assume the data
+ * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
+ */
+ skb_put(first, DP_RX_BUFFER_SIZE);
+ skb_pull(first, buf_first_hdr_len);
+
+ /* When an MSDU spans multiple buffers, the MSDU_END
+ * TLVs are valid only in the last buffer. Copy those TLVs.
+ */
+ ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
+
+ space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+ if (space_extra > 0 &&
+ (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+ /* Free up all buffers of the MSDU */
+ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ return -ENOMEM;
+ }
+
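+ /* Stitch the MSDU back together: strip the HAL RX descriptor from
+ * each continuation buffer and append its payload to the first skb
+ * until the remaining MSDU length is consumed.
+ */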
+ rem_len = msdu_len - buf_first_len;
+ while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (rxcb->is_continuation)
+ buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
+ else
+ buf_len = rem_len;
+
+ if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
+ WARN_ON_ONCE(1);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ skb_put(skb, buf_len + hal_rx_desc_sz);
+ skb_pull(skb, hal_rx_desc_sz);
+ skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+ buf_len);
+ dev_kfree_skb_any(skb);
+
+ rem_len -= buf_len;
+ if (!rxcb->is_continuation)
+ break;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first)
+{
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
+
+ if (!rxcb->is_continuation)
+ return first;
+
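+ /* The MSDU continues into further buffers; the last buffer is the
+ * first entry in the list without the continuation flag set.
+ */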
+ skb_queue_walk(msdu_list, skb) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation)
+ return skb;
+ }
+
+ return NULL;
+}
+
+static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ath12k_base *ab = ar->ab;
+ bool ip_csum_fail, l4_csum_fail;
+
+ ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
+ l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
+
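+ /* Report CHECKSUM_UNNECESSARY only when HW has verified both the IP
+ * and L4 checksums; otherwise let the stack verify them in software.
+ */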
+ msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
+static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return 0;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
+ return 0;
+}
+
+static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_IV_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ u8 *crypto_hdr;
+ u16 qos_ctl;
+
+ /* pull decapped header */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ skb_pull(msdu, hdr_len);
+
+ /* Rebuild qos header */
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+
+ /* Reset the order bit as the HT_Control header is stripped */
+ hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
+
+ qos_ctl = rxcb->tid;
+
+ if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
+ qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+
+ /* TODO: Add other QoS ctl fields when required */
+
+ /* copy decap header before overwriting for reuse below */
+ memcpy(decap_hdr, hdr, hdr_len);
+
+ /* Rebuild crypto header for mac80211 use */
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
+ ath12k_dp_rx_desc_get_crypto_header(ar->ab,
+ rxcb->rx_desc, crypto_hdr,
+ enctype);
+ }
+
+ memcpy(skb_push(msdu,
+ IEEE80211_QOS_CTL_LEN), &qos_ctl,
+ IEEE80211_QOS_CTL_LEN);
+ memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
+}
+
+static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+ if (!rxcb->is_first_msdu ||
+ !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (!decrypted)
+ return;
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
+ skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove(msdu->data + crypto_len, msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct ath12k_skb_rxcb *rxcb,
+ struct ieee80211_rx_status *status,
+ enum hal_encrypt_type enctype)
+{
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath12k_base *ab = ar->ab;
+ size_t hdr_len, crypto_len;
+ struct ieee80211_hdr *hdr;
+ u16 qos_ctl;
+ __le16 fc;
+ u8 *crypto_hdr;
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+ crypto_hdr = skb_push(msdu, crypto_len);
+ ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
+ }
+
+ fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
+ hdr_len = ieee80211_hdrlen(fc);
+ skb_push(msdu, hdr_len);
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr->frame_control = fc;
+
+ /* Get wifi header from rx_desc */
+ ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
+
+ if (rxcb->is_mcbc)
+ status->flag &= ~RX_FLAG_PN_VALIDATED;
+
+ /* Add QOS header */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos_ctl = rxcb->tid;
+ if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
+ qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+
+ /* TODO: Add other QoS ctl fields when required */
+ memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
+ &qos_ctl, IEEE80211_QOS_CTL_LEN);
+ }
+}
+
+static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
+
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ rfc.snap_type = eth->h_proto;
+ skb_pull(msdu, sizeof(*eth));
+ memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
+ sizeof(rfc));
+ ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath12k_base *ab = ar->ab;
+ u8 decap;
+ struct ethhdr *ehdr;
+
+ decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+
+ switch (decap) {
+ case DP_RX_DECAP_TYPE_NATIVE_WIFI:
+ ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_RAW:
+ ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
+ decrypted);
+ break;
+ case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ ehdr = (struct ethhdr *)msdu->data;
+
+ /* mac80211 allows fast path only for authorized STA */
+ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+ ATH12K_SKB_RXCB(msdu)->is_eapol = true;
+ ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
+ break;
+ }
+
+ /* PN for mcast packets will be validated in mac80211;
+ * remove eth header and add 802.11 header.
+ */
+ if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
+ ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_8023:
+ /* TODO: Handle undecap for these formats */
+ break;
+ }
+}
+
+struct ath12k_peer *
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath12k_peer *peer = NULL;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rxcb->peer_id)
+ peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
+
+ if (peer)
+ return peer;
+
+ if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
+ return NULL;
+
+ peer = ath12k_peer_find_by_addr(ab,
+ ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
+ rx_desc));
+ return peer;
+}
+
+static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ bool fill_crypto_hdr;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_skb_rxcb *rxcb;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ struct ieee80211_hdr *hdr;
+ struct ath12k_peer *peer;
+ u32 err_bitmap;
+
+ /* PN for multicast packets will be checked in mac80211 */
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ fill_crypto_hdr = ath12k_dp_rx_h_is_mcbc(ar->ab, rx_desc);
+ rxcb->is_mcbc = fill_crypto_hdr;
+
+ if (rxcb->is_mcbc)
+ rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer) {
+ if (rxcb->is_mcbc)
+ enctype = peer->sec_type_grp;
+ else
+ enctype = peer->sec_type;
+ } else {
+ enctype = HAL_ENCRYPT_TYPE_OPEN;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
+ is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact */
+ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypto_hdr)
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ rx_status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_PN_VALIDATED;
+ }
+
+ ath12k_dp_rx_h_csum_offload(ar, msdu);
+ ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+
+ if (!is_decrypted || fill_crypto_hdr)
+ return;
+
+ if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 bw;
+ u8 rate_mcs, nss;
+ u8 sgi;
+ bool is_cck;
+
+ pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
+ bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
+ rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
+ nss = ath12k_dp_rx_h_nss(ab, rx_desc);
+ sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->nss = nss;
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->nss = nss;
+ rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ }
+}
+
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ath12k_base *ab = ar->ab;
+ u8 channel_num;
+ u32 center_freq, meta_data;
+ struct ieee80211_channel *channel;
+
+ rx_status->freq = 0;
+ rx_status->rate_idx = 0;
+ rx_status->nss = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+ rx_status->enc_flags = 0;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+ channel_num = meta_data;
+ center_freq = meta_data >> 16;
+
+ if (center_freq >= 5935 && center_freq <= 7105) {
+ rx_status->band = NL80211_BAND_6GHZ;
+ } else if (channel_num >= 1 && channel_num <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (channel_num >= 36 && channel_num <= 173) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ spin_lock_bh(&ar->data_lock);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(*rx_desc));
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+
+ ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+}
+
+static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath12k_base *ab = ar->ab;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_radiotap_he *he;
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_sta *pubsta;
+ struct ath12k_peer *peer;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol = rxcb->is_eapol;
+
+ if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_dp_rx_h_find_peer(ab, msdu);
+
+ pubsta = peer ? peer->sta : NULL;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ is_mcbc ? "mcast" : "ucast",
+ ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ msdu->data, msdu->len);
+
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
+ /* TODO: trace rx packet */
+
+ /* PN for multicast packets is not validated in HW,
+ * so skip the 802.3 rx path.
+ * Also, fast_rx expects the STA to be authorized, hence
+ * EAPOL packets are sent via the slow path.
+ */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+}
+
+static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ struct ath12k_skb_rxcb *rxcb;
+ struct sk_buff *last_buf;
+ u8 l3_pad_bytes;
+ u16 msdu_len;
+ int ret;
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath12k_warn(ab,
+ "No valid Rx buffer to access MSDU_END tlv\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+ if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
+ ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
+ l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else if (!rxcb->is_continuation) {
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ ret = -EINVAL;
+ ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(*rx_desc));
+ goto free_out;
+ }
+ skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
+ } else {
+ ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to coalesce msdu rx buffer%d\n", ret);
+ goto free_out;
+ }
+ }
+
+ ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+ ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+
+ rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+
+ return 0;
+
+free_out:
+ return ret;
+}
+
+static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *msdu_list,
+ int ring_id)
+{
+ struct ieee80211_rx_status rx_status = {0};
+ struct ath12k_skb_rxcb *rxcb;
+ struct sk_buff *msdu;
+ struct ath12k *ar;
+ u8 mac_id;
+ int ret;
+
+ if (skb_queue_empty(msdu_list))
+ return;
+
+ rcu_read_lock();
+
+ while ((msdu = __skb_dequeue(msdu_list))) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ mac_id = rxcb->mac_id;
+ ar = ab->pdevs[mac_id].ar;
+ if (!rcu_dereference(ab->pdevs_active[mac_id])) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Unable to process msdu %d", ret);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+ }
+
+ rcu_read_unlock();
+}
+
+int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct hal_reo_dest_ring *desc;
+ int num_buffs_reaped = 0;
+ struct sk_buff_head msdu_list;
+ struct ath12k_skb_rxcb *rxcb;
+ int total_msdu_reaped = 0;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ bool done = false;
+ int mac_id;
+ u64 desc_va;
+
+ __skb_queue_head_init(&msdu_list);
+
+ srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+try_again:
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
+
+ cookie = le32_get_bits(desc->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ mac_id = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+
+ desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
+ le32_to_cpu(desc->buf_va_lo));
+ desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+
+ /* retry manual desc retrieval */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ if (!desc_info) {
+ ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+ continue;
+ }
+ }
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(ab, "Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped++;
+
+ push_reason = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_PUSH_REASON);
+ if (push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ dev_kfree_skb_any(msdu);
+ ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ continue;
+ }
+
+ rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ rxcb->mac_id = mac_id;
+ rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
+ RX_MPDU_DESC_META_DATA_PEER_ID);
+ rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+ RX_MPDU_DESC_INFO0_TID);
+
+ __skb_queue_tail(&msdu_list, msdu);
+
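+ /* Only a buffer without the continuation flag completes an MSDU, so
+ * account the budget in whole MSDUs; 'done' tracks whether the last
+ * reaped buffer finished one.
+ */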
+ if (!rxcb->is_continuation) {
+ total_msdu_reaped++;
+ done = true;
+ } else {
+ done = false;
+ }
+
+ if (total_msdu_reaped >= budget)
+ break;
+ }
+
+ /* HW might have updated the head pointer after we cached it.
+ * In this case, even though there are entries in the ring, we'll
+ * get a NULL rx_desc. Give the read another try with the updated
+ * cached head pointer so that we can reap the complete MPDU in the
+ * current rx processing round.
+ */
+ if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
+ ath12k_hal_srng_access_end(ab, srng);
+ goto try_again;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_msdu_reaped)
+ goto exit;
+
+ /* TODO: Move to implicit BM? */
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
+ ab->hw_params->hal_params->rx_buf_rbm, true);
+
+ ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
+ ring_id);
+
+exit:
+ return total_msdu_reaped;
+}
+
+static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
+{
+ struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
+
+ spin_lock_bh(&rx_tid->ab->base_lock);
+ if (rx_tid->last_frag_no &&
+ rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+ return;
+ }
+ ath12k_dp_rx_frags_cleanup(rx_tid, true);
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+}
+
+int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct crypto_shash *tfm;
+ struct ath12k_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ int i;
+
+ tfm = crypto_alloc_shash("michael_mic", 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+ rx_tid->ab = ab;
+ timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
+ skb_queue_head_init(&rx_tid->rx_frags);
+ }
+
+ peer->tfm_mmic = tfm;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+}
+
+static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
+ struct ieee80211_hdr *hdr, u8 *data,
+ size_t data_len, u8 *mic)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ u8 mic_hdr[16] = {0};
+ u8 tid = 0;
+ int ret;
+
+ if (!tfm)
+ return -EINVAL;
+
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, key, 8);
+ if (ret)
+ goto out;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out;
+
+ /* TKIP MIC header */
+ memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ mic_hdr[12] = tid;
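+ /* bytes 13-15 of the Michael pseudo-header are reserved and stay zero */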
+
+ ret = crypto_shash_update(desc, mic_hdr, 16);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(desc, data, data_len);
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(desc, mic);
+out:
+ shash_desc_zero(desc);
+ return ret;
+}
+
+static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
+ struct sk_buff *msdu)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
+ struct ieee80211_key_conf *key_conf;
+ struct ieee80211_hdr *hdr;
+ u8 mic[IEEE80211_CCMP_MIC_LEN];
+ int head_len, tail_len, ret;
+ size_t data_len;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u8 *key, *data;
+ u8 key_idx;
+
+ if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
+ return 0;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
+ tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ key_idx = peer->ucast_keyidx;
+ else
+ key_idx = peer->mcast_keyidx;
+
+ key_conf = peer->keys[key_idx];
+
+ data = msdu->data + head_len;
+ data_len = msdu->len - head_len - tail_len;
+ key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
+
+ ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
+ if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
+ goto mic_fail;
+
+ return 0;
+
+mic_fail:
+ (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
+ (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
+
+ rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
+ skb_pull(msdu, hal_rx_desc_sz);
+
+ ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
+ ieee80211_rx(ar->hw, msdu);
+ return -EINVAL;
+}
+
+static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype, u32 flags)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ if (!flags)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+
+ if (flags & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_mic_len(ar, enctype));
+
+ if (flags & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_icv_len(ar, enctype));
+
+ if (flags & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove(msdu->data + hal_rx_desc_sz + crypto_len,
+ msdu->data + hal_rx_desc_sz, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
+ struct ath12k_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ struct sk_buff **defrag_skb)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc;
+ struct sk_buff *skb, *first_frag, *last_frag;
+ struct ieee80211_hdr *hdr;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ int msdu_len = 0;
+ int extra_space;
+ u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ last_frag = skb_peek_tail(&rx_tid->rx_frags);
+
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ flags = 0;
+ rx_desc = (struct hal_rx_desc *)skb->data;
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+
+ enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN)
+ is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
+ rx_desc);
+
+ if (is_decrypted) {
+ if (skb != first_frag)
+ flags |= RX_FLAG_IV_STRIPPED;
+ if (skb != last_frag)
+ flags |= RX_FLAG_ICV_STRIPPED |
+ RX_FLAG_MIC_STRIPPED;
+ }
+
+ /* RX fragments are always raw packets */
+ if (skb != last_frag)
+ skb_trim(skb, skb->len - FCS_LEN);
+ ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
+
+ if (skb != first_frag)
+ skb_pull(skb, hal_rx_desc_sz +
+ ieee80211_hdrlen(hdr->frame_control));
+ msdu_len += skb->len;
+ }
+
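+ /* Grow the first fragment's tail if the concatenated payload will
+ * not fit; on failure the fragment queue is left intact for the
+ * caller to clean up.
+ */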
+ extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
+ if (extra_space > 0 &&
+ (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
+ return -ENOMEM;
+
+ __skb_unlink(first_frag, &rx_tid->rx_frags);
+ while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
+ skb_put_data(first_frag, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+
+ hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
+ ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
+
+ if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
+ first_frag = NULL;
+
+ *defrag_skb = first_frag;
+ return 0;
+}
+
+static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
+ struct ath12k_dp_rx_tid *rx_tid,
+ struct sk_buff *defrag_skb)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
+ struct hal_reo_entrance_ring *reo_ent_ring;
+ struct hal_reo_dest_ring *reo_dest_ring;
+ struct dp_link_desc_bank *link_desc_banks;
+ struct hal_rx_msdu_link *msdu_link;
+ struct hal_rx_msdu_details *msdu0;
+ struct hal_srng *srng;
+ dma_addr_t link_paddr, buf_paddr;
+ u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
+ u32 cookie, hal_rx_desc_sz, dest_ring_info0;
+ int ret;
+ struct ath12k_rx_desc_info *desc_info;
+ u8 dst_ind;
+
+ hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ link_desc_banks = dp->link_desc_banks;
+ reo_dest_ring = rx_tid->dst_ring_desc;
+
+ ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
+ &link_paddr, &cookie);
+ desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
+
+ msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
+ (link_paddr - link_desc_banks[desc_bank].paddr));
+ msdu0 = &msdu_link->msdu_link[0];
+ msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
+ dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
+
+ memset(msdu0, 0, sizeof(*msdu0));
+
+ msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
+ u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
+ u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
+ RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
+ msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
+ msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
+
+ /* change msdu len in hal rx desc */
+ ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
+
+ buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
+ defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, buf_paddr))
+ return -ENOMEM;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
+ struct ath12k_rx_desc_info,
+ list);
+ if (!desc_info) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ ath12k_warn(ab, "failed to find rx desc for reinject\n");
+ ret = -ENOMEM;
+ goto err_unmap_dma;
+ }
+
+ desc_info->skb = defrag_skb;
+
+ list_del(&desc_info->list);
+ list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
+
+ ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
+ desc_info->cookie,
+ HAL_RX_BUF_RBM_SW3_BM);
+
+ /* Fill mpdu details into reo entrance ring */
+ srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_ent_ring) {
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ ret = -ENOSPC;
+ goto err_free_desc;
+ }
+ memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
+
+ ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
+ cookie,
+ HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+
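+ /* Describe the reassembled MPDU: a single raw, unfragmented MSDU
+ * whose PN has already been validated by the host.
+ */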
+ mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
+ u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
+ u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
+ u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
+ u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
+
+ reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
+ reo_ent_ring->rx_mpdu_info.peer_meta_data =
+ reo_dest_ring->rx_mpdu_info.peer_meta_data;
+
+ reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
+ reo_ent_ring->info0 = le32_encode_bits(upper_32_bits(rx_tid->paddr),
+ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(dst_ind, HAL_REO_ENTR_RING_INFO0_DEST_IND);
+
+ reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
+ HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
+ dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ reo_ent_ring->info2 =
+ cpu_to_le32(u32_get_bits(dest_ring_info0,
+ HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+err_free_desc:
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_del(&desc_info->list);
+ list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
+ desc_info->skb = NULL;
+ spin_unlock_bh(&dp->rx_desc_lock);
+err_unmap_dma:
+ dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_FROM_DEVICE);
+ return ret;
+}
+
+static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
+ struct sk_buff *a, struct sk_buff *b)
+{
+ int frag1, frag2;
+
+ frag1 = ath12k_dp_rx_h_frag_no(ab, a);
+ frag2 = ath12k_dp_rx_h_frag_no(ab, b);
+
+ return frag1 - frag2;
+}
+
+static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
+ struct sk_buff_head *frag_list,
+ struct sk_buff *cur_frag)
+{
+ struct sk_buff *skb;
+ int cmp;
+
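+ /* Insert the fragment in ascending fragment-number order so that
+ * defragmentation can simply concatenate the list.
+ */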
+ skb_queue_walk(frag_list, skb) {
+ cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
+ if (cmp < 0)
+ continue;
+ __skb_queue_before(frag_list, skb, cur_frag);
+ return;
+ }
+ __skb_queue_tail(frag_list, cur_frag);
+}
+
+static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+ ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
+
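+ /* For CCMP/GCMP the 48-bit PN lives in IV bytes 0, 1 and 4-7;
+ * bytes 2-3 hold the reserved and key ID fields and are skipped.
+ */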
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+
+ return pn;
+}
+
+static bool
+ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
+{
+ struct ath12k_base *ab = ar->ab;
+ enum hal_encrypt_type encrypt_type;
+ struct sk_buff *first_frag, *skb;
+ struct hal_rx_desc *desc;
+ u64 last_pn;
+ u64 cur_pn;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ desc = (struct hal_rx_desc *)first_frag->data;
+
+ encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
+ if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
+ return true;
+
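+ /* Fragments of a CCMP/GCMP protected MPDU must carry consecutively
+ * incrementing PNs; any gap indicates a replayed or forged fragment,
+ * so abort reassembly.
+ */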
+ last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ if (skb == first_frag)
+ continue;
+
+ cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
+ if (cur_pn != last_pn + 1)
+ return false;
+ last_pn = cur_pn;
+ }
+ return true;
+}
+
+static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct hal_reo_dest_ring *ring_desc)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc;
+ struct ath12k_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ struct sk_buff *defrag_skb = NULL;
+ u32 peer_id;
+ u16 seqno, frag_no;
+ u8 tid;
+ int ret = 0;
+ bool more_frags;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
+ tid = ath12k_dp_rx_h_tid(ab, rx_desc);
+ seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
+ frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
+ more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
+
+ if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
+ !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
+ tid > IEEE80211_NUM_TIDS)
+ return -EINVAL;
+
+ /* An unfragmented packet was received in the REO exception ring;
+ * this should not happen, as such packets typically come from the
+ * reo2sw srngs.
+ */
+ if (WARN_ON_ONCE(!frag_no && !more_frags))
+ return -EINVAL;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
+ peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+ skb_queue_empty(&rx_tid->rx_frags)) {
+ /* Flush stored fragments and start a new sequence */
+ ath12k_dp_rx_frags_cleanup(rx_tid, true);
+ rx_tid->cur_sn = seqno;
+ }
+
+ if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
+ /* Fragment already present */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (frag_no > __fls(rx_tid->rx_frag_bitmap))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
+
+ rx_tid->rx_frag_bitmap |= BIT(frag_no);
+ if (!more_frags)
+ rx_tid->last_frag_no = frag_no;
+
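+ /* Keep the destination ring descriptor of the first fragment for the
+ * later REO reinject of the reassembled MPDU; link descriptors of
+ * subsequent fragments go straight back to the WBM idle list.
+ */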
+ if (frag_no == 0) {
+ rx_tid->dst_ring_desc = kmemdup(ring_desc,
+ sizeof(*rx_tid->dst_ring_desc),
+ GFP_ATOMIC);
+ if (!rx_tid->dst_ring_desc) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ ath12k_dp_rx_link_desc_return(ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ if (!rx_tid->last_frag_no ||
+ rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
+ mod_timer(&rx_tid->frag_timer, jiffies +
+ ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
+ goto out_unlock;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer)
+ goto err_frags_cleanup;
+
+ if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
+ goto err_frags_cleanup;
+
+ if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
+ goto err_frags_cleanup;
+
+ if (!defrag_skb)
+ goto err_frags_cleanup;
+
+ if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
+ goto err_frags_cleanup;
+
+ ath12k_dp_rx_frags_cleanup(rx_tid, false);
+ goto out_unlock;
+
+err_frags_cleanup:
+ dev_kfree_skb_any(defrag_skb);
+ ath12k_dp_rx_frags_cleanup(rx_tid, true);
+out_unlock:
+ spin_unlock_bh(&ab->base_lock);
+ return ret;
+}
+
+static int
+ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
+ bool drop, u32 cookie)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct sk_buff *msdu;
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_rx_desc *rx_desc;
+ u16 msdu_len;
+ u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ struct ath12k_rx_desc_info *desc_info;
+ u64 desc_va;
+
+ desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
+ le32_to_cpu(desc->buf_va_lo));
+ desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+
+ /* retry manual desc retrieval */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ if (!desc_info) {
+ ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+ return -EINVAL;
+ }
+ }
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(ab, " RX Exception, Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+ spin_lock_bh(&ab->dp.rx_desc_lock);
+ list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
+ spin_unlock_bh(&ab->dp.rx_desc_lock);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return 0;
+ }
+
+ rcu_read_lock();
+ if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(*rx_desc));
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ skb_put(msdu, hal_rx_desc_sz + msdu_len);
+
+ if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
+ dev_kfree_skb_any(msdu);
+ ath12k_dp_rx_link_desc_return(ar->ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+exit:
+ rcu_read_unlock();
+ return 0;
+}
+
+int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
+ int budget)
+{
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ struct dp_link_desc_bank *link_desc_banks;
+ enum hal_rx_buf_return_buf_manager rbm;
+ struct hal_rx_msdu_link *link_desc_va;
+ int tot_n_bufs_reaped, quota, ret, i;
+ struct hal_reo_dest_ring *reo_desc;
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_srng *reo_except;
+ u32 desc_bank, num_msdus;
+ struct hal_srng *srng;
+ struct ath12k_dp *dp;
+ int mac_id;
+ struct ath12k *ar;
+ dma_addr_t paddr;
+ bool is_frag;
+ bool drop = false;
+
+ tot_n_bufs_reaped = 0;
+ quota = budget;
+
+ dp = &ab->dp;
+ reo_except = &dp->reo_except_ring;
+ link_desc_banks = dp->link_desc_banks;
+
+ srng = &ab->hal.srng_list[reo_except->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (budget &&
+ (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ ab->soc_stats.err_ring_pkts++;
+ ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
+ &desc_bank);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse error reo desc %d\n",
+ ret);
+ continue;
+ }
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ &rbm);
+ if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW3_BM &&
+ rbm != ab->hw_params->hal_params->rx_buf_rbm) {
+ ab->soc_stats.invalid_rbm++;
+ ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
+ ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ HAL_WBM_REL_BM_ACT_REL_MSDU);
+ continue;
+ }
+
+ is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
+ RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+ /* Process only rx fragments with one msdu per link desc below, and
+ * drop msdus indicated due to error reasons.
+ */
+ if (!is_frag || num_msdus > 1) {
+ drop = true;
+ /* Return the link desc back to wbm idle list */
+ ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ for (i = 0; i < num_msdus; i++) {
+ mac_id = le32_get_bits(reo_desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
+ msdu_cookies[i]))
+ tot_n_bufs_reaped++;
+ }
+
+ if (tot_n_bufs_reaped >= quota) {
+ tot_n_bufs_reaped = quota;
+ goto exit;
+ }
+
+ budget = quota - tot_n_bufs_reaped;
+ }
+
+exit:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ rx_ring = &dp->rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
+ ab->hw_params->hal_params->rx_buf_rbm, true);
+
+ return tot_n_bufs_reaped;
+}
+
+static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
+ int msdu_len,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff *skb, *tmp;
+ struct ath12k_skb_rxcb *rxcb;
+ int n_buffs;
+
+ n_buffs = DIV_ROUND_UP(msdu_len,
+ (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
+
+ skb_queue_walk_safe(msdu_list, skb, tmp) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
+ rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+ if (!n_buffs)
+ break;
+ __skb_unlink(skb, msdu_list);
+ dev_kfree_skb_any(skb);
+ n_buffs--;
+ }
+ }
+}
+
+static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_base *ab = ar->ab;
+ u16 msdu_len, peer_id;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+ peer_id = ath12k_dp_rx_h_peer_id(ab, desc);
+
+ if (!ath12k_peer_find_by_id(ab, peer_id)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt%d\n",
+ peer_id);
+ return -EINVAL;
+ }
+
+ if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
+ /* First buffer will be freed by the caller, so deduct its length */
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
+ ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
+ return -EINVAL;
+ }
+
+ /* Even after cleaning up the SG buffers in the msdu list with the
+ * above check, any msdu received with the continuation flag needs to
+ * be dropped as invalid. This protects against a stray error frame
+ * with the continuation flag set.
+ */
+ if (rxcb->is_continuation)
+ return -EINVAL;
+
+ if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
+ ath12k_warn(ar->ab,
+ "msdu_done bit not set in null_q_des processing\n");
+ __skb_queue_purge(msdu_list);
+ return -EIO;
+ }
+
+ /* Handle NULL queue descriptor violations arising out of a missing
+ * REO queue for a given peer or TID. This typically happens if a
+ * packet is received on a QoS-enabled TID before the ADDBA
+ * negotiation for that TID has completed and the TID queue is set up.
+ * It may also happen for MC/BC frames if they are not routed to the
+ * non-QoS TID queue, in the absence of any other default TID queue.
+ * This error can show up in both the REO destination and WBM release
+ * rings.
+ */
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else {
+ l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
+
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ }
+ ath12k_dp_rx_h_ppdu(ar, desc, status);
+
+ ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
+
+ rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
+
+ /* Note that the caller still has access to the msdu and completes
+ * rx with mac80211; there is no need to clean up the amsdu list here.
+ */
+
+ return 0;
+}
+
+static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
+ if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ drop = true;
+ break;
+ case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
+ /* TODO: Do not drop PN failed packets in the driver;
+ * instead, it is good to drop such packets in mac80211
+ * after incrementing the replay counters.
+ */
+ fallthrough;
+ default:
+ /* TODO: Review the other error codes and report them to mac80211
+ * as appropriate.
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath12k_base *ab = ar->ab;
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
+ rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
+
+ l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
+ msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+
+ ath12k_dp_rx_h_ppdu(ar, desc, status);
+
+ status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
+
+ ath12k_dp_rx_h_undecap(ar, msdu, desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+}
+
+static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ bool drop = false;
+ u32 err_bitmap;
+
+ ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+ if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
+ ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ break;
+ }
+ fallthrough;
+ default:
+ /* TODO: Review the other rxdma error codes to check if anything is
+ * worth reporting to mac80211.
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status rxs = {0};
+ bool drop = true;
+
+ switch (rxcb->err_rel_src) {
+ case HAL_WBM_REL_SRC_MODULE_REO:
+ drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ break;
+ case HAL_WBM_REL_SRC_MODULE_RXDMA:
+ drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ break;
+ default:
+ /* msdu will get freed */
+ break;
+ }
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+}
+
+int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
+ struct napi_struct *napi, int budget)
+{
+ struct ath12k *ar;
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct hal_rx_wbm_rel_info err_info;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath12k_skb_rxcb *rxcb;
+ void *rx_desc;
+ int mac_id;
+ int num_buffs_reaped = 0;
+ struct ath12k_rx_desc_info *desc_info;
+ int ret, i;
+
+ for (i = 0; i < ab->num_radios; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
+ rx_ring = &dp->rx_refill_buf_ring;
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (budget) {
+ rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!rx_desc)
+ break;
+
+ ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to parse rx error in wbm_rel ring desc %d\n",
+ ret);
+ continue;
+ }
+
+ desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc;
+
+ /* retry manual desc retrieval if hw cc is not done */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
+ if (!desc_info) {
+ ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+ continue;
+ }
+ }
+
+ /* FIXME: Extract the mac id correctly. Since descriptors are not
+ * tied to a mac, it can be extracted from the vdev id in the ring
+ * descriptor.
+ */
+ mac_id = 0;
+
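+ /* The magic value acts as a SW sanity check that the address
+ * produced by HW cookie conversion really points to one of the
+ * host rx descriptors.
+ */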
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped++;
+
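+ /* Entries with the continuation bit set belong to an MSDU that
+ * spans multiple ring entries; only the final entry of such a
+ * chain is counted against the NAPI budget.
+ */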
+ if (!err_info.continuation)
+ budget--;
+
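+ /* Only entries pushed because the HW detected an error carry
+ * usable error info; anything else on this ring is unexpected
+ * and is simply freed.
+ */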
+ if (err_info.push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ rxcb->err_rel_src = err_info.err_rel_src;
+ rxcb->err_code = err_info.err_code;
+ rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+
+ rxcb->is_first_msdu = err_info.first_msdu;
+ rxcb->is_last_msdu = err_info.last_msdu;
+ rxcb->is_continuation = err_info.continuation;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!num_buffs_reaped)
+ goto done;
+
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
+ ab->hw_params->hal_params->rx_buf_rbm, true);
+
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!rcu_dereference(ab->pdevs_active[i])) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ ar = ab->pdevs[i].ar;
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
+ ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ }
+ rcu_read_unlock();
+done:
+ return num_buffs_reaped;
+}
+
+void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_tlv_64_hdr *hdr;
+ struct hal_srng *srng;
+ struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
+ bool found = false;
+ u16 tag;
+ struct hal_reo_status reo_status;
+
+ srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
+
+ memset(&reo_status, 0, sizeof(reo_status));
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
+
+ switch (tag) {
+ case HAL_REO_GET_QUEUE_STATS_STATUS:
+ ath12k_hal_reo_status_queue_stats(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_QUEUE_STATUS:
+ ath12k_hal_reo_flush_queue_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_CACHE_STATUS:
+ ath12k_hal_reo_flush_cache_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_UNBLOCK_CACHE_STATUS:
+ ath12k_hal_reo_unblk_cache_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+ ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+ ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+ ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
+ &reo_status);
+ break;
+ default:
+ ath12k_warn(ab, "Unknown reo status type %d\n", tag);
+ continue;
+ }
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+ found = true;
+ list_del(&cmd->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ if (found) {
+ cmd->handler(dp, (void *)&cmd->data,
+ reo_status.uniform_hdr.cmd_status);
+ kfree(cmd);
+ }
+
+ found = false;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath12k_dp_rx_free(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+
+ ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ if (ab->hw_params->rx_mac_buf_ring)
+ ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
+ ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
+
+ ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
+
+ ath12k_dp_rxdma_buf_free(ab);
+}
+
+void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
+{
+ struct ath12k *ar = ab->pdevs[mac_id].ar;
+
+ ath12k_dp_rx_pdev_srng_free(ar);
+}
+
+int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id;
+ int ret;
+ u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+
+ tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
+ tlv_filter.offset_valid = true;
+ tlv_filter.rx_packet_offset = hal_rx_desc_sz;
+
+ tlv_filter.rx_mpdu_start_offset =
+ ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ tlv_filter.rx_msdu_end_offset =
+ ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+
+ /* TODO: Selectively subscribe to required qwords within msdu_end
+ * and mpdu_start and setup the mask in below msg
+ * and modify the rx_desc struct
+ */
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+
+ return ret;
+}
+
+int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id;
+ int ret;
+ u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ int i;
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+
+ tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
+ tlv_filter.offset_valid = true;
+ tlv_filter.rx_packet_offset = hal_rx_desc_sz;
+
+ tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
+
+ tlv_filter.rx_mpdu_start_offset =
+ ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ tlv_filter.rx_msdu_end_offset =
+ ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+
+ /* TODO: Selectively subscribe to required qwords within msdu_end
+ * and mpdu_start and setup the mask in below msg
+ * and modify the rx_desc struct
+ */
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ u32 ring_id;
+ int i, ret;
+
+ /* TODO: Need to verify the HTT setup for QCN9224 */
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->hw_params->rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ i, HAL_RXDMA_BUF);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
+ ring_id = dp->rxdma_err_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ i, HAL_RXDMA_DST);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ 0, HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ 0, HAL_TX_MONITOR_BUF);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_rx_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i, ret;
+
+ idr_init(&dp->rx_refill_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
+
+ idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
+
+ idr_init(&dp->tx_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
+
+ ret = ath12k_dp_srng_setup(ab,
+ &dp->rx_refill_buf_ring.refill_buf_ring,
+ HAL_RXDMA_BUF, 0, 0,
+ DP_RXDMA_BUF_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
+ return ret;
+ }
+
+ if (ab->hw_params->rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ret = ath12k_dp_srng_setup(ab,
+ &dp->rx_mac_buf_ring[i],
+ HAL_RXDMA_BUF, 1,
+ i, 1024);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
+ i);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
+ ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
+ HAL_RXDMA_DST, 0, i,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
+ return ret;
+ }
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ ret = ath12k_dp_srng_setup(ab,
+ &dp->rxdma_mon_buf_ring.refill_buf_ring,
+ HAL_RXDMA_MONITOR_BUF, 0, 0,
+ DP_RXDMA_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ ret = ath12k_dp_srng_setup(ab,
+ &dp->tx_mon_buf_ring.refill_buf_ring,
+ HAL_TX_MONITOR_BUF, 0, 0,
+ DP_TX_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
+ return ret;
+ }
+ }
+
+ ret = ath12k_dp_rxdma_buf_setup(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rxdma ring\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
+{
+ struct ath12k *ar = ab->pdevs[mac_id].ar;
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ u32 ring_id;
+ int i;
+ int ret;
+
+ if (!ab->hw_params->rxdma1_enable)
+ goto out;
+
+ ret = ath12k_dp_rx_pdev_srng_alloc(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rx srngs\n");
+ return ret;
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i,
+ HAL_RXDMA_MONITOR_DST);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to configure rxdma_mon_dst_ring %d %d\n",
+ i, ret);
+ return ret;
+ }
+
+ ring_id = dp->tx_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i,
+ HAL_TX_MONITOR_DST);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to configure tx_mon_dst_ring %d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+out:
+ return 0;
+}
+
+static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
+
+ skb_queue_head_init(&pmon->rx_status_q);
+
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+
+ memset(&pmon->rx_mon_stats, 0,
+ sizeof(pmon->rx_mon_stats));
+ return 0;
+}
+
+int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct ath12k_mon_data *pmon = &dp->mon_data;
+ int ret = 0;
+
+ ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
+ return ret;
+ }
+
+ /* if rxdma1_enable is false, no need to setup
+ * rxdma_mon_desc_ring.
+ */
+ if (!ar->ab->hw_params->rxdma1_enable)
+ return 0;
+
+ pmon->mon_last_linkdesc_paddr = 0;
+ pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+ spin_lock_init(&pmon->mon_lock);
+
+ return 0;
+}
+
+int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
+{
+ /* start reap timer */
+ mod_timer(&ab->mon_reap_timer,
+ jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
+
+ return 0;
+}
+
+int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
+{
+ int ret;
+
+ if (stop_timer)
+ del_timer_sync(&ab->mon_reap_timer);
+
+ /* reap all the monitor related rings */
+ ret = ath12k_dp_purge_mon_ring(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
new file mode 100644
index 000000000000..c955b5c859d1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_DP_RX_H
+#define ATH12K_DP_RX_H
+
+#include "core.h"
+#include "rx_desc.h"
+#include "debug.h"
+
+#define DP_MAX_NWIFI_HDR_LEN 30
+
+struct ath12k_dp_rx_tid {
+ u8 tid;
+ u32 *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ u32 ba_win_sz;
+ bool active;
+
+ /* Info related to rx fragments */
+ u32 cur_sn;
+ u16 last_frag_no;
+ u16 rx_frag_bitmap;
+
+ struct sk_buff_head rx_frags;
+ struct hal_reo_dest_ring *dst_ring_desc;
+
+ /* Timer info related to fragments */
+ struct timer_list frag_timer;
+ struct ath12k_base *ab;
+};
+
+struct ath12k_dp_rx_reo_cache_flush_elem {
+ struct list_head list;
+ struct ath12k_dp_rx_tid data;
+ unsigned long ts;
+};
+
+struct ath12k_dp_rx_reo_cmd {
+ struct list_head list;
+ struct ath12k_dp_rx_tid data;
+ int cmd_num;
+ void (*handler)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status);
+};
+
+#define ATH12K_DP_RX_REO_DESC_FREE_THRES 64
+#define ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS 1000
+
+enum ath12k_dp_rx_decap_type {
+ DP_RX_DECAP_TYPE_RAW,
+ DP_RX_DECAP_TYPE_NATIVE_WIFI,
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX,
+ DP_RX_DECAP_TYPE_8023,
+};
+
+struct ath12k_dp_rx_rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
+{
+ u32 ret = 0;
+
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case RX_MSDU_START_SGI_1_6_US:
+ ret = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case RX_MSDU_START_SGI_3_2_US:
+ ret = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ return ret;
+}
+
+int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key);
+void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer);
+void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
+ struct ath12k_peer *peer, u8 tid);
+int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type);
+void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+ struct sk_buff *skb);
+int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab);
+void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab);
+int ath12k_dp_rx_htt_setup(struct ath12k_base *ab);
+int ath12k_dp_rx_alloc(struct ath12k_base *ab);
+void ath12k_dp_rx_free(struct ath12k_base *ab);
+int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int pdev_idx);
+void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int pdev_idx);
+void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab);
+void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab);
+int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
+ struct napi_struct *napi, int budget);
+int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
+ int budget);
+int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
+ struct napi_struct *napi,
+ int budget);
+int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ bool hw_cc);
+int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar);
+int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id);
+
+int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab);
+int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer);
+u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
+ struct hal_rx_desc *desc);
+struct ath12k_peer *
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
+u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
+ struct hal_rx_desc *desc);
+u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
+ struct hal_rx_desc *desc);
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status);
+
+int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
+int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
+
+#endif /* ATH12K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
new file mode 100644
index 000000000000..95294f35155c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
+#include "hw.h"
+
+static enum hal_tcl_encap_type
+ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return HAL_TCL_ENCAP_TYPE_ETHERNET;
+
+ return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+}
+
+static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ hdr = (void *)skb->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
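+ /* For HW-encapped or QoS data frames the TID is taken from
+ * skb->priority; all other frames map to the special non-QoS TID.
+ */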
+static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
+
+ if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ else if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HAL_DESC_REO_NON_QOS_TID;
+ else
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return HAL_ENCRYPT_TYPE_WEP_40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return HAL_ENCRYPT_TYPE_WEP_104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return HAL_ENCRYPT_TYPE_TKIP_MIC;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return HAL_ENCRYPT_TYPE_CCMP_128;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return HAL_ENCRYPT_TYPE_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return HAL_ENCRYPT_TYPE_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return HAL_ENCRYPT_TYPE_AES_GCMP_256;
+ default:
+ return HAL_ENCRYPT_TYPE_OPEN;
+ }
+}
+
+static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
+ struct ath12k_tx_desc_info *tx_desc,
+ u8 pool_id)
+{
+ spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+}
+
+static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
+ u8 pool_id)
+{
+ struct ath12k_tx_desc_info *desc;
+
+ spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
+ struct ath12k_tx_desc_info,
+ list);
+ if (!desc) {
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+ ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
+ return NULL;
+ }
+
+ list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+
+ return desc;
+}
+
+static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd,
+ struct hal_tx_info *ti)
+{
+ struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd;
+
+ tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
+ HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
+ tcl_ext_cmd->info1 = le32_encode_bits(0x0,
+ HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
+ le32_encode_bits(ti->data_len,
+ HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
+
+ tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+ le32_encode_bits(ti->encap_type,
+ HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
+ le32_encode_bits(ti->encrypt_type,
+ HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
+}
+
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_tx_info ti = {0};
+ struct ath12k_tx_desc_info *tx_desc;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct hal_tcl_data_cmd *hal_tcl_desc;
+ struct hal_tx_msdu_ext_desc *msg;
+ struct sk_buff *skb_ext_desc;
+ struct hal_srng *tcl_ring;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct dp_tx_ring *tx_ring;
+ u8 pool_id;
+ u8 hal_ring_id;
+ int ret;
+ u8 ring_selector, ring_map = 0;
+ bool tcl_ring_retry;
+ bool msdu_ext_desc = false;
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control))
+ return -ENOTSUPP;
+
+ pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
+
+ /* Let the default ring selection be based on the current processor
+ * number: one of the TCL rings is selected from smp_processor_id().
+ * If that ring is full or busy, we fall back to the other available
+ * rings; if all rings are full, the packet is dropped.
+ * TODO: Add throttling logic when all rings are full
+ */
+ ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
+
+tcl_ring_sel:
+ tcl_ring_retry = false;
+ ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
+
+ ring_map |= BIT(ti.ring_id);
+ ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
+
+ tx_ring = &dp->tx_ring[ti.ring_id];
+
+ tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
+ if (!tx_desc)
+ return -ENOMEM;
+
+ ti.bank_id = arvif->bank_id;
+ ti.meta_data_flags = arvif->tcl_metadata;
+
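+ /* In raw mode with HW crypto disabled, the encryption type (and
+ * room for the MIC) is conveyed via an MSDU extension descriptor
+ * rather than the plain TCL data command.
+ */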
+ if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
+ if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
+ ti.encrypt_type =
+ ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
+
+ if (ieee80211_has_protected(hdr->frame_control))
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ } else {
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
+ msdu_ext_desc = true;
+ }
+
+ ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
+ ti.addr_search_flags = arvif->hal_addr_search_flags;
+ ti.search_type = arvif->search_type;
+ ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ ti.pkt_offset = 0;
+ ti.lmac_id = ar->lmac_id;
+ ti.vdev_id = arvif->vdev_id;
+ ti.bss_ast_hash = arvif->ast_hash;
+ ti.bss_ast_idx = arvif->ast_idx;
+ ti.dscp_tid_tbl_idx = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
+ }
+
+ ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
+
+ ti.tid = ath12k_dp_tx_get_tid(skb);
+
+ switch (ti.encap_type) {
+ case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+ ath12k_dp_tx_encap_nwifi(skb);
+ break;
+ case HAL_TCL_ENCAP_TYPE_RAW:
+ if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ret = -EINVAL;
+ goto fail_remove_tx_buf;
+ }
+ break;
+ case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ /* no need to encap */
+ break;
+ case HAL_TCL_ENCAP_TYPE_802_3:
+ default:
+ /* TODO: Take care of other encap modes as well */
+ ret = -EINVAL;
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ goto fail_remove_tx_buf;
+ }
+
+ ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, ti.paddr)) {
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
+ ret = -ENOMEM;
+ goto fail_remove_tx_buf;
+ }
+
+ tx_desc->skb = skb;
+ tx_desc->mac_id = ar->pdev_idx;
+ ti.desc_id = tx_desc->desc_id;
+ ti.data_len = skb->len;
+ skb_cb->paddr = ti.paddr;
+ skb_cb->vif = arvif->vif;
+ skb_cb->ar = ar;
+
+ if (msdu_ext_desc) {
+ skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
+ if (!skb_ext_desc) {
+ ret = -ENOMEM;
+ goto fail_unmap_dma;
+ }
+
+ skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
+ memset(skb_ext_desc->data, 0, skb_ext_desc->len);
+
+ msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
+ ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
+
+ ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
+ skb_ext_desc->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ab->dev, ti.paddr);
+ if (ret) {
+ dev_kfree_skb_any(skb_ext_desc);
+ goto fail_unmap_dma;
+ }
+
+ ti.data_len = skb_ext_desc->len;
+ ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
+
+ skb_cb->paddr_ext_desc = ti.paddr;
+ }
+
+ hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+ tcl_ring = &ab->hal.srng_list[hal_ring_id];
+
+ spin_lock_bh(&tcl_ring->lock);
+
+ ath12k_hal_srng_access_begin(ab, tcl_ring);
+
+ hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
+ if (!hal_tcl_desc) {
+ /* NOTE: It is highly unlikely we'll be running out of tcl_ring
+ * desc because the desc is directly enqueued onto hw queue.
+ */
+ ath12k_hal_srng_access_end(ab, tcl_ring);
+ ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+ spin_unlock_bh(&tcl_ring->lock);
+ ret = -ENOMEM;
+
+ /* Checking for available tcl descriptors in another ring on
+ * failure due to a full tcl ring now is better than checking
+ * this ring earlier for each packet tx. Restart ring selection
+ * if some rings are not checked yet.
+ */
+ if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
+ ab->hw_params->tcl_ring_retry) {
+ tcl_ring_retry = true;
+ ring_selector++;
+ }
+
+ goto fail_unmap_dma;
+ }
+
+ ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
+
+ ath12k_hal_srng_access_end(ab, tcl_ring);
+
+ spin_unlock_bh(&tcl_ring->lock);
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
+ skb->data, skb->len);
+
+ atomic_inc(&ar->dp.num_tx_pending);
+
+ return 0;
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+
+ if (skb_cb->paddr_ext_desc)
+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ sizeof(struct hal_tx_msdu_ext_desc),
+ DMA_TO_DEVICE);
+
+fail_remove_tx_buf:
+ ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+ if (tcl_ring_retry)
+ goto tcl_ring_sel;
+
+ return ret;
+}
+
+static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
+ struct sk_buff *msdu, u8 mac_id,
+ struct dp_tx_ring *tx_ring)
+{
+ struct ath12k *ar;
+ struct ath12k_skb_cb *skb_cb;
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (skb_cb->paddr_ext_desc)
+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(msdu);
+
+ ar = ab->pdevs[mac_id].ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+}
+
+static void
+ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
+ struct sk_buff *msdu,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_dp_htt_wbm_tx_status *ts)
+{
+ struct ieee80211_tx_info *info;
+ struct ath12k_skb_cb *skb_cb;
+ struct ath12k *ar;
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+ info = IEEE80211_SKB_CB(msdu);
+
+ ar = skb_cb->ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (skb_cb->paddr_ext_desc)
+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ts->acked) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ }
+ }
+
+ ieee80211_tx_status(ar->hw, msdu);
+}
+
+static void
+ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
+ void *desc, u8 mac_id,
+ struct sk_buff *msdu,
+ struct dp_tx_ring *tx_ring)
+{
+ struct htt_tx_wbm_completion *status_desc;
+ struct ath12k_dp_htt_wbm_tx_status ts = {0};
+ enum hal_wbm_htt_tx_comp_status wbm_status;
+
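+ /* For FW-released buffers the HTT completion status is embedded
+ * at a fixed offset within the WBM release descriptor.
+ */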
+ status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+
+ wbm_status = le32_get_bits(status_desc->info0,
+ HTT_TX_WBM_COMP_INFO0_STATUS);
+
+ switch (wbm_status) {
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ ts.ack_rssi = le32_get_bits(status_desc->info2,
+ HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
+ ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+ /* This event is to be handled only when the driver decides to
+ * use WDS offload functionality.
+ */
+ break;
+ default:
+ ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ break;
+ }
+}
+
+static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_tx_info *info;
+ struct ath12k_skb_cb *skb_cb;
+
+ if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
+ /* Must not happen */
+ return;
+ }
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (skb_cb->paddr_ext_desc)
+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+
+ rcu_read_lock();
+
+ if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (!skb_cb->vif) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* skip tx rate update from ieee80211_status */
+ info->status.rates[0].idx = -1;
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+ !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ /* NOTE: Tx rate status reporting. Tx completion status does not have
+ * necessary information (for example nss) to build the tx rate.
+ * Might end up reporting it out-of-band from HTT stats.
+ */
+
+ ieee80211_tx_status(ar->hw, msdu);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
+ struct hal_wbm_completion_ring_tx *desc,
+ struct hal_tx_status *ts)
+{
+ ts->buf_rel_source =
+ le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
+ if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+ return;
+
+ ts->status = le32_get_bits(desc->info0,
+ HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
+
+ ts->ppdu_id = le32_get_bits(desc->info1,
+ HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
+ if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
+ ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
+ else
+ ts->rate_stats = 0;
+}
+
+void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
+{
+ struct ath12k *ar;
+ struct ath12k_dp *dp = &ab->dp;
+ int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+ struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+ struct ath12k_tx_desc_info *tx_desc = NULL;
+ struct sk_buff *msdu;
+ struct hal_tx_status ts = { 0 };
+ struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+ struct hal_wbm_release_ring *desc;
+ u8 mac_id;
+ u64 desc_va;
+
+ spin_lock_bh(&status_ring->lock);
+
+ ath12k_hal_srng_access_begin(ab, status_ring);
+
+ while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
+ desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
+ if (!desc)
+ break;
+
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(*desc));
+ tx_ring->tx_status_head =
+ ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+ }
+
+ if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
+ (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
+ /* TODO: Process pending tx_status messages when kfifo_is_full() */
+ ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+ }
+
+ ath12k_hal_srng_access_end(ab, status_ring);
+
+ spin_unlock_bh(&status_ring->lock);
+
+ while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_completion_ring_tx *tx_status;
+ u32 desc_id;
+
+ tx_ring->tx_status_tail =
+ ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath12k_dp_tx_status_parse(ab, tx_status, &ts);
+
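+ /* The descriptor address is recovered either directly from the
+ * 64-bit VA placed by HW cookie conversion, or by looking up the
+ * SW cookie in the host tx descriptor pool.
+ */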
+ if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
+ /* HW done cookie conversion */
+ desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
+ le32_to_cpu(tx_status->buf_va_lo));
+ tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
+ } else {
+ /* SW does cookie conversion to VA */
+ desc_id = le32_get_bits(tx_status->buf_va_hi,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
+ }
+ if (!tx_desc) {
+ ath12k_warn(ab, "unable to retrieve tx_desc!");
+ continue;
+ }
+
+ msdu = tx_desc->skb;
+ mac_id = tx_desc->mac_id;
+
+ /* Release descriptor as soon as extracting necessary info
+ * to reduce contention
+ */
+ ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
+ if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+ ath12k_dp_tx_process_htt_tx_complete(ab,
+ (void *)tx_status,
+ mac_id, msdu,
+ tx_ring);
+ continue;
+ }
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
+ }
+}
+
+static int
+ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
+ int mac_id, u32 ring_id,
+ enum hal_ring_type ring_type,
+ enum htt_srng_ring_type *htt_ring_type,
+ enum htt_srng_ring_id *htt_ring_id)
+{
+ int ret = 0;
+
+ switch (ring_type) {
+ case HAL_RXDMA_BUF:
+ /* For some targets the host hands rx buffers to the firmware,
+ * which then fills the rxbuf ring of each rxdma.
+ */
+ if (!ab->hw_params->rx_mac_buf_ring) {
+ if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
+ ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ } else {
+ if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
+ *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+ *htt_ring_type = HTT_SW_TO_SW_RING;
+ } else {
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ }
+ }
+ break;
+ case HAL_RXDMA_DST:
+ *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DST:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DESC:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_TX_MONITOR_BUF:
+ *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_TX_MONITOR_DST:
+ *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ default:
+ ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type)
+{
+ struct htt_srng_setup_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ u32 ring_entry_sz;
+ int len = sizeof(*cmd);
+ dma_addr_t hp_addr, tp_addr;
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
+ tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_srng_setup_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
+ HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |= le32_encode_bits(mac_id,
+ HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_type,
+ HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_SRNG_SETUP_CMD_INFO0_RING_ID);
+
+ cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
+ HAL_ADDR_LSB_REG_MASK);
+
+ cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT);
+
+ ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
+ if (ret < 0)
+ goto err_free;
+
+ ring_entry_sz = ret;
+
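+ /* ath12k_hal_srng_get_entrysize() returns bytes, while the HTT
+ * message apparently expects entry and ring sizes in 4-byte words,
+ * hence the conversion below.
+ */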
+ ring_entry_sz >>= 2;
+ cmd->info1 = le32_encode_bits(ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
+ cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
+ if (htt_ring_type == HTT_SW_TO_HW_RING)
+ cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);
+
+ cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
+ cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));
+
+ cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
+ cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));
+
+ cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
+ cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
+ cmd->msi_data = cpu_to_le32(params.msi_data);
+
+ cmd->intr_info =
+ le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
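+ /* The timer threshold field appears to be in units of 8 us,
+ * going by the microseconds-to-field conversion here.
+ */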
+ cmd->intr_info |=
+ le32_encode_bits(params.intr_timer_thres_us >> 3,
+ HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);
+
+ cmd->info2 = 0;
+ if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ cmd->info2 = le32_encode_bits(params.low_threshold,
+ HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
+ __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
+ cmd->msi_data);
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
+ ring_id, ring_type, cmd->intr_info, cmd->info2);
+
+ ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ver_req_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ init_completion(&dp->htt_tgt_version_received);
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ver_req_cmd *)skb->data;
+ cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
+ HTT_VER_REQ_INFO_MSG_ID);
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath12k_warn(ab, "htt target version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+ ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
+ dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ppdu_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ u8 pdev_mask;
+ int ret;
+ int i;
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
+ HTT_PPDU_STATS_CFG_MSG_TYPE);
+
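+ /* Target pdev ids seem to start at 1, hence bit 0 is skipped
+ * when building the pdev mask.
+ */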
+ pdev_mask = 1 << (i + 1);
+ cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
+ cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+ struct htt_rx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |=
+ le32_encode_bits(mac_id,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
+ cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
+ HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
+ cmd->info1 = le32_encode_bits(rx_buf_size,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
+ cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
+ cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
+ cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
+ cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
+ cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
+
+ if (tlv_filter->offset_valid) {
+ cmd->rx_packet_offset =
+ le32_encode_bits(tlv_filter->rx_packet_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);
+
+ cmd->rx_packet_offset |=
+ le32_encode_bits(tlv_filter->rx_header_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);
+
+ cmd->rx_mpdu_offset =
+ le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);
+
+ cmd->rx_mpdu_offset |=
+ le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);
+
+ cmd->rx_msdu_offset =
+ le32_encode_bits(tlv_filter->rx_msdu_end_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);
+
+ cmd->rx_msdu_offset |=
+ le32_encode_bits(tlv_filter->rx_msdu_start_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);
+
+ cmd->rx_attn_offset =
+ le32_encode_bits(tlv_filter->rx_attn_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
+ }
+
+ ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int
+ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ext_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+
+ cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+
+ cmd->hdr.stats_type = type;
+ cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
+ cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
+ cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
+ cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
+ cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
+ cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
+{
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+ if (ret) {
+ ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
+ if (ret) {
+ ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ int ret, ring_id;
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ tlv_filter.offset_valid = false;
+
+ if (!reset) {
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.pkt_filter_flags0 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int tx_buf_size,
+ struct htt_tx_ring_tlv_filter *htt_tlv_filter)
+{
+ struct htt_tx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |=
+ le32_encode_bits(mac_id,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);
+
+ cmd->info1 |=
+ le32_encode_bits(tx_buf_size,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);
+
+ if (htt_tlv_filter->tx_mon_mgmt_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ if (htt_tlv_filter->tx_mon_data_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ if (htt_tlv_filter->tx_mon_ctrl_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ cmd->tlv_filter_mask_in0 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
+ cmd->tlv_filter_mask_in1 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
+ cmd->tlv_filter_mask_in2 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
+ cmd->tlv_filter_mask_in3 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);
+
+ ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_tx_ring_tlv_filter tlv_filter = {0};
+ int ret, ring_id;
+
+ ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
+
+ /* TODO: Need to set upstream/downstream tlv filters
+ * here
+ */
+
+ if (ab->hw_params->rxdma1_enable) {
+ ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
+ HAL_TX_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
new file mode 100644
index 000000000000..436d77e5e9ee
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_DP_TX_H
+#define ATH12K_DP_TX_H
+
+#include "core.h"
+#include "hal_tx.h"
+
+struct ath12k_dp_htt_wbm_tx_status {
+ bool acked;
+ int ack_rssi;
+};
+
+int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct sk_buff *skb);
+void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
+
+int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
+int
+ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie);
+int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+
+int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter);
+void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id);
+int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int tx_buf_size,
+ struct htt_tx_ring_tlv_filter *htt_tlv_filter);
+int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
new file mode 100644
index 000000000000..95d04819083f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -0,0 +1,2222 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hif.h"
+
+static const struct hal_srng_config hw_srng_config_template[] = {
+ /* TODO: max_rings can be populated by querying HW capabilities */
+ [HAL_REO_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 8,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_EXCEPTION] = {
+		/* Designating REO2SW0 ring as exception ring.
+		 * Any of the REO2SW rings can be used as the exception ring.
+		 */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_REINJECT] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_CMD] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_64_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_64_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_DATA] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 6,
+ .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_CMD] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_SRC] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_DST_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_WBM_IDLE_LINK] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_SW2WBM_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_WBM2SW_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 8,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_RXDMA_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_DMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 0,
+ .entry_size = 0,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2RXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_STATUS] = { 0, },
+ [HAL_RXDMA_MONITOR_DESC] = { 0, },
+ [HAL_RXDMA_DIR_BUF] = {
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 2,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_PPE2TCL] = {
+ .start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_PPE_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TX_MONITOR_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_TX_MONITOR_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ }
+};
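+
+/* Note on units in the template above: .entry_size is stored in 32-bit
+ * words (hence the sizeof() >> 2 conversions), and .max_size likewise
+ * appears to be a word count; ath12k_hal_srng_get_entrysize() and
+ * ath12k_hal_srng_get_max_entries() below convert back to bytes and
+ * entries respectively.
+ */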
+
+static const struct ath12k_hal_tcl_to_wbm_rbm_map
+ath12k_hal_qcn9274_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
+ {
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .wbm_ring_num = 1,
+ .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+ },
+ {
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+ {
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ }
+};
+
+static const struct ath12k_hal_tcl_to_wbm_rbm_map
+ath12k_hal_wcn7850_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
+ {
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+ {
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+};
+
+static unsigned int ath12k_hal_reo1_ring_id_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_msi1_base_lsb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_msi1_base_msb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_msi1_data_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_base_msb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_producer_int_setup_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_hp_addr_lsb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_hp_addr_msb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_misc_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
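+
+/* The helpers above compute each REO destination ring register's offset
+ * relative to HAL_REO1_RING_BASE_LSB. ath12k_hal_srng_dst_hw_init() adds
+ * these offsets to a ring's own hwreg base so the same code can program
+ * any of the REO2SW rings.
+ */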
+
+static bool ath12k_hw_qcn9274_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hw_qcn9274_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+ RX_MSDU_END_INFO5_TID);
+}
+
+static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274.mpdu_start.sw_peer_id);
+}
+
+static void ath12k_hw_qcn9274_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy(&fdesc->u.qcn9274.msdu_end, &ldesc->u.qcn9274.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9274));
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274.mpdu_start.phy_ppdu_id);
+}
+
+static void ath12k_hw_qcn9274_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info10);
+
+ info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
+ info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+
+ desc->u.qcn9274.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static u8 *ath12k_hw_qcn9274_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9274.msdu_payload[0];
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274, mpdu_start);
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274, msdu_end);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9274.mpdu_start.addr2;
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_is_mcbc(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
+}
+
+static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.qcn9274.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.qcn9274.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.qcn9274.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.qcn9274.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.qcn9274.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.qcn9274.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.qcn9274.mpdu_start.seq_ctrl;
+}
+
+static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = le32_get_bits(desc->u.qcn9274.mpdu_start.info5,
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[1]);
+ crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
+}
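+
+/* The bytes assembled above follow the 802.11 per-frame IV layout: for
+ * CCMP/GCMP bytes 0-1 carry PN0/PN1, for TKIP the first and third bytes
+ * are swapped (TSC1/TSC0) with the WEP seed byte left zero, byte 3 sets
+ * the Ext IV bit (0x20) plus the 2-bit key ID in bits 6-7, and bytes
+ * 4-7 carry PN2-PN5.
+ */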
+
+static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl);
+}
+
+static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+ s->reg_size[0] = HAL_SW2REO1_RING_BASE_LSB(ab) - HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_SW2REO1_RING_HP - HAL_SW2REO_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM_SW1_RELEASE_RING_HP - HAL_WBM_SW_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+	/* Some LMAC rings are not accessed from the host:
+	 * RXDMA_BUF, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS,
+	 * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC,
+	 * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA
+	 */
+ s = &hal->srng_config[HAL_PPE2TCL];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_PPE_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP;
+
+ return 0;
+}
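+
+/* kmemdup() of the shared template lets each chip variant patch in its
+ * own register offsets and ring counts without modifying the const
+ * hw_srng_config_template used by the other variant.
+ */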
+
+static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.qcn9274.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+const struct hal_ops hal_qcn9274_ops = {
+ .rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
+ .rx_desc_encrypt_valid = ath12k_hw_qcn9274_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath12k_hw_qcn9274_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath12k_hw_qcn9274_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_rx_desc_copy_end_tlv,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath12k_hw_qcn9274_rx_desc_set_msdu_len,
+ .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_rx_desc_get_msdu_payload,
+ .rx_desc_get_mpdu_start_offset = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset,
+ .rx_desc_get_msdu_end_offset = ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset,
+ .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2,
+ .rx_desc_is_mcbc = ath12k_hw_qcn9274_rx_desc_is_mcbc,
+ .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
+ .rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
+ .rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
+ .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
+ .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
+ .dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
+ .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
+ .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
+ .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
+ .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
+};
+
+static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hw_wcn7850_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_TID);
+}
+
+static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn7850.mpdu_start.sw_peer_id);
+}
+
+static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9274));
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return le64_get_bits(desc->u.wcn7850.mpdu_start_tag,
+ HAL_TLV_HDR_TAG);
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn7850.mpdu_start.phy_ppdu_id);
+}
+
+static void ath12k_hw_wcn7850_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info10);
+
+ info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
+ info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+
+ desc->u.wcn7850.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static u8 *ath12k_hw_wcn7850_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn7850.msdu_payload[0];
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset(void)
+{
+ return offsetof(struct hal_rx_desc_wcn7850, mpdu_start_tag);
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset(void)
+{
+ return offsetof(struct hal_rx_desc_wcn7850, msdu_end_tag);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn7850.mpdu_start.addr2;
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_is_mcbc(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
+}
+
+static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.wcn7850.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.wcn7850.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.wcn7850.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.wcn7850.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.wcn7850.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.wcn7850.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.wcn7850.mpdu_start.seq_ctrl;
+}
+
+static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = u32_get_bits(__le32_to_cpu(desc->u.wcn7850.mpdu_start.info5),
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[1]);
+ crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
+}
+
+static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl);
+}
+
+static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->max_rings = 1;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->max_rings = 5;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->max_rings = 1;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_RXDMA_BUF];
+ s->max_rings = 2;
+ s->mac_type = ATH12K_HAL_SRNG_PMAC;
+
+ s = &hal->srng_config[HAL_RXDMA_DST];
+ s->max_rings = 1;
+ s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2;
+
+	/* The rings below are not used on WCN7850 */
+ s = &hal->srng_config[HAL_RXDMA_DIR_BUF];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_PPE2TCL];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_PPE_RELEASE];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_TX_MONITOR_BUF];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_TX_MONITOR_DST];
+ s->max_rings = 0;
+
+ return 0;
+}
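+
+/* Relative to the template, WCN7850 trims the ring counts (a single REO
+ * reinject ring, five TCL data rings, twelve copy engines) and zeroes
+ * max_rings for the PPE and TX monitor rings that are not used on this
+ * chip.
+ */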
+
+static bool ath12k_hw_wcn7850_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_wcn7850_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.wcn7850.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+const struct hal_ops hal_wcn7850_ops = {
+ .rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
+ .rx_desc_encrypt_valid = ath12k_hw_wcn7850_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath12k_hw_wcn7850_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath12k_hw_wcn7850_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath12k_hw_wcn7850_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath12k_hw_wcn7850_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath12k_hw_wcn7850_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath12k_hw_wcn7850_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath12k_hw_wcn7850_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath12k_hw_wcn7850_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_end_tlv = ath12k_hw_wcn7850_rx_desc_copy_end_tlv,
+ .rx_desc_get_mpdu_start_tag = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath12k_hw_wcn7850_rx_desc_set_msdu_len,
+ .rx_desc_get_msdu_payload = ath12k_hw_wcn7850_rx_desc_get_msdu_payload,
+ .rx_desc_get_mpdu_start_offset = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset,
+ .rx_desc_get_msdu_end_offset = ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset,
+ .rx_desc_mac_addr2_valid = ath12k_hw_wcn7850_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2,
+ .rx_desc_is_mcbc = ath12k_hw_wcn7850_rx_desc_is_mcbc,
+ .rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
+ .rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
+ .rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
+ .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
+ .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
+ .dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
+ .dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
+ .dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
+ .dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
+ .dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
+};
+
+static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
+ GFP_KERNEL);
+ if (!hal->rdp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
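+
+/* hal->rdp is a DMA-coherent array with one u32 slot per ring ID. It
+ * appears to back the shadow pointers updated by the remote side
+ * (hardware/firmware) and read by the host, while hal->wrp allocated
+ * below backs the host-updated pointers.
+ */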
+
+static void ath12k_hal_free_cont_rdp(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->rdp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ dma_free_coherent(ab->dev, size,
+ hal->rdp.vaddr, hal->rdp.paddr);
+ hal->rdp.vaddr = NULL;
+}
+
+static int ath12k_hal_alloc_cont_wrp(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
+ hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
+ GFP_KERNEL);
+ if (!hal->wrp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath12k_hal_free_cont_wrp(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->wrp.vaddr)
+ return;
+
+ size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
+ dma_free_coherent(ab->dev, size,
+ hal->wrp.vaddr, hal->wrp.paddr);
+ hal->wrp.vaddr = NULL;
+}
+
+static void ath12k_hal_ce_dst_setup(struct ath12k_base *ab,
+ struct hal_srng *srng, int ring_num)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
+ u32 addr;
+ u32 val;
+
+ addr = HAL_CE_DST_RING_CTRL +
+ srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+ ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+
+ val = ath12k_hif_read32(ab, addr);
+ val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+ val |= u32_encode_bits(srng->u.dst_ring.max_buffer_length,
+ HAL_CE_DST_R0_DEST_CTRL_MAX_LEN);
+ ath12k_hif_write32(ab, addr, val);
+}
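+
+/* The setup above is a read-modify-write of the per-ring
+ * HAL_CE_DST_RING_CTRL register: only the DEST_CTRL_MAX_LEN field is
+ * replaced with the ring's maximum destination buffer length, all other
+ * control bits are preserved.
+ */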
+
+static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 val;
+ u64 hp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath12k_hif_write32(ab, reg_base +
+ ath12k_hal_reo1_ring_msi1_base_lsb_offset(ab),
+ srng->msi_addr);
+
+ val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_REO1_RING_MSI1_BASE_MSB_ADDR) |
+ HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath12k_hif_write32(ab, reg_base +
+ ath12k_hal_reo1_ring_msi1_base_msb_offset(ab), val);
+
+ ath12k_hif_write32(ab,
+ reg_base + ath12k_hal_reo1_ring_msi1_data_offset(ab),
+ srng->msi_data);
+ }
+
+ ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
+ u32_encode_bits((srng->entry_size * srng->num_entries),
+ HAL_REO1_RING_BASE_MSB_RING_SIZE);
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_base_msb_offset(ab), val);
+
+ val = u32_encode_bits(srng->ring_id, HAL_REO1_RING_ID_RING_ID) |
+ u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_id_offset(ab), val);
+
+ /* interrupt setup */
+ val = u32_encode_bits((srng->intr_timer_thres_us >> 3),
+ HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD);
+
+ val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
+ HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD);
+
+ ath12k_hif_write32(ab,
+ reg_base + ath12k_hal_reo1_ring_producer_int_setup_offset(ab),
+ val);
+
+ hp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_lsb_offset(ab),
+ hp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_msb_offset(ab),
+ hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath12k_hif_write32(ab, reg_base, 0);
+ ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ *srng->u.dst_ring.hp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_REO1_RING_MISC_MSI_SWAP;
+ val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_misc_offset(ab), val);
+}
+
+static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 val;
+ u64 tp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
+
+ val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_TCL1_RING_MSI1_BASE_MSB_ADDR) |
+ HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
+ val);
+
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
+ srng->msi_data);
+ }
+
+ ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
+ u32_encode_bits((srng->entry_size * srng->num_entries),
+ HAL_TCL1_RING_BASE_MSB_RING_SIZE);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+
+ val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
+
+ val = u32_encode_bits(srng->intr_timer_thres_us,
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD);
+
+ val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD);
+
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
+ val);
+
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ val |= u32_encode_bits(srng->u.src_ring.low_threshold,
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD);
+ }
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
+ val);
+
+ if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ tp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
+ tp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
+ tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+ }
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath12k_hif_write32(ab, reg_base, 0);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ *srng->u.src_ring.tp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+ /* Loop count is not used for SRC rings */
+ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+ val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+ if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK)
+ val |= HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE;
+
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
+}
+
+static void ath12k_hal_srng_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath12k_hal_srng_src_hw_init(ab, srng);
+ else
+ ath12k_hal_srng_dst_hw_init(ab, srng);
+}
+
+static int ath12k_hal_srng_get_ring_id(struct ath12k_base *ab,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
+ int ring_id;
+
+ if (ring_num >= srng_config->max_rings) {
+ ath12k_warn(ab, "invalid ring number :%d\n", ring_num);
+ return -EINVAL;
+ }
+
+ ring_id = srng_config->start_ring_id + ring_num;
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
+ ring_id += mac_id * HAL_SRNG_RINGS_PER_PMAC;
+
+ if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
+ return -EINVAL;
+
+ return ring_id;
+}
+
+int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type)
+{
+ struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &ab->hal.srng_config[ring_type];
+
+ return (srng_config->entry_size << 2);
+}
+
+int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type)
+{
+ struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &ab->hal.srng_config[ring_type];
+
+ return (srng_config->max_size / srng_config->entry_size);
+}
+
+void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params)
+{
+ params->ring_base_paddr = srng->ring_base_paddr;
+ params->ring_base_vaddr = srng->ring_base_vaddr;
+ params->num_entries = srng->num_entries;
+ params->intr_timer_thres_us = srng->intr_timer_thres_us;
+ params->intr_batch_cntr_thres_entries =
+ srng->intr_batch_cntr_thres_entries;
+ params->low_threshold = srng->u.src_ring.low_threshold;
+ params->msi_addr = srng->msi_addr;
+ params->msi2_addr = srng->msi2_addr;
+ params->msi_data = srng->msi_data;
+ params->msi2_data = srng->msi2_data;
+ params->flags = srng->flags;
+}
+
+dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+ else
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+}
+
+dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+ else
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+}
+
+u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+ switch (type) {
+ case HAL_CE_DESC_SRC:
+ return sizeof(struct hal_ce_srng_src_desc);
+ case HAL_CE_DESC_DST:
+ return sizeof(struct hal_ce_srng_dest_desc);
+ case HAL_CE_DESC_DST_STATUS:
+ return sizeof(struct hal_ce_srng_dst_status_desc);
+ }
+
+ return 0;
+}
+
+void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
+ u32 len, u32 id, u8 byte_swap_data)
+{
+ desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
+ desc->buffer_addr_info =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI) |
+ le32_encode_bits(byte_swap_data,
+ HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP) |
+ le32_encode_bits(0, HAL_CE_SRC_DESC_ADDR_INFO_GATHER) |
+ le32_encode_bits(len, HAL_CE_SRC_DESC_ADDR_INFO_LEN);
+ desc->meta_info = le32_encode_bits(id, HAL_CE_SRC_DESC_META_INFO_DATA);
+}
+
+void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr)
+{
+ desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
+ desc->buffer_addr_info =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI);
+}
+
+u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc)
+{
+ u32 len;
+
+ len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+ desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+
+ return len;
+}
+
+void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr)
+{
+ desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
+ BUFFER_ADDR_INFO0_ADDR);
+ desc->buf_addr_info.info1 =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ BUFFER_ADDR_INFO1_ADDR) |
+ le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
+}
+
+void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
+ return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
+
+ return NULL;
+}
+
+void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+ srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ tp = srng->u.dst_ring.tp;
+
+ if (sync_hw_ptr) {
+ hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = hp;
+ } else {
+ hp = srng->u.dst_ring.cached_hp;
+ }
+
+ if (hp >= tp)
+ return (hp - tp) / srng->entry_size;
+ else
+ return (srng->ring_size - tp + hp) / srng->entry_size;
+}
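+
+/* hp and tp above are offsets in 32-bit words into a circular ring, so
+ * once the head pointer has wrapped below the tail the occupied span is
+ * (ring_size - tp + hp) words, divided by entry_size to yield entries.
+ */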
+
+/* Returns number of available entries in src ring */
+int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ hp = srng->u.src_ring.hp;
+
+ if (sync_hw_ptr) {
+ tp = *srng->u.src_ring.tp_addr;
+ srng->u.src_ring.cached_tp = tp;
+ } else {
+ tp = srng->u.src_ring.cached_tp;
+ }
+
+ if (tp > hp)
+ return ((tp - hp) / srng->entry_size) - 1;
+ else
+ return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
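+
+/* The "- 1" above keeps one entry permanently unused: if the producer
+ * could fill every slot, hp == tp would be ambiguous between a
+ * completely full and a completely empty ring.
+ */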
+
+void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+	/* TODO: Using % is expensive, but we have to do this since the size
+	 * of some SRNG rings is not a power of 2 (due to descriptor sizes).
+	 * See whether a separate function can be defined for rings whose size
+	 * is a power of 2 (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that the
+	 * overhead of % can be avoided by masking (with &).
+	 */
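+	/* For example, with a power-of-2 ring_size the wrap below could use a
+	 * mask instead: (srng->u.src_ring.hp + srng->entry_size) &
+	 * (srng->ring_size - 1).
+	 */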
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = next_hp;
+
+	/* TODO: Reap functionality is not used by all rings. If a particular
+	 * ring does not use it, reap_hp need not be updated with next_hp.
+	 * Make sure a separate function is used for such rings before
+	 * optimizing away the update below.
+	 */
+ srng->u.src_ring.reap_hp = next_hp;
+
+ return desc;
+}
+
+void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+ u32 next_reap_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
+ srng->ring_size;
+
+ if (next_reap_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_reap_hp;
+ srng->u.src_ring.reap_hp = next_reap_hp;
+
+ return desc;
+}
+
+void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ srng->u.src_ring.cached_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ else
+ srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+}
+
+/* Write the cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
+ * should have been called before this; a usage sketch follows this function.
+ */
+void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: See if we need a write memory barrier here */
+ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+ /* For LMAC rings, ring pointer updates are done through FW and
+ * hence written to a shared memory location that is read by FW
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ }
+ } else {
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ ath12k_hif_write32(ab,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem,
+ srng->u.src_ring.hp);
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ ath12k_hif_write32(ab,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem,
+ srng->u.dst_ring.tp);
+ }
+ }
+
+ srng->timestamp = jiffies;
+}
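For context, a sketch of how a destination-ring consumer is expected to pair ath12k_hal_srng_access_begin()/_end() around descriptor processing; the wrapper name is illustrative, but every call it makes is defined in this patch:

	static void example_ring_poll(struct ath12k_base *ab, struct hal_srng *srng)
	{
		void *desc;

		spin_lock_bh(&srng->lock);

		/* Snapshot the HW head pointer into cached_hp */
		ath12k_hal_srng_access_begin(ab, srng);

		while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
			/* handle one completed descriptor */
		}

		/* Publish the new tail pointer to HW (or to the FW shared
		 * memory location for LMAC rings) and timestamp the access.
		 */
		ath12k_hal_srng_access_end(ab, srng);

		spin_unlock_bh(&srng->lock);
	}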
+
+void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
+{
+ struct ath12k_buffer_addr *link_addr;
+ int i;
+ u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+ u32 val;
+
+ link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
+ for (i = 1; i < nsbufs; i++) {
+ link_addr->info0 = cpu_to_le32(sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK);
+
+ link_addr->info1 =
+ le32_encode_bits((u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ le32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG);
+
+ link_addr = (void *)sbuf[i].vaddr +
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+ }
+
+ val = u32_encode_bits(reg_scatter_buf_sz, HAL_WBM_SCATTER_BUFFER_SIZE) |
+ u32_encode_bits(0x1, HAL_WBM_LINK_DESC_IDLE_LIST_MODE);
+
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab),
+ val);
+
+ val = u32_encode_bits(reg_scatter_buf_sz * nsbufs,
+ HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK,
+ BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_LSB(ab),
+ val);
+
+ val = u32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG) |
+ u32_encode_bits((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_MSB(ab),
+ val);
+
+ /* Setup head and tail pointers for the idle list */
+ val = u32_encode_bits(sbuf[nsbufs - 1].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab),
+ val);
+
+ val = u32_encode_bits(((u64)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ u32_encode_bits((end_offset >> 2),
+ HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab),
+ val);
+
+ val = u32_encode_bits(((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ u32_encode_bits(0, HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab),
+ val);
+
+ val = 2 * tot_link_desc;
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab),
+ val);
+
+ /* Enable the SRNG */
+ val = u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE) |
+ u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab),
+ val);
+}
+
+int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
+ struct hal_srng *srng;
+ int ring_id;
+ u32 idx;
+ int i;
+ u32 reg_base;
+
+ ring_id = ath12k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
+ if (ring_id < 0)
+ return ring_id;
+
+ srng = &hal->srng_list[ring_id];
+
+ srng->ring_id = ring_id;
+ srng->ring_dir = srng_config->ring_dir;
+ srng->ring_base_paddr = params->ring_base_paddr;
+ srng->ring_base_vaddr = params->ring_base_vaddr;
+ srng->entry_size = srng_config->entry_size;
+ srng->num_entries = params->num_entries;
+ srng->ring_size = srng->entry_size * srng->num_entries;
+ srng->intr_batch_cntr_thres_entries =
+ params->intr_batch_cntr_thres_entries;
+ srng->intr_timer_thres_us = params->intr_timer_thres_us;
+ srng->flags = params->flags;
+ srng->msi_addr = params->msi_addr;
+ srng->msi2_addr = params->msi2_addr;
+ srng->msi_data = params->msi_data;
+ srng->msi2_data = params->msi2_data;
+ srng->initialized = 1;
+ spin_lock_init(&srng->lock);
+ lockdep_set_class(&srng->lock, &srng->lock_key);
+
+ for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
+ srng->hwreg_base[i] = srng_config->reg_start[i] +
+ (ring_num * srng_config->reg_size[i]);
+ }
+
+ memset(srng->ring_base_vaddr, 0,
+ (srng->entry_size * srng->num_entries) << 2);
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.hp = 0;
+ srng->u.src_ring.cached_tp = 0;
+ srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
+ srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ srng->u.src_ring.low_threshold = params->low_threshold *
+ srng->entry_size;
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
+ if (!ab->hw_params->supports_shadow_regs)
+ srng->u.src_ring.hp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base);
+ else
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem);
+ } else {
+ idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
+ srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
+ idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ }
+ } else {
+ /* During initialization the loop count in all the descriptors
+ * is set to zero. HW sets it to 1 on completing the descriptor
+ * update in the first loop and increments it by 1 on subsequent
+ * loops (the loop count wraps around after reaching 0xffff). The
+ * 'loop_cnt' in the SW ring state is the expected loop count in
+ * descriptors updated by HW (to be processed by SW); a validation
+ * sketch follows this function.
+ */
+ srng->u.dst_ring.loop_cnt = 1;
+ srng->u.dst_ring.tp = 0;
+ srng->u.dst_ring.cached_hp = 0;
+ srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
+ if (!ab->hw_params->supports_shadow_regs)
+ srng->u.dst_ring.tp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base +
+ (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+ else
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base + HAL_REO1_RING_TP - HAL_REO1_RING_HP,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem);
+ } else {
+ /* For PMAC & DMAC rings, tail pointer updates will be done
+ * through FW by writing to a shared memory location
+ */
+ idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
+ srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
+ idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ }
+ }
+
+ if (srng_config->mac_type != ATH12K_HAL_SRNG_UMAC)
+ return ring_id;
+
+ ath12k_hal_srng_hw_init(ab, srng);
+
+ if (type == HAL_CE_DST) {
+ srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
+ ath12k_hal_ce_dst_setup(ab, srng, ring_num);
+ }
+
+ return ring_id;
+}
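The destination-ring comment in this function describes a loop-count handshake with HW. A hypothetical validity check under those semantics, built on the HAL_SRNG_DESC_LOOP_CNT mask from hal.h (no such helper exists in this patch, and which descriptor word carries the count is ring specific):

	static inline bool example_dst_desc_is_current(struct hal_srng *srng,
						       u32 desc_hdr)
	{
		/* A descriptor whose embedded loop count matches the SW
		 * expectation has been fully written by HW in the current
		 * pass around the ring; anything else is stale.
		 */
		return (desc_hdr & HAL_SRNG_DESC_LOOP_CNT) ==
		       u32_encode_bits(srng->u.dst_ring.loop_cnt,
				       HAL_SRNG_DESC_LOOP_CNT);
	}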
+
+static void ath12k_hal_srng_update_hp_tp_addr(struct ath12k_base *ab,
+ int shadow_cfg_idx,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct hal_srng *srng;
+ struct ath12k_hal *hal = &ab->hal;
+ int ring_id;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ ring_id = ath12k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
+ if (ring_id < 0)
+ return;
+
+ srng = &hal->srng_list[ring_id];
+
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+ else
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+}
+
+int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+ int shadow_cfg_idx = hal->num_shadow_reg_configured;
+ u32 target_reg;
+
+ if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
+ return -EINVAL;
+
+ hal->num_shadow_reg_configured++;
+
+ target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+ target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+ ring_num;
+
+ /* For destination ring, shadow the TP */
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+ hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+ /* Update hp/tp address in the hal structure */
+ ath12k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
+ ring_num);
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
+ target_reg,
+ HAL_SHADOW_REG(shadow_cfg_idx),
+ shadow_cfg_idx,
+ ring_type, ring_num);
+
+ return 0;
+}
+
+void ath12k_hal_srng_shadow_config(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ int ring_type, ring_num;
+
+ /* Update all the non-CE SRNGs */
+ for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ if (ring_type == HAL_CE_SRC ||
+ ring_type == HAL_CE_DST ||
+ ring_type == HAL_CE_DST_STATUS)
+ continue;
+
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_DMAC ||
+ srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
+ continue;
+
+ for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
+ ath12k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
+ }
+}
+
+void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
+ u32 **cfg, u32 *len)
+{
+ struct ath12k_hal *hal = &ab->hal;
+
+ *len = hal->num_shadow_reg_configured;
+ *cfg = hal->shadow_reg_addr;
+}
+
+void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* Check whether the ring is empty. Update the shadow
+ * HP only when the ring isn't empty.
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
+ *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
+ ath12k_hal_srng_access_end(ab, srng);
+}
+
+static void ath12k_hal_register_srng_lock_keys(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_register_key(&hal->srng_list[ring_id].lock_key);
+}
+
+static void ath12k_hal_unregister_srng_lock_keys(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_unregister_key(&hal->srng_list[ring_id].lock_key);
+}
+
+int ath12k_hal_srng_init(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ int ret;
+
+ memset(hal, 0, sizeof(*hal));
+
+ ret = ab->hw_params->hal_ops->create_srng_config(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath12k_hal_alloc_cont_rdp(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath12k_hal_alloc_cont_wrp(ab);
+ if (ret)
+ goto err_free_cont_rdp;
+
+ ath12k_hal_register_srng_lock_keys(ab);
+
+ return 0;
+
+err_free_cont_rdp:
+ ath12k_hal_free_cont_rdp(ab);
+
+err_hal:
+ return ret;
+}
+
+void ath12k_hal_srng_deinit(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+
+ ath12k_hal_unregister_srng_lock_keys(ab);
+ ath12k_hal_free_cont_rdp(ab);
+ ath12k_hal_free_cont_wrp(ab);
+ kfree(hal->srng_config);
+ hal->srng_config = NULL;
+}
+
+void ath12k_hal_dump_srng_stats(struct ath12k_base *ab)
+{
+ struct hal_srng *srng;
+ struct ath12k_ext_irq_grp *irq_grp;
+ struct ath12k_ce_pipe *ce_pipe;
+ int i;
+
+ ath12k_err(ab, "Last interrupt received for each CE:\n");
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ ath12k_err(ab, "CE_id %d pipe_num %d %ums before\n",
+ i, ce_pipe->pipe_num,
+ jiffies_to_msecs(jiffies - ce_pipe->timestamp));
+ }
+
+ ath12k_err(ab, "\nLast interrupt received for each group:\n");
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ ath12k_err(ab, "group_id %d %ums before\n",
+ irq_grp->grp_id,
+ jiffies_to_msecs(jiffies - irq_grp->timestamp));
+ }
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
+ srng = &ab->hal.srng_list[i];
+
+ if (!srng->initialized)
+ continue;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath12k_err(ab,
+ "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.src_ring.hp,
+ srng->u.src_ring.reap_hp,
+ *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
+ srng->u.src_ring.last_tp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ else if (srng->ring_dir == HAL_SRNG_DIR_DST)
+ ath12k_err(ab,
+ "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.dst_ring.tp,
+ *srng->u.dst_ring.hp_addr,
+ srng->u.dst_ring.cached_hp,
+ srng->u.dst_ring.last_hp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
new file mode 100644
index 000000000000..dfbd8bce70e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -0,0 +1,1142 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HAL_H
+#define ATH12K_HAL_H
+
+#include "hal_desc.h"
+#include "rx_desc.h"
+
+struct ath12k_base;
+
+#define HAL_LINK_DESC_SIZE (32 << 2)
+#define HAL_LINK_DESC_ALIGN 128
+#define HAL_NUM_MPDUS_PER_LINK_DESC 6
+#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7
+#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6
+#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12
+#define HAL_MAX_AVAIL_BLK_RES 3
+
+#define HAL_RING_BASE_ALIGN 8
+
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
+/* TODO: Check with hw team on the supported scatter buf size */
+#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
+ HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
+
+/* TODO: 16 entries per radio times MAX_VAPS_SUPPORTED */
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 32
+#define HAL_DSCP_TID_TBL_SIZE 24
+
+/* Calculate the register address of shadow register x relative to BAR0 */
+#define HAL_SHADOW_BASE_ADDR 0x000008fc
+#define HAL_SHADOW_NUM_REGS 40
+#define HAL_HP_OFFSET_IN_REG_START 1
+#define HAL_OFFSET_FROM_HP_TO_TP 4
+
+#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
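Worked example of the macro (the index is chosen arbitrarily):

	/* Shadow register 3 sits at 0x000008fc + 4 * 3 = 0x00000908 from BAR0 */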
+
+/* WCSS Relative address */
+#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
+#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x01b80000
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x01b81000
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x01b82000
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x01b83000
+#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+
+#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
+
+#define HAL_TCL_SW_CONFIG_BANK_ADDR 0x00a4408c
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020
+#define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240
+#define HAL_TCL1_RING_BASE_LSB 0x00000900
+#define HAL_TCL1_RING_BASE_MSB 0x00000904
+#define HAL_TCL1_RING_ID(ab) ((ab)->hw_params->regs->hal_tcl1_ring_id)
+#define HAL_TCL1_RING_MISC(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_misc)
+#define HAL_TCL1_RING_TP_ADDR_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_tp_addr_lsb)
+#define HAL_TCL1_RING_TP_ADDR_MSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_tp_addr_msb)
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_consumer_int_setup_ix0)
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_consumer_int_setup_ix1)
+#define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_lsb)
+#define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb)
+#define HAL_TCL1_RING_MSI1_DATA(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_msi1_data)
+#define HAL_TCL2_RING_BASE_LSB 0x00000978
+#define HAL_TCL_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl_ring_base_lsb)
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_BASE_MSB_OFFSET \
+ (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_ID_OFFSET(ab) \
+ (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MISC_OFFSET(ab) \
+ (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB)
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP 0x00002000
+#define HAL_TCL1_RING_TP 0x00002004
+#define HAL_TCL2_RING_HP 0x00002008
+#define HAL_TCL_RING_HP 0x00002028
+
+#define HAL_TCL1_RING_TP_OFFSET \
+ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl_status_ring_base_lsb)
+#define HAL_TCL_STATUS_RING_HP 0x00002048
+
+/* PPE2TCL1 Ring address */
+#define HAL_TCL_PPE2TCL1_RING_BASE_LSB 0x00000c48
+#define HAL_TCL_PPE2TCL1_RING_HP 0x00002038
+
+/* WBM PPE Release Ring address */
+#define HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_ppe_rel_ring_base)
+#define HAL_WBM_PPE_RELEASE_RING_HP 0x00003020
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_MISC_CTRL_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_reo1_misc_ctrl_addr)
+#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004
+#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
+#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_SW_COOKIE_CFG0(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg0)
+#define HAL_REO1_SW_COOKIE_CFG1(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg1)
+#define HAL_REO1_QDESC_LUT_BASE0(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base0)
+#define HAL_REO1_QDESC_LUT_BASE1(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base1)
+#define HAL_REO1_RING_BASE_LSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_base_lsb)
+#define HAL_REO1_RING_BASE_MSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_base_msb)
+#define HAL_REO1_RING_ID(ab) ((ab)->hw_params->regs->hal_reo1_ring_id)
+#define HAL_REO1_RING_MISC(ab) ((ab)->hw_params->regs->hal_reo1_ring_misc)
+#define HAL_REO1_RING_HP_ADDR_LSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_hp_addr_lsb)
+#define HAL_REO1_RING_HP_ADDR_MSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_hp_addr_msb)
+#define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \
+ ((ab)->hw_params->regs->hal_reo1_ring_producer_int_setup)
+#define HAL_REO1_RING_MSI1_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_reo1_ring_msi1_base_lsb)
+#define HAL_REO1_RING_MSI1_BASE_MSB(ab) \
+ ((ab)->hw_params->regs->hal_reo1_ring_msi1_base_msb)
+#define HAL_REO1_RING_MSI1_DATA(ab) ((ab)->hw_params->regs->hal_reo1_ring_msi1_data)
+#define HAL_REO2_RING_BASE_LSB(ab) ((ab)->hw_params->regs->hal_reo2_ring_base)
+#define HAL_REO1_AGING_THRESH_IX_0(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix0)
+#define HAL_REO1_AGING_THRESH_IX_1(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix1)
+#define HAL_REO1_AGING_THRESH_IX_2(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix2)
+#define HAL_REO1_AGING_THRESH_IX_3(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix3)
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP 0x00003048
+#define HAL_REO1_RING_TP 0x0000304c
+#define HAL_REO2_RING_HP 0x00003050
+
+#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
+
+/* REO2SW0 ring configuration address */
+#define HAL_REO_SW0_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_reo2_sw0_ring_base)
+
+/* REO2SW0 R2 ring pointer (head/tail) address */
+#define HAL_REO_SW0_RING_HP 0x00003088
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_reo_cmd_ring_base)
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP 0x00003020
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_sw2reo_ring_base)
+#define HAL_SW2REO1_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_sw2reo1_ring_base)
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP 0x00003028
+#define HAL_SW2REO1_RING_HP 0x00003030
+
+/* CE ring R0 address */
+#define HAL_CE_SRC_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
+#define HAL_CE_DST_RING_CTRL 0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP 0x00000400
+#define HAL_CE_DST_STATUS_RING_HP 0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_reo_status_ring_base)
+#define HAL_REO_STATUS_HP 0x000030a8
+
+/* WBM Idle R0 address */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_idle_ring_base_lsb)
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_wbm_idle_ring_misc_addr)
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_wbm_r0_idle_list_cntl_addr)
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_wbm_r0_idle_list_size_addr)
+#define HAL_WBM_SCATTERED_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_ring_base_lsb)
+#define HAL_WBM_SCATTERED_RING_BASE_MSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_ring_base_msb)
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_head_info_ix0)
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_head_info_ix1)
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_tail_info_ix0)
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_tail_info_ix1)
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_ptr_hp_addr)
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b8
+
+/* SW2WBM R0 release address */
+#define HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_sw_release_ring_base_lsb)
+#define HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_sw1_release_ring_base_lsb)
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_SW_RELEASE_RING_HP 0x00003010
+#define HAL_WBM_SW1_RELEASE_RING_HP 0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm0_release_ring_base_lsb)
+
+#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm1_release_ring_base_lsb)
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP 0x000030c8
+#define HAL_WBM1_RELEASE_RING_HP 0x000030d0
+
+/* WBM cookie config address and mask */
+#define HAL_WBM_SW_COOKIE_CFG0 0x00000040
+#define HAL_WBM_SW_COOKIE_CFG1 0x00000044
+#define HAL_WBM_SW_COOKIE_CFG2 0x00000090
+#define HAL_WBM_SW_COOKIE_CONVERT_CFG 0x00000094
+
+#define HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
+#define HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
+#define HAL_WBM_SW_COOKIE_CFG_ALIGN BIT(18)
+#define HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN BIT(0)
+#define HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN BIT(1)
+#define HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN BIT(3)
+
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN BIT(1)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN BIT(2)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN BIT(3)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN BIT(4)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN BIT(5)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN BIT(8)
+
+/* TCL ring field mask and offset */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE BIT(0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(23)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
+
+/* REO ring field mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_MISC_CTL_FRAG_DST_RING GENMASK(20, 17)
+#define HAL_REO1_MISC_CTL_BAR_DST_RING GENMASK(24, 21)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+#define HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
+#define HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
+#define HAL_REO1_SW_COOKIE_CFG_ALIGN BIT(18)
+#define HAL_REO1_SW_COOKIE_CFG_ENABLE BIT(19)
+#define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE BIT(20)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
+
+#define HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE BIT(0)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+#define HAL_RXDMA_RING_MAX_SIZE_BE 0x000fffff
+#define HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+
+#define HAL_WBM2SW_REL_ERR_RING_NUM 3
+/* Add any other errors here and return them in
+ * ath12k_hal_rx_desc_get_err().
+ */
+
+enum hal_srng_ring_id {
+ HAL_SRNG_RING_ID_REO2SW0 = 0,
+ HAL_SRNG_RING_ID_REO2SW1,
+ HAL_SRNG_RING_ID_REO2SW2,
+ HAL_SRNG_RING_ID_REO2SW3,
+ HAL_SRNG_RING_ID_REO2SW4,
+ HAL_SRNG_RING_ID_REO2SW5,
+ HAL_SRNG_RING_ID_REO2SW6,
+ HAL_SRNG_RING_ID_REO2SW7,
+ HAL_SRNG_RING_ID_REO2SW8,
+ HAL_SRNG_RING_ID_REO2TCL,
+ HAL_SRNG_RING_ID_REO2PPE,
+
+ HAL_SRNG_RING_ID_SW2REO = 16,
+ HAL_SRNG_RING_ID_SW2REO1,
+ HAL_SRNG_RING_ID_SW2REO2,
+ HAL_SRNG_RING_ID_SW2REO3,
+
+ HAL_SRNG_RING_ID_REO_CMD,
+ HAL_SRNG_RING_ID_REO_STATUS,
+
+ HAL_SRNG_RING_ID_SW2TCL1 = 24,
+ HAL_SRNG_RING_ID_SW2TCL2,
+ HAL_SRNG_RING_ID_SW2TCL3,
+ HAL_SRNG_RING_ID_SW2TCL4,
+ HAL_SRNG_RING_ID_SW2TCL5,
+ HAL_SRNG_RING_ID_SW2TCL6,
+ HAL_SRNG_RING_ID_PPE2TCL1 = 30,
+
+ HAL_SRNG_RING_ID_SW2TCL_CMD = 40,
+ HAL_SRNG_RING_ID_SW2TCL1_CMD,
+ HAL_SRNG_RING_ID_TCL_STATUS,
+
+ HAL_SRNG_RING_ID_CE0_SRC = 64,
+ HAL_SRNG_RING_ID_CE1_SRC,
+ HAL_SRNG_RING_ID_CE2_SRC,
+ HAL_SRNG_RING_ID_CE3_SRC,
+ HAL_SRNG_RING_ID_CE4_SRC,
+ HAL_SRNG_RING_ID_CE5_SRC,
+ HAL_SRNG_RING_ID_CE6_SRC,
+ HAL_SRNG_RING_ID_CE7_SRC,
+ HAL_SRNG_RING_ID_CE8_SRC,
+ HAL_SRNG_RING_ID_CE9_SRC,
+ HAL_SRNG_RING_ID_CE10_SRC,
+ HAL_SRNG_RING_ID_CE11_SRC,
+ HAL_SRNG_RING_ID_CE12_SRC,
+ HAL_SRNG_RING_ID_CE13_SRC,
+ HAL_SRNG_RING_ID_CE14_SRC,
+ HAL_SRNG_RING_ID_CE15_SRC,
+
+ HAL_SRNG_RING_ID_CE0_DST = 81,
+ HAL_SRNG_RING_ID_CE1_DST,
+ HAL_SRNG_RING_ID_CE2_DST,
+ HAL_SRNG_RING_ID_CE3_DST,
+ HAL_SRNG_RING_ID_CE4_DST,
+ HAL_SRNG_RING_ID_CE5_DST,
+ HAL_SRNG_RING_ID_CE6_DST,
+ HAL_SRNG_RING_ID_CE7_DST,
+ HAL_SRNG_RING_ID_CE8_DST,
+ HAL_SRNG_RING_ID_CE9_DST,
+ HAL_SRNG_RING_ID_CE10_DST,
+ HAL_SRNG_RING_ID_CE11_DST,
+ HAL_SRNG_RING_ID_CE12_DST,
+ HAL_SRNG_RING_ID_CE13_DST,
+ HAL_SRNG_RING_ID_CE14_DST,
+ HAL_SRNG_RING_ID_CE15_DST,
+
+ HAL_SRNG_RING_ID_CE0_DST_STATUS = 100,
+ HAL_SRNG_RING_ID_CE1_DST_STATUS,
+ HAL_SRNG_RING_ID_CE2_DST_STATUS,
+ HAL_SRNG_RING_ID_CE3_DST_STATUS,
+ HAL_SRNG_RING_ID_CE4_DST_STATUS,
+ HAL_SRNG_RING_ID_CE5_DST_STATUS,
+ HAL_SRNG_RING_ID_CE6_DST_STATUS,
+ HAL_SRNG_RING_ID_CE7_DST_STATUS,
+ HAL_SRNG_RING_ID_CE8_DST_STATUS,
+ HAL_SRNG_RING_ID_CE9_DST_STATUS,
+ HAL_SRNG_RING_ID_CE10_DST_STATUS,
+ HAL_SRNG_RING_ID_CE11_DST_STATUS,
+ HAL_SRNG_RING_ID_CE12_DST_STATUS,
+ HAL_SRNG_RING_ID_CE13_DST_STATUS,
+ HAL_SRNG_RING_ID_CE14_DST_STATUS,
+ HAL_SRNG_RING_ID_CE15_DST_STATUS,
+
+ HAL_SRNG_RING_ID_WBM_IDLE_LINK = 120,
+ HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
+ HAL_SRNG_RING_ID_WBM_SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM_PPE_RELEASE = 123,
+
+ HAL_SRNG_RING_ID_WBM2SW0_RELEASE = 128,
+ HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW3_RELEASE, /* RX ERROR RING */
+ HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW5_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW6_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW7_RELEASE,
+
+ HAL_SRNG_RING_ID_UMAC_ID_END = 159,
+
+ /* Common DMAC rings shared by all LMACs */
+ HAL_SRNG_RING_ID_DMAC_CMN_ID_START = 160,
+ HAL_SRNG_SW2RXDMA_BUF0 = HAL_SRNG_RING_ID_DMAC_CMN_ID_START,
+ HAL_SRNG_SW2RXDMA_BUF1 = 161,
+ HAL_SRNG_SW2RXDMA_BUF2 = 162,
+
+ HAL_SRNG_SW2RXMON_BUF0 = 168,
+
+ HAL_SRNG_SW2TXMON_BUF0 = 176,
+
+ HAL_SRNG_RING_ID_DMAC_CMN_ID_END = 183,
+ HAL_SRNG_RING_ID_PMAC1_ID_START = 184,
+
+ HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0 = HAL_SRNG_RING_ID_PMAC1_ID_START,
+
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
+ HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+
+ HAL_SRNG_RING_ID_PMAC1_ID_END,
+};
+
+/* SRNG registers are split into two groups R0 and R2 */
+#define HAL_SRNG_REG_GRP_R0 0
+#define HAL_SRNG_REG_GRP_R2 1
+#define HAL_SRNG_NUM_REG_GRP 2
+
+/* TODO: number of PMACs */
+#define HAL_SRNG_NUM_PMACS 3
+#define HAL_SRNG_NUM_DMAC_RINGS (HAL_SRNG_RING_ID_DMAC_CMN_ID_END - \
+ HAL_SRNG_RING_ID_DMAC_CMN_ID_START)
+#define HAL_SRNG_RINGS_PER_PMAC (HAL_SRNG_RING_ID_PMAC1_ID_END - \
+ HAL_SRNG_RING_ID_PMAC1_ID_START)
+#define HAL_SRNG_NUM_PMAC_RINGS (HAL_SRNG_NUM_PMACS * HAL_SRNG_RINGS_PER_PMAC)
+#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_DMAC_CMN_ID_END + \
+ HAL_SRNG_NUM_PMAC_RINGS)
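Plugging the enum values above into these macros, as a quick sanity check of the arithmetic:

	/* HAL_SRNG_RING_ID_PMAC1_ID_END = 191 and _ID_START = 184, so
	 * HAL_SRNG_RINGS_PER_PMAC = 7; with HAL_SRNG_NUM_PMACS = 3 that
	 * gives HAL_SRNG_NUM_PMAC_RINGS = 21 and
	 * HAL_SRNG_RING_ID_MAX = 183 + 21 = 204 hal_srng slots.
	 */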
+
+enum hal_ring_type {
+ HAL_REO_DST,
+ HAL_REO_EXCEPTION,
+ HAL_REO_REINJECT,
+ HAL_REO_CMD,
+ HAL_REO_STATUS,
+ HAL_TCL_DATA,
+ HAL_TCL_CMD,
+ HAL_TCL_STATUS,
+ HAL_CE_SRC,
+ HAL_CE_DST,
+ HAL_CE_DST_STATUS,
+ HAL_WBM_IDLE_LINK,
+ HAL_SW2WBM_RELEASE,
+ HAL_WBM2SW_RELEASE,
+ HAL_RXDMA_BUF,
+ HAL_RXDMA_DST,
+ HAL_RXDMA_MONITOR_BUF,
+ HAL_RXDMA_MONITOR_STATUS,
+ HAL_RXDMA_MONITOR_DST,
+ HAL_RXDMA_MONITOR_DESC,
+ HAL_RXDMA_DIR_BUF,
+ HAL_PPE2TCL,
+ HAL_PPE_RELEASE,
+ HAL_TX_MONITOR_BUF,
+ HAL_TX_MONITOR_DST,
+ HAL_MAX_RING_TYPES,
+};
+
+#define HAL_RX_MAX_BA_WINDOW 256
+
+#define HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC (100 * 1000)
+#define HAL_DEFAULT_VO_REO_TIMEOUT_USEC (40 * 1000)
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ * earlier with a 'REO_FLUSH_CACHE' command
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
+ */
+enum hal_reo_cmd_type {
+ HAL_REO_CMD_GET_QUEUE_STATS = 0,
+ HAL_REO_CMD_FLUSH_QUEUE = 1,
+ HAL_REO_CMD_FLUSH_CACHE = 2,
+ HAL_REO_CMD_UNBLOCK_CACHE = 3,
+ HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4,
+ HAL_REO_CMD_UPDATE_RX_QUEUE = 5,
+};
+
+/**
+ * enum hal_reo_cmd_status: Enum for execution status of REO command
+ * @HAL_REO_CMD_SUCCESS: Command has successfully executed
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
+ * or cache was blocked
+ * @HAL_REO_CMD_FAILED: Command execution failed, could be due to
+ * invalid queue desc
+ * @HAL_REO_CMD_RESOURCE_BLOCKED:
+ * @HAL_REO_CMD_DRAIN:
+ */
+enum hal_reo_cmd_status {
+ HAL_REO_CMD_SUCCESS = 0,
+ HAL_REO_CMD_BLOCKED = 1,
+ HAL_REO_CMD_FAILED = 2,
+ HAL_REO_CMD_RESOURCE_BLOCKED = 3,
+ HAL_REO_CMD_DRAIN = 0xff,
+};
+
+struct hal_wbm_idle_scatter_list {
+ dma_addr_t paddr;
+ struct hal_wbm_link_desc *vaddr;
+};
+
+struct hal_srng_params {
+ dma_addr_t ring_base_paddr;
+ u32 *ring_base_vaddr;
+ int num_entries;
+ u32 intr_batch_cntr_thres_entries;
+ u32 intr_timer_thres_us;
+ u32 flags;
+ u32 max_buffer_len;
+ u32 low_threshold;
+ u32 high_threshold;
+ dma_addr_t msi_addr;
+ dma_addr_t msi2_addr;
+ u32 msi_data;
+ u32 msi2_data;
+
+ /* Add more params as needed */
+};
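A hypothetical caller of ath12k_hal_srng_setup() built on this struct; the ring type, entry count and helper name are illustrative only, and a real caller allocates and DMA-maps the ring memory first (the interrupt threshold constants are defined a little further down in this header):

	static int example_setup_reo_dst_ring(struct ath12k_base *ab,
					      u32 *vaddr, dma_addr_t paddr)
	{
		struct hal_srng_params params = {
			.ring_base_vaddr = vaddr,
			.ring_base_paddr = paddr,
			.num_entries = 256,
			.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX,
			.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX,
		};

		/* Returns the allocated ring_id (>= 0) or a negative errno */
		return ath12k_hal_srng_setup(ab, HAL_REO_DST, 0, 0, &params);
	}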
+
+enum hal_srng_dir {
+ HAL_SRNG_DIR_SRC,
+ HAL_SRNG_DIR_DST
+};
+
+/* srng flags */
+#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
+#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
+#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
+#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
+#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
+#define HAL_SRNG_FLAGS_HIGH_THRESH_INTR_EN 0x00080000
+#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+
+#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+ /* Unique SRNG ring ID */
+ u8 ring_id;
+
+ /* Ring initialization done */
+ u8 initialized;
+
+ /* Interrupt/MSI value assigned to this ring */
+ int irq;
+
+ /* Physical base address of the ring */
+ dma_addr_t ring_base_paddr;
+
+ /* Virtual base address of the ring */
+ u32 *ring_base_vaddr;
+
+ /* Number of entries in ring */
+ u32 num_entries;
+
+ /* Ring size */
+ u32 ring_size;
+
+ /* Ring size mask */
+ u32 ring_size_mask;
+
+ /* Size of ring entry */
+ u32 entry_size;
+
+ /* Interrupt timer threshold - in microseconds */
+ u32 intr_timer_thres_us;
+
+ /* Interrupt batch counter threshold - in number of ring entries */
+ u32 intr_batch_cntr_thres_entries;
+
+ /* MSI Address */
+ dma_addr_t msi_addr;
+
+ /* MSI data */
+ u32 msi_data;
+
+ /* MSI2 Address */
+ dma_addr_t msi2_addr;
+
+ /* MSI2 data */
+ u32 msi2_data;
+
+ /* Misc flags */
+ u32 flags;
+
+ /* Lock for serializing ring index updates */
+ spinlock_t lock;
+
+ struct lock_class_key lock_key;
+
+ /* Start offset of SRNG register groups for this ring
+ * TBD: See if this is required - register address can be derived
+ * from ring ID
+ */
+ u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+
+ u64 timestamp;
+
+ /* Source or Destination ring */
+ enum hal_srng_dir ring_dir;
+
+ union {
+ struct {
+ /* SW tail pointer */
+ u32 tp;
+
+ /* Shadow head pointer location to be updated by HW */
+ volatile u32 *hp_addr;
+
+ /* Cached head pointer */
+ u32 cached_hp;
+
+ /* Tail pointer location to be updated by SW - This
+ * will be a register address and need not be
+ * accessed through SW structure
+ */
+ u32 *tp_addr;
+
+ /* Current SW loop cnt */
+ u32 loop_cnt;
+
+ /* max transfer size */
+ u16 max_buffer_length;
+
+ /* head pointer at access end */
+ u32 last_hp;
+ } dst_ring;
+
+ struct {
+ /* SW head pointer */
+ u32 hp;
+
+ /* SW reap head pointer */
+ u32 reap_hp;
+
+ /* Shadow tail pointer location to be updated by HW */
+ u32 *tp_addr;
+
+ /* Cached tail pointer */
+ u32 cached_tp;
+
+ /* Head pointer location to be updated by SW - This
+ * will be a register address and need not be accessed
+ * through SW structure
+ */
+ u32 *hp_addr;
+
+ /* Low threshold - in number of ring entries */
+ u32 low_threshold;
+
+ /* tail pointer at access end */
+ u32 last_tp;
+ } src_ring;
+ } u;
+};
+
+/* Interrupt mitigation - batch threshold in terms of number of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
+
+enum hal_srng_mac_type {
+ ATH12K_HAL_SRNG_UMAC,
+ ATH12K_HAL_SRNG_DMAC,
+ ATH12K_HAL_SRNG_PMAC
+};
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+ int start_ring_id;
+ u16 max_rings;
+ u16 entry_size;
+ u32 reg_start[HAL_SRNG_NUM_REG_GRP];
+ u16 reg_size[HAL_SRNG_NUM_REG_GRP];
+ enum hal_srng_mac_type mac_type;
+ enum hal_srng_dir ring_dir;
+ u32 max_size;
+};
+
+/**
+ * enum hal_rx_buf_return_buf_manager
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For ring 2 -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For ring 3 -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For ring 4 -- returned to host
+ * @HAL_RX_BUF_RBM_SW5_BM: For ring 5 -- returned to host
+ * @HAL_RX_BUF_RBM_SW6_BM: For ring 6 -- returned to host
+ */
+enum hal_rx_buf_return_buf_manager {
+ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+ HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_FW_BM,
+ HAL_RX_BUF_RBM_SW0_BM,
+ HAL_RX_BUF_RBM_SW1_BM,
+ HAL_RX_BUF_RBM_SW2_BM,
+ HAL_RX_BUF_RBM_SW3_BM,
+ HAL_RX_BUF_RBM_SW4_BM,
+ HAL_RX_BUF_RBM_SW5_BM,
+ HAL_RX_BUF_RBM_SW6_BM,
+};
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+struct ath12k_hal_reo_cmd {
+ u32 addr_lo;
+ u32 flag;
+ u32 upd0;
+ u32 upd1;
+ u32 upd2;
+ u32 pn[4];
+ u16 rx_queue_num;
+ u16 min_rel;
+ u16 min_fwd;
+ u8 addr_hi;
+ u8 ac_list;
+ u8 blocking_idx;
+ u16 ba_window_size;
+ u8 pn_size;
+};
+
+enum hal_pn_type {
+ HAL_PN_TYPE_NONE,
+ HAL_PN_TYPE_WPA,
+ HAL_PN_TYPE_WAPI_EVEN,
+ HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+enum hal_ce_desc {
+ HAL_CE_DESC_SRC,
+ HAL_CE_DESC_DST,
+ HAL_CE_DESC_DST_STATUS,
+};
+
+#define HAL_HASH_ROUTING_RING_TCL 0
+#define HAL_HASH_ROUTING_RING_SW1 1
+#define HAL_HASH_ROUTING_RING_SW2 2
+#define HAL_HASH_ROUTING_RING_SW3 3
+#define HAL_HASH_ROUTING_RING_SW4 4
+#define HAL_HASH_ROUTING_RING_REL 5
+#define HAL_HASH_ROUTING_RING_FW 6
+
+struct hal_reo_status_header {
+ u16 cmd_num;
+ enum hal_reo_cmd_status cmd_status;
+ u16 cmd_exe_time;
+ u32 timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+ u16 ssn;
+ u16 curr_idx;
+ u32 pn[4];
+ u32 last_rx_queue_ts;
+ u32 last_rx_dequeue_ts;
+ u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+ u32 curr_mpdu_cnt;
+ u32 curr_msdu_cnt;
+ u16 fwd_due_to_bar_cnt;
+ u16 dup_cnt;
+ u32 frames_in_order_cnt;
+ u32 num_mpdu_processed_cnt;
+ u32 num_msdu_processed_cnt;
+ u32 total_num_processed_byte_cnt;
+ u32 late_rx_mpdu_cnt;
+ u32 reorder_hole_cnt;
+ u8 timeout_cnt;
+ u8 bar_rx_cnt;
+ u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+ bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+ bool err_detected;
+ enum hal_reo_status_flush_cache_err_code err_code;
+ bool cache_controller_flush_status_hit;
+ u8 cache_controller_flush_status_desc_type;
+ u8 cache_controller_flush_status_client_id;
+ u8 cache_controller_flush_status_err;
+ u8 cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+ HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+ bool err_detected;
+ enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+ bool err_detected;
+ bool list_empty;
+ u16 release_desc_cnt;
+ u16 fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+ enum hal_reo_threshold_idx threshold_idx;
+ u32 link_desc_counter0;
+ u32 link_desc_counter1;
+ u32 link_desc_counter2;
+ u32 link_desc_counter_sum;
+};
+
+struct hal_reo_status {
+ struct hal_reo_status_header uniform_hdr;
+ u8 loop_cnt;
+ union {
+ struct hal_reo_status_queue_stats queue_stats;
+ struct hal_reo_status_flush_queue flush_queue;
+ struct hal_reo_status_flush_cache flush_cache;
+ struct hal_reo_status_unblock_cache unblock_cache;
+ struct hal_reo_status_flush_timeout_list timeout_list;
+ struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+ } u;
+};
+
+/* HAL context to be used to access SRNG APIs (currently used by data path
+ * and transport (CE) modules)
+ */
+struct ath12k_hal {
+ /* HAL internal state for all SRNG rings.
+ */
+ struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
+
+ /* SRNG configuration table */
+ struct hal_srng_config *srng_config;
+
+ /* Remote pointer memory for HW/FW updates */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } rdp;
+
+ /* Shared memory for ring pointer updates from host to FW */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } wrp;
+
+ /* Available REO blocking resources bitmap */
+ u8 avail_blk_resource;
+
+ u8 current_blk_index;
+
+ /* shadow register configuration */
+ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
+ int num_shadow_reg_configured;
+};
+
+/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */
+struct ath12k_hal_tcl_to_wbm_rbm_map {
+ u8 wbm_ring_num;
+ u8 rbm_id;
+};
+
+struct hal_ops {
+ bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
+ bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_copy_end_tlv)(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc);
+ u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len);
+ struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_mpdu_start_offset)(void);
+ u32 (*rx_desc_get_msdu_end_offset)(void);
+ bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
+ bool (*rx_desc_is_mcbc)(struct hal_rx_desc *desc);
+ void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr);
+ u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc);
+ void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype);
+ int (*create_srng_config)(struct ath12k_base *ab);
+ bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
+ bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
+ bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
+ bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
+ u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
+};
+
+extern const struct hal_ops hal_qcn9274_ops;
+extern const struct hal_ops hal_wcn7850_ops;
+
+u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
+void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
+ int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type);
+void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map);
+void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+
+dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
+ struct hal_srng *srng);
+dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr);
+u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type);
+void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
+ u32 len, u32 id, u8 byte_swap_data);
+void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr);
+u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc);
+int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type);
+int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type);
+void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params);
+void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng);
+int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
+ struct hal_srng *srng);
+int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void ath12k_hal_srng_access_begin(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng);
+int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params);
+int ath12k_hal_srng_init(struct ath12k_base *ath12k);
+void ath12k_hal_srng_deinit(struct ath12k_base *ath12k);
+void ath12k_hal_dump_srng_stats(struct ath12k_base *ab);
+void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
+ u32 **cfg, u32 *len);
+int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+void ath12k_hal_srng_shadow_config(struct ath12k_base *ab);
+void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
new file mode 100644
index 000000000000..2250ca2d19a3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -0,0 +1,2961 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "core.h"
+
+#ifndef ATH12K_HAL_DESC_H
+#define ATH12K_HAL_DESC_H
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(11, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 12)
+
+struct ath12k_buffer_addr {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+/* ath12k_buffer_addr
+ *
+ * buffer_addr_31_0
+ * Address (lower 32 bits) of the MSDU buffer or MSDU_EXTENSION
+ * descriptor or Link descriptor
+ *
+ * buffer_addr_39_32
+ * Address (upper 8 bits) of the MSDU buffer or MSDU_EXTENSION
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer or MSDU_EXTENSION
+ * descriptor or link descriptor that is being pointed to shall be
+ * returned after the frame has been processed. It is used by WBM
+ * for routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ * Cookie field exclusively used by SW. HW ignores the contents,
+ * accept that it passes the programmed value on to other
+ * descriptors together with the physical address.
+ *
+ * Field can be used by SW, for example, to associate the buffer's
+ * physical address with its virtual address.
+ *
+ * NOTE1:
+ * The three most significant bits can have a special meaning
+ * in case this struct is embedded in a TX_MPDU_DETAILS STRUCT,
+ * and field transmit_bw_restriction is set
+ *
+ * In case of NON punctured transmission:
+ * Sw_buffer_cookie[19:17] = 3'b000: 20 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b001: 40 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b010: 80 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b011: 160 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b101: 240 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b100: 320 MHz TX only
+ * Sw_buffer_cookie[19:18] = 2'b11: reserved
+ *
+ * In case of punctured transmission:
+ * Sw_buffer_cookie[19:16] = 4'b0000: pattern 0 only
+ * Sw_buffer_cookie[19:16] = 4'b0001: pattern 1 only
+ * Sw_buffer_cookie[19:16] = 4'b0010: pattern 2 only
+ * Sw_buffer_cookie[19:16] = 4'b0011: pattern 3 only
+ * Sw_buffer_cookie[19:16] = 4'b0100: pattern 4 only
+ * Sw_buffer_cookie[19:16] = 4'b0101: pattern 5 only
+ * Sw_buffer_cookie[19:16] = 4'b0110: pattern 6 only
+ * Sw_buffer_cookie[19:16] = 4'b0111: pattern 7 only
+ * Sw_buffer_cookie[19:16] = 4'b1000: pattern 8 only
+ * Sw_buffer_cookie[19:16] = 4'b1001: pattern 9 only
+ * Sw_buffer_cookie[19:16] = 4'b1010: pattern 10 only
+ * Sw_buffer_cookie[19:16] = 4'b1011: pattern 11 only
+ * Sw_buffer_cookie[19:18] = 2'b11: reserved
+ *
+ * Note: a punctured transmission is indicated by the presence
+ * of TLV TX_PUNCTURE_SETUP embedded in the scheduler TLV
+ *
+ * Sw_buffer_cookie[20:17]: Tid: The TID field in the QoS control
+ * field
+ *
+ * Sw_buffer_cookie[16]: Mpdu_qos_control_valid: This field
+ * indicates MPDUs with a QoS control field.
+ *
+ */
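+
+/* Editor's sketch, not part of the original patch: reassembling the
+ * 40-bit DMA address that ath12k_buffer_addr splits across info0 and
+ * info1. le32_get_bits() comes from <linux/bitfield.h>; the helper name
+ * is made up and a 64-bit dma_addr_t is assumed.
+ */
+static inline dma_addr_t
+ath12k_example_buf_paddr(const struct ath12k_buffer_addr *b)
+{
+	/* lower 32 bits in info0, upper 8 bits in info1 */
+	return (dma_addr_t)le32_get_bits(b->info0, BUFFER_ADDR_INFO0_ADDR) |
+	       ((dma_addr_t)le32_get_bits(b->info1, BUFFER_ADDR_INFO1_ADDR) << 32);
+}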
+
+enum hal_tlv_tag {
+ HAL_MACTX_CBF_START = 0 /* 0x0 */,
+ HAL_PHYRX_DATA = 1 /* 0x1 */,
+ HAL_PHYRX_CBF_DATA_RESP = 2 /* 0x2 */,
+ HAL_PHYRX_ABORT_REQUEST = 3 /* 0x3 */,
+ HAL_PHYRX_USER_ABORT_NOTIFICATION = 4 /* 0x4 */,
+ HAL_MACTX_DATA_RESP = 5 /* 0x5 */,
+ HAL_MACTX_CBF_DATA = 6 /* 0x6 */,
+ HAL_MACTX_CBF_DONE = 7 /* 0x7 */,
+ HAL_PHYRX_LMR_DATA_RESP = 8 /* 0x8 */,
+ HAL_RXPCU_TO_UCODE_START = 9 /* 0x9 */,
+ HAL_RXPCU_TO_UCODE_DELIMITER_FOR_FULL_MPDU = 10 /* 0xa */,
+ HAL_RXPCU_TO_UCODE_FULL_MPDU_DATA = 11 /* 0xb */,
+ HAL_RXPCU_TO_UCODE_FCS_STATUS = 12 /* 0xc */,
+ HAL_RXPCU_TO_UCODE_MPDU_DELIMITER = 13 /* 0xd */,
+ HAL_RXPCU_TO_UCODE_DELIMITER_FOR_MPDU_HEADER = 14 /* 0xe */,
+ HAL_RXPCU_TO_UCODE_MPDU_HEADER_DATA = 15 /* 0xf */,
+ HAL_RXPCU_TO_UCODE_END = 16 /* 0x10 */,
+ HAL_MACRX_CBF_READ_REQUEST = 32 /* 0x20 */,
+ HAL_MACRX_CBF_DATA_REQUEST = 33 /* 0x21 */,
+ HAL_MACRXXPECT_NDP_RECEPTION = 34 /* 0x22 */,
+ HAL_MACRX_FREEZE_CAPTURE_CHANNEL = 35 /* 0x23 */,
+ HAL_MACRX_NDP_TIMEOUT = 36 /* 0x24 */,
+ HAL_MACRX_ABORT_ACK = 37 /* 0x25 */,
+ HAL_MACRX_REQ_IMPLICIT_FB = 38 /* 0x26 */,
+ HAL_MACRX_CHAIN_MASK = 39 /* 0x27 */,
+ HAL_MACRX_NAP_USER = 40 /* 0x28 */,
+ HAL_MACRX_ABORT_REQUEST = 41 /* 0x29 */,
+ HAL_PHYTX_OTHER_TRANSMIT_INFO16 = 42 /* 0x2a */,
+ HAL_PHYTX_ABORT_ACK = 43 /* 0x2b */,
+ HAL_PHYTX_ABORT_REQUEST = 44 /* 0x2c */,
+ HAL_PHYTX_PKT_END = 45 /* 0x2d */,
+ HAL_PHYTX_PPDU_HEADER_INFO_REQUEST = 46 /* 0x2e */,
+ HAL_PHYTX_REQUEST_CTRL_INFO = 47 /* 0x2f */,
+ HAL_PHYTX_DATA_REQUEST = 48 /* 0x30 */,
+ HAL_PHYTX_BF_CV_LOADING_DONE = 49 /* 0x31 */,
+ HAL_PHYTX_NAP_ACK = 50 /* 0x32 */,
+ HAL_PHYTX_NAP_DONE = 51 /* 0x33 */,
+ HAL_PHYTX_OFF_ACK = 52 /* 0x34 */,
+ HAL_PHYTX_ON_ACK = 53 /* 0x35 */,
+ HAL_PHYTX_SYNTH_OFF_ACK = 54 /* 0x36 */,
+ HAL_PHYTX_DEBUG16 = 55 /* 0x37 */,
+ HAL_MACTX_ABORT_REQUEST = 56 /* 0x38 */,
+ HAL_MACTX_ABORT_ACK = 57 /* 0x39 */,
+ HAL_MACTX_PKT_END = 58 /* 0x3a */,
+ HAL_MACTX_PRE_PHY_DESC = 59 /* 0x3b */,
+ HAL_MACTX_BF_PARAMS_COMMON = 60 /* 0x3c */,
+ HAL_MACTX_BF_PARAMS_PER_USER = 61 /* 0x3d */,
+ HAL_MACTX_PREFETCH_CV = 62 /* 0x3e */,
+ HAL_MACTX_USER_DESC_COMMON = 63 /* 0x3f */,
+ HAL_MACTX_USER_DESC_PER_USER = 64 /* 0x40 */,
+ HAL_XAMPLE_USER_TLV_16 = 65 /* 0x41 */,
+ HAL_XAMPLE_TLV_16 = 66 /* 0x42 */,
+ HAL_MACTX_PHY_OFF = 67 /* 0x43 */,
+ HAL_MACTX_PHY_ON = 68 /* 0x44 */,
+ HAL_MACTX_SYNTH_OFF = 69 /* 0x45 */,
+ HAL_MACTXXPECT_CBF_COMMON = 70 /* 0x46 */,
+ HAL_MACTXXPECT_CBF_PER_USER = 71 /* 0x47 */,
+ HAL_MACTX_PHY_DESC = 72 /* 0x48 */,
+ HAL_MACTX_L_SIG_A = 73 /* 0x49 */,
+ HAL_MACTX_L_SIG_B = 74 /* 0x4a */,
+ HAL_MACTX_HT_SIG = 75 /* 0x4b */,
+ HAL_MACTX_VHT_SIG_A = 76 /* 0x4c */,
+ HAL_MACTX_VHT_SIG_B_SU20 = 77 /* 0x4d */,
+ HAL_MACTX_VHT_SIG_B_SU40 = 78 /* 0x4e */,
+ HAL_MACTX_VHT_SIG_B_SU80 = 79 /* 0x4f */,
+ HAL_MACTX_VHT_SIG_B_SU160 = 80 /* 0x50 */,
+ HAL_MACTX_VHT_SIG_B_MU20 = 81 /* 0x51 */,
+ HAL_MACTX_VHT_SIG_B_MU40 = 82 /* 0x52 */,
+ HAL_MACTX_VHT_SIG_B_MU80 = 83 /* 0x53 */,
+ HAL_MACTX_VHT_SIG_B_MU160 = 84 /* 0x54 */,
+ HAL_MACTX_SERVICE = 85 /* 0x55 */,
+ HAL_MACTX_HE_SIG_A_SU = 86 /* 0x56 */,
+ HAL_MACTX_HE_SIG_A_MU_DL = 87 /* 0x57 */,
+ HAL_MACTX_HE_SIG_A_MU_UL = 88 /* 0x58 */,
+ HAL_MACTX_HE_SIG_B1_MU = 89 /* 0x59 */,
+ HAL_MACTX_HE_SIG_B2_MU = 90 /* 0x5a */,
+ HAL_MACTX_HE_SIG_B2_OFDMA = 91 /* 0x5b */,
+ HAL_MACTX_DELETE_CV = 92 /* 0x5c */,
+ HAL_MACTX_MU_UPLINK_COMMON = 93 /* 0x5d */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP = 94 /* 0x5e */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO = 95 /* 0x5f */,
+ HAL_MACTX_PHY_NAP = 96 /* 0x60 */,
+ HAL_MACTX_DEBUG = 97 /* 0x61 */,
+ HAL_PHYRX_ABORT_ACK = 98 /* 0x62 */,
+ HAL_PHYRX_GENERATED_CBF_DETAILS = 99 /* 0x63 */,
+ HAL_PHYRX_RSSI_LEGACY = 100 /* 0x64 */,
+ HAL_PHYRX_RSSI_HT = 101 /* 0x65 */,
+ HAL_PHYRX_USER_INFO = 102 /* 0x66 */,
+ HAL_PHYRX_PKT_END = 103 /* 0x67 */,
+ HAL_PHYRX_DEBUG = 104 /* 0x68 */,
+ HAL_PHYRX_CBF_TRANSFER_DONE = 105 /* 0x69 */,
+ HAL_PHYRX_CBF_TRANSFER_ABORT = 106 /* 0x6a */,
+ HAL_PHYRX_L_SIG_A = 107 /* 0x6b */,
+ HAL_PHYRX_L_SIG_B = 108 /* 0x6c */,
+ HAL_PHYRX_HT_SIG = 109 /* 0x6d */,
+ HAL_PHYRX_VHT_SIG_A = 110 /* 0x6e */,
+ HAL_PHYRX_VHT_SIG_B_SU20 = 111 /* 0x6f */,
+ HAL_PHYRX_VHT_SIG_B_SU40 = 112 /* 0x70 */,
+ HAL_PHYRX_VHT_SIG_B_SU80 = 113 /* 0x71 */,
+ HAL_PHYRX_VHT_SIG_B_SU160 = 114 /* 0x72 */,
+ HAL_PHYRX_VHT_SIG_B_MU20 = 115 /* 0x73 */,
+ HAL_PHYRX_VHT_SIG_B_MU40 = 116 /* 0x74 */,
+ HAL_PHYRX_VHT_SIG_B_MU80 = 117 /* 0x75 */,
+ HAL_PHYRX_VHT_SIG_B_MU160 = 118 /* 0x76 */,
+ HAL_PHYRX_HE_SIG_A_SU = 119 /* 0x77 */,
+ HAL_PHYRX_HE_SIG_A_MU_DL = 120 /* 0x78 */,
+ HAL_PHYRX_HE_SIG_A_MU_UL = 121 /* 0x79 */,
+ HAL_PHYRX_HE_SIG_B1_MU = 122 /* 0x7a */,
+ HAL_PHYRX_HE_SIG_B2_MU = 123 /* 0x7b */,
+ HAL_PHYRX_HE_SIG_B2_OFDMA = 124 /* 0x7c */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO = 125 /* 0x7d */,
+ HAL_PHYRX_COMMON_USER_INFO = 126 /* 0x7e */,
+ HAL_PHYRX_DATA_DONE = 127 /* 0x7f */,
+ HAL_COEX_TX_REQ = 128 /* 0x80 */,
+ HAL_DUMMY = 129 /* 0x81 */,
+ HALXAMPLE_TLV_32_NAME = 130 /* 0x82 */,
+ HAL_MPDU_LIMIT = 131 /* 0x83 */,
+ HAL_NA_LENGTH_END = 132 /* 0x84 */,
+ HAL_OLE_BUF_STATUS = 133 /* 0x85 */,
+ HAL_PCU_PPDU_SETUP_DONE = 134 /* 0x86 */,
+ HAL_PCU_PPDU_SETUP_END = 135 /* 0x87 */,
+ HAL_PCU_PPDU_SETUP_INIT = 136 /* 0x88 */,
+ HAL_PCU_PPDU_SETUP_START = 137 /* 0x89 */,
+ HAL_PDG_FES_SETUP = 138 /* 0x8a */,
+ HAL_PDG_RESPONSE = 139 /* 0x8b */,
+ HAL_PDG_TX_REQ = 140 /* 0x8c */,
+ HAL_SCH_WAIT_INSTR = 141 /* 0x8d */,
+ HAL_TQM_FLOWMPTY_STATUS = 143 /* 0x8f */,
+ HAL_TQM_FLOW_NOTMPTY_STATUS = 144 /* 0x90 */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST = 145 /* 0x91 */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS = 146 /* 0x92 */,
+ HAL_TQM_GEN_MPDUS = 147 /* 0x93 */,
+ HAL_TQM_GEN_MPDUS_STATUS = 148 /* 0x94 */,
+ HAL_TQM_REMOVE_MPDU = 149 /* 0x95 */,
+ HAL_TQM_REMOVE_MPDU_STATUS = 150 /* 0x96 */,
+ HAL_TQM_REMOVE_MSDU = 151 /* 0x97 */,
+ HAL_TQM_REMOVE_MSDU_STATUS = 152 /* 0x98 */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT = 153 /* 0x99 */,
+ HAL_TQM_WRITE_CMD = 154 /* 0x9a */,
+ HAL_OFDMA_TRIGGER_DETAILS = 155 /* 0x9b */,
+ HAL_TX_DATA = 156 /* 0x9c */,
+ HAL_TX_FES_SETUP = 157 /* 0x9d */,
+ HAL_RX_PACKET = 158 /* 0x9e */,
+ HALXPECTED_RESPONSE = 159 /* 0x9f */,
+ HAL_TX_MPDU_END = 160 /* 0xa0 */,
+ HAL_TX_MPDU_START = 161 /* 0xa1 */,
+ HAL_TX_MSDU_END = 162 /* 0xa2 */,
+ HAL_TX_MSDU_START = 163 /* 0xa3 */,
+ HAL_TX_SW_MODE_SETUP = 164 /* 0xa4 */,
+ HAL_TXPCU_BUFFER_STATUS = 165 /* 0xa5 */,
+ HAL_TXPCU_USER_BUFFER_STATUS = 166 /* 0xa6 */,
+ HAL_DATA_TO_TIME_CONFIG = 167 /* 0xa7 */,
+ HALXAMPLE_USER_TLV_32 = 168 /* 0xa8 */,
+ HAL_MPDU_INFO = 169 /* 0xa9 */,
+ HAL_PDG_USER_SETUP = 170 /* 0xaa */,
+ HAL_TX_11AH_SETUP = 171 /* 0xab */,
+ HAL_REO_UPDATE_RX_REO_QUEUE_STATUS = 172 /* 0xac */,
+ HAL_TX_PEER_ENTRY = 173 /* 0xad */,
+ HAL_TX_RAW_OR_NATIVE_FRAME_SETUP = 174 /* 0xae */,
+ HALXAMPLE_USER_TLV_44 = 175 /* 0xaf */,
+ HAL_TX_FLUSH = 176 /* 0xb0 */,
+ HAL_TX_FLUSH_REQ = 177 /* 0xb1 */,
+ HAL_TQM_WRITE_CMD_STATUS = 178 /* 0xb2 */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS = 179 /* 0xb3 */,
+ HAL_TQM_GET_MSDU_FLOW_STATS = 180 /* 0xb4 */,
+ HALXAMPLE_USER_CTLV_44 = 181 /* 0xb5 */,
+ HAL_TX_FES_STATUS_START = 182 /* 0xb6 */,
+ HAL_TX_FES_STATUS_USER_PPDU = 183 /* 0xb7 */,
+ HAL_TX_FES_STATUS_USER_RESPONSE = 184 /* 0xb8 */,
+ HAL_TX_FES_STATUS_END = 185 /* 0xb9 */,
+ HAL_RX_TRIG_INFO = 186 /* 0xba */,
+ HAL_RXPCU_TX_SETUP_CLEAR = 187 /* 0xbb */,
+ HAL_RX_FRAME_BITMAP_REQ = 188 /* 0xbc */,
+ HAL_RX_FRAME_BITMAP_ACK = 189 /* 0xbd */,
+ HAL_COEX_RX_STATUS = 190 /* 0xbe */,
+ HAL_RX_START_PARAM = 191 /* 0xbf */,
+ HAL_RX_PPDU_START = 192 /* 0xc0 */,
+ HAL_RX_PPDU_END = 193 /* 0xc1 */,
+ HAL_RX_MPDU_START = 194 /* 0xc2 */,
+ HAL_RX_MPDU_END = 195 /* 0xc3 */,
+ HAL_RX_MSDU_START = 196 /* 0xc4 */,
+ HAL_RX_MSDU_END = 197 /* 0xc5 */,
+ HAL_RX_ATTENTION = 198 /* 0xc6 */,
+ HAL_RECEIVED_RESPONSE_INFO = 199 /* 0xc7 */,
+ HAL_RX_PHY_SLEEP = 200 /* 0xc8 */,
+ HAL_RX_HEADER = 201 /* 0xc9 */,
+ HAL_RX_PEER_ENTRY = 202 /* 0xca */,
+ HAL_RX_FLUSH = 203 /* 0xcb */,
+ HAL_RX_RESPONSE_REQUIRED_INFO = 204 /* 0xcc */,
+ HAL_RX_FRAMELESS_BAR_DETAILS = 205 /* 0xcd */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS = 206 /* 0xce */,
+ HAL_TQM_GET_MSDU_FLOW_STATS_STATUS = 207 /* 0xcf */,
+ HAL_TX_CBF_INFO = 208 /* 0xd0 */,
+ HAL_PCU_PPDU_SETUP_USER = 209 /* 0xd1 */,
+ HAL_RX_MPDU_PCU_START = 210 /* 0xd2 */,
+ HAL_RX_PM_INFO = 211 /* 0xd3 */,
+ HAL_RX_USER_PPDU_END = 212 /* 0xd4 */,
+ HAL_RX_PRE_PPDU_START = 213 /* 0xd5 */,
+ HAL_RX_PREAMBLE = 214 /* 0xd6 */,
+ HAL_TX_FES_SETUP_COMPLETE = 215 /* 0xd7 */,
+ HAL_TX_LAST_MPDU_FETCHED = 216 /* 0xd8 */,
+ HAL_TXDMA_STOP_REQUEST = 217 /* 0xd9 */,
+ HAL_RXPCU_SETUP = 218 /* 0xda */,
+ HAL_RXPCU_USER_SETUP = 219 /* 0xdb */,
+ HAL_TX_FES_STATUS_ACK_OR_BA = 220 /* 0xdc */,
+ HAL_TQM_ACKED_MPDU = 221 /* 0xdd */,
+ HAL_COEX_TX_RESP = 222 /* 0xde */,
+ HAL_COEX_TX_STATUS = 223 /* 0xdf */,
+ HAL_MACTX_COEX_PHY_CTRL = 224 /* 0xe0 */,
+ HAL_COEX_STATUS_BROADCAST = 225 /* 0xe1 */,
+ HAL_RESPONSE_START_STATUS = 226 /* 0xe2 */,
+ HAL_RESPONSEND_STATUS = 227 /* 0xe3 */,
+ HAL_CRYPTO_STATUS = 228 /* 0xe4 */,
+ HAL_RECEIVED_TRIGGER_INFO = 229 /* 0xe5 */,
+ HAL_COEX_TX_STOP_CTRL = 230 /* 0xe6 */,
+ HAL_RX_PPDU_ACK_REPORT = 231 /* 0xe7 */,
+ HAL_RX_PPDU_NO_ACK_REPORT = 232 /* 0xe8 */,
+ HAL_SCH_COEX_STATUS = 233 /* 0xe9 */,
+ HAL_SCHEDULER_COMMAND_STATUS = 234 /* 0xea */,
+ HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 235 /* 0xeb */,
+ HAL_TX_FES_STATUS_PROT = 236 /* 0xec */,
+ HAL_TX_FES_STATUS_START_PPDU = 237 /* 0xed */,
+ HAL_TX_FES_STATUS_START_PROT = 238 /* 0xee */,
+ HAL_TXPCU_PHYTX_DEBUG32 = 239 /* 0xef */,
+ HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 = 240 /* 0xf0 */,
+ HAL_TX_MPDU_COUNT_TRANSFERND = 241 /* 0xf1 */,
+ HAL_WHO_ANCHOR_OFFSET = 242 /* 0xf2 */,
+ HAL_WHO_ANCHOR_VALUE = 243 /* 0xf3 */,
+ HAL_WHO_CCE_INFO = 244 /* 0xf4 */,
+ HAL_WHO_COMMIT = 245 /* 0xf5 */,
+ HAL_WHO_COMMIT_DONE = 246 /* 0xf6 */,
+ HAL_WHO_FLUSH = 247 /* 0xf7 */,
+ HAL_WHO_L2_LLC = 248 /* 0xf8 */,
+ HAL_WHO_L2_PAYLOAD = 249 /* 0xf9 */,
+ HAL_WHO_L3_CHECKSUM = 250 /* 0xfa */,
+ HAL_WHO_L3_INFO = 251 /* 0xfb */,
+ HAL_WHO_L4_CHECKSUM = 252 /* 0xfc */,
+ HAL_WHO_L4_INFO = 253 /* 0xfd */,
+ HAL_WHO_MSDU = 254 /* 0xfe */,
+ HAL_WHO_MSDU_MISC = 255 /* 0xff */,
+ HAL_WHO_PACKET_DATA = 256 /* 0x100 */,
+ HAL_WHO_PACKET_HDR = 257 /* 0x101 */,
+ HAL_WHO_PPDU_END = 258 /* 0x102 */,
+ HAL_WHO_PPDU_START = 259 /* 0x103 */,
+ HAL_WHO_TSO = 260 /* 0x104 */,
+ HAL_WHO_WMAC_HEADER_PV0 = 261 /* 0x105 */,
+ HAL_WHO_WMAC_HEADER_PV1 = 262 /* 0x106 */,
+ HAL_WHO_WMAC_IV = 263 /* 0x107 */,
+ HAL_MPDU_INFO_END = 264 /* 0x108 */,
+ HAL_MPDU_INFO_BITMAP = 265 /* 0x109 */,
+ HAL_TX_QUEUE_EXTENSION = 266 /* 0x10a */,
+ HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS = 267 /* 0x10b */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS = 268 /* 0x10c */,
+ HAL_TQM_ACKED_MPDU_STATUS = 269 /* 0x10d */,
+ HAL_TQM_ADD_MSDU_STATUS = 270 /* 0x10e */,
+ HAL_TQM_LIST_GEN_DONE = 271 /* 0x10f */,
+ HAL_WHO_TERMINATE = 272 /* 0x110 */,
+ HAL_TX_LAST_MPDU_END = 273 /* 0x111 */,
+ HAL_TX_CV_DATA = 274 /* 0x112 */,
+ HAL_PPDU_TX_END = 275 /* 0x113 */,
+ HAL_PROT_TX_END = 276 /* 0x114 */,
+ HAL_MPDU_INFO_GLOBAL_END = 277 /* 0x115 */,
+ HAL_TQM_SCH_INSTR_GLOBAL_END = 278 /* 0x116 */,
+ HAL_RX_PPDU_END_USER_STATS = 279 /* 0x117 */,
+ HAL_RX_PPDU_END_USER_STATS_EXT = 280 /* 0x118 */,
+ HAL_REO_GET_QUEUE_STATS = 281 /* 0x119 */,
+ HAL_REO_FLUSH_QUEUE = 282 /* 0x11a */,
+ HAL_REO_FLUSH_CACHE = 283 /* 0x11b */,
+ HAL_REO_UNBLOCK_CACHE = 284 /* 0x11c */,
+ HAL_REO_GET_QUEUE_STATS_STATUS = 285 /* 0x11d */,
+ HAL_REO_FLUSH_QUEUE_STATUS = 286 /* 0x11e */,
+ HAL_REO_FLUSH_CACHE_STATUS = 287 /* 0x11f */,
+ HAL_REO_UNBLOCK_CACHE_STATUS = 288 /* 0x120 */,
+ HAL_TQM_FLUSH_CACHE = 289 /* 0x121 */,
+ HAL_TQM_UNBLOCK_CACHE = 290 /* 0x122 */,
+ HAL_TQM_FLUSH_CACHE_STATUS = 291 /* 0x123 */,
+ HAL_TQM_UNBLOCK_CACHE_STATUS = 292 /* 0x124 */,
+ HAL_RX_PPDU_END_STATUS_DONE = 293 /* 0x125 */,
+ HAL_RX_STATUS_BUFFER_DONE = 294 /* 0x126 */,
+ HAL_TX_DATA_SYNC = 297 /* 0x129 */,
+ HAL_PHYRX_CBF_READ_REQUEST_ACK = 298 /* 0x12a */,
+ HAL_TQM_GET_MPDU_HEAD_INFO = 299 /* 0x12b */,
+ HAL_TQM_SYNC_CMD = 300 /* 0x12c */,
+ HAL_TQM_GET_MPDU_HEAD_INFO_STATUS = 301 /* 0x12d */,
+ HAL_TQM_SYNC_CMD_STATUS = 302 /* 0x12e */,
+ HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 303 /* 0x12f */,
+ HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 304 /* 0x130 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST = 305 /* 0x131 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST_STATUS = 306 /* 0x132 */,
+ HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 307 /* 0x133 */,
+ HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 308 /* 0x134 */,
+ HALXAMPLE_USER_TLV_32_NAME = 309 /* 0x135 */,
+ HAL_RX_PPDU_START_USER_INFO = 310 /* 0x136 */,
+ HAL_RX_RING_MASK = 311 /* 0x137 */,
+ HAL_COEX_MAC_NAP = 312 /* 0x138 */,
+ HAL_RXPCU_PPDU_END_INFO = 313 /* 0x139 */,
+ HAL_WHO_MESH_CONTROL = 314 /* 0x13a */,
+ HAL_PDG_SW_MODE_BW_START = 315 /* 0x13b */,
+ HAL_PDG_SW_MODE_BW_END = 316 /* 0x13c */,
+ HAL_PDG_WAIT_FOR_MAC_REQUEST = 317 /* 0x13d */,
+ HAL_PDG_WAIT_FOR_PHY_REQUEST = 318 /* 0x13e */,
+ HAL_SCHEDULER_END = 319 /* 0x13f */,
+ HAL_RX_PPDU_START_DROPPED = 320 /* 0x140 */,
+ HAL_RX_PPDU_END_DROPPED = 321 /* 0x141 */,
+ HAL_RX_PPDU_END_STATUS_DONE_DROPPED = 322 /* 0x142 */,
+ HAL_RX_MPDU_START_DROPPED = 323 /* 0x143 */,
+ HAL_RX_MSDU_START_DROPPED = 324 /* 0x144 */,
+ HAL_RX_MSDU_END_DROPPED = 325 /* 0x145 */,
+ HAL_RX_MPDU_END_DROPPED = 326 /* 0x146 */,
+ HAL_RX_ATTENTION_DROPPED = 327 /* 0x147 */,
+ HAL_TXPCU_USER_SETUP = 328 /* 0x148 */,
+ HAL_RXPCU_USER_SETUP_EXT = 329 /* 0x149 */,
+ HAL_CMD_PART_0_END = 330 /* 0x14a */,
+ HAL_MACTX_SYNTH_ON = 331 /* 0x14b */,
+ HAL_SCH_CRITICAL_TLV_REFERENCE = 332 /* 0x14c */,
+ HAL_TQM_MPDU_GLOBAL_START = 333 /* 0x14d */,
+ HALXAMPLE_TLV_32 = 334 /* 0x14e */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW = 335 /* 0x14f */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD = 336 /* 0x150 */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS = 337 /* 0x151 */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 338 /* 0x152 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE = 339 /* 0x153 */,
+ HAL_TQM_MPDU_QUEUEMPTY_STATUS = 340 /* 0x154 */,
+ HAL_TQM_2_SCH_MPDU_AVAILABLE = 341 /* 0x155 */,
+ HAL_PDG_TRIG_RESPONSE = 342 /* 0x156 */,
+ HAL_TRIGGER_RESPONSE_TX_DONE = 343 /* 0x157 */,
+ HAL_ABORT_FROM_PHYRX_DETAILS = 344 /* 0x158 */,
+ HAL_SCH_TQM_CMD_WRAPPER = 345 /* 0x159 */,
+ HAL_MPDUS_AVAILABLE = 346 /* 0x15a */,
+ HAL_RECEIVED_RESPONSE_INFO_PART2 = 347 /* 0x15b */,
+ HAL_PHYRX_TX_START_TIMING = 348 /* 0x15c */,
+ HAL_TXPCU_PREAMBLE_DONE = 349 /* 0x15d */,
+ HAL_NDP_PREAMBLE_DONE = 350 /* 0x15e */,
+ HAL_SCH_TQM_CMD_WRAPPER_RBO_DROP = 351 /* 0x15f */,
+ HAL_SCH_TQM_CMD_WRAPPER_CONT_DROP = 352 /* 0x160 */,
+ HAL_MACTX_CLEAR_PREV_TX_INFO = 353 /* 0x161 */,
+ HAL_TX_PUNCTURE_SETUP = 354 /* 0x162 */,
+ HAL_R2R_STATUS_END = 355 /* 0x163 */,
+ HAL_MACTX_PREFETCH_CV_COMMON = 356 /* 0x164 */,
+ HAL_END_OF_FLUSH_MARKER = 357 /* 0x165 */,
+ HAL_MACTX_MU_UPLINK_COMMON_PUNC = 358 /* 0x166 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP_PUNC = 359 /* 0x167 */,
+ HAL_RECEIVED_RESPONSE_USER_7_0 = 360 /* 0x168 */,
+ HAL_RECEIVED_RESPONSE_USER_15_8 = 361 /* 0x169 */,
+ HAL_RECEIVED_RESPONSE_USER_23_16 = 362 /* 0x16a */,
+ HAL_RECEIVED_RESPONSE_USER_31_24 = 363 /* 0x16b */,
+ HAL_RECEIVED_RESPONSE_USER_36_32 = 364 /* 0x16c */,
+ HAL_TX_LOOPBACK_SETUP = 365 /* 0x16d */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_RU_DETAILS = 366 /* 0x16e */,
+ HAL_SCH_WAIT_INSTR_TX_PATH = 367 /* 0x16f */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO_TX2TX = 368 /* 0x170 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFOMUPHY_SETUP = 369 /* 0x171 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFOVM_DETAILS = 370 /* 0x172 */,
+ HAL_TX_WUR_DATA = 371 /* 0x173 */,
+ HAL_RX_PPDU_END_START = 372 /* 0x174 */,
+ HAL_RX_PPDU_END_MIDDLE = 373 /* 0x175 */,
+ HAL_RX_PPDU_END_LAST = 374 /* 0x176 */,
+ HAL_MACTX_BACKOFF_BASED_TRANSMISSION = 375 /* 0x177 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO_DL_OFDMA_TX = 376 /* 0x178 */,
+ HAL_SRP_INFO = 377 /* 0x179 */,
+ HAL_OBSS_SR_INFO = 378 /* 0x17a */,
+ HAL_SCHEDULER_SW_MSG_STATUS = 379 /* 0x17b */,
+ HAL_HWSCH_RXPCU_MAC_INFO_ANNOUNCEMENT = 380 /* 0x17c */,
+ HAL_RXPCU_SETUP_COMPLETE = 381 /* 0x17d */,
+ HAL_SNOOP_PPDU_START = 382 /* 0x17e */,
+ HAL_SNOOP_MPDU_USR_DBG_INFO = 383 /* 0x17f */,
+ HAL_SNOOP_MSDU_USR_DBG_INFO = 384 /* 0x180 */,
+ HAL_SNOOP_MSDU_USR_DATA = 385 /* 0x181 */,
+ HAL_SNOOP_MPDU_USR_STAT_INFO = 386 /* 0x182 */,
+ HAL_SNOOP_PPDU_END = 387 /* 0x183 */,
+ HAL_SNOOP_SPARE = 388 /* 0x184 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_MU_RSSI_COMMON = 390 /* 0x186 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_MU_RSSI_USER = 391 /* 0x187 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO_SCH_DETAILS = 392 /* 0x188 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_108PVM_DETAILS = 393 /* 0x189 */,
+ HAL_SCH_TLV_WRAPPER = 394 /* 0x18a */,
+ HAL_SCHEDULER_STATUS_WRAPPER = 395 /* 0x18b */,
+ HAL_MPDU_INFO_6X = 396 /* 0x18c */,
+ HAL_MACTX_11AZ_USER_DESC_PER_USER = 397 /* 0x18d */,
+ HAL_MACTX_U_SIGHT_SU_MU = 398 /* 0x18e */,
+ HAL_MACTX_U_SIGHT_TB = 399 /* 0x18f */,
+ HAL_PHYRX_U_SIGHT_SU_MU = 403 /* 0x193 */,
+ HAL_PHYRX_U_SIGHT_TB = 404 /* 0x194 */,
+ HAL_MACRX_LMR_READ_REQUEST = 408 /* 0x198 */,
+ HAL_MACRX_LMR_DATA_REQUEST = 409 /* 0x199 */,
+ HAL_PHYRX_LMR_TRANSFER_DONE = 410 /* 0x19a */,
+ HAL_PHYRX_LMR_TRANSFER_ABORT = 411 /* 0x19b */,
+ HAL_PHYRX_LMR_READ_REQUEST_ACK = 412 /* 0x19c */,
+ HAL_MACRX_SECURE_LTF_SEQ_PTR = 413 /* 0x19d */,
+ HAL_PHYRX_USER_INFO_MU_UL = 414 /* 0x19e */,
+ HAL_MPDU_QUEUE_OVERVIEW = 415 /* 0x19f */,
+ HAL_SCHEDULER_NAV_INFO = 416 /* 0x1a0 */,
+ HAL_LMR_PEER_ENTRY = 418 /* 0x1a2 */,
+ HAL_LMR_MPDU_START = 419 /* 0x1a3 */,
+ HAL_LMR_DATA = 420 /* 0x1a4 */,
+ HAL_LMR_MPDU_END = 421 /* 0x1a5 */,
+ HAL_REO_GET_QUEUE_1K_STATS_STATUS = 422 /* 0x1a6 */,
+ HAL_RX_FRAME_1K_BITMAP_ACK = 423 /* 0x1a7 */,
+ HAL_TX_FES_STATUS_1K_BA = 424 /* 0x1a8 */,
+ HAL_TQM_ACKED_1K_MPDU = 425 /* 0x1a9 */,
+ HAL_MACRX_INBSS_OBSS_IND = 426 /* 0x1aa */,
+ HAL_PHYRX_LOCATION = 427 /* 0x1ab */,
+ HAL_MLO_TX_NOTIFICATION_SU = 428 /* 0x1ac */,
+ HAL_MLO_TX_NOTIFICATION_MU = 429 /* 0x1ad */,
+ HAL_MLO_TX_REQ_SU = 430 /* 0x1ae */,
+ HAL_MLO_TX_REQ_MU = 431 /* 0x1af */,
+ HAL_MLO_TX_RESP = 432 /* 0x1b0 */,
+ HAL_MLO_RX_NOTIFICATION = 433 /* 0x1b1 */,
+ HAL_MLO_BKOFF_TRUNC_REQ = 434 /* 0x1b2 */,
+ HAL_MLO_TBTT_NOTIFICATION = 435 /* 0x1b3 */,
+ HAL_MLO_MESSAGE = 436 /* 0x1b4 */,
+ HAL_MLO_TS_SYNC_MSG = 437 /* 0x1b5 */,
+ HAL_MLO_FES_SETUP = 438 /* 0x1b6 */,
+ HAL_MLO_PDG_FES_SETUP_SU = 439 /* 0x1b7 */,
+ HAL_MLO_PDG_FES_SETUP_MU = 440 /* 0x1b8 */,
+ HAL_MPDU_INFO_1K_BITMAP = 441 /* 0x1b9 */,
+ HAL_MON_BUF_ADDR = 442 /* 0x1ba */,
+ HAL_TX_FRAG_STATE = 443 /* 0x1bb */,
+ HAL_MACTXHT_SIG_USR_OFDMA = 446 /* 0x1be */,
+ HAL_PHYRXHT_SIG_CMN_PUNC = 448 /* 0x1c0 */,
+ HAL_PHYRXHT_SIG_CMN_OFDMA = 450 /* 0x1c2 */,
+ HAL_PHYRXHT_SIG_USR_OFDMA = 454 /* 0x1c6 */,
+ HAL_PHYRX_PKT_END_PART1 = 456 /* 0x1c8 */,
+ HAL_MACTXXPECT_NDP_RECEPTION = 457 /* 0x1c9 */,
+ HAL_MACTX_SECURE_LTF_SEQ_PTR = 458 /* 0x1ca */,
+ HAL_MLO_PDG_BKOFF_TRUNC_NOTIFY = 460 /* 0x1cc */,
+ HAL_PHYRX_11AZ_INTEGRITY_DATA = 461 /* 0x1cd */,
+ HAL_PHYTX_LOCATION = 462 /* 0x1ce */,
+ HAL_PHYTX_11AZ_INTEGRITY_DATA = 463 /* 0x1cf */,
+ HAL_MACTXHT_SIG_USR_SU = 466 /* 0x1d2 */,
+ HAL_MACTXHT_SIG_USR_MU_MIMO = 467 /* 0x1d3 */,
+ HAL_PHYRXHT_SIG_USR_SU = 468 /* 0x1d4 */,
+ HAL_PHYRXHT_SIG_USR_MU_MIMO = 469 /* 0x1d5 */,
+ HAL_PHYRX_GENERIC_U_SIG = 470 /* 0x1d6 */,
+ HAL_PHYRX_GENERICHT_SIG = 471 /* 0x1d7 */,
+ HAL_OVERWRITE_RESP_START = 472 /* 0x1d8 */,
+ HAL_OVERWRITE_RESP_PREAMBLE_INFO = 473 /* 0x1d9 */,
+ HAL_OVERWRITE_RESP_FRAME_INFO = 474 /* 0x1da */,
+ HAL_OVERWRITE_RESP_END = 475 /* 0x1db */,
+ HAL_RXPCUARLY_RX_INDICATION = 476 /* 0x1dc */,
+ HAL_MON_DROP = 477 /* 0x1dd */,
+ HAL_MACRX_MU_UPLINK_COMMON_SNIFF = 478 /* 0x1de */,
+ HAL_MACRX_MU_UPLINK_USER_SETUP_SNIFF = 479 /* 0x1df */,
+ HAL_MACRX_MU_UPLINK_USER_SEL_SNIFF = 480 /* 0x1e0 */,
+ HAL_MACRX_MU_UPLINK_FCS_STATUS_SNIFF = 481 /* 0x1e1 */,
+ HAL_MACTX_PREFETCH_CV_DMA = 482 /* 0x1e2 */,
+ HAL_MACTX_PREFETCH_CV_PER_USER = 483 /* 0x1e3 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_ALL_SIGB_DETAILS = 484 /* 0x1e4 */,
+ HAL_MACTX_BF_PARAMS_UPDATE_COMMON = 485 /* 0x1e5 */,
+ HAL_MACTX_BF_PARAMS_UPDATE_PER_USER = 486 /* 0x1e6 */,
+ HAL_RANGING_USER_DETAILS = 487 /* 0x1e7 */,
+ HAL_PHYTX_CV_CORR_STATUS = 488 /* 0x1e8 */,
+ HAL_PHYTX_CV_CORR_COMMON = 489 /* 0x1e9 */,
+ HAL_PHYTX_CV_CORR_USER = 490 /* 0x1ea */,
+ HAL_MACTX_CV_CORR_COMMON = 491 /* 0x1eb */,
+ HAL_MACTX_CV_CORR_MAC_INFO_GROUP = 492 /* 0x1ec */,
+ HAL_BW_PUNCTUREVAL_WRAPPER = 493 /* 0x1ed */,
+ HAL_MACTX_RX_NOTIFICATION_FOR_PHY = 494 /* 0x1ee */,
+ HAL_MACTX_TX_NOTIFICATION_FOR_PHY = 495 /* 0x1ef */,
+ HAL_MACTX_MU_UPLINK_COMMON_PER_BW = 496 /* 0x1f0 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP_PER_BW = 497 /* 0x1f1 */,
+ HAL_RX_PPDU_END_USER_STATS_EXT2 = 498 /* 0x1f2 */,
+ HAL_FW2SW_MON = 499 /* 0x1f3 */,
+ HAL_WSI_DIRECT_MESSAGE = 500 /* 0x1f4 */,
+ HAL_MACTXMLSR_PRE_SWITCH = 501 /* 0x1f5 */,
+ HAL_MACTXMLSR_SWITCH = 502 /* 0x1f6 */,
+ HAL_MACTXMLSR_SWITCH_BACK = 503 /* 0x1f7 */,
+ HAL_PHYTXMLSR_SWITCH_ACK = 504 /* 0x1f8 */,
+ HAL_PHYTXMLSR_SWITCH_BACK_ACK = 505 /* 0x1f9 */,
+ HAL_SPARE_REUSE_TAG_0 = 506 /* 0x1fa */,
+ HAL_SPARE_REUSE_TAG_1 = 507 /* 0x1fb */,
+ HAL_SPARE_REUSE_TAG_2 = 508 /* 0x1fc */,
+ HAL_SPARE_REUSE_TAG_3 = 509 /* 0x1fd */,
+ /* FIXME: Assign correct value for HAL_TCL_DATA_CMD */
+ HAL_TCL_DATA_CMD = 510,
+ HAL_TLV_BASE = 511 /* 0x1ff */,
+};
+
+#define HAL_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+#define HAL_TLV_USR_ID GENMASK(31, 26)
+
+#define HAL_TLV_ALIGN 4
+
+struct hal_tlv_hdr {
+ __le32 tl;
+ u8 value[];
+} __packed;
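+
+/* Editor's sketch, not part of the original patch: decoding a TLV header
+ * with the masks above, assuming le32_get_bits() from <linux/bitfield.h>.
+ * The tag indexes enum hal_tlv_tag and the length covers value[], which
+ * is padded out to HAL_TLV_ALIGN bytes; function names are made up.
+ */
+static inline u16 ath12k_example_tlv_tag(const struct hal_tlv_hdr *hdr)
+{
+	return le32_get_bits(hdr->tl, HAL_TLV_HDR_TAG);
+}
+
+static inline u16 ath12k_example_tlv_len(const struct hal_tlv_hdr *hdr)
+{
+	return le32_get_bits(hdr->tl, HAL_TLV_HDR_LEN);
+}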
+
+#define HAL_TLV_64_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
+
+struct hal_tlv_64_hdr {
+ u64 tl;
+ u8 value[];
+} __packed;
+
+#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
+#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(8)
+#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(9)
+#define RX_MPDU_DESC_INFO0_AMPDU_FLAG BIT(10)
+#define RX_MPDU_DESC_INFO0_BAR_FRAME BIT(11)
+#define RX_MPDU_DESC_INFO0_VALID_PN BIT(12)
+#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(13)
+#define RX_MPDU_DESC_INFO0_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_DESC_INFO0_SRC_INFO GENMASK(26, 15)
+#define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27)
+#define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28)
+
+/* TODO revisit after meta data is concluded */
+#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+
+struct rx_mpdu_desc {
+ __le32 info0; /* %RX_MPDU_DESC_INFO */
+ __le32 peer_meta_data;
+} __packed;
+
+/* rx_mpdu_desc
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * msdu_count
+ * The number of MSDUs within the MPDU
+ *
+ * fragment_flag
+ * When set, this MPDU is a fragment and REO should forward this
+ * fragment MPDU to the REO destination ring without any reorder
+ * checks, pn checks or bitmap update. This implies that REO is
+ * forwarding the pointer to the MSDU link descriptor.
+ *
+ * mpdu_retry_bit
+ * The retry bit setting from the MPDU header of the received frame
+ *
+ * ampdu_flag
+ * Indicates the MPDU was received as part of an A-MPDU.
+ *
+ * bar_frame
+ * Indicates the received frame is a BAR frame. After processing,
+ * this frame shall be pushed to SW or deleted.
+ *
+ * valid_pn
+ * When not set, REO will not perform a PN sequence number check.
+ *
+ * raw_mpdu
+ * Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ * that the contents of the MSDU buffer form a 'RAW' MPDU. This
+ * 'RAW' MPDU might be spread out over multiple MSDU buffers.
+ *
+ * more_fragment_flag
+ * The More Fragment bit setting from the MPDU header of the
+ * received frame
+ *
+ * src_info
+ * Source (Virtual) device/interface info associated with this peer.
+ * This field gets passed on by REO to PPE in the EDMA descriptor.
+ *
+ * mpdu_qos_control_valid
+ * When set, the MPDU has a QoS control field
+ *
+ * tid
+ * Field only valid when mpdu_qos_control_valid is set
+ */
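+
+/* Editor's sketch, not part of the original patch: reading the MSDU
+ * count documented above out of info0; le32_get_bits() is assumed from
+ * <linux/bitfield.h> and the helper name is illustrative only.
+ */
+static inline u8 ath12k_example_mpdu_msdu_count(const struct rx_mpdu_desc *desc)
+{
+	return le32_get_bits(desc->info0, RX_MPDU_DESC_INFO0_MSDU_COUNT);
+}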
+
+enum hal_rx_msdu_desc_reo_dest_ind {
+ HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW5,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW6,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW7,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW8,
+};
+
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU BIT(0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU BIT(1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION BIT(2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP BIT(17)
+#define RX_MSDU_DESC_INFO0_VALID_SA BIT(18)
+#define RX_MSDU_DESC_INFO0_VALID_DA BIT(19)
+#define RX_MSDU_DESC_INFO0_DA_MCBC BIT(20)
+#define RX_MSDU_DESC_INFO0_L3_HDR_PAD_MSB BIT(21)
+#define RX_MSDU_DESC_INFO0_TCP_UDP_CHKSUM_FAIL BIT(22)
+#define RX_MSDU_DESC_INFO0_IP_CHKSUM_FAIL BIT(23)
+#define RX_MSDU_DESC_INFO0_FROM_DS BIT(24)
+#define RX_MSDU_DESC_INFO0_TO_DS BIT(25)
+#define RX_MSDU_DESC_INFO0_INTRA_BSS BIT(26)
+#define RX_MSDU_DESC_INFO0_DST_CHIP_ID GENMASK(28, 27)
+#define RX_MSDU_DESC_INFO0_DECAP_FORMAT GENMASK(30, 29)
+
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
+ (u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
+
+struct rx_msdu_desc {
+ __le32 info0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ * Indicates first msdu in mpdu.
+ *
+ * last_msdu_in_mpdu
+ * Indicates last msdu in mpdu. This flag can be true only when
+ * 'Msdu_continuation' is set to 0. This implies that when an msdu
+ * is spread out over multiple buffers and thus msdu_continuation
+ * is set, 'last_msdu_in_mpdu' can only be set for the very last
+ * buffer of the msdu.
+ *
+ * When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ * the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ * When set, this MSDU buffer was not able to hold the entire MSDU.
+ * The next buffer will therefore contain additional information
+ * related to this MSDU.
+ *
+ * msdu_length
+ * Field is only valid in combination with the 'first_msdu_in_mpdu'
+ * being set. Full MSDU length in bytes after decapsulation. This
+ * field is still valid for MPDU frames without A-MSDU. It still
+ * represents MSDU length after decapsulation. Or, in case of RAW
+ * MPDUs, it indicates the length of the entire MPDU (without FCS
+ * field).
+ *
+ * msdu_drop
+ * Indicates that REO shall drop this MSDU and not forward it to
+ * any other ring.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ * Field Only valid if valid_da is set. Indicates the DA address
+ * is a Multicast or Broadcast address for this MSDU.
+ *
+ * l3_header_padding_msb
+ * Passed on from 'RX_MSDU_END' TLV (only the MSB is reported as
+ * the LSB is always zero). Number of bytes padded to make sure
+ * that the L3 header will always start on a Dword boundary
+ *
+ * tcp_udp_checksum_fail
+ * Passed on from 'RX_ATTENTION' TLV
+ * Indicates that the computed checksum did not match the checksum
+ * in the TCP/UDP header.
+ *
+ * ip_checksum_fail
+ * Passed on from 'RX_ATTENTION' TLV
+ * Indicates that the computed checksum did not match the checksum
+ * in the IP header.
+ *
+ * from_DS
+ * Set if the 'from DS' bit is set in the frame control.
+ *
+ * to_DS
+ * Set if the 'to DS' bit is set in the frame control.
+ *
+ * intra_bss
+ * This packet needs intra-BSS routing by SW as the 'vdev_id'
+ * for the destination is the same as the 'vdev_id' that this
+ * MSDU was received on.
+ *
+ * dest_chip_id
+ * If intra_bss is set, copied by RXOLE/RXDMA from 'ADDR_SEARCH_ENTRY'
+ * to support intra-BSS routing with multi-chip multi-link operation.
+ * This indicates into which chip's TCL the packet should be queued.
+ *
+ * decap_format
+ * Indicates the format after decapsulation:
+ */
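+
+/* Editor's note, not part of the original patch: HAL_RX_MSDU_PKT_LENGTH_GET()
+ * above takes a CPU-order word, so a caller would convert the descriptor
+ * field first, e.g. (illustrative only):
+ *
+ *	u32 len = HAL_RX_MSDU_PKT_LENGTH_GET(le32_to_cpu(desc->info0));
+ *
+ * and per the field notes the result is only meaningful when
+ * first_msdu_in_mpdu is set.
+ */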
+
+#define RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND GENMASK(4, 0)
+#define RX_MSDU_EXT_DESC_INFO0_SERVICE_CODE GENMASK(13, 5)
+#define RX_MSDU_EXT_DESC_INFO0_PRIORITY_VALID BIT(14)
+#define RX_MSDU_EXT_DESC_INFO0_DATA_OFFSET GENMASK(26, 15)
+#define RX_MSDU_EXT_DESC_INFO0_SRC_LINK_ID GENMASK(29, 27)
+
+struct rx_msdu_ext_desc {
+ __le32 info0;
+} __packed;
+
+/* rx_msdu_ext_desc
+ *
+ * reo_destination_indication
+ * The ID of the REO exit ring where the MSDU frame shall push
+ * after (MPDU level) reordering has finished.
+ *
+ * service_code
+ * Opaque service code between PPE and Wi-Fi
+ *
+ * priority_valid
+ *
+ * data_offset
+ * The offset to Rx packet data within the buffer (including
+ * Rx DMA offset programming and L3 header padding inserted
+ * by Rx OLE).
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ */
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(0)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(2, 1)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(7, 3)
+#define HAL_REO_DEST_RING_INFO0_MSDU_DATA_SIZE GENMASK(11, 8)
+#define HAL_REO_DEST_RING_INFO0_SW_EXCEPTION BIT(12)
+#define HAL_REO_DEST_RING_INFO0_SRC_LINK_ID GENMASK(15, 13)
+#define HAL_REO_DEST_RING_INFO0_SIGNATURE GENMASK(19, 16)
+#define HAL_REO_DEST_RING_INFO0_RING_ID GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO0_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_dest_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 buf_va_lo;
+ __le32 buf_va_hi;
+ __le32 info0; /* %HAL_REO_DEST_RING_INFO0_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ * General information related to the MSDU that is passed
+ * on from RXDMA all the way to the REO destination ring.
+ *
+ * buf_va_lo
+ * Field only valid if Reo_dest_buffer_type is set to MSDU_buf_address
+ * Lower 32 bits of the 64-bit virtual address corresponding
+ * to Buf_or_link_desc_addr_info
+ *
+ * buf_va_hi
+ * Upper 32 bits of the 64-bit virtual address corresponding
+ * to Buf_or_link_desc_addr_info
+ *
+ * buffer_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ * Valid only when 'push_reason' is set. All error codes are
+ * defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * captured_msdu_data_size
+ * The number of following REO_DESTINATION STRUCTs that have
+ * been replaced with msdu_data extracted from the msdu_buffer
+ * and copied into the ring for easy FW/SW access.
+ *
+ * sw_exception
+ * This field has the same setting as the SW_exception field
+ * in the corresponding REO_entrance_ring descriptor.
+ * When set, the REO entrance descriptor is generated by FW,
+ * and the MPDU was processed in the following way:
+ * - NO re-order function is needed.
+ * - MPDU delinking is determined by the setting of Entrance
+ * ring field: SW_exception_mpdu_delink
+ * - Destination ring selection is based on the setting of
+ * the Entrance ring field SW_exception_destination_ring_valid
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ *
+ * signature
+ * Set to value 0x8 when msdu capture mode is enabled for this ring
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
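+
+/* Editor's sketch, not part of the original patch: distinguishing error
+ * pushes from routed frames on the destination ring, assuming
+ * le32_get_bits() from <linux/bitfield.h>; the helper name is made up.
+ */
+static inline bool
+ath12k_example_reo_dest_is_error(const struct hal_reo_dest_ring *desc)
+{
+	return le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_PUSH_REASON) ==
+	       HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED;
+}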
+
+#define HAL_REO_TO_PPE_RING_INFO0_DATA_LENGTH GENMASK(15, 0)
+#define HAL_REO_TO_PPE_RING_INFO0_DATA_OFFSET GENMASK(23, 16)
+#define HAL_REO_TO_PPE_RING_INFO0_POOL_ID GENMASK(28, 24)
+#define HAL_REO_TO_PPE_RING_INFO0_PREHEADER BIT(29)
+#define HAL_REO_TO_PPE_RING_INFO0_TSO_EN BIT(30)
+#define HAL_REO_TO_PPE_RING_INFO0_MORE BIT(31)
+
+struct hal_reo_to_ppe_ring {
+ __le32 buffer_addr;
+ __le32 info0; /* %HAL_REO_TO_PPE_RING_INFO0_ */
+} __packed;
+
+/* hal_reo_to_ppe_ring
+ *
+ * Producer: REO
+ * Consumer: PPE
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * data_length
+ * Length of valid data in bytes
+ *
+ * data_offset
+ * Offset to the data from buffer pointer. Can be used to
+ * strip header in the data for tunnel termination etc.
+ *
+ * pool_id
+ * REO has global configuration register for this field.
+ * It may have several free buffer pools, each
+ * RX-Descriptor ring can fetch free buffer from specific
+ * buffer pool; pool id will indicate which pool the buffer
+ * will be released to; POOL_ID Zero returned to SW
+ *
+ * preheader
+ * Disabled: 0 (Default)
+ * Enabled: 1
+ *
+ * tso_en
+ * Disabled: 0 (Default)
+ * Enabled: 1
+ *
+ * more
+ * More Segments followed
+ */
+
+enum hal_reo_entr_rxdma_push_reason {
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ROUTING_INSTRUCTION,
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_RX_FLUSH,
+};
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+enum hal_rx_reo_dest_ring {
+ HAL_RX_REO_DEST_RING_TCL,
+ HAL_RX_REO_DEST_RING_SW1,
+ HAL_RX_REO_DEST_RING_SW2,
+ HAL_RX_REO_DEST_RING_SW3,
+ HAL_RX_REO_DEST_RING_SW4,
+ HAL_RX_REO_DEST_RING_RELEASE,
+ HAL_RX_REO_DEST_RING_FW,
+ HAL_RX_REO_DEST_RING_SW5,
+ HAL_RX_REO_DEST_RING_SW6,
+ HAL_RX_REO_DEST_RING_SW7,
+ HAL_RX_REO_DEST_RING_SW8,
+};
+
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_REO_ENTR_RING_INFO1_MPDU_FRAG_NUM GENMASK(10, 7)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION BIT(11)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPT_MPDU_DELINK BIT(12)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION_RING_VLD BIT(13)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION_RING GENMASK(18, 14)
+#define HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM GENMASK(30, 19)
+
+#define HAL_REO_ENTR_RING_INFO2_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID GENMASK(18, 16)
+#define HAL_REO_ENTR_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_REO_ENTR_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_entrance_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ __le32 queue_addr_lo;
+ __le32 info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+ __le32 info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+ __le32 info2; /* %HAL_REO_ENTR_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ * An approximation of the number of bytes received in this MPDU.
+ * Used to keep stats on the amount of data flowing
+ * through a queue.
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall push
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ * Indicates that this REO entrance ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO descriptors
+ * is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit is set
+ * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ * bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Valid only when 'push_reason' is set. All error codes are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * mpdu_fragment_number
+ * Field only valid when Reo_level_mpdu_frame_info.
+ * Rx_mpdu_desc_info_details.Fragment_flag is set.
+ *
+ * sw_exception
+ * When not set, REO is performing all its default MPDU processing
+ * operations.
+ * When set, this REO entrance descriptor is generated by FW, and
+ * should be processed as an exception. This implies:
+ * NO re-order function is needed.
+ * MPDU delinking is determined by the setting of field
+ * SW_exception_mpdu_delink
+ *
+ * sw_exception_mpdu_delink
+ * Field only valid when SW_exception is set.
+ * 1'b0: REO should NOT delink the MPDU, and thus pass this
+ * MPDU on to the destination ring as is. This implies that
+ * in the REO_DESTINATION_RING struct field
+ * Buf_or_link_desc_addr_info should point to an MSDU link
+ * descriptor
+ * 1'b1: REO should perform the normal MPDU delink into MSDU operations.
+ *
+ * sw_exception_dest_ring
+ * Field only valid when fields SW_exception and
+ * SW_exception_destination_ring_valid are set. Values are defined
+ * in %HAL_RX_REO_DEST_RING_.
+ *
+ * mpdu_seq_number
+ * The field can have two different meanings based on the setting
+ * of sub-field Reo level mpdu frame info.
+ * Rx_mpdu_desc_info_details. BAR_frame
+ * 'BAR_frame' is NOT set:
+ * The MPDU sequence number of the received frame.
+ * 'BAR_frame' is set.
+ * The MPDU Start sequence number from the BAR frame
+ *
+ * phy_ppdu_id
+ * A PPDU counter value that PHY increments for every PPDU received
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
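+
+/* Editor's sketch, not part of the original patch: the 40-bit REO queue
+ * descriptor address is split between queue_addr_lo and the low byte of
+ * info0, and could be recombined as below (le32_get_bits() and the
+ * helper name are assumptions).
+ */
+static inline u64
+ath12k_example_reo_entr_queue_addr(const struct hal_reo_entrance_ring *desc)
+{
+	return le32_to_cpu(desc->queue_addr_lo) |
+	       ((u64)le32_get_bits(desc->info0,
+				   HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) << 32);
+}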
+
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
+
+struct hal_reo_cmd_hdr {
+ __le32 info0;
+} __packed;
+
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
+
+struct hal_reo_get_queue_stats {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 queue_addr_lo;
+ __le32 info0;
+ __le32 rsvd0[6];
+ __le32 tlv64_pad;
+} __packed;
+
+/* hal_reo_get_queue_stats
+ * Producer: SW
+ * Consumer: REO
+ *
+ * cmd
+ * Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ * Clear stats settings. When set, clear the stats after
+ * generating the status.
+ *
+ * Following stats will be cleared.
+ * Timeout_count
+ * Forward_due_to_bar_count
+ * Duplicate_count
+ * Frames_in_order_count
+ * BAR_received_count
+ * MPDU_Frames_processed_count
+ * MSDU_Frames_processed_count
+ * Total_processed_byte_count
+ * Late_receive_MPDU_count
+ * window_jump_2k
+ * Hole_count
+ */
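+
+/* Editor's sketch, not part of the original patch: filling a 'get queue
+ * stats' command with the masks above. le32_encode_bits(), cpu_to_le32()
+ * and lower/upper_32_bits() are standard kernel helpers; the function
+ * name and the fixed command number are illustrative only.
+ */
+static inline void
+ath12k_example_reo_queue_stats_cmd(struct hal_reo_get_queue_stats *cmd,
+				   u64 queue_addr, bool clear)
+{
+	cmd->cmd.info0 = le32_encode_bits(1, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER) |
+			 le32_encode_bits(1, HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+	cmd->queue_addr_lo = cpu_to_le32(lower_32_bits(queue_addr));
+	cmd->info0 = le32_encode_bits(upper_32_bits(queue_addr) & 0xff,
+				      HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI) |
+		     le32_encode_bits(clear, HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS);
+}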
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9)
+
+struct hal_reo_flush_queue {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 desc_addr_lo;
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
+#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS BIT(8)
+#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX BIT(9)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX GENMASK(11, 10)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+
+struct hal_reo_flush_cache {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 cache_addr_lo;
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
+#define HAL_TCL_DATA_CMD_INFO0_CMD_TYPE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE BIT(1)
+#define HAL_TCL_DATA_CMD_INFO0_BANK_ID GENMASK(7, 2)
+#define HAL_TCL_DATA_CMD_INFO0_TX_NOTIFY_FRAME GENMASK(10, 8)
+#define HAL_TCL_DATA_CMD_INFO0_HDR_LEN_READ_SEL BIT(11)
+#define HAL_TCL_DATA_CMD_INFO0_BUF_TIMESTAMP GENMASK(30, 12)
+#define HAL_TCL_DATA_CMD_INFO0_BUF_TIMESTAMP_VLD BIT(31)
+
+#define HAL_TCL_DATA_CMD_INFO1_CMD_NUM GENMASK(31, 16)
+
+#define HAL_TCL_DATA_CMD_INFO2_DATA_LEN GENMASK(15, 0)
+#define HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN BIT(16)
+#define HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN BIT(17)
+#define HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN BIT(18)
+#define HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN BIT(19)
+#define HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TO_FW BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_PKT_OFFSET GENMASK(31, 23)
+
+#define HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO3_FLOW_OVERRIDE_EN BIT(1)
+#define HAL_TCL_DATA_CMD_INFO3_CLASSIFY_INFO_SEL GENMASK(3, 2)
+#define HAL_TCL_DATA_CMD_INFO3_TID GENMASK(7, 4)
+#define HAL_TCL_DATA_CMD_INFO3_FLOW_OVERRIDE BIT(8)
+#define HAL_TCL_DATA_CMD_INFO3_PMAC_ID GENMASK(10, 9)
+#define HAL_TCL_DATA_CMD_INFO3_MSDU_COLOR GENMASK(12, 11)
+#define HAL_TCL_DATA_CMD_INFO3_VDEV_ID GENMASK(31, 24)
+
+#define HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX GENMASK(19, 0)
+#define HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM GENMASK(23, 20)
+#define HAL_TCL_DATA_CMD_INFO4_IDX_LOOKUP_OVERRIDE BIT(24)
+
+#define HAL_TCL_DATA_CMD_INFO5_RING_ID GENMASK(27, 20)
+#define HAL_TCL_DATA_CMD_INFO5_LOOPING_COUNT GENMASK(31, 28)
+
+enum hal_encrypt_type {
+ HAL_ENCRYPT_TYPE_WEP_40,
+ HAL_ENCRYPT_TYPE_WEP_104,
+ HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+ HAL_ENCRYPT_TYPE_WEP_128,
+ HAL_ENCRYPT_TYPE_TKIP_MIC,
+ HAL_ENCRYPT_TYPE_WAPI,
+ HAL_ENCRYPT_TYPE_CCMP_128,
+ HAL_ENCRYPT_TYPE_OPEN,
+ HAL_ENCRYPT_TYPE_CCMP_256,
+ HAL_ENCRYPT_TYPE_GCMP_128,
+ HAL_ENCRYPT_TYPE_AES_GCMP_256,
+ HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+enum hal_tcl_encap_type {
+ HAL_TCL_ENCAP_TYPE_RAW,
+ HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+ HAL_TCL_ENCAP_TYPE_ETHERNET,
+ HAL_TCL_ENCAP_TYPE_802_3 = 3,
+};
+
+enum hal_tcl_desc_type {
+ HAL_TCL_DESC_TYPE_BUFFER,
+ HAL_TCL_DESC_TYPE_EXT_DESC,
+};
+
+enum hal_wbm_htt_tx_comp_status {
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
+};
+
+struct hal_tcl_data_cmd {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * tcl_cmd_type
+ * used to select the type of TCL Command descriptor
+ *
+ * desc_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * bank_id
+ * used to select one of the TCL register banks for fields removed
+ * from 'TCL_DATA_CMD' that do not change often within one virtual
+ * device or a set of virtual devices:
+ *
+ * tx_notify_frame
+ * TCL copies this value to 'TQM_ENTRANCE_RING' field FW_tx_notify_frame.
+ *
+ * hdr_length_read_sel
+ * used to select the per 'encap_type' register set for MSDU header
+ * read length
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ * Frame system entrance timestamp. It shall be filled in by the
+ * first module (SW, TCL or TQM) that sees the frame.
+ *
+ * cmd_num
+ * This number can be used to match against status.
+ *
+ * data_length
+ * MSDU length in case of direct descriptor. Length of link
+ * extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ * Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ * Forward packet to FW along with classification result. The
+ * packet will not be forwarded to TQM when this bit is set.
+ * 1'b0: Use classification result to forward the packet.
+ * 1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ * Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * hlos_tid_overwrite
+ *
+ * When set, TCL shall ignore the IP DSCP and VLAN PCP
+ * fields and use HLOS_TID as the final TID. Otherwise TCL
+ * shall consider the DSCP and PCP fields as well as HLOS_TID
+ * and choose a final TID based on the configured priority
+ *
+ * flow_override_enable
+ * TCL uses this to select the flow pointer from the peer table,
+ * which can be overridden by SW for pre-encrypted raw WiFi packets
+ * that cannot be parsed for UDP or for other MLO
+ * 0 - FP_PARSE_IP: Use the flow-pointer based on parsing the IPv4
+ * or IPv6 header.
+ * 1 - FP_USE_OVERRIDE: Use the who_classify_info_sel and
+ * flow_override fields to select the flow-pointer
+ *
+ * who_classify_info_sel
+ * Field only valid when flow_override_enable is set to FP_USE_OVERRIDE.
+ * This field is used to select one of the 'WHO_CLASSIFY_INFO's in the
+ * peer table in case more than 2 flows are mapped to a single TID.
+ * 0: To choose Flow 0 and 1 of any TID use this value.
+ * 1: To choose Flow 2 and 3 of any TID use this value.
+ * 2: To choose Flow 4 and 5 of any TID use this value.
+ * 3: To choose Flow 6 and 7 of any TID use this value.
+ *
+ * If who_classify_info_sel is not in sync with the num_tx_classify_info
+ * field from address search, then TCL will set 'who_classify_info_sel'
+ * to 0 and use flows 0 and 1.
+ *
+ * hlos_tid
+ * HLOS MSDU priority
+ * Field is used when HLOS_TID_overwrite is set.
+ *
+ * flow_override
+ * Field only valid when flow_override_enable is set to FP_USE_OVERRIDE
+ * TCL uses this to select the flow pointer from the peer table,
+ * which can be overridden by SW for pre-encrypted raw WiFi packets
+ * that cannot be parsed for UDP or for other MLO
+ * 0 - FP_USE_NON_UDP: Use the non-UDP flow pointer (flow 0)
+ * 1 - FP_USE_UDP: Use the UDP flow pointer (flow 1)
+ *
+ * pmac_id
+ * TCL uses this PMAC_ID in address search, i.e, while
+ * finding matching entry for the packet in AST corresponding
+ * to given PMAC_ID
+ *
+ * If PMAC ID is all 1s (=> value 3), it indicates wildcard
+ * match for any PMAC
+ *
+ * vdev_id
+ * Virtual device ID to check against the address search entry to
+ * avoid security issues from transmitting packets from an incorrect
+ * virtual device
+ *
+ * search_index
+ * The index that will be used for index based address or
+ * flow search. The field is valid when 'search_type' is 1 or 2.
+ *
+ * cache_set_num
+ *
+ * Cache set number that should be used to cache the index
+ * based search results, for address and flow search. This
+ * value should be equal to LSB four bits of the hash value of
+ * match data, in case of search index points to an entry which
+ * may be used in content based search also. The value can be
+ * anything when the entry pointed by search index will not be
+ * used for content based search.
+ *
+ * index_loop_override
+ * When set, address search and packet routing is forced to use
+ * 'search_index' instead of following the register configuration
+ * selected by Bank_id.
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ *
+ * looping_count
+ *
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring...
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
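+
+/* Editor's sketch, not part of the original patch: encoding a few of the
+ * TCL data command fields documented above with le32_encode_bits() from
+ * <linux/bitfield.h>; the helper name and field selection are
+ * illustrative, not the driver's actual TX path.
+ */
+static inline void ath12k_example_tcl_cmd_fill(struct hal_tcl_data_cmd *cmd,
+					       u16 data_len, u8 vdev_id)
+{
+	cmd->info0 = le32_encode_bits(HAL_TCL_DESC_TYPE_BUFFER,
+				      HAL_TCL_DATA_CMD_INFO0_DESC_TYPE);
+	cmd->info2 = le32_encode_bits(data_len, HAL_TCL_DATA_CMD_INFO2_DATA_LEN);
+	cmd->info3 = le32_encode_bits(vdev_id, HAL_TCL_DATA_CMD_INFO3_VDEV_ID);
+}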
+
+#define HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO GENMASK(31, 0)
+
+#define HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI GENMASK(7, 0)
+#define HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE BIT(8)
+#define HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE GENMASK(10, 9)
+#define HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE GENMASK(14, 11)
+#define HAL_TX_MSDU_EXT_INFO1_BUF_LEN GENMASK(31, 16)
+
+struct hal_tx_msdu_ext_desc {
+ __le32 rsvd0[6];
+ __le32 info0;
+ __le32 info1;
+ __le32 rsvd1[10];
+};
+
+struct hal_tcl_gse_cmd {
+ __le32 ctrl_buf_addr_lo;
+ __le32 info0;
+ __le32 meta_data[2];
+ __le32 rsvd0[2];
+ __le32 info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ * Address of a control buffer containing additional info needed
+ * for this command execution.
+ *
+ * meta_data
+ * Meta data to be returned in the status descriptor
+ */
+
+enum hal_tcl_cache_op_res {
+ HAL_TCL_CACHE_OP_RES_DONE,
+ HAL_TCL_CACHE_OP_RES_NOT_FOUND,
+ HAL_TCL_CACHE_OP_RES_TIMEOUT,
+};
+
+struct hal_tcl_status_ring {
+ __le32 info0;
+ __le32 msdu_byte_count;
+ __le32 msdu_timestamp;
+ __le32 meta_data[2];
+ __le32 info1;
+ __le32 rsvd0;
+ __le32 info2;
+} __packed;
+
+/* hal_tcl_status_ring
+ *
+ * msdu_cnt
+ * msdu_byte_count
+ * MSDU count of Entry and MSDU byte count for entry 1.
+ *
+ */
+
+#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN BIT(8)
+#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP BIT(9)
+#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP BIT(10)
+#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER BIT(11)
+#define HAL_CE_SRC_DESC_ADDR_INFO_LEN GENMASK(31, 16)
+
+#define HAL_CE_SRC_DESC_META_INFO_DATA GENMASK(15, 0)
+
+#define HAL_CE_SRC_DESC_FLAGS_RING_ID GENMASK(27, 20)
+#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_src_desc {
+ __le32 buffer_addr_low;
+ __le32 buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */
+ __le32 meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */
+ __le32 flags; /* %HAL_CE_SRC_DESC_FLAGS_ */
+} __packed;
+
+/* hal_ce_srng_src_desc
+ *
+ * buffer_addr_lo
+ * LSB 32 bits of the 40 Bit Pointer to the source buffer
+ *
+ * buffer_addr_hi
+ * MSB 8 bits of the 40 Bit Pointer to the source buffer
+ *
+ * toeplitz_en
+ * Enable generation of 32-bit Toeplitz-LFSR hash for
+ * data transfer. In case of gather, the field in the first
+ * source ring entry of the gather copy cycle is taken into account.
+ *
+ * src_swap
+ * Treats source memory organization as big-endian. For
+ * each dword read (4 bytes), the byte 0 is swapped with byte 3
+ * and byte 1 is swapped with byte 2.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * dest_swap
+ * Treats destination memory organization as big-endian.
+ * For each dword write (4 bytes), the byte 0 is swapped with
+ * byte 3 and byte 1 is swapped with byte 2.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * gather
+ * Enables gather of multiple copy engine source
+ * descriptors to one destination.
+ *
+ * ce_res_0
+ * Reserved
+ *
+ *
+ * length
+ * Length of the buffer in units of octets of the current
+ * descriptor
+ *
+ * fw_metadata
+ * Meta data used by FW.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_1
+ * Reserved
+ *
+ * ce_res_2
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring...
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
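+
+/* Editor's sketch, not part of the original patch: one plausible shape
+ * for a CE source descriptor setter such as ath12k_hal_ce_src_set_desc()
+ * declared in hal.h, built from the masks above; the body is an
+ * assumption, not the driver's actual implementation.
+ */
+static inline void ath12k_example_ce_src_fill(struct hal_ce_srng_src_desc *desc,
+					      dma_addr_t paddr, u32 len, u32 id)
+{
+	desc->buffer_addr_low = cpu_to_le32(lower_32_bits(paddr));
+	desc->buffer_addr_info =
+		le32_encode_bits(upper_32_bits(paddr) & 0xff,
+				 HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI) |
+		le32_encode_bits(len, HAL_CE_SRC_DESC_ADDR_INFO_LEN);
+	desc->meta_info = le32_encode_bits(id, HAL_CE_SRC_DESC_META_INFO_DATA);
+}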
+
+#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dest_desc {
+ __le32 buffer_addr_low;
+ __le32 buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dest_desc
+ *
+ * dst_buffer_low
+ * LSB 32 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * dst_buffer_high
+ * MSB 8 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * ce_res_4
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value wraps back to 0.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN BIT(8)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP BIT(9)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP BIT(10)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16)
+
+#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(15, 0)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dst_status_desc {
+ __le32 flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */
+ __le32 toeplitz_hash0;
+ __le32 toeplitz_hash1;
+ __le32 meta_info; /* %HAL_CE_DST_STATUS_DESC_META_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dst_status_desc
+ *
+ * ce_res_5
+ * Reserved
+ *
+ * toeplitz_en
+ *
+ * src_swap
+ * Source memory buffer swapped
+ *
+ * dest_swap
+ * Destination memory buffer swapped
+ *
+ * gather
+ * Gather of multiple copy engine source descriptors to one
+ * destination enabled
+ *
+ * ce_res_6
+ * Reserved
+ *
+ * length
+ * Sum of all the Lengths of the source descriptor in the
+ * gather chain
+ *
+ * toeplitz_hash_0
+ * 32 LS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * toeplitz_hash_1
+ * 32 MS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * fw_metadata
+ * Meta data used by FW
+ * In case of gather, the field in the first source ring
+ * entry of the gather copy cycle is taken into account.
+ *
+ * ce_res_7
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value wraps back to 0.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_TX_RATE_STATS_INFO0_VALID BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW GENMASK(3, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE GENMASK(7, 4)
+#define HAL_TX_RATE_STATS_INFO0_STBC BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_LDPC BIT(9)
+#define HAL_TX_RATE_STATS_INFO0_SGI GENMASK(11, 10)
+#define HAL_TX_RATE_STATS_INFO0_MCS GENMASK(15, 12)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX BIT(16)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU GENMASK(28, 17)
+
+enum hal_tx_rate_stats_bw {
+ HAL_TX_RATE_STATS_BW_20,
+ HAL_TX_RATE_STATS_BW_40,
+ HAL_TX_RATE_STATS_BW_80,
+ HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+ HAL_TX_RATE_STATS_PKT_TYPE_11A,
+ HAL_TX_RATE_STATS_PKT_TYPE_11B,
+ HAL_TX_RATE_STATS_PKT_TYPE_11N,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+ HAL_TX_RATE_STATS_PKT_TYPE_11BA,
+ HAL_TX_RATE_STATS_PKT_TYPE_11BE,
+};
+
+enum hal_tx_rate_stats_sgi {
+ HAL_TX_RATE_STATS_SGI_08US,
+ HAL_TX_RATE_STATS_SGI_04US,
+ HAL_TX_RATE_STATS_SGI_16US,
+ HAL_TX_RATE_STATS_SGI_32US,
+};
+
+struct hal_tx_rate_stats {
+ __le32 info0;
+ __le32 tsf;
+} __packed;
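+
+/* A hedged sketch of decoding hal_tx_rate_stats with the bitfield helpers
+ * from <linux/bitfield.h>; the helper name is hypothetical, the masks and
+ * enums are the ones defined above.
+ */
+static inline bool
+ath12k_hal_tx_rate_stats_parse(const struct hal_tx_rate_stats *stats,
+			       u8 *mcs, u8 *bw, u8 *sgi)
+{
+	u32 info0 = le32_to_cpu(stats->info0);
+
+	if (!(info0 & HAL_TX_RATE_STATS_INFO0_VALID))
+		return false;
+
+	*mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
+	*bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);   /* enum hal_tx_rate_stats_bw */
+	*sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI); /* enum hal_tx_rate_stats_sgi */
+
+	return true;
+}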
+
+struct hal_wbm_link_desc {
+ struct ath12k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ * Producer: WBM
+ * Consumer: WBM
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+ HAL_WBM_REL_SRC_MODULE_TQM,
+ HAL_WBM_REL_SRC_MODULE_RXDMA,
+ HAL_WBM_REL_SRC_MODULE_REO,
+ HAL_WBM_REL_SRC_MODULE_FW,
+ HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+ HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+ HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ * The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ * The address points to a Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ * The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ * The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ * The address points to a TQM queue extension descriptor. WBM should
+ * treat this in the same way as a link descriptor.
+ */
+
+enum hal_wbm_rel_bm_act {
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+ HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ * Put the buffer or descriptor back in the idle list. In case of MSDU or
+ * MPDU link descriptor, BM does not need to check to release any
+ * individual MSDU buffers.
+ *
+ * release_msdu_list
+ * This BM action can only be used in combination with desc_type being
+ * msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ * pointer in the MSDU link descriptor is the first of an MPDU that is
+ * released. BM shall release all the MSDU buffers linked to this first
+ * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ * set to value 0, which represents the 'NULL' pointer. When all MSDU
+ * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ * descriptor itself shall also be released.
+ */
+#define HAL_WBM_COMPL_RX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_COMPL_RX_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_COMPL_RX_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_COMPL_RX_INFO0_RBM GENMASK(12, 9)
+#define HAL_WBM_COMPL_RX_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_COMPL_RX_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_COMPL_RX_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_COMPL_RX_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_COMPL_RX_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_COMPL_RX_INFO1_PHY_ADDR_HI GENMASK(7, 0)
+#define HAL_WBM_COMPL_RX_INFO1_SW_COOKIE GENMASK(27, 8)
+#define HAL_WBM_COMPL_RX_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_completion_ring_rx {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le32 info0;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 phy_addr_lo;
+ __le32 info1;
+} __packed;
+
+#define HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_COMPL_TX_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_COMPL_TX_INFO0_RBM GENMASK(12, 9)
+#define HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON GENMASK(16, 13)
+#define HAL_WBM_COMPL_TX_INFO0_RBM_OVERRIDE_VLD BIT(17)
+#define HAL_WBM_COMPL_TX_INFO0_SW_COOKIE_LO GENMASK(29, 18)
+#define HAL_WBM_COMPL_TX_INFO0_CC_DONE BIT(30)
+#define HAL_WBM_COMPL_TX_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_COMPL_TX_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+#define HAL_WBM_COMPL_TX_INFO1_SW_REL_DETAILS_VALID BIT(31)
+
+#define HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_COMPL_TX_INFO2_FIRST_MSDU BIT(8)
+#define HAL_WBM_COMPL_TX_INFO2_LAST_MSDU BIT(9)
+#define HAL_WBM_COMPL_TX_INFO2_FW_TX_NOTIF_FRAME GENMASK(12, 10)
+#define HAL_WBM_COMPL_TX_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_COMPL_TX_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_COMPL_TX_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_COMPL_TX_INFO3_SW_COOKIE_HI GENMASK(27, 20)
+#define HAL_WBM_COMPL_TX_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_completion_ring_tx {
+ __le32 buf_va_lo;
+ __le32 buf_va_hi;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ __le32 info3;
+} __packed;
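+
+/* For illustration only: reassembling the SW cookie that is split across
+ * the TX completion words, assuming the split mirrors the
+ * SW_BUFFER_COOKIE_11_0 / SW_BUFFER_COOKIE_19_12 naming used by the
+ * release-ring variant below; the helper name is hypothetical.
+ */
+static inline u32
+ath12k_hal_wbm_compl_tx_get_cookie(const struct hal_wbm_completion_ring_tx *tx)
+{
+	u32 cookie_lo = le32_get_bits(tx->info0, HAL_WBM_COMPL_TX_INFO0_SW_COOKIE_LO);
+	u32 cookie_hi = le32_get_bits(tx->info3, HAL_WBM_COMPL_TX_INFO3_SW_COOKIE_HI);
+
+	return cookie_lo | (cookie_hi << 12);	/* 12 low bits, 8 high bits */
+}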
+
+#define HAL_WBM_RELEASE_TX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_TX_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_TX_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_TX_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_TX_INFO0_TQM_RELEASE_REASON GENMASK(18, 13)
+#define HAL_WBM_RELEASE_TX_INFO0_RBM_OVERRIDE_VLD BIT(17)
+#define HAL_WBM_RELEASE_TX_INFO0_SW_BUFFER_COOKIE_11_0 GENMASK(29, 18)
+#define HAL_WBM_RELEASE_TX_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_TX_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_RELEASE_TX_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+#define HAL_WBM_RELEASE_TX_INFO1_SW_REL_DETAILS_VALID BIT(31)
+
+#define HAL_WBM_RELEASE_TX_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_RELEASE_TX_INFO2_FIRST_MSDU BIT(8)
+#define HAL_WBM_RELEASE_TX_INFO2_LAST_MSDU BIT(9)
+#define HAL_WBM_RELEASE_TX_INFO2_FW_TX_NOTIF_FRAME GENMASK(12, 10)
+#define HAL_WBM_RELEASE_TX_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_RELEASE_TX_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_RELEASE_TX_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_RELEASE_TX_INFO3_SW_BUFFER_COOKIE_19_12 GENMASK(27, 20)
+#define HAL_WBM_RELEASE_TX_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_release_ring_tx {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ __le32 info3;
+} __packed;
+
+#define HAL_WBM_RELEASE_RX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_RX_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_RX_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_RX_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_RX_INFO0_CC_STATUS BIT(16)
+#define HAL_WBM_RELEASE_RX_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_RX_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_RX_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_RX_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_RX_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_RX_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_WBM_RELEASE_RX_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_release_ring_rx {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+#define HAL_WBM_RELEASE_RX_CC_INFO0_RBM GENMASK(12, 9)
+#define HAL_WBM_RELEASE_RX_CC_INFO1_COOKIE GENMASK(27, 8)
+/* Used when hw cc is success */
+struct hal_wbm_release_ring_cc_rx {
+ __le32 buf_va_lo;
+ __le32 buf_va_hi;
+ __le32 info0;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 buf_pa_lo;
+ __le32 info1;
+} __packed;
+
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_INFO3_FIRST_MSDU BIT(0)
+#define HAL_WBM_RELEASE_INFO3_LAST_MSDU BIT(1)
+#define HAL_WBM_RELEASE_INFO3_CONTINUATION BIT(2)
+
+#define HAL_WBM_RELEASE_INFO5_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_release_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+} __packed;
+
+/* hal_wbm_release_ring
+ *
+ * Producer: SW/TQM/RXDMA/REO/SWITCH
+ * Consumer: WBM/SW/FW
+ *
+ * HTT tx status is overlaid on the wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * for software based completions.
+ *
+ * buf_addr_info
+ * Details of the physical address of the buffer or link descriptor.
+ *
+ * release_source_module
+ * Indicates which module initiated the release of this buffer/descriptor.
+ * Values are defined in enum %HAL_WBM_REL_SRC_MODULE_.
+ *
+ * buffer_or_desc_type
+ * Field only valid when WBM is marked as the return_buffer_manager in
+ * the Released_Buffer_address_info. Indicates the type of buffer or
+ * descriptor being released. Values are in enum %HAL_WBM_REL_DESC_TYPE.
+ *
+ * wbm_internal_error
+ * Is set when WBM got a buffer pointer but the action was to push it to
+ * the idle link descriptor ring or do link related activity OR
+ * Is set when WBM got a link buffer pointer but the action was to push it
+ * to the buffer descriptor ring.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Buffer Manager Ring has looped
+ * around the ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value wraps back to 0.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+/**
+ * enum hal_wbm_tqm_rel_reason - TQM release reason code
+ * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or
+ * mpdus.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by
+ * fw with fw_reason1.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by
+ * fw with fw_reason2.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
+ * fw with fw_reason3.
+ */
+enum hal_wbm_tqm_rel_reason {
+ HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
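+
+/* A small illustrative helper (the name is an assumption): a TX status
+ * consumer would typically count only FRAME_ACKED as success and treat the
+ * other release reasons as drops.
+ */
+static inline bool
+ath12k_hal_tqm_rel_reason_is_acked(enum hal_wbm_tqm_rel_reason reason)
+{
+	return reason == HAL_WBM_TQM_REL_REASON_FRAME_ACKED;
+}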
+
+struct hal_wbm_buffer_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+};
+
+enum hal_mon_end_reason {
+ HAL_MON_STATUS_BUFFER_FULL,
+ HAL_MON_FLUSH_DETECTED,
+ HAL_MON_END_OF_PPDU,
+ HAL_MON_PPDU_TRUNCATED,
+};
+
+#define HAL_SW_MONITOR_RING_INFO0_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_SW_MONITOR_RING_INFO0_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_SW_MONITOR_RING_INFO0_MPDU_FRAGMENT_NUMBER GENMASK(10, 7)
+#define HAL_SW_MONITOR_RING_INFO0_FRAMELESS_BAR BIT(11)
+#define HAL_SW_MONITOR_RING_INFO0_STATUS_BUF_COUNT GENMASK(15, 12)
+#define HAL_SW_MONITOR_RING_INFO0_END_OF_PPDU BIT(16)
+
+#define HAL_SW_MONITOR_RING_INFO1_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_SW_MONITOR_RING_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_SW_MONITOR_RING_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_sw_monitor_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct ath12k_buffer_addr status_buff_addr_info;
+ __le32 info0; /* %HAL_SW_MONITOR_RING_INFO0 */
+ __le32 info1; /* %HAL_SW_MONITOR_RING_INFO1 */
+} __packed;
+
+/* hal_sw_monitor_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * Details related to the MPDU being pushed to SW, valid
+ * only if end_of_ppdu is set to 0.
+ *
+ * status_buff_addr_info
+ * Details of the physical address of the first status
+ * buffer used for the PPDU (either the PPDU that included the
+ * MPDU being pushed to SW if end_of_ppdu = 0, or the PPDU
+ * whose end is indicated through end_of_ppdu = 1)
+ *
+ * rxdma_push_reason
+ * Indicates why RXDMA pushed the frame to this ring
+ *
+ * <enum 0 rxdma_error_detected> RXDMA detected an error and
+ * pushed this frame to this queue
+ *
+ * <enum 1 rxdma_routing_instruction> RXDMA pushed the
+ * frame to this queue per received routing instructions. No
+ * error within RXDMA was detected
+ *
+ * <enum 2 rxdma_rx_flush> RXDMA received an RX_FLUSH. As a
+ * result the MSDU link descriptor might not have the
+ * last_msdu_in_mpdu_flag set, but instead WBM might just see a
+ * NULL pointer in the MSDU link descriptor. This is to be
+ * considered a normal condition for this scenario.
+ *
+ * rxdma_error_code
+ * Field only valid when rxdma_push_reason is set to
+ * 'rxdma_error_detected.'
+ *
+ * <enum 0 rxdma_overflow_err>MPDU frame is not complete
+ * due to a FIFO overflow error in RXPCU.
+ *
+ * <enum 1 rxdma_mpdu_length_err>MPDU frame is not complete
+ * due to receiving incomplete MPDU from the PHY
+ *
+ * <enum 3 rxdma_decrypt_err>CRYPTO reported a decryption
+ * error or CRYPTO received an encrypted frame, but did not get
+ * a valid corresponding key id in the peer entry.
+ *
+ * <enum 4 rxdma_tkip_mic_err>CRYPTO reported a TKIP MIC
+ * error
+ *
+ * <enum 5 rxdma_unecrypted_err>CRYPTO reported an
+ * unencrypted frame error when encrypted was expected
+ *
+ * <enum 6 rxdma_msdu_len_err>RX OLE reported an MSDU
+ * length error
+ *
+ * <enum 7 rxdma_msdu_limit_err>RX OLE reported that max
+ * number of MSDUs allowed in an MPDU got exceeded
+ *
+ * <enum 8 rxdma_wifi_parse_err>RX OLE reported a parsing
+ * error
+ *
+ * <enum 9 rxdma_amsdu_parse_err>RX OLE reported an A-MSDU
+ * parsing error
+ *
+ * <enum 10 rxdma_sa_timeout_err>RX OLE reported a timeout
+ * during SA search
+ *
+ * <enum 11 rxdma_da_timeout_err>RX OLE reported a timeout
+ * during DA search
+ *
+ * <enum 12 rxdma_flow_timeout_err>RX OLE reported a
+ * timeout during flow search
+ *
+ * <enum 13 rxdma_flush_request>RXDMA received a flush
+ * request
+ *
+ * <enum 14 rxdma_amsdu_fragment_err>Rx PCU reported A-MSDU
+ * present as well as a fragmented MPDU.
+ *
+ * mpdu_fragment_number
+ * Field only valid when Reo_level_mpdu_frame_info.
+ * Rx_mpdu_desc_info_details.Fragment_flag is set and
+ * end_of_ppdu is set to 0.
+ *
+ * The fragment number from the 802.11 header.
+ *
+ * Note that the sequence number is embedded in the field:
+ * Reo_level_mpdu_frame_info. Rx_mpdu_desc_info_details.
+ * Mpdu_sequence_number
+ *
+ * frameless_bar
+ * When set, this SW monitor ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO
+ * descriptors is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit
+ * is set are:
+ *
+ * Reo_level_mpdu_frame_info:
+ * Within Rx_mpdu_desc_info_details:
+ * Mpdu_Sequence_number
+ * BAR_frame
+ * Peer_meta_data
+ * All other fields shall be set to 0.
+ *
+ * status_buf_count
+ * A count of status buffers used so far for the PPDU
+ * (either the PPDU that included the MPDU being pushed to SW
+ * if end_of_ppdu = 0, or the PPDU whose end is indicated
+ * through end_of_ppdu = 1)
+ *
+ * end_of_ppdu
+ * Some hw RXDMA can be configured to generate a separate
+ * 'SW_MONITOR_RING' descriptor at the end of a PPDU (either
+ * through an 'RX_PPDU_END' TLV or through an 'RX_FLUSH') to
+ * demarcate PPDUs.
+ *
+ * For such a descriptor, this bit is set to 1 and fields
+ * Reo_level_mpdu_frame_info, mpdu_fragment_number and
+ * Frameless_bar are all set to 0.
+ *
+ * Otherwise this bit is set to 0.
+ *
+ * phy_ppdu_id
+ * A PPDU counter value that PHY increments for every PPDU
+ * received
+ *
+ * The counter value wraps around. Some hw RXDMA can be
+ * configured to copy this from the RX_PPDU_START TLV for every
+ * output descriptor.
+ *
+ * ring_id
+ * For debugging.
+ * This field is filled in by the SRNG module.
+ * It helps to identify the ring that is being looked at.
+ *
+ * looping_count
+ * For debugging.
+ * This field is filled in by the SRNG module.
+ *
+ * A count value that indicates the number of times the
+ * producer of entries into this Ring has looped around the
+ * ring.
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value wraps back to 0.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ */
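+
+/* A minimal sketch, assuming the masks above: checking whether a
+ * sw_monitor_ring entry is the end-of-PPDU marker. The helper name is
+ * hypothetical.
+ */
+static inline bool
+ath12k_hal_sw_mon_is_end_of_ppdu(const struct hal_sw_monitor_ring *mon)
+{
+	return le32_get_bits(mon->info0, HAL_SW_MONITOR_RING_INFO0_END_OF_PPDU);
+}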
+
+enum hal_desc_owner {
+ HAL_DESC_OWNER_WBM,
+ HAL_DESC_OWNER_SW,
+ HAL_DESC_OWNER_TQM,
+ HAL_DESC_OWNER_RXDMA,
+ HAL_DESC_OWNER_REO,
+ HAL_DESC_OWNER_SWITCH,
+};
+
+enum hal_desc_buf_type {
+ HAL_DESC_BUF_TYPE_TX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_TX_FLOW,
+ HAL_DESC_BUF_TYPE_TX_BUFFER,
+ HAL_DESC_BUF_TYPE_RX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_RX_BUFFER,
+ HAL_DESC_BUF_TYPE_IDLE_LINK,
+};
+
+#define HAL_DESC_REO_OWNED 4
+#define HAL_DESC_REO_QUEUE_DESC 8
+#define HAL_DESC_REO_QUEUE_EXT_DESC 9
+#define HAL_DESC_REO_NON_QOS_TID 16
+
+#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0)
+#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4)
+#define HAL_DESC_HDR_INFO0_DBG_RESERVED GENMASK(31, 8)
+
+struct hal_desc_header {
+ __le32 info0;
+} __packed;
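+
+/* An illustrative sketch of stamping and inspecting the descriptor header
+ * with the masks above, similar in spirit to what a REO queue setup path
+ * might do; the helper names are assumptions.
+ */
+static inline void ath12k_hal_desc_hdr_set(struct hal_desc_header *hdr,
+					   enum hal_desc_owner owner,
+					   enum hal_desc_buf_type type)
+{
+	hdr->info0 = le32_encode_bits(owner, HAL_DESC_HDR_INFO0_OWNER) |
+		     le32_encode_bits(type, HAL_DESC_HDR_INFO0_BUF_TYPE);
+}
+
+static inline enum hal_desc_buf_type
+ath12k_hal_desc_hdr_get_buf_type(const struct hal_desc_header *hdr)
+{
+	return le32_get_bits(hdr->info0, HAL_DESC_HDR_INFO0_BUF_TYPE);
+}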
+
+struct hal_rx_mpdu_link_ptr {
+ struct ath12k_buffer_addr addr_info;
+} __packed;
+
+struct hal_rx_msdu_details {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_msdu_desc rx_msdu_info;
+ struct rx_msdu_ext_desc rx_msdu_ext_info;
+} __packed;
+
+#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK BIT(16)
+
+struct hal_rx_msdu_link {
+ struct hal_desc_header desc_hdr;
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 pn[4];
+ struct hal_rx_msdu_details msdu_link[6];
+} __packed;
+
+struct hal_rx_reo_queue_ext {
+ struct hal_desc_header desc_hdr;
+ __le32 rsvd;
+ struct hal_rx_mpdu_link_ptr mpdu_link[15];
+} __packed;
+
+/* hal_rx_reo_queue_ext
+ * Consumer: REO
+ * Producer: REO
+ *
+ * descriptor_header
+ * Details about which module owns this struct.
+ *
+ * mpdu_link
+ * Pointer to the next MPDU_link descriptor in the MPDU queue.
+ */
+
+enum hal_rx_reo_queue_pn_size {
+ HAL_RX_REO_QUEUE_PN_SIZE_24,
+ HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE GENMASK(20, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN BIT(23)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE BIT(24)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE GENMASK(26, 25)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG BIT(27)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX GENMASK(22, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR BIT(23)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR BIT(24)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID BIT(31)
+
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT GENMASK(6, 0)
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT GENMASK(31, 16)
+
+struct hal_rx_reo_queue {
+ struct hal_desc_header desc_hdr;
+ __le32 rx_queue_num;
+ __le32 info0;
+ __le32 info1;
+ __le32 pn[4];
+ __le32 last_rx_enqueue_timestamp;
+ __le32 last_rx_dequeue_timestamp;
+ __le32 next_aging_queue[2];
+ __le32 prev_aging_queue[2];
+ __le32 rx_bitmap[9];
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 processed_mpdus;
+ __le32 processed_msdus;
+ __le32 processed_total_bytes;
+ __le32 info5;
+ __le32 rsvd[2];
+ struct hal_rx_reo_queue_ext ext_desc[];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ * Details about which module owns this struct. Note that sub field
+ * Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ * Valid bit indicating a session is established and the queue descriptor
+ * is valid.
+ * associated_link_descriptor_counter
+ * Indicates which of the 3 link descriptor counters shall be incremented
+ * or decremented when link descriptors are added or removed from this
+ * flow queue.
+ * disable_duplicate_detection
+ * When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ * When set, REO has been instructed to not perform the actual re-ordering
+ * of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ * Indicates the access category of the queue descriptor.
+ * bar
+ * Indicates if BAR has been received.
+ * retry
+ * Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ * Indicates what type of operation is expected from Reo when the received
+ * frame SN falls within the 2K window.
+ * oor_mode
+ * Indicates what type of operation is expected when the received frame
+ * falls within the OOR window.
+ * ba_window_size
+ * Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ * A value of 255 means a 256-bit bitmap, 63 means a 64-bit bitmap, and 0
+ * means a non-BA session with a window size of 0. These three values are
+ * the main validated ones, but other values should work as well.
+ *
+ * A BA window size of 0 (=> one frame entry bitmap) means that there is
+ * no additional rx_reo_queue_ext desc. following rx_reo_queue in memory.
+ * A BA window size of 1 - 105, means that there is 1 rx_reo_queue_ext.
+ * A BA window size of 106 - 210, means that there are 2 rx_reo_queue_ext.
+ * A BA window size of 211 - 256, means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ * REO shall perform the PN increment check, even number check, uneven
+ * number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ * REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ * The sequence number in the next field is a valid one.
+ * ssn
+ * Starting Sequence number of the session.
+ * current_index
+ * Points to last forwarded packet
+ * seq_2k_error_detected_flag
+ * REO has detected a 2k error jump in the sequence number and from that
+ * moment forward, all new frames are forwarded directly to FW, without
+ * duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ * REO has detected a PN error.
+ */
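+
+/* The BA-window-size-to-extension-descriptor mapping spelled out in the
+ * comment above, written as a hypothetical helper for illustration.
+ */
+static inline u32 ath12k_hal_reo_queue_ext_cnt(u32 ba_window_size)
+{
+	if (ba_window_size == 0)	/* one frame entry bitmap */
+		return 0;
+	if (ba_window_size <= 105)
+		return 1;
+	if (ba_window_size <= 210)
+		return 2;
+	return 3;			/* 211 - 256 */
+}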
+
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+
+struct hal_reo_update_rx_queue {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 queue_addr_lo;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 pn[4];
+} __packed;
+
+#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
+
+struct hal_reo_unblock_cache {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 info0;
+ __le32 rsvd[7];
+} __packed;
+
+enum hal_reo_exec_status {
+ HAL_REO_EXEC_STATUS_SUCCESS,
+ HAL_REO_EXEC_STATUS_BLOCKED,
+ HAL_REO_EXEC_STATUS_FAILED,
+ HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED,
+};
+
+#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM GENMASK(15, 0)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26)
+
+struct hal_reo_status_hdr {
+ __le32 info0;
+ __le32 timestamp;
+} __packed;
+
+/* hal_reo_status_hdr
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_num
+ * The value in this field is equal to value of the reo command
+ * number. This field helps to correlate the statuses with the REO
+ * commands.
+ *
+ * execution_time (in us)
+ * The amount of time REO took to execute the command. Note that
+ * this time does not include the duration of the command waiting
+ * in the command ring, before the execution started.
+ *
+ * execution_status
+ * Execution status of the command. Values are defined in
+ * enum %HAL_REO_EXEC_STATUS_.
+ */
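+
+/* A sketch of correlating a REO status with its originating command,
+ * assuming the caller saved the command number returned by
+ * ath12k_hal_reo_cmd_send(); the helper name is hypothetical.
+ */
+static inline bool
+ath12k_hal_reo_status_matches(const struct hal_reo_status_hdr *hdr, u16 cmd_num)
+{
+	return le32_get_bits(hdr->info0,
+			     HAL_REO_STATUS_HDR_INFO0_STATUS_NUM) == cmd_num;
+}
+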
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX GENMASK(21, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT GENMASK(6, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_WINDOW_JMP2K GENMASK(3, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT GENMASK(15, 10)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT GENMASK(23, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT GENMASK(31, 24)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT GENMASK(27, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT GENMASK(31, 28)
+
+struct hal_reo_get_queue_stats_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 pn[4];
+ __le32 last_rx_enqueue_timestamp;
+ __le32 last_rx_dequeue_timestamp;
+ __le32 rx_bitmap[9];
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 num_mpdu_frames;
+ __le32 num_msdu_frames;
+ __le32 total_bytes;
+ __le32 info4;
+ __le32 info5;
+} __packed;
+
+/* hal_reo_get_queue_stats_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * ssn
+ * Starting Sequence number of the session, this changes whenever
+ * window moves (can be filled by SW then maintained by REO).
+ *
+ * current_index
+ * Points to last forwarded packet.
+ *
+ * pn
+ * Bits of the PN number.
+ *
+ * last_rx_enqueue_timestamp
+ * last_rx_dequeue_timestamp
+ * Timestamp of arrival of the last MPDU for this queue and
+ * Timestamp of forwarding an MPDU accordingly.
+ *
+ * rx_bitmap
+ * When a bit is set, the corresponding frame is currently held
+ * in the re-order queue. The bitmap is fully managed by HW.
+ *
+ * current_mpdu_count
+ * current_msdu_count
+ * The number of MPDUs and MSDUs in the queue.
+ *
+ * timeout_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Forwarding reason is timeout.
+ *
+ * forward_due_to_bar_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Fwd reason is reception of BAR.
+ *
+ * duplicate_count
+ * The number of duplicate frames that have been detected.
+ *
+ * frames_in_order_count
+ * The number of frames that have been received in order (without
+ * a hole that prevented them from being forwarded immediately).
+ *
+ * bar_received_count
+ * The number of times a BAR frame is received.
+ *
+ * mpdu_frames_processed_count
+ * msdu_frames_processed_count
+ * The total number of MPDU/MSDU frames that have been processed.
+ *
+ * total_bytes
+ * An approximation of the number of bytes received for this queue.
+ *
+ * late_receive_mpdu_count
+ * The number of MPDUs received after the window had already moved
+ * on. The 'late' sequence window is defined as
+ * (Window SSN - 256) - (Window SSN - 1).
+ *
+ * window_jump_2k
+ * The number of times the window moved more than 2K
+ *
+ * hole_count
+ * The number of times a hole was created in the receive bitmap.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD GENMASK(31, 1)
+#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD GENMASK(27, 0)
+
+struct hal_reo_flush_queue_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 rsvd0[21];
+ __le32 info1;
+} __packed;
+
+/* hal_reo_flush_queue_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status of blocking resource
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - Error detected. The resource to be used for blocking was
+ * already in use.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE GENMASK(2, 1)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT BIT(8)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE GENMASK(11, 9)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID GENMASK(15, 12)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR GENMASK(17, 16)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT GENMASK(25, 18)
+
+struct hal_reo_flush_cache_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 rsvd0[21];
+ __le32 info1;
+} __packed;
+
+/* hal_reo_flush_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status for blocking resource handling
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - An error in the blocking resource management was detected
+ *
+ * block_error_details
+ * only valid when error_detected is set
+ *
+ * 0 - No blocking related errors found
+ * 1 - Blocking resource is already in use
+ * 2 - Resource requested to be unblocked, was not blocked
+ *
+ * cache_controller_flush_status_hit
+ * The status that the cache controller returned on executing the
+ * flush command.
+ *
+ * 0 - miss; 1 - hit
+ *
+ * cache_controller_flush_status_desc_type
+ * Flush descriptor type
+ *
+ * cache_controller_flush_status_client_id
+ * Module who made the flush request
+ *
+ * In REO, this is always 0
+ *
+ * cache_controller_flush_status_error
+ * Error condition
+ *
+ * 0 - No error found
+ * 1 - HW interface is still busy
+ * 2 - Line currently locked. Used for one line flush command
+ * 3 - At least one line is still locked.
+ * Used for cache flush command.
+ *
+ * cache_controller_flush_count
+ * The number of lines that were actually flushed out
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE BIT(1)
+
+struct hal_reo_unblock_cache_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 rsvd0[21];
+ __le32 info1;
+} __packed;
+
+/* hal_reo_unblock_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - The blocking resource was not in use, and therefore it could
+ * not be unblocked.
+ *
+ * unblock_type
+ * Reference to the type of unblock command
+ * 0 - Unblock a blocking resource
+ * 1 - The entire cache usage is unblocked
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY BIT(1)
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT GENMASK(15, 0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT GENMASK(31, 16)
+
+struct hal_reo_flush_timeout_list_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 info1;
+ __le32 rsvd0[20];
+ __le32 info2;
+} __packed;
+
+/* hal_reo_flush_timeout_list_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - Command not properly executed and returned with error
+ *
+ * timeout_list_empty
+ * When set, REO has depleted the timeout list and all entries are
+ * gone.
+ *
+ * release_desc_count
+ * Producer: SW; Consumer: REO
+ * The number of link descriptor released
+ *
+ * forward_buf_count
+ * Producer: SW; Consumer: REO
+ * The number of buffers forwarded to the REO destination rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX GENMASK(1, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(25, 0)
+
+struct hal_reo_desc_thresh_reached_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 rsvd0[17];
+ __le32 info5;
+} __packed;
+
+/* hal_reo_desc_thresh_reached_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * threshold_index
+ * The index of the threshold register whose value got reached
+ *
+ * link_descriptor_counter0
+ * link_descriptor_counter1
+ * link_descriptor_counter2
+ * link_descriptor_counter_sum
+ * Value of the respective counters at generation of this message
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_DATA_LENGTH GENMASK(13, 0)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_L4_CSUM_STATUS BIT(14)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_L3_CSUM_STATUS BIT(15)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_PID GENMASK(27, 24)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_QDISC BIT(28)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_MULTICAST BIT(29)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_MORE BIT(30)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_VALID_TOGGLE BIT(31)
+
+struct hal_tcl_entrance_from_ppe_ring {
+ __le32 buffer_addr;
+ __le32 info0;
+} __packed;
+
+struct hal_mon_buf_ring {
+ __le32 paddr_lo;
+ __le32 paddr_hi;
+ __le64 cookie;
+};
+
+/* hal_mon_buf_ring
+ * Producer : SW
+ * Consumer : Monitor
+ *
+ * paddr_lo
+ * Lower 32-bit physical address of the buffer pointer from the source ring.
+ * paddr_hi
+ * bit range 7-0 : upper 8 bit of the physical address.
+ * bit range 31-8 : reserved.
+ * cookie
+ * Consumer: RxMon/TxMon. 64-bit cookie of the buffers.
+ */
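+
+/* For illustration, assuming the 40-bit DMA address layout described above:
+ * how SW might fill one monitor buffer ring entry. The helper name and the
+ * cookie scheme are assumptions.
+ */
+static inline void ath12k_hal_mon_buf_fill(struct hal_mon_buf_ring *entry,
+					   dma_addr_t paddr, u64 cookie)
+{
+	entry->paddr_lo = cpu_to_le32(lower_32_bits(paddr));
+	entry->paddr_hi = cpu_to_le32(upper_32_bits(paddr) & GENMASK(7, 0));
+	entry->cookie = cpu_to_le64(cookie);
+}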
+
+#define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0)
+
+#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0)
+#define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16)
+#define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17)
+#define HAL_MON_DEST_INFO0_INITIATOR BIT(18)
+#define HAL_MON_DEST_INFO0_EMPTY_DESC BIT(19)
+#define HAL_MON_DEST_INFO0_RING_ID GENMASK(27, 20)
+#define HAL_MON_DEST_INFO0_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_mon_dest_desc {
+ __le32 cookie;
+ __le32 reserved;
+ __le32 ppdu_id;
+ __le32 info0;
+};
+
+/* hal_mon_dest_ring
+ * Producer : TxMon/RxMon
+ * Consumer : SW
+ * cookie
+ * bits 0-17: buf_id to track the skb's vaddr.
+ * ppdu_id
+ * Phy ppdu_id
+ * end_offset
+ * The offset into the status buffer where DMA ended, i.e., the offset
+ * of the last TLV plus the last TLV's size.
+ * flush_detected
+ * Indicates whether 'tx_flush' or 'rx_flush' occurred.
+ * end_of_ppdu
+ * Indicates end of ppdu.
+ * pmac_id
+ * Indicates the PMAC that received the frame.
+ * empty_descriptor
+ * This descriptor is written on flush or end of ppdu or end of status
+ * buffer.
+ * ring_id
+ * updated by SRNG.
+ * looping_count
+ * updated by SRNG.
+ */
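+
+/* A hedged sketch of consuming a monitor destination descriptor: recover
+ * the buffer id from the cookie and check for the end-of-PPDU marker. The
+ * helper name is hypothetical; the masks are the ones defined above.
+ */
+static inline bool ath12k_hal_mon_dest_parse(const struct hal_mon_dest_desc *desc,
+					     u32 *buf_id, u16 *end_offset)
+{
+	*buf_id = le32_get_bits(desc->cookie, HAL_MON_DEST_COOKIE_BUF_ID);
+	*end_offset = le32_get_bits(desc->info0, HAL_MON_DEST_INFO0_END_OFFSET);
+
+	return le32_get_bits(desc->info0, HAL_MON_DEST_INFO0_END_OF_PPDU);
+}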
+
+#endif /* ATH12K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
new file mode 100644
index 000000000000..ee61a6462fdc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "debug.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "hal_desc.h"
+#include "hif.h"
+
+static void ath12k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
+ u8 owner, u8 buffer_type, u32 magic)
+{
+ hdr->info0 = le32_encode_bits(owner, HAL_DESC_HDR_INFO0_OWNER) |
+ le32_encode_bits(buffer_type, HAL_DESC_HDR_INFO0_BUF_TYPE);
+
+ /* Magic pattern in reserved bits for debugging */
+ hdr->info0 |= le32_encode_bits(magic, HAL_DESC_HDR_INFO0_DBG_RESERVED);
+}
+
+static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_get_queue_stats *desc;
+
+ tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
+ u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ memset_startat(desc, 0, queue_addr_lo);
+
+ desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+
+ desc->queue_addr_lo = cpu_to_le32(cmd->addr_lo);
+ desc->info0 = le32_encode_bits(cmd->addr_hi,
+ HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI);
+ if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+ desc->info0 |= cpu_to_le32(HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS);
+
+ return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+}
+
+static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
+ struct hal_tlv_64_hdr *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_flush_cache *desc;
+ u8 avail_slot = ffz(hal->avail_blk_resource);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+ return -ENOSPC;
+
+ hal->current_blk_index = avail_slot;
+ }
+
+ tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
+ u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+
+ desc = (struct hal_reo_flush_cache *)tlv->value;
+ memset_startat(desc, 0, cache_addr_lo);
+
+ desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+
+ desc->cache_addr_lo = cpu_to_le32(cmd->addr_lo);
+ desc->info0 = le32_encode_bits(cmd->addr_hi,
+ HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE);
+ desc->info0 |=
+ le32_encode_bits(avail_slot,
+ HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX);
+ }
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL);
+
+ return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+}
+
+static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_update_rx_queue *desc;
+
+ tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
+ u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+
+ desc = (struct hal_reo_update_rx_queue *)tlv->value;
+ memset_startat(desc, 0, queue_addr_lo);
+
+ desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+
+ desc->queue_addr_lo = cpu_to_le32(cmd->addr_lo);
+ desc->info0 =
+ le32_encode_bits(cmd->addr_hi,
+ HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_AC),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN);
+
+ desc->info1 =
+ le32_encode_bits(cmd->rx_queue_num,
+ HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD),
+ HAL_REO_UPD_RX_QUEUE_INFO1_VLD) |
+ le32_encode_bits(u32_get_bits(cmd->upd1, HAL_REO_CMD_UPD1_ALDC),
+ HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION),
+ HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN),
+ HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN) |
+ le32_encode_bits(u32_get_bits(cmd->upd1, HAL_REO_CMD_UPD1_AC),
+ HAL_REO_UPD_RX_QUEUE_INFO1_AC) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR),
+ HAL_REO_UPD_RX_QUEUE_INFO1_BAR) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE),
+ HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY),
+ HAL_REO_UPD_RX_QUEUE_INFO1_RETRY) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE),
+ HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK),
+ HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE),
+ HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG),
+ HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG);
+
+ if (cmd->pn_size == 24)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+ else if (cmd->pn_size == 48)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+ else if (cmd->pn_size == 128)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+ if (cmd->ba_window_size < 1)
+ cmd->ba_window_size = 1;
+
+ if (cmd->ba_window_size == 1)
+ cmd->ba_window_size++;
+
+ desc->info2 =
+ le32_encode_bits(cmd->ba_window_size - 1,
+ HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE) |
+ le32_encode_bits(cmd->pn_size, HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE) |
+ le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD),
+ HAL_REO_UPD_RX_QUEUE_INFO2_SVLD) |
+ le32_encode_bits(u32_get_bits(cmd->upd2, HAL_REO_CMD_UPD2_SSN),
+ HAL_REO_UPD_RX_QUEUE_INFO2_SSN) |
+ le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR),
+ HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR) |
+ le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR),
+ HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR);
+
+ return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+}
+
+int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd)
+{
+ struct hal_tlv_64_hdr *reo_desc;
+ int ret;
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+ reo_desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_desc) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ switch (type) {
+ case HAL_REO_CMD_GET_QUEUE_STATS:
+ ret = ath12k_hal_reo_cmd_queue_stats(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_CACHE:
+ ret = ath12k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_UPDATE_RX_QUEUE:
+ ret = ath12k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_QUEUE:
+ case HAL_REO_CMD_UNBLOCK_CACHE:
+ case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+ ath12k_warn(ab, "Unsupported reo command %d\n", type);
+ ret = -ENOTSUPP;
+ break;
+ default:
+ ath12k_warn(ab, "Unknown reo command %d\n", type);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
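+
+/* A hedged usage sketch for the command path above: requesting stats for
+ * one REO queue. The srng pointer and queue address come from driver state
+ * not shown here, so this is illustrative rather than a fixed calling
+ * convention:
+ *
+ *	struct ath12k_hal_reo_cmd cmd = {};
+ *
+ *	cmd.addr_lo = lower_32_bits(qaddr);
+ *	cmd.addr_hi = upper_32_bits(qaddr);
+ *	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ *	ret = ath12k_hal_reo_cmd_send(ab, srng, HAL_REO_CMD_GET_QUEUE_STATS, &cmd);
+ *	if (ret < 0)
+ *		ath12k_warn(ab, "failed to send reo cmd: %d\n", ret);
+ */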
+
+void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie, u8 manager)
+{
+ u32 paddr_lo, paddr_hi;
+
+ paddr_lo = lower_32_bits(paddr);
+ paddr_hi = upper_32_bits(paddr);
+ binfo->info0 = le32_encode_bits(paddr_lo, BUFFER_ADDR_INFO0_ADDR);
+ binfo->info1 = le32_encode_bits(paddr_hi, BUFFER_ADDR_INFO1_ADDR) |
+ le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE) |
+ le32_encode_bits(manager, BUFFER_ADDR_INFO1_RET_BUF_MGR);
+}
+
+void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm)
+{
+ *paddr = (((u64)le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_ADDR)) << 32) |
+ le32_get_bits(binfo->info0, BUFFER_ADDR_INFO0_ADDR);
+ *cookie = le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
+ *rbm = le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_RET_BUF_MGR);
+}
+
+void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm)
+{
+ struct hal_rx_msdu_details *msdu;
+ u32 val;
+ int i;
+
+ *num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ msdu = &link->msdu_link[0];
+ *rbm = le32_get_bits(msdu->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_RET_BUF_MGR);
+
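+	/* A zero buffer address terminates the MSDU list early */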
+ for (i = 0; i < *num_msdus; i++) {
+ msdu = &link->msdu_link[i];
+
+ val = le32_get_bits(msdu->buf_addr_info.info0,
+ BUFFER_ADDR_INFO0_ADDR);
+ if (val == 0) {
+ *num_msdus = i;
+ break;
+ }
+ *msdu_cookies = le32_get_bits(msdu->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+ msdu_cookies++;
+ }
+}
+
+int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
+ struct hal_reo_dest_ring *desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ enum hal_reo_dest_ring_push_reason push_reason;
+ enum hal_reo_dest_ring_error_code err_code;
+ u32 cookie, val;
+
+ push_reason = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_PUSH_REASON);
+ err_code = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_ERROR_CODE);
+ ab->soc_stats.reo_error[err_code]++;
+
+ if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
+ push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ ath12k_warn(ab, "expected error push reason code, received %d\n",
+ push_reason);
+ return -EINVAL;
+ }
+
+ val = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE);
+ if (val != HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
+ ath12k_warn(ab, "expected buffer type link_desc");
+ return -EINVAL;
+ }
+
+ ath12k_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr, &cookie);
+ *desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
+
+ return 0;
+}
+
+int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info)
+{
+ struct hal_wbm_release_ring *wbm_desc = desc;
+ struct hal_wbm_release_ring_cc_rx *wbm_cc_desc = desc;
+ enum hal_wbm_rel_desc_type type;
+ enum hal_wbm_rel_src_module rel_src;
+ bool hw_cc_done;
+ u64 desc_va;
+ u32 val;
+
+ type = le32_get_bits(wbm_desc->info0, HAL_WBM_RELEASE_INFO0_DESC_TYPE);
+	/* Only the released-MSDU descriptor type is expected here */
+ if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rel_src = le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
+ if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
+ rel_src != HAL_WBM_REL_SRC_MODULE_REO)
+ return -EINVAL;
+
+	/* The format of the WBM release ring descriptor depends on the
+	 * HW cookie conversion status.
+	 */
+ hw_cc_done = le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_RX_INFO0_CC_STATUS);
+
+ if (!hw_cc_done) {
+ val = le32_get_bits(wbm_desc->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_RET_BUF_MGR);
+ if (val != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = le32_get_bits(wbm_desc->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ rel_info->rx_desc = NULL;
+ } else {
+ val = le32_get_bits(wbm_cc_desc->info0,
+ HAL_WBM_RELEASE_RX_CC_INFO0_RBM);
+ if (val != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = le32_get_bits(wbm_cc_desc->info1,
+ HAL_WBM_RELEASE_RX_CC_INFO1_COOKIE);
+
+ desc_va = ((u64)le32_to_cpu(wbm_cc_desc->buf_va_hi) << 32 |
+ le32_to_cpu(wbm_cc_desc->buf_va_lo));
+ rel_info->rx_desc =
+ (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+ }
+
+ rel_info->err_rel_src = rel_src;
+ rel_info->hw_cc_done = hw_cc_done;
+
+ rel_info->first_msdu = le32_get_bits(wbm_desc->info3,
+ HAL_WBM_RELEASE_INFO3_FIRST_MSDU);
+ rel_info->last_msdu = le32_get_bits(wbm_desc->info3,
+ HAL_WBM_RELEASE_INFO3_LAST_MSDU);
+ rel_info->continuation = le32_get_bits(wbm_desc->info3,
+ HAL_WBM_RELEASE_INFO3_CONTINUATION);
+
+ if (rel_info->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
+ rel_info->push_reason =
+ le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON);
+ rel_info->err_code =
+ le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE);
+ } else {
+ rel_info->push_reason =
+ le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON);
+ rel_info->err_code =
+ le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE);
+ }
+
+ return 0;
+}
+
+void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buff_addr,
+ dma_addr_t *paddr, u32 *cookie)
+{
+ *paddr = ((u64)(le32_get_bits(buff_addr->info1,
+ BUFFER_ADDR_INFO1_ADDR)) << 32) |
+ le32_get_bits(buff_addr->info0, BUFFER_ADDR_INFO0_ADDR);
+
+ *cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
+}
+
+void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
+ struct hal_wbm_release_ring *dst_desc,
+ struct hal_wbm_release_ring *src_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ dst_desc->buf_addr_info = src_desc->buf_addr_info;
+ dst_desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
+ le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
+ le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_RELEASE_INFO0_DESC_TYPE);
+}
+
+void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_get_queue_stats_status *desc =
+ (struct hal_reo_get_queue_stats_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "Queue stats status:\n");
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "header: cmd_num %d status %d\n",
+ status->uniform_hdr.cmd_num,
+ status->uniform_hdr.cmd_status);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "ssn %u cur_idx %u\n",
+ le32_get_bits(desc->info0,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN),
+ le32_get_bits(desc->info0,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
+ desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ desc->last_rx_enqueue_timestamp,
+ desc->last_rx_dequeue_timestamp);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
+ desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
+ desc->rx_bitmap[6], desc->rx_bitmap[7]);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "count: cur_mpdu %u cur_msdu %u\n",
+ le32_get_bits(desc->info1,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT),
+ le32_get_bits(desc->info1,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "fwd_timeout %u fwd_bar %u dup_count %u\n",
+ le32_get_bits(desc->info2,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT),
+ le32_get_bits(desc->info2,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT),
+ le32_get_bits(desc->info2,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "frames_in_order %u bar_rcvd %u\n",
+ le32_get_bits(desc->info3,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT),
+ le32_get_bits(desc->info3,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
+ desc->num_mpdu_frames, desc->num_msdu_frames,
+ desc->total_bytes);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "late_rcvd %u win_jump_2k %u hole_cnt %u\n",
+ le32_get_bits(desc->info4,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU),
+ le32_get_bits(desc->info2,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_WINDOW_JMP2K),
+ le32_get_bits(desc->info4,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "looping count %u\n",
+ le32_get_bits(desc->info5,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT));
+}
+
+void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_flush_queue_status *desc =
+ (struct hal_reo_flush_queue_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+ status->u.flush_queue.err_detected =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED);
+}
+
+void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_reo_flush_cache_status *desc =
+ (struct hal_reo_flush_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ status->u.flush_cache.err_detected =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR);
+ status->u.flush_cache.err_code =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE);
+ if (!status->u.flush_cache.err_code)
+ hal->avail_blk_resource |= BIT(hal->current_blk_index);
+
+ status->u.flush_cache.cache_controller_flush_status_hit =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT);
+
+ status->u.flush_cache.cache_controller_flush_status_desc_type =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE);
+ status->u.flush_cache.cache_controller_flush_status_client_id =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID);
+ status->u.flush_cache.cache_controller_flush_status_err =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR);
+ status->u.flush_cache.cache_controller_flush_status_cnt =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT);
+}
+
+void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_reo_unblock_cache_status *desc =
+ (struct hal_reo_unblock_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ status->u.unblock_cache.err_detected =
+ le32_get_bits(desc->info0,
+ HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR);
+ status->u.unblock_cache.unblock_type =
+ le32_get_bits(desc->info0,
+ HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE);
+
+ if (!status->u.unblock_cache.err_detected &&
+ status->u.unblock_cache.unblock_type ==
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+ hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_flush_timeout_list_status *desc =
+ (struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ status->u.timeout_list.err_detected =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR);
+ status->u.timeout_list.list_empty =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY);
+
+ status->u.timeout_list.release_desc_cnt =
+ le32_get_bits(desc->info1,
+ HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT);
+ status->u.timeout_list.fwd_buf_cnt =
+		le32_get_bits(desc->info1,
+ HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT);
+}
+
+void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_desc_thresh_reached_status *desc =
+ (struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ status->u.desc_thresh_reached.threshold_idx =
+ le32_get_bits(desc->info0,
+ HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX);
+
+ status->u.desc_thresh_reached.link_desc_counter0 =
+ le32_get_bits(desc->info1,
+ HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0);
+
+ status->u.desc_thresh_reached.link_desc_counter1 =
+ le32_get_bits(desc->info2,
+ HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1);
+
+ status->u.desc_thresh_reached.link_desc_counter2 =
+ le32_get_bits(desc->info3,
+ HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2);
+
+ status->u.desc_thresh_reached.link_desc_counter_sum =
+ le32_get_bits(desc->info4,
+ HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM);
+}
+
+void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_status_hdr *desc =
+ (struct hal_reo_status_hdr *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+}
+
+u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+{
+ u32 num_ext_desc;
+
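+	/* Each extension descriptor appears to cover roughly 105 BA window
+	 * entries; only non-QoS TIDs with a window <= 1 need none at all.
+	 */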
+ if (ba_window_size <= 1) {
+ if (tid != HAL_DESC_REO_NON_QOS_TID)
+ num_ext_desc = 1;
+ else
+ num_ext_desc = 0;
+ } else if (ba_window_size <= 105) {
+ num_ext_desc = 1;
+ } else if (ba_window_size <= 210) {
+ num_ext_desc = 2;
+ } else {
+ num_ext_desc = 3;
+ }
+
+ return sizeof(struct hal_rx_reo_queue) +
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
+
+void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
+ int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type)
+{
+ struct hal_rx_reo_queue_ext *ext_desc;
+
+ memset(qdesc, 0, sizeof(*qdesc));
+
+ ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+ qdesc->rx_queue_num = le32_encode_bits(tid, HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER);
+
+ qdesc->info0 =
+ le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_VLD) |
+ le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER) |
+ le32_encode_bits(ath12k_tid_to_ac(tid), HAL_RX_REO_QUEUE_INFO0_AC);
+
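+	/* HW encodes the BA window as (size - 1). A QoS TID with a window
+	 * of 1 is bumped to 2; the remaining size-1 case (non-QoS) gets
+	 * the retry bit set instead.
+	 */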
+ if (ba_window_size < 1)
+ ba_window_size = 1;
+
+ if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+ ba_window_size++;
+
+ if (ba_window_size == 1)
+ qdesc->info0 |= le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_RETRY);
+
+ qdesc->info0 |= le32_encode_bits(ba_window_size - 1,
+ HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE);
+ switch (type) {
+ case HAL_PN_TYPE_NONE:
+ case HAL_PN_TYPE_WAPI_EVEN:
+ case HAL_PN_TYPE_WAPI_UNEVEN:
+ break;
+ case HAL_PN_TYPE_WPA:
+ qdesc->info0 |=
+ le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_PN_CHECK) |
+ le32_encode_bits(HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_INFO0_PN_SIZE);
+ break;
+ }
+
+	/* TODO: Set the ignore-AMPDU flag based on BA window size and/or
+	 * AMPDU capabilities.
+	 */
+ qdesc->info0 |= le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG);
+
+ qdesc->info1 |= le32_encode_bits(0, HAL_RX_REO_QUEUE_INFO1_SVLD);
+
+ if (start_seq <= 0xfff)
+ qdesc->info1 = le32_encode_bits(start_seq,
+ HAL_RX_REO_QUEUE_INFO1_SSN);
+
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ return;
+
+ ext_desc = qdesc->ext_desc;
+
+	/* TODO: HW queue descriptors are currently allocated for the max BA
+	 * window size for all QoS TIDs so that the same descriptor can be
+	 * reused later when an ADDBA request is received. This should be
+	 * changed to allocate HW queue descriptors based on the negotiated
+	 * BA window size (0 for non-BA cases), to reallocate when the BA
+	 * window size changes, and also to send a WMI message to the FW to
+	 * update the REO queue descriptor in the Rx peer entry as part of
+	 * dp_rx_tid_update.
+	 */
+ memset(ext_desc, 0, 3 * sizeof(*ext_desc));
+ ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ext_desc++;
+ ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ext_desc++;
+ ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
+
+void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_64_hdr *tlv;
+ struct hal_reo_get_queue_stats *desc;
+ int i, cmd_num = 1;
+ int entry_size;
+ u8 *entry;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath12k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
+ ath12k_hal_srng_get_params(ab, srng, &params);
+ entry = (u8 *)params.ring_base_vaddr;
+
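+	/* Pre-fill every command ring entry with a unique, increasing
+	 * command number.
+	 */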
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_64_hdr *)entry;
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc->cmd.info0 = le32_encode_bits(cmd_num++,
+ HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+ entry += entry_size;
+ }
+}
+
+void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+ val = ath12k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val |= u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE) |
+ u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ val = ath12k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab));
+
+ val &= ~(HAL_REO1_MISC_CTL_FRAG_DST_RING |
+ HAL_REO1_MISC_CTL_BAR_DST_RING);
+ val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0,
+ HAL_REO1_MISC_CTL_FRAG_DST_RING);
+ val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0,
+ HAL_REO1_MISC_CTL_BAR_DST_RING);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab), val);
+
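+	/* Program per-AC aging timeouts: IX_0..IX_2 cover BE/BK/VI and
+	 * IX_3 covers VO.
+	 */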
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_VO_REO_TIMEOUT_USEC);
+
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
new file mode 100644
index 000000000000..fcfb6c819047
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -0,0 +1,704 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HAL_RX_H
+#define ATH12K_HAL_RX_H
+
+struct hal_rx_wbm_rel_info {
+ u32 cookie;
+ enum hal_wbm_rel_src_module err_rel_src;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 err_code;
+ bool first_msdu;
+ bool last_msdu;
+ bool continuation;
+ void *rx_desc;
+ bool hw_cc_done;
+};
+
+#define HAL_INVALID_PEERID 0xffff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_NSS 8
+
+#define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \
+ le32_get_bits((__val), GENMASK(7, 0))
+
+#define HAL_RX_MPDU_INFO_PN_GET_BYTE2(__val) \
+ le32_get_bits((__val), GENMASK(15, 8))
+
+#define HAL_RX_MPDU_INFO_PN_GET_BYTE3(__val) \
+ le32_get_bits((__val), GENMASK(23, 16))
+
+#define HAL_RX_MPDU_INFO_PN_GET_BYTE4(__val) \
+ le32_get_bits((__val), GENMASK(31, 24))
+
+struct hal_rx_mon_status_tlv_hdr {
+ u32 hdr;
+ u8 value[];
+};
+
+enum hal_rx_su_mu_coding {
+ HAL_RX_SU_MU_CODING_BCC,
+ HAL_RX_SU_MU_CODING_LDPC,
+ HAL_RX_SU_MU_CODING_MAX,
+};
+
+enum hal_rx_gi {
+ HAL_RX_GI_0_8_US,
+ HAL_RX_GI_0_4_US,
+ HAL_RX_GI_1_6_US,
+ HAL_RX_GI_3_2_US,
+ HAL_RX_GI_MAX,
+};
+
+enum hal_rx_bw {
+ HAL_RX_BW_20MHZ,
+ HAL_RX_BW_40MHZ,
+ HAL_RX_BW_80MHZ,
+ HAL_RX_BW_160MHZ,
+ HAL_RX_BW_MAX,
+};
+
+enum hal_rx_preamble {
+ HAL_RX_PREAMBLE_11A,
+ HAL_RX_PREAMBLE_11B,
+ HAL_RX_PREAMBLE_11N,
+ HAL_RX_PREAMBLE_11AC,
+ HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_MAX,
+};
+
+enum hal_rx_reception_type {
+ HAL_RX_RECEPTION_TYPE_SU,
+ HAL_RX_RECEPTION_TYPE_MU_MIMO,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
+ HAL_RX_RECEPTION_TYPE_MAX,
+};
+
+enum hal_rx_legacy_rate {
+ HAL_RX_LEGACY_RATE_1_MBPS,
+ HAL_RX_LEGACY_RATE_2_MBPS,
+ HAL_RX_LEGACY_RATE_5_5_MBPS,
+ HAL_RX_LEGACY_RATE_6_MBPS,
+ HAL_RX_LEGACY_RATE_9_MBPS,
+ HAL_RX_LEGACY_RATE_11_MBPS,
+ HAL_RX_LEGACY_RATE_12_MBPS,
+ HAL_RX_LEGACY_RATE_18_MBPS,
+ HAL_RX_LEGACY_RATE_24_MBPS,
+ HAL_RX_LEGACY_RATE_36_MBPS,
+ HAL_RX_LEGACY_RATE_48_MBPS,
+ HAL_RX_LEGACY_RATE_54_MBPS,
+ HAL_RX_LEGACY_RATE_INVALID,
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
+#define HAL_TLV_STATUS_PPDU_DONE 1
+#define HAL_TLV_STATUS_BUF_DONE 2
+#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
+#define HAL_RX_FCS_LEN 4
+
+enum hal_rx_mon_status {
+ HAL_RX_MON_STATUS_PPDU_NOT_DONE,
+ HAL_RX_MON_STATUS_PPDU_DONE,
+ HAL_RX_MON_STATUS_BUF_DONE,
+};
+
+#define HAL_RX_MAX_MPDU 256
+#define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 5)
+
+struct hal_rx_user_status {
+ u32 mcs:4,
+ nss:3,
+ ofdma_info_valid:1,
+ ul_ofdma_ru_start_index:7,
+ ul_ofdma_ru_width:7,
+ ul_ofdma_ru_size:8;
+ u32 ul_ofdma_user_v0_word0;
+ u32 ul_ofdma_user_v0_word1;
+ u32 ast_index;
+ u32 tid;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 frame_control;
+ u8 frame_control_info_valid;
+ u8 data_sequence_control_info_valid;
+ u16 first_data_seq_ctrl;
+ u32 preamble_type;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u8 rs_flags;
+ u8 ldpc;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
+ u32 mpdu_ok_byte_count;
+ u32 mpdu_err_byte_count;
+};
+
+#define HAL_MAX_UL_MU_USERS 37
+
+struct hal_rx_mon_ppdu_info {
+ u32 ppdu_id;
+ u32 last_ppdu_id;
+ u64 ppdu_ts;
+ u32 num_mpdu_fcs_ok;
+ u32 num_mpdu_fcs_err;
+ u32 preamble_type;
+ u32 mpdu_len;
+ u16 chan_num;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 peer_id;
+ u8 rate;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+ u8 vht_flag_values1;
+ u8 vht_flag_values2;
+ u8 vht_flag_values3[4];
+ u8 vht_flag_values4;
+ u8 vht_flag_values5;
+ u16 vht_flag_values6;
+ u8 is_stbc;
+ u8 gi;
+ u8 sgi;
+ u8 ldpc;
+ u8 beamformed;
+ u8 rssi_comb;
+ u16 tid;
+ u8 fc_valid;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u16 he_mu_flags;
+ u8 dcm;
+ u8 ru_alloc;
+ u8 reception_type;
+ u64 tsft;
+ u64 rx_duration;
+ u16 frame_control;
+ u32 ast_index;
+ u8 rs_fcs_err;
+ u8 rs_flags;
+ u8 cck_flag;
+ u8 ofdm_flag;
+ u8 ulofdma_flag;
+ u8 frame_control_info_valid;
+ u16 he_per_user_1;
+ u16 he_per_user_2;
+ u8 he_per_user_position;
+ u8 he_per_user_known;
+ u16 he_flags1;
+ u16 he_flags2;
+ u8 he_RU[4];
+ u16 he_data1;
+ u16 he_data2;
+ u16 he_data3;
+ u16 he_data4;
+ u16 he_data5;
+ u16 he_data6;
+ u32 ppdu_len;
+ u32 prev_ppdu_id;
+ u32 device_id;
+ u16 first_data_seq_ctrl;
+ u8 monitor_direct_used;
+ u8 data_sequence_control_info_valid;
+ u8 ltf_size;
+ u8 rxpcu_filter_pass;
+ s8 rssi_chain[8][8];
+ u32 num_users;
+ u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u8 addr4[ETH_ALEN];
+ struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
+ u8 userid;
+ u16 ampdu_id[HAL_MAX_UL_MU_USERS];
+ bool first_msdu_in_mpdu;
+ bool is_ampdu;
+ u8 medium_prot_type;
+};
+
+#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+
+struct hal_rx_ppdu_start {
+ __le32 info0;
+ __le32 chan_num;
+ __le32 ppdu_start_ts;
+} __packed;
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT GENMASK(24, 0)
+
+struct hal_rx_ppdu_end_user_stats {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 ht_ctrl;
+ __le32 rsvd1[2];
+ __le32 info4;
+ __le32 info5;
+ __le32 usr_resp_ref;
+ __le32 info6;
+ __le32 rsvd3[4];
+ __le32 mpdu_ok_cnt;
+ __le32 rsvd4;
+ __le32 mpdu_err_cnt;
+ __le32 rsvd5[2];
+ __le32 usr_resp_ref_ext;
+ __le32 rsvd6;
+} __packed;
+
+struct hal_rx_ppdu_end_user_stats_ext {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+} __packed;
+
+#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
+#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
+
+#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4)
+#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6)
+#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7)
+
+struct hal_rx_ht_sig_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4)
+
+struct hal_rx_lsig_b_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5)
+#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24)
+
+struct hal_rx_lsig_a_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10)
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8)
+
+struct hal_rx_vht_sig_a_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+enum hal_rx_vht_sig_a_gi_setting {
+ HAL_RX_VHT_SIG_A_NORMAL_GI = 0,
+ HAL_RX_VHT_SIG_A_SHORT_GI = 1,
+ HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
+};
+
+#define HE_GI_0_8 0
+#define HE_GI_0_4 1
+#define HE_GI_1_6 2
+#define HE_GI_3_2 3
+
+#define HE_LTF_1_X 0
+#define HE_LTF_2_X 1
+#define HE_LTF_4_X 2
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
+
+struct hal_rx_he_sig_a_su_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG BIT(1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB GENMASK(3, 1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB BIT(4)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR GENMASK(10, 5)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE GENMASK(14, 11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB BIT(22)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION BIT(25)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA BIT(11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC BIT(12)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM BIT(15)
+
+struct hal_rx_he_sig_a_mu_dl_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0)
+
+struct hal_rx_he_sig_b1_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
+
+struct hal_rx_he_sig_b2_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
+
+struct hal_rx_he_sig_b2_ofdma_info {
+ __le32 info0;
+} __packed;
+
+enum hal_rx_ul_reception_type {
+ HAL_RECEPTION_TYPE_ULOFMDA,
+ HAL_RECEPTION_TYPE_ULMIMO,
+ HAL_RECEPTION_TYPE_OTHER,
+ HAL_RECEPTION_TYPE_FRAMELESS
+};
+
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION GENMASK(3, 0)
+
+struct hal_rx_phyrx_rssi_legacy_info {
+ __le32 rsvd[35];
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
+struct hal_rx_mpdu_start {
+ __le32 info0;
+ __le32 info1;
+ __le32 rsvd1[11];
+ __le32 info2;
+ __le32 rsvd2[9];
+} __packed;
+
+#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
+struct hal_rx_ppdu_end_duration {
+ __le32 rsvd0[9];
+ __le32 info0;
+ __le32 rsvd1[4];
+} __packed;
+
+struct hal_rx_rxpcu_classification_overview {
+ u32 rsvd0;
+} __packed;
+
+struct hal_rx_msdu_desc_info {
+ u32 msdu_flags;
+ u16 msdu_len; /* 14 bits for length */
+};
+
+#define HAL_RX_NUM_MSDU_DESC 6
+struct hal_rx_msdu_list {
+ struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
+ u8 rbm[HAL_RX_NUM_MSDU_DESC];
+};
+
+#define HAL_RX_FBM_ACK_INFO0_ADDR1_31_0 GENMASK(31, 0)
+#define HAL_RX_FBM_ACK_INFO1_ADDR1_47_32 GENMASK(15, 0)
+#define HAL_RX_FBM_ACK_INFO1_ADDR2_15_0 GENMASK(31, 16)
+#define HAL_RX_FBM_ACK_INFO2_ADDR2_47_16 GENMASK(31, 0)
+
+struct hal_rx_frame_bitmap_ack {
+ __le32 reserved;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 reserved1[10];
+} __packed;
+
+#define HAL_RX_RESP_REQ_INFO0_PPDU_ID GENMASK(15, 0)
+#define HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE BIT(16)
+#define HAL_RX_RESP_REQ_INFO1_DURATION GENMASK(15, 0)
+#define HAL_RX_RESP_REQ_INFO1_RATE_MCS GENMASK(24, 21)
+#define HAL_RX_RESP_REQ_INFO1_SGI GENMASK(26, 25)
+#define HAL_RX_RESP_REQ_INFO1_STBC BIT(27)
+#define HAL_RX_RESP_REQ_INFO1_LDPC BIT(28)
+#define HAL_RX_RESP_REQ_INFO1_IS_AMPDU BIT(29)
+#define HAL_RX_RESP_REQ_INFO2_NUM_USER GENMASK(6, 0)
+#define HAL_RX_RESP_REQ_INFO3_ADDR1_31_0 GENMASK(31, 0)
+#define HAL_RX_RESP_REQ_INFO4_ADDR1_47_32 GENMASK(15, 0)
+#define HAL_RX_RESP_REQ_INFO4_ADDR1_15_0 GENMASK(31, 16)
+#define HAL_RX_RESP_REQ_INFO5_ADDR1_47_16 GENMASK(31, 0)
+
+struct hal_rx_resp_req_info {
+ __le32 info0;
+ __le32 reserved[1];
+ __le32 info1;
+ __le32 info2;
+ __le32 reserved1[2];
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+ __le32 reserved2[5];
+} __packed;
+
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID BIT(30)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER BIT(31)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS GENMASK(2, 0)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS GENMASK(6, 3)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC BIT(7)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_DCM BIT(8)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START GENMASK(15, 9)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE GENMASK(18, 16)
+
+/* HE Radiotap data1 Mask */
+#define HE_SU_FORMAT_TYPE 0x0000
+#define HE_EXT_SU_FORMAT_TYPE 0x0001
+#define HE_MU_FORMAT_TYPE 0x0002
+#define HE_TRIG_FORMAT_TYPE 0x0003
+#define HE_BEAM_CHANGE_KNOWN 0x0008
+#define HE_DL_UL_KNOWN 0x0010
+#define HE_MCS_KNOWN 0x0020
+#define HE_DCM_KNOWN 0x0040
+#define HE_CODING_KNOWN 0x0080
+#define HE_LDPC_EXTRA_SYMBOL_KNOWN 0x0100
+#define HE_STBC_KNOWN 0x0200
+#define HE_DATA_BW_RU_KNOWN 0x4000
+#define HE_DOPPLER_KNOWN 0x8000
+#define HE_BSS_COLOR_KNOWN 0x0004
+
+/* HE Radiotap data2 Mask */
+#define HE_GI_KNOWN 0x0002
+#define HE_TXBF_KNOWN 0x0010
+#define HE_PE_DISAMBIGUITY_KNOWN 0x0020
+#define HE_TXOP_KNOWN 0x0040
+#define HE_LTF_SYMBOLS_KNOWN 0x0004
+#define HE_PRE_FEC_PADDING_KNOWN 0x0008
+#define HE_MIDABLE_PERIODICITY_KNOWN 0x0080
+
+/* HE radiotap data3 shift values */
+#define HE_BEAM_CHANGE_SHIFT 6
+#define HE_DL_UL_SHIFT 7
+#define HE_TRANSMIT_MCS_SHIFT 8
+#define HE_DCM_SHIFT 12
+#define HE_CODING_SHIFT 13
+#define HE_LDPC_EXTRA_SYMBOL_SHIFT 14
+#define HE_STBC_SHIFT 15
+
+/* HE radiotap data4 shift values */
+#define HE_STA_ID_SHIFT 4
+
+/* HE radiotap data5 */
+#define HE_GI_SHIFT 4
+#define HE_LTF_SIZE_SHIFT 6
+#define HE_LTF_SYM_SHIFT 8
+#define HE_TXBF_SHIFT 14
+#define HE_PE_DISAMBIGUITY_SHIFT 15
+#define HE_PRE_FEC_PAD_SHIFT 12
+
+/* HE radiotap data6 */
+#define HE_DOPPLER_SHIFT 4
+#define HE_TXOP_SHIFT 8
+
+/* HE radiotap HE-MU flags1 */
+#define HE_SIG_B_MCS_KNOWN 0x0010
+#define HE_SIG_B_DCM_KNOWN 0x0040
+#define HE_SIG_B_SYM_NUM_KNOWN 0x8000
+#define HE_RU_0_KNOWN 0x0100
+#define HE_RU_1_KNOWN 0x0200
+#define HE_RU_2_KNOWN 0x0400
+#define HE_RU_3_KNOWN 0x0800
+#define HE_DCM_FLAG_1_SHIFT 5
+#define HE_SPATIAL_REUSE_MU_KNOWN 0x0100
+#define HE_SIG_B_COMPRESSION_FLAG_1_KNOWN 0x4000
+
+/* HE radiotap HE-MU flags2 */
+#define HE_SIG_B_COMPRESSION_FLAG_2_SHIFT 3
+#define HE_BW_KNOWN 0x0004
+#define HE_NUM_SIG_B_SYMBOLS_SHIFT 4
+#define HE_SIG_B_COMPRESSION_FLAG_2_KNOWN 0x0100
+#define HE_NUM_SIG_B_FLAG_2_SHIFT 9
+#define HE_LTF_FLAG_2_SYMBOLS_SHIFT 12
+#define HE_LTF_KNOWN 0x8000
+
+/* HE radiotap per_user_1 */
+#define HE_STA_SPATIAL_SHIFT 11
+#define HE_TXBF_SHIFT 14
+#define HE_RESERVED_SET_TO_1_SHIFT 19
+#define HE_STA_CODING_SHIFT 20
+
+/* HE radiotap per_user_2 */
+#define HE_STA_MCS_SHIFT 4
+#define HE_STA_DCM_SHIFT 5
+
+/* HE radiotap per user known */
+#define HE_USER_FIELD_POSITION_KNOWN 0x01
+#define HE_STA_ID_PER_USER_KNOWN 0x02
+#define HE_STA_NSTS_KNOWN 0x04
+#define HE_STA_TX_BF_KNOWN 0x08
+#define HE_STA_SPATIAL_CONFIG_KNOWN 0x10
+#define HE_STA_MCS_KNOWN 0x20
+#define HE_STA_DCM_KNOWN 0x40
+#define HE_STA_CODING_KNOWN 0x80
+
+#define HAL_RX_MPDU_ERR_FCS BIT(0)
+#define HAL_RX_MPDU_ERR_DECRYPT BIT(1)
+#define HAL_RX_MPDU_ERR_TKIP_MIC BIT(2)
+#define HAL_RX_MPDU_ERR_AMSDU_ERR BIT(3)
+#define HAL_RX_MPDU_ERR_OVERFLOW BIT(4)
+#define HAL_RX_MPDU_ERR_MSDU_LEN BIT(5)
+#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
+#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+
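+/* Map a count of RU tones to the nl80211 RU allocation enum; unknown
+ * tone counts fall back to the 26-tone allocation.
+ */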
+static inline
+enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_tones) {
+ case RU_52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case RU_106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case RU_242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case RU_484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case RU_996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case RU_26:
+ fallthrough;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+ return ret;
+}
+
+void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm);
+void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
+ struct hal_wbm_release_ring *dst_desc,
+ struct hal_wbm_release_ring *src_desc,
+ enum hal_wbm_rel_bm_act action);
+void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie, u8 manager);
+void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm);
+int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
+ struct hal_reo_dest_ring *desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info);
+void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buff_addr,
+ dma_addr_t *paddr, u32 *cookie);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.c b/drivers/net/wireless/ath/ath12k/hal_tx.c
new file mode 100644
index 000000000000..869e07e406fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "hal_desc.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hif.h"
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *=================
+ * DSCP TID
+ *=================
+ * 000xxx 0
+ * 001xxx 1
+ * 010xxx 2
+ * 011xxx 3
+ * 100xxx 4
+ * 101xxx 5
+ * 110xxx 6
+ * 111xxx 7
+ */
+static inline u8 dscp2tid(u8 dscp)
+{
+ return dscp >> 3;
+}
+
+void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd,
+ struct hal_tx_info *ti)
+{
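+	/* Program the buffer address: the low 32 bits go into info0, the
+	 * remaining MSBs plus the return buffer manager and SW cookie go
+	 * into info1.
+	 */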
+ tcl_cmd->buf_addr_info.info0 =
+ le32_encode_bits(ti->paddr, BUFFER_ADDR_INFO0_ADDR);
+ tcl_cmd->buf_addr_info.info1 =
+		le32_encode_bits(((u64)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ BUFFER_ADDR_INFO1_ADDR);
+ tcl_cmd->buf_addr_info.info1 |=
+ le32_encode_bits((ti->rbm_id), BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(ti->desc_id, BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ tcl_cmd->info0 =
+ le32_encode_bits(ti->type, HAL_TCL_DATA_CMD_INFO0_DESC_TYPE) |
+ le32_encode_bits(ti->bank_id, HAL_TCL_DATA_CMD_INFO0_BANK_ID);
+
+ tcl_cmd->info1 =
+ le32_encode_bits(ti->meta_data_flags,
+ HAL_TCL_DATA_CMD_INFO1_CMD_NUM);
+
+ tcl_cmd->info2 = cpu_to_le32(ti->flags0) |
+ le32_encode_bits(ti->data_len, HAL_TCL_DATA_CMD_INFO2_DATA_LEN) |
+ le32_encode_bits(ti->pkt_offset, HAL_TCL_DATA_CMD_INFO2_PKT_OFFSET);
+
+ tcl_cmd->info3 = cpu_to_le32(ti->flags1) |
+ le32_encode_bits(ti->tid, HAL_TCL_DATA_CMD_INFO3_TID) |
+ le32_encode_bits(ti->lmac_id, HAL_TCL_DATA_CMD_INFO3_PMAC_ID) |
+ le32_encode_bits(ti->vdev_id, HAL_TCL_DATA_CMD_INFO3_VDEV_ID);
+
+ tcl_cmd->info4 = le32_encode_bits(ti->bss_ast_idx,
+ HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX) |
+ le32_encode_bits(ti->bss_ast_hash,
+ HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM);
+ tcl_cmd->info5 = 0;
+}
+
+void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
+{
+ u32 ctrl_reg_val;
+ u32 addr;
+ u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE], dscp, tid;
+ int i;
+ u32 value;
+
+ ctrl_reg_val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ /* Enable read/write access */
+ ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+ addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+ (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+	/* Each DSCP-TID mapping takes three bits, so every iteration below
+	 * packs eight mappings (24 bits) into three bytes of the table.
+	 */
+ for (i = 0, dscp = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 3) {
+ tid = dscp2tid(dscp);
+ value = u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP0);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP1);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP2);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP3);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP4);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP5);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP6);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP7);
+ dscp++;
+
+ memcpy(&hw_map_val[i], &value, 3);
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
+ ath12k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]);
+ addr += 4;
+ }
+
+ /* Disable read/write access */
+ ctrl_reg_val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG,
+ ctrl_reg_val);
+}
+
+void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config,
+ u8 bank_id)
+{
+ ath12k_hif_write32(ab, HAL_TCL_SW_CONFIG_BANK_ADDR + 4 * bank_id,
+ bank_config);
+}
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
new file mode 100644
index 000000000000..7c837094a6f7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HAL_TX_H
+#define ATH12K_HAL_TX_H
+
+#include "hal_desc.h"
+#include "core.h"
+
+#define HAL_TX_ADDRX_EN 1
+#define HAL_TX_ADDRY_EN 2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT 0
+#define HAL_TX_ADDR_SEARCH_INDEX 1
+
+/* TODO: check whether all of this data can be managed with struct ath12k_tx_desc_info for perf */
+struct hal_tx_info {
+ u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
+ u8 ring_id;
+ u8 rbm_id;
+ u32 desc_id;
+ enum hal_tcl_desc_type type;
+ enum hal_tcl_encap_type encap_type;
+ dma_addr_t paddr;
+ u32 data_len;
+ u32 pkt_offset;
+ enum hal_encrypt_type encrypt_type;
+ u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
+ u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
+ u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
+ u16 bss_ast_hash;
+ u16 bss_ast_idx;
+ u8 tid;
+ u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
+ u8 lmac_id;
+ u8 vdev_id;
+ u8 dscp_tid_tbl_idx;
+ bool enable_mesh;
+ int bank_id;
+};
+
+/* TODO: Check if the actual desc macros can be used instead */
+#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
+#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
+#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
+#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
+#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
+#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
+#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
+
+#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
+
+/* Tx status parsed from srng desc */
+struct hal_tx_status {
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason status;
+ u8 ack_rssi;
+ u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
+ u32 ppdu_id;
+ u8 try_cnt;
+ u8 tid;
+ u16 peer_id;
+ u32 rate_stats;
+};
+
+#define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16)
+#define HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B BIT(20)
+#define HAL_TX_PHY_DESC_INFO0_PKT_TYPE GENMASK(24, 21)
+#define HAL_TX_PHY_DESC_INFO0_BANDWIDTH GENMASK(30, 28)
+#define HAL_TX_PHY_DESC_INFO1_MCS GENMASK(3, 0)
+#define HAL_TX_PHY_DESC_INFO1_STBC BIT(6)
+#define HAL_TX_PHY_DESC_INFO2_NSS GENMASK(23, 21)
+#define HAL_TX_PHY_DESC_INFO3_AP_PKT_BW GENMASK(6, 4)
+#define HAL_TX_PHY_DESC_INFO3_LTF_SIZE GENMASK(20, 19)
+#define HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL GENMASK(17, 15)
+
+struct hal_tx_phy_desc {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+} __packed;
+
+#define HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0 GENMASK(15, 0)
+#define HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16 GENMASK(31, 16)
+#define HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0 GENMASK(15, 0)
+#define HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16 GENMASK(31, 16)
+
+struct hal_tx_fes_status_prot {
+ __le64 reserved;
+ __le32 info0;
+ __le32 info1;
+ __le32 reserved1[11];
+} __packed;
+
+#define HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION GENMASK(15, 0)
+
+struct hal_tx_fes_status_user_ppdu {
+ __le64 reserved;
+ __le32 info0;
+ __le32 reserved1[3];
+} __packed;
+
+#define HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32 GENMASK(31, 0)
+#define HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32 GENMASK(31, 0)
+
+struct hal_tx_fes_status_start_prot {
+ __le32 info0;
+ __le32 info1;
+ __le64 reserved;
+} __packed;
+
+#define HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE GENMASK(29, 27)
+
+struct hal_tx_fes_status_start {
+ __le32 reserved;
+ __le32 info0;
+ __le64 reserved1;
+} __packed;
+
+#define HAL_TX_Q_EXT_INFO0_FRAME_CTRL GENMASK(15, 0)
+#define HAL_TX_Q_EXT_INFO0_QOS_CTRL GENMASK(31, 16)
+#define HAL_TX_Q_EXT_INFO1_AMPDU_FLAG BIT(0)
+
+struct hal_tx_queue_exten {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS GENMASK(28, 23)
+
+struct hal_tx_fes_setup {
+ __le32 schedule_id;
+ __le32 info0;
+ __le64 reserved;
+} __packed;
+
+#define HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE GENMASK(2, 0)
+#define HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0 GENMASK(31, 0)
+#define HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32 GENMASK(15, 0)
+#define HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0 GENMASK(31, 16)
+#define HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16 GENMASK(31, 0)
+#define HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0 GENMASK(31, 0)
+#define HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32 GENMASK(15, 0)
+#define HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0 GENMASK(31, 16)
+#define HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16 GENMASK(31, 0)
+
+struct hal_tx_pcu_ppdu_setup_init {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 reserved;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+} __packed;
+
+#define HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0 GENMASK(15, 0)
+#define HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16 GENMASK(31, 16)
+
+struct hal_tx_fes_status_end {
+ __le32 reserved[2];
+ __le32 info0;
+ __le32 reserved1[19];
+} __packed;
+
+#define HAL_TX_BANK_CONFIG_EPD BIT(0)
+#define HAL_TX_BANK_CONFIG_ENCAP_TYPE GENMASK(2, 1)
+#define HAL_TX_BANK_CONFIG_ENCRYPT_TYPE GENMASK(6, 3)
+#define HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP BIT(7)
+#define HAL_TX_BANK_CONFIG_LINK_META_SWAP BIT(8)
+#define HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN BIT(9)
+#define HAL_TX_BANK_CONFIG_ADDRX_EN BIT(10)
+#define HAL_TX_BANK_CONFIG_ADDRY_EN BIT(11)
+#define HAL_TX_BANK_CONFIG_MESH_EN GENMASK(13, 12)
+#define HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN BIT(14)
+#define HAL_TX_BANK_CONFIG_PMAC_ID GENMASK(16, 15)
+/* STA mode will have MCAST_PKT_CTRL instead of DSCP_TID_MAP bitfield */
+#define HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID GENMASK(22, 17)
+
+void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd,
+ struct hal_tx_info *ti);
+void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id);
+int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd);
+void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config,
+ u8 bank_id);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
new file mode 100644
index 000000000000..54490cdb63a1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hif.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HIF_H
+#define ATH12K_HIF_H
+
+#include "core.h"
+
+struct ath12k_hif_ops {
+ u32 (*read32)(struct ath12k_base *sc, u32 address);
+ void (*write32)(struct ath12k_base *sc, u32 address, u32 data);
+ void (*irq_enable)(struct ath12k_base *sc);
+ void (*irq_disable)(struct ath12k_base *sc);
+ int (*start)(struct ath12k_base *sc);
+ void (*stop)(struct ath12k_base *sc);
+ int (*power_up)(struct ath12k_base *sc);
+ void (*power_down)(struct ath12k_base *sc);
+ int (*suspend)(struct ath12k_base *ab);
+ int (*resume)(struct ath12k_base *ab);
+ int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+ int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+ void (*get_msi_address)(struct ath12k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+ void (*ce_irq_enable)(struct ath12k_base *ab);
+ void (*ce_irq_disable)(struct ath12k_base *ab);
+ void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx);
+};
+
+static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ return ab->hif.ops->map_service_to_pipe(ab, service_id,
+ ul_pipe, dl_pipe);
+}
+
+static inline int ath12k_hif_get_user_msi_vector(struct ath12k_base *ab,
+ char *user_name,
+ int *num_vectors,
+ u32 *user_base_data,
+ u32 *base_vector)
+{
+ if (!ab->hif.ops->get_user_msi_vector)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->get_user_msi_vector(ab, user_name, num_vectors,
+ user_base_data,
+ base_vector);
+}
+
+static inline void ath12k_hif_get_msi_address(struct ath12k_base *ab,
+ u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ if (!ab->hif.ops->get_msi_address)
+ return;
+
+ ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi);
+}
+
+static inline void ath12k_hif_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
+ u32 *msi_data_idx)
+{
+ if (ab->hif.ops->get_ce_msi_idx)
+ ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx);
+ else
+ *msi_data_idx = ce_id;
+}
+
+static inline void ath12k_hif_ce_irq_enable(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->ce_irq_enable)
+ ab->hif.ops->ce_irq_enable(ab);
+}
+
+static inline void ath12k_hif_ce_irq_disable(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->ce_irq_disable)
+ ab->hif.ops->ce_irq_disable(ab);
+}
+
+static inline void ath12k_hif_irq_enable(struct ath12k_base *ab)
+{
+ ab->hif.ops->irq_enable(ab);
+}
+
+static inline void ath12k_hif_irq_disable(struct ath12k_base *ab)
+{
+ ab->hif.ops->irq_disable(ab);
+}
+
+static inline int ath12k_hif_suspend(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->suspend)
+ return ab->hif.ops->suspend(ab);
+
+ return 0;
+}
+
+static inline int ath12k_hif_resume(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->resume)
+ return ab->hif.ops->resume(ab);
+
+ return 0;
+}
+
+static inline int ath12k_hif_start(struct ath12k_base *ab)
+{
+ return ab->hif.ops->start(ab);
+}
+
+static inline void ath12k_hif_stop(struct ath12k_base *ab)
+{
+ ab->hif.ops->stop(ab);
+}
+
+static inline u32 ath12k_hif_read32(struct ath12k_base *ab, u32 address)
+{
+ return ab->hif.ops->read32(ab, address);
+}
+
+static inline void ath12k_hif_write32(struct ath12k_base *ab, u32 address,
+ u32 data)
+{
+ ab->hif.ops->write32(ab, address, data);
+}
+
+static inline int ath12k_hif_power_up(struct ath12k_base *ab)
+{
+ return ab->hif.ops->power_up(ab);
+}
+
+static inline void ath12k_hif_power_down(struct ath12k_base *ab)
+{
+ ab->hif.ops->power_down(ab);
+}
+
+#endif /* ATH12K_HIF_H */
diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c
new file mode 100644
index 000000000000..23f7428abd95
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/htc.c
@@ -0,0 +1,789 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "debug.h"
+#include "hif.h"
+
+struct sk_buff *ath12k_htc_alloc_skb(struct ath12k_base *ab, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath12k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath12k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath12k_warn(ab, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+static void ath12k_htc_control_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath12k_htc_build_tx_ctrl_skb(void)
+{
+ struct sk_buff *skb;
+ struct ath12k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH12K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath12k_htc_hdr));
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ skb_cb = ATH12K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ return skb;
+}
+
+static void ath12k_htc_prepare_tx_skb(struct ath12k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath12k_htc_hdr *hdr;
+
+ hdr = (struct ath12k_htc_hdr *)skb->data;
+
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->htc_info = le32_encode_bits(ep->eid, HTC_HDR_ENDPOINTID) |
+ le32_encode_bits((skb->len - sizeof(*hdr)),
+ HTC_HDR_PAYLOADLEN);
+
+ if (ep->tx_credit_flow_enabled)
+ hdr->htc_info |= le32_encode_bits(ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE,
+ HTC_HDR_FLAGS);
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->ctrl_info = le32_encode_bits(ep->seq_no++, HTC_HDR_CONTROLBYTES1);
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath12k_htc_send(struct ath12k_htc *htc,
+ enum ath12k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath12k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct device *dev = htc->ab->dev;
+ struct ath12k_base *ab = htc->ab;
+ int credits = 0;
+ int ret;
+
+ if (eid >= ATH12K_HTC_EP_COUNT) {
+ ath12k_warn(ab, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath12k_htc_hdr));
+
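+ /* HTC flow control: the target grants a pool of tx credits, each
+ * worth target_credit_size bytes. A send consumes
+ * DIV_ROUND_UP(len, credit size) credits up front; the target
+ * returns them via credit reports carried in rx trailers.
+ */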
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath12k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+
+ ret = ath12k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath12k_htc_hdr));
+ return ret;
+}
+
+static void
+ath12k_htc_process_credit_report(struct ath12k_htc *htc,
+ const struct ath12k_htc_credit_report *report,
+ int len,
+ enum ath12k_htc_ep_id eid)
+{
+ struct ath12k_base *ab = htc->ab;
+ struct ath12k_htc_ep *ep;
+ int i, n_reports;
+
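+ /* Each report is one (endpoint, credits) pair; a single record
+ * may carry several of them back to back.
+ */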
+ if (len % sizeof(*report))
+ ath12k_warn(ab, "Uneven credit report len %d\n", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH12K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath12k_htc_process_trailer(struct ath12k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath12k_htc_ep_id src_eid)
+{
+ struct ath12k_base *ab = htc->ab;
+ int status = 0;
+ struct ath12k_htc_record *record;
+ size_t len;
+
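+ /* A trailer is a sequence of TLV-style records: a 4-byte record
+ * header (id, len) followed by len bytes of payload. Walk the
+ * buffer until it is exhausted or a record is malformed.
+ */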
+ while (length > 0) {
+ record = (struct ath12k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath12k_warn(ab, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (record->hdr.id) {
+ case ATH12K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath12k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath12k_warn(ab, "Credit report too short\n");
+ status = -EINVAL;
+ break;
+ }
+ ath12k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath12k_warn(ab, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ return status;
+}
+
+static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack)
+{
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot suspend complete %d\n", ack);
+
+ if (ack)
+ set_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+ else
+ clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ complete(&ab->htc_suspend);
+}
+
+void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath12k_htc *htc = &ab->htc;
+ struct ath12k_htc_hdr *hdr;
+ struct ath12k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath12k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = le32_get_bits(hdr->htc_info, HTC_HDR_ENDPOINTID);
+
+ if (eid >= ATH12K_HTC_EP_COUNT) {
+ ath12k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ payload_len = le32_get_bits(hdr->htc_info, HTC_HDR_PAYLOADLEN);
+
+ if (payload_len + sizeof(*hdr) > ATH12K_HTC_MAX_LEN) {
+ ath12k_warn(ab, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath12k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = le32_get_bits(hdr->htc_info, HTC_HDR_FLAGS) &
+ ATH12K_HTC_FLAG_TRAILER_PRESENT;
+
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = le32_get_bits(hdr->ctrl_info,
+ HTC_HDR_CONTROLBYTES0);
+ min_len = sizeof(struct ath12k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath12k_warn(ab, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
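+ /* The trailer occupies the last trailer_len bytes of the
+ * payload; process it, then trim it off the skb.
+ */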
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath12k_htc_process_trailer(htc, trailer,
+ trailer_len, eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (trailer_len >= payload_len)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
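+ /* Endpoint 0 is reserved for HTC control messages; anything else
+ * is handed to the endpoint's registered rx completion handler.
+ */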
+ if (eid == ATH12K_HTC_EP_0) {
+ struct ath12k_htc_msg *msg = (struct ath12k_htc_msg *)skb->data;
+
+ switch (le32_get_bits(msg->msg_svc_id, HTC_MSG_MESSAGEID)) {
+ case ATH12K_HTC_MSG_READY_ID:
+ case ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /* this is a fatal error, the target should not be
+ * sending unsolicited messages on endpoint 0
+ */
+ ath12k_warn(ab, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH12K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ ath12k_htc_suspend_complete(ab, true);
+ break;
+ case ATH12K_HTC_MSG_NACK_SUSPEND:
+ ath12k_htc_suspend_complete(ab, false);
+ break;
+ case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ break;
+ default:
+ ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n",
+ le32_get_bits(msg->msg_svc_id, HTC_MSG_MESSAGEID));
+ break;
+ }
+ goto out;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ab, skb);
+
+ /* poll tx completion for interrupt disabled CEs */
+ ath12k_ce_poll_send_completed(ab, ep->ul_pipe_id);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+
+static void ath12k_htc_control_rx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint.
+ */
+ ath12k_warn(ab, "unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+static const char *htc_service_name(enum ath12k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH12K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH12K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH12K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH12K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH12K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH12K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH12K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1:
+ return "WMI MAC1";
+ case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2:
+ return "WMI MAC2";
+ case ATH12K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH12K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH12K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH12K_HTC_SVC_ID_IPA_TX:
+ return "IPA TX";
+ case ATH12K_HTC_SVC_ID_PKT_LOG:
+ return "PKT LOG";
+ case ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG:
+ return "WMI DIAG";
+ }
+
+ return "Unknown";
+}
+
+static void ath12k_htc_reset_endpoint_states(struct ath12k_htc *htc)
+{
+ struct ath12k_htc_ep *ep;
+ int i;
+
+ for (i = ATH12K_HTC_EP_0; i < ATH12K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH12K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath12k_htc_get_credit_allocation(struct ath12k_htc *htc,
+ u16 service_id)
+{
+ struct ath12k_htc_svc_tx_credits *serv_entry;
+ u8 i, allocation = 0;
+
+ serv_entry = htc->service_alloc_table;
+
+ for (i = 0; i < ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
+ if (serv_entry[i].service_id == service_id) {
+ allocation = serv_entry[i].credit_allocation;
+ break;
+ }
+ }
+
+ return allocation;
+}
+
+static int ath12k_htc_setup_target_buffer_assignments(struct ath12k_htc *htc)
+{
+ struct ath12k_htc_svc_tx_credits *serv_entry;
+ static const u32 svc_id[] = {
+ ATH12K_HTC_SVC_ID_WMI_CONTROL,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2,
+ };
+ int i, credits;
+
+ credits = htc->total_transmit_credits;
+ serv_entry = htc->service_alloc_table;
+
+ if ((htc->wmi_ep_count == 0) ||
+ (htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
+ return -EINVAL;
+
+ /* Divide credits among number of endpoints for WMI */
+ credits = credits / htc->wmi_ep_count;
+ for (i = 0; i < htc->wmi_ep_count; i++) {
+ serv_entry[i].service_id = svc_id[i];
+ serv_entry[i].credit_allocation = credits;
+ }
+
+ return 0;
+}
+
+int ath12k_htc_wait_target(struct ath12k_htc *htc)
+{
+ int i, status = 0;
+ struct ath12k_base *ab = htc->ab;
+ unsigned long time_left;
+ struct ath12k_htc_ready *ready;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
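+ /* The HTC ready message normally arrives via the CE interrupt
+ * path; if it does not, service each copy engine once by polling
+ * and wait again before giving up.
+ */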
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH12K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+ ath12k_warn(ab, "failed to receive control response completion, polling...\n");
+
+ for (i = 0; i < ab->hw_params->ce_count; i++)
+ ath12k_ce_per_engine_service(htc->ab, i);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH12K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath12k_warn(ab, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(*ready)) {
+ ath12k_warn(ab, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ ready = (struct ath12k_htc_ready *)htc->control_resp_buffer;
+ message_id = le32_get_bits(ready->id_credit_count, HTC_MSG_MESSAGEID);
+ credit_count = le32_get_bits(ready->id_credit_count,
+ HTC_READY_MSG_CREDITCOUNT);
+ credit_size = le32_get_bits(ready->size_ep, HTC_READY_MSG_CREDITSIZE);
+
+ if (message_id != ATH12K_HTC_MSG_READY_ID) {
+ ath12k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits, htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath12k_warn(ab, "Invalid credit count or size received\n");
+ return -ECOMM;
+ }
+
+ ath12k_htc_setup_target_buffer_assignments(htc);
+
+ return 0;
+}
+
+int ath12k_htc_connect_service(struct ath12k_htc *htc,
+ struct ath12k_htc_svc_conn_req *conn_req,
+ struct ath12k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath12k_base *ab = htc->ab;
+ struct ath12k_htc_conn_svc *req_msg;
+ struct ath12k_htc_conn_svc_resp resp_msg_dummy;
+ struct ath12k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
+ enum ath12k_htc_ep_id assigned_eid = ATH12K_HTC_EP_COUNT;
+ struct ath12k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH12K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH12K_HTC_EP_0;
+ max_msg_size = ATH12K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath12k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath12k_htc_build_tx_ctrl_skb();
+ if (!skb) {
+ ath12k_warn(ab, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(*req_msg);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ req_msg = (struct ath12k_htc_conn_svc *)skb->data;
+ req_msg->msg_svc_id = le32_encode_bits(ATH12K_HTC_MSG_CONNECT_SERVICE_ID,
+ HTC_MSG_MESSAGEID);
+
+ flags |= u32_encode_bits(tx_alloc, ATH12K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (!(conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL ||
+ conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
+ conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
+ flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg->flags_len = le32_encode_bits(flags, HTC_SVC_MSG_CONNECTIONFLAGS);
+ req_msg->msg_svc_id |= le32_encode_bits(conn_req->service_id,
+ HTC_SVC_MSG_SERVICE_ID);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH12K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath12k_err(ab, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ resp_msg = (struct ath12k_htc_conn_svc_resp *)htc->control_resp_buffer;
+ message_id = le32_get_bits(resp_msg->msg_svc_id, HTC_MSG_MESSAGEID);
+ service_id = le32_get_bits(resp_msg->msg_svc_id,
+ HTC_SVC_RESP_MSG_SERVICEID);
+
+ if ((message_id != ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(*resp_msg))) {
+ ath12k_err(ab, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "HTC Service %s connect response: status: %u, assigned ep: %u\n",
+ htc_service_name(service_id),
+ le32_get_bits(resp_msg->flags_len, HTC_SVC_RESP_MSG_STATUS),
+ le32_get_bits(resp_msg->flags_len, HTC_SVC_RESP_MSG_ENDPOINTID));
+
+ conn_resp->connect_resp_code = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_STATUS);
+
+ /* check response status */
+ if (conn_resp->connect_resp_code != ATH12K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath12k_err(ab, "HTC Service %s connect request failed: 0x%x\n",
+ htc_service_name(service_id),
+ conn_resp->connect_resp_code);
+ return -EPROTO;
+ }
+
+ assigned_eid = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_ENDPOINTID);
+
+ max_msg_size = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_MAXMSGSIZE);
+
+setup:
+
+ if (assigned_eid >= ATH12K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH12K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_MAXMSGSIZE);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_MAXMSGSIZE);
+ ep->tx_credits = tx_alloc;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath12k_hif_map_service_to_pipe(htc->ab,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status)
+ return status;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
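+
+/* Typical usage (sketch, not a real caller): fill a conn_req with the
+ * service id, send queue depth and ep_ops callbacks, then read the
+ * assigned endpoint id and max message length back from conn_resp:
+ *
+ *   struct ath12k_htc_svc_conn_req req = {
+ *           .service_id = ATH12K_HTC_SVC_ID_WMI_CONTROL,
+ *           .max_send_queue_depth = 32,
+ *   };
+ *   struct ath12k_htc_svc_conn_resp resp = {};
+ *   int ret = ath12k_htc_connect_service(htc, &req, &resp);
+ *
+ * (ep_ops callbacks omitted from the sketch; real clients set
+ * ep_rx_complete at minimum.)
+ */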
+
+int ath12k_htc_start(struct ath12k_htc *htc)
+{
+ struct sk_buff *skb;
+ int status;
+ struct ath12k_base *ab = htc->ab;
+ struct ath12k_htc_setup_complete_extended *msg;
+
+ skb = ath12k_htc_build_tx_ctrl_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(*msg));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath12k_htc_setup_complete_extended *)skb->data;
+ msg->msg_id = le32_encode_bits(ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID,
+ HTC_MSG_MESSAGEID);
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+int ath12k_htc_init(struct ath12k_base *ab)
+{
+ struct ath12k_htc *htc = &ab->htc;
+ struct ath12k_htc_svc_conn_req conn_req = { };
+ struct ath12k_htc_svc_conn_resp conn_resp = { };
+ int ret;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath12k_htc_reset_endpoint_states(htc);
+
+ htc->ab = ab;
+
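+ /* One WMI control endpoint is used per MAC, so the endpoint count
+ * follows the preferred hw mode (single, DBS, DBS+SBS).
+ */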
+ switch (ab->wmi_ab.preferred_hw_mode) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ htc->wmi_ep_count = 1;
+ break;
+ case WMI_HOST_HW_MODE_DBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ htc->wmi_ep_count = 2;
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ htc->wmi_ep_count = 3;
+ break;
+ default:
+ htc->wmi_ep_count = ab->hw_params->max_radios;
+ break;
+ }
+
+ /* setup our pseudo HTC control endpoint connection */
+ conn_req.ep_ops.ep_tx_complete = ath12k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath12k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH12K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH12K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ ret = ath12k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (ret) {
+ ath12k_err(ab, "could not connect to htc service (%d)\n", ret);
+ return ret;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/htc.h b/drivers/net/wireless/ath/ath12k/htc.h
new file mode 100644
index 000000000000..7e3dccc7cc14
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/htc.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HTC_H
+#define ATH12K_HTC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct ath12k_base;
+
+#define HTC_HDR_ENDPOINTID GENMASK(7, 0)
+#define HTC_HDR_FLAGS GENMASK(15, 8)
+#define HTC_HDR_PAYLOADLEN GENMASK(31, 16)
+#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0)
+#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8)
+#define HTC_HDR_RESERVED GENMASK(31, 16)
+
+#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16)
+#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0)
+#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16)
+#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16)
+#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0)
+#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16)
+
+#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0)
+#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8)
+
+#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0)
+#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8)
+#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0)
+
+#define HTC_MSG_MESSAGEID GENMASK(15, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24)
+
+enum ath12k_htc_tx_flags {
+ ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH12K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath12k_htc_rx_flags {
+ ATH12K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH12K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
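+/* 8-byte header prepended to every HTC frame. htc_info carries the
+ * endpoint id, flags and payload length; ctrl_info carries the trailer
+ * length (control byte 0) and a per-endpoint tx sequence number
+ * (control byte 1).
+ */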
+struct ath12k_htc_hdr {
+ __le32 htc_info;
+ __le32 ctrl_info;
+} __packed __aligned(4);
+
+enum ath12k_htc_msg_id {
+ ATH12K_HTC_MSG_READY_ID = 1,
+ ATH12K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH12K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6,
+ ATH12K_HTC_MSG_NACK_SUSPEND = 7,
+ ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID = 8,
+};
+
+enum ath12k_htc_version {
+ ATH12K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH12K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath12k_htc_conn_flag_threshold_level {
+ ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH,
+ ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF,
+ ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS,
+ ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY,
+};
+
+#define ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
+#define ATH12K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE BIT(2)
+#define ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL BIT(3)
+#define ATH12K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
+
+enum ath12k_htc_conn_svc_status {
+ ATH12K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH12K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH12K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH12K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH12K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath12k_htc_ready {
+ __le32 id_credit_count;
+ __le32 size_ep;
+} __packed;
+
+struct ath12k_htc_ready_extended {
+ struct ath12k_htc_ready base;
+ __le32 ver_bundle;
+} __packed;
+
+struct ath12k_htc_conn_svc {
+ __le32 msg_svc_id;
+ __le32 flags_len;
+} __packed;
+
+struct ath12k_htc_conn_svc_resp {
+ __le32 msg_svc_id;
+ __le32 flags_len;
+ __le32 svc_meta_pad;
+} __packed;
+
+struct ath12k_htc_setup_complete_extended {
+ __le32 msg_id;
+ __le32 flags;
+ __le32 max_msgs_per_bundled_recv;
+} __packed;
+
+struct ath12k_htc_msg {
+ __le32 msg_svc_id;
+ __le32 flags_len;
+} __packed __aligned(4);
+
+enum ath12k_htc_record_id {
+ ATH12K_HTC_RECORD_NULL = 0,
+ ATH12K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath12k_htc_record_hdr {
+ u8 id; /* @enum ath12k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath12k_htc_credit_report {
+ u8 eid; /* @enum ath12k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath12k_htc_record {
+ struct ath12k_htc_record_hdr hdr;
+ struct ath12k_htc_credit_report credit_report[];
+} __packed __aligned(4);
+
+/* HTC FRAME structure layout draft
+ *
+ * note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ *
+ *=======================================================
+ *
+ * HTC HEADER
+ *
+ *=======================================================
+ * |
+ * HTC message | payload
+ * (variable length) | (variable length)
+ *=======================================================
+ *
+ * HTC Record
+ *
+ *=======================================================
+ */
+
+enum ath12k_htc_svc_gid {
+ ATH12K_HTC_SVC_GRP_RSVD = 0,
+ ATH12K_HTC_SVC_GRP_WMI = 1,
+ ATH12K_HTC_SVC_GRP_NMI = 2,
+ ATH12K_HTC_SVC_GRP_HTT = 3,
+ ATH12K_HTC_SVC_GRP_CFG = 4,
+ ATH12K_HTC_SVC_GRP_IPA = 5,
+ ATH12K_HTC_SVC_GRP_PKTLOG = 6,
+
+ ATH12K_HTC_SVC_GRP_TEST = 254,
+ ATH12K_HTC_SVC_GRP_LAST = 255,
+};
+
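+/* A service ID packs the service group into the upper byte and the
+ * index within that group into the lower byte.
+ */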
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath12k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH12K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH12K_HTC_SVC_ID_UNUSED = ATH12K_HTC_SVC_ID_RESERVED,
+
+ ATH12K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH12K_HTC_SVC_GRP_RSVD, 1),
+ ATH12K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH12K_HTC_SVC_GRP_WMI, 0),
+ ATH12K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH12K_HTC_SVC_GRP_WMI, 1),
+ ATH12K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH12K_HTC_SVC_GRP_WMI, 2),
+ ATH12K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH12K_HTC_SVC_GRP_WMI, 3),
+ ATH12K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH12K_HTC_SVC_GRP_WMI, 4),
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH12K_HTC_SVC_GRP_WMI, 5),
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH12K_HTC_SVC_GRP_WMI, 6),
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG = SVC(ATH12K_HTC_SVC_GRP_WMI, 7),
+
+ ATH12K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH12K_HTC_SVC_GRP_NMI, 0),
+ ATH12K_HTC_SVC_ID_NMI_DATA = SVC(ATH12K_HTC_SVC_GRP_NMI, 1),
+
+ ATH12K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH12K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH12K_HTC_SVC_GRP_TEST, 0),
+ ATH12K_HTC_SVC_ID_IPA_TX = SVC(ATH12K_HTC_SVC_GRP_IPA, 0),
+ ATH12K_HTC_SVC_ID_PKT_LOG = SVC(ATH12K_HTC_SVC_GRP_PKTLOG, 0),
+};
+
+#undef SVC
+
+enum ath12k_htc_ep_id {
+ ATH12K_HTC_EP_UNUSED = -1,
+ ATH12K_HTC_EP_0 = 0,
+ ATH12K_HTC_EP_1 = 1,
+ ATH12K_HTC_EP_2,
+ ATH12K_HTC_EP_3,
+ ATH12K_HTC_EP_4,
+ ATH12K_HTC_EP_5,
+ ATH12K_HTC_EP_6,
+ ATH12K_HTC_EP_7,
+ ATH12K_HTC_EP_8,
+ ATH12K_HTC_EP_COUNT,
+};
+
+struct ath12k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath12k_base *ab, struct sk_buff *skb);
+ void (*ep_rx_complete)(struct ath12k_base *ab, struct sk_buff *skb);
+ void (*ep_tx_credits)(struct ath12k_base *ab);
+};
+
+/* service connection information */
+struct ath12k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath12k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath12k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath12k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH12K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH12K_HTC_MAX_LEN 4096
+#define ATH12K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH12K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH12K_HTC_CONTROL_BUFFER_SIZE (ATH12K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath12k_htc_hdr))
+#define ATH12K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+#define ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
+
+struct ath12k_htc_ep {
+ struct ath12k_htc *htc;
+ enum ath12k_htc_ep_id eid;
+ enum ath12k_htc_svc_id service_id;
+ struct ath12k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath12k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath12k_htc {
+ struct ath12k_base *ab;
+ struct ath12k_htc_ep endpoint[ATH12K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ u8 control_resp_buffer[ATH12K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath12k_htc_svc_tx_credits
+ service_alloc_table[ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
+ int target_credit_size;
+ u8 wmi_ep_count;
+};
+
+int ath12k_htc_init(struct ath12k_base *ab);
+int ath12k_htc_wait_target(struct ath12k_htc *htc);
+int ath12k_htc_start(struct ath12k_htc *htc);
+int ath12k_htc_connect_service(struct ath12k_htc *htc,
+ struct ath12k_htc_svc_conn_req *conn_req,
+ struct ath12k_htc_svc_conn_resp *conn_resp);
+int ath12k_htc_send(struct ath12k_htc *htc, enum ath12k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath12k_htc_alloc_skb(struct ath12k_base *ab, int size);
+void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
+ struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
new file mode 100644
index 000000000000..91d576fd4b0f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "debug.h"
+#include "core.h"
+#include "ce.h"
+#include "hw.h"
+#include "mhi.h"
+#include "dp_rx.h"
+
+static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx)
+{
+ return pdev_idx;
+}
+
+static int ath12k_hw_mac_id_to_pdev_id_qcn9274(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static int ath12k_hw_mac_id_to_srng_id_qcn9274(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static u8 ath12k_hw_get_ring_selector_qcn9274(struct sk_buff *skb)
+{
+ return smp_processor_id();
+}
+
+static bool ath12k_dp_srng_is_comp_ring_qcn9274(int ring_num)
+{
+ if (ring_num < 3 || ring_num == 4)
+ return true;
+
+ return false;
+}
+
+static int ath12k_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath12k_hw_mac_id_to_srng_id_wcn7850(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static u8 ath12k_hw_get_ring_selector_wcn7850(struct sk_buff *skb)
+{
+ return skb_get_queue_mapping(skb);
+}
+
+static bool ath12k_dp_srng_is_comp_ring_wcn7850(int ring_num)
+{
+ if (ring_num == 0 || ring_num == 2 || ring_num == 4)
+ return true;
+
+ return false;
+}
+
+static const struct ath12k_hw_ops qcn9274_ops = {
+ .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
+ .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_qcn9274,
+ .mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_qcn9274,
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274,
+ .get_ring_selector = ath12k_hw_get_ring_selector_qcn9274,
+ .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_qcn9274,
+};
+
+static const struct ath12k_hw_ops wcn7850_ops = {
+ .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
+ .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_wcn7850,
+ .mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_wcn7850,
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_wcn7850,
+ .get_ring_selector = ath12k_hw_get_ring_selector_wcn7850,
+ .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_wcn7850,
+};
+
+#define ATH12K_TX_RING_MASK_0 0x1
+#define ATH12K_TX_RING_MASK_1 0x2
+#define ATH12K_TX_RING_MASK_2 0x4
+#define ATH12K_TX_RING_MASK_3 0x8
+#define ATH12K_TX_RING_MASK_4 0x10
+
+#define ATH12K_RX_RING_MASK_0 0x1
+#define ATH12K_RX_RING_MASK_1 0x2
+#define ATH12K_RX_RING_MASK_2 0x4
+#define ATH12K_RX_RING_MASK_3 0x8
+
+#define ATH12K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH12K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH12K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
+
+#define ATH12K_RX_MON_RING_MASK_0 0x1
+#define ATH12K_RX_MON_RING_MASK_1 0x2
+#define ATH12K_RX_MON_RING_MASK_2 0x4
+
+#define ATH12K_TX_MON_RING_MASK_0 0x1
+#define ATH12K_TX_MON_RING_MASK_1 0x2
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9, 10 and 11: Reserved for MHI */
+
+ /* CE12: Target CV prefetch */
+ {
+ .pipenum = __cpu_to_le32(12),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE13: Target CV prefetch */
+ {
+ .pipenum = __cpu_to_le32(13),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE14: WMI logging/CFR/Spectral/Radar */
+ {
+ .pipenum = __cpu_to_le32(14),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE15: Reserved */
+};
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config ath12k_target_ce_config_wlan_wcn7850[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE 9, 10, 11 are used by MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qcn9274[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(7),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(5),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(14),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_wcn7850[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
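+/* Ring masks are indexed by external IRQ group: entry i of each array
+ * selects which rings of that type are serviced by interrupt group i.
+ */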
+static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
+ ATH12K_TX_RING_MASK_2,
+ ATH12K_TX_RING_MASK_3,
+ },
+ .rx_mon_dest = {
+ 0, 0, 0,
+ ATH12K_RX_MON_RING_MASK_0,
+ ATH12K_RX_MON_RING_MASK_1,
+ ATH12K_RX_MON_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH12K_HOST2RXDMA_RING_MASK_0,
+ },
+ .tx_mon_dest = {
+ ATH12K_TX_MON_RING_MASK_0,
+ ATH12K_TX_MON_RING_MASK_1,
+ },
+};
+
+static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_2,
+ ATH12K_TX_RING_MASK_4,
+ },
+ .rx_mon_dest = {
+ },
+ .rx = {
+ 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ },
+ .tx_mon_dest = {
+ },
+};
+
+static const struct ath12k_hw_regs qcn9274_v1_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_id = 0x00000908,
+ .hal_tcl1_ring_misc = 0x00000910,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000920,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ .hal_tcl1_ring_msi1_data = 0x00000950,
+ .hal_tcl_ring_base_lsb = 0x00000b58,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000d38,
+
+ .hal_wbm_idle_ring_base_lsb = 0x00000d0c,
+ .hal_wbm_idle_ring_misc_addr = 0x00000d1c,
+ .hal_wbm_r0_idle_list_cntl_addr = 0x00000210,
+ .hal_wbm_r0_idle_list_size_addr = 0x00000214,
+ .hal_wbm_scattered_ring_base_lsb = 0x00000220,
+ .hal_wbm_scattered_ring_base_msb = 0x00000224,
+ .hal_wbm_scattered_desc_head_info_ix0 = 0x00000230,
+ .hal_wbm_scattered_desc_head_info_ix1 = 0x00000234,
+ .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000240,
+ .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000244,
+ .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000024c,
+
+ .hal_wbm_sw_release_ring_base_lsb = 0x0000034c,
+ .hal_wbm_sw1_release_ring_base_lsb = 0x000003c4,
+ .hal_wbm0_release_ring_base_lsb = 0x00000dd8,
+ .hal_wbm1_release_ring_base_lsb = 0x00000e50,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
+
+ /* PPE release ring address */
+ .hal_ppe_rel_ring_base = 0x0000043c,
+
+ /* REO DEST ring address */
+ .hal_reo2_ring_base = 0x0000055c,
+ .hal_reo1_misc_ctrl_addr = 0x00000b7c,
+ .hal_reo1_sw_cookie_cfg0 = 0x00000050,
+ .hal_reo1_sw_cookie_cfg1 = 0x00000054,
+ .hal_reo1_qdesc_lut_base0 = 0x00000058,
+ .hal_reo1_qdesc_lut_base1 = 0x0000005c,
+ .hal_reo1_ring_base_lsb = 0x000004e4,
+ .hal_reo1_ring_base_msb = 0x000004e8,
+ .hal_reo1_ring_id = 0x000004ec,
+ .hal_reo1_ring_misc = 0x000004f4,
+ .hal_reo1_ring_hp_addr_lsb = 0x000004f8,
+ .hal_reo1_ring_hp_addr_msb = 0x000004fc,
+ .hal_reo1_ring_producer_int_setup = 0x00000508,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000052c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000530,
+ .hal_reo1_ring_msi1_data = 0x00000534,
+ .hal_reo1_aging_thres_ix0 = 0x00000b08,
+ .hal_reo1_aging_thres_ix1 = 0x00000b0c,
+ .hal_reo1_aging_thres_ix2 = 0x00000b10,
+ .hal_reo1_aging_thres_ix3 = 0x00000b14,
+
+ /* REO Exception ring address */
+ .hal_reo2_sw0_ring_base = 0x000008a4,
+
+ /* REO Reinject ring address */
+ .hal_sw2reo_ring_base = 0x00000304,
+ .hal_sw2reo1_ring_base = 0x0000037c,
+
+ /* REO cmd ring address */
+ .hal_reo_cmd_ring_base = 0x0000028c,
+
+ /* REO status ring address */
+ .hal_reo_status_ring_base = 0x00000a84,
+};
+
+static const struct ath12k_hw_regs qcn9274_v2_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_id = 0x00000908,
+ .hal_tcl1_ring_misc = 0x00000910,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000920,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ .hal_tcl1_ring_msi1_data = 0x00000950,
+ .hal_tcl_ring_base_lsb = 0x00000b58,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000d38,
+
+ /* WBM idle link ring address */
+ .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
+ .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
+ .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .hal_wbm_r0_idle_list_size_addr = 0x00000244,
+ .hal_wbm_scattered_ring_base_lsb = 0x00000250,
+ .hal_wbm_scattered_ring_base_msb = 0x00000254,
+ .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+ /* SW2WBM release ring address */
+ .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
+ .hal_wbm_sw1_release_ring_base_lsb = 0x000003f4,
+
+ /* WBM2SW release ring address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000e08,
+ .hal_wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
+
+ /* PPE release ring address */
+ .hal_ppe_rel_ring_base = 0x0000046c,
+
+ /* REO DEST ring address */
+ .hal_reo2_ring_base = 0x00000578,
+ .hal_reo1_misc_ctrl_addr = 0x00000b9c,
+ .hal_reo1_sw_cookie_cfg0 = 0x0000006c,
+ .hal_reo1_sw_cookie_cfg1 = 0x00000070,
+ .hal_reo1_qdesc_lut_base0 = 0x00000074,
+ .hal_reo1_qdesc_lut_base1 = 0x00000078,
+ .hal_reo1_ring_base_lsb = 0x00000500,
+ .hal_reo1_ring_base_msb = 0x00000504,
+ .hal_reo1_ring_id = 0x00000508,
+ .hal_reo1_ring_misc = 0x00000510,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000514,
+ .hal_reo1_ring_hp_addr_msb = 0x00000518,
+ .hal_reo1_ring_producer_int_setup = 0x00000524,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000548,
+ .hal_reo1_ring_msi1_base_msb = 0x0000054c,
+ .hal_reo1_ring_msi1_data = 0x00000550,
+ .hal_reo1_aging_thres_ix0 = 0x00000b28,
+ .hal_reo1_aging_thres_ix1 = 0x00000b2c,
+ .hal_reo1_aging_thres_ix2 = 0x00000b30,
+ .hal_reo1_aging_thres_ix3 = 0x00000b34,
+
+ /* REO Exception ring address */
+ .hal_reo2_sw0_ring_base = 0x000008c0,
+
+ /* REO Reinject ring address */
+ .hal_sw2reo_ring_base = 0x00000320,
+ .hal_sw2reo1_ring_base = 0x00000398,
+
+ /* REO cmd ring address */
+ .hal_reo_cmd_ring_base = 0x000002a8,
+
+ /* REO status ring address */
+ .hal_reo_status_ring_base = 0x00000aa0,
+};
+
+static const struct ath12k_hw_regs wcn7850_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_id = 0x00000908,
+ .hal_tcl1_ring_misc = 0x00000910,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000920,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ .hal_tcl1_ring_msi1_data = 0x00000950,
+ .hal_tcl_ring_base_lsb = 0x00000b58,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000d38,
+
+ .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
+ .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
+ .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .hal_wbm_r0_idle_list_size_addr = 0x00000244,
+ .hal_wbm_scattered_ring_base_lsb = 0x00000250,
+ .hal_wbm_scattered_ring_base_msb = 0x00000254,
+ .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+ .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
+ .hal_wbm_sw1_release_ring_base_lsb = 0x00000284,
+ .hal_wbm0_release_ring_base_lsb = 0x00000e08,
+ .hal_wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
+
+ /* PPE release ring address */
+ .hal_ppe_rel_ring_base = 0x0000043c,
+
+ /* REO DEST ring address */
+ .hal_reo2_ring_base = 0x0000055c,
+ .hal_reo1_misc_ctrl_addr = 0x00000b7c,
+ .hal_reo1_sw_cookie_cfg0 = 0x00000050,
+ .hal_reo1_sw_cookie_cfg1 = 0x00000054,
+ .hal_reo1_qdesc_lut_base0 = 0x00000058,
+ .hal_reo1_qdesc_lut_base1 = 0x0000005c,
+ .hal_reo1_ring_base_lsb = 0x000004e4,
+ .hal_reo1_ring_base_msb = 0x000004e8,
+ .hal_reo1_ring_id = 0x000004ec,
+ .hal_reo1_ring_misc = 0x000004f4,
+ .hal_reo1_ring_hp_addr_lsb = 0x000004f8,
+ .hal_reo1_ring_hp_addr_msb = 0x000004fc,
+ .hal_reo1_ring_producer_int_setup = 0x00000508,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000052c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000530,
+ .hal_reo1_ring_msi1_data = 0x00000534,
+ .hal_reo1_aging_thres_ix0 = 0x00000b08,
+ .hal_reo1_aging_thres_ix1 = 0x00000b0c,
+ .hal_reo1_aging_thres_ix2 = 0x00000b10,
+ .hal_reo1_aging_thres_ix3 = 0x00000b14,
+
+ /* REO Exception ring address */
+ .hal_reo2_sw0_ring_base = 0x000008a4,
+
+ /* REO Reinject ring address */
+ .hal_sw2reo_ring_base = 0x00000304,
+ .hal_sw2reo1_ring_base = 0x0000037c,
+
+ /* REO cmd ring address */
+ .hal_reo_cmd_ring_base = 0x0000028c,
+
+ /* REO status ring address */
+ .hal_reo_status_ring_base = 0x00000a84,
+};
+
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+static const struct ath12k_hw_params ath12k_hw_params[] = {
+ {
+ .name = "qcn9274 hw1.0",
+ .hw_rev = ATH12K_HW_QCN9274_HW10,
+ .fw = {
+ .dir = "QCN9274/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .ring_mask = &ath12k_hw_ring_mask_qcn9274,
+ .regs = &qcn9274_v1_regs,
+
+ .host_ce_config = ath12k_host_ce_config_qcn9274,
+ .ce_count = 16,
+ .target_ce_config = ath12k_target_ce_config_wlan_qcn9274,
+ .target_ce_count = 12,
+ .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274,
+ .svc_to_ce_map_len = 18,
+
+ .hal_params = &ath12k_hw_hal_params_qcn9274,
+
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = false,
+
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+ .mhi_config = &ath12k_mhi_config_qcn9274,
+
+ .wmi_init = ath12k_wmi_init_qcn9274,
+
+ .hal_ops = &hal_qcn9274_ops,
+
+ },
+ {
+ .name = "wcn7850 hw2.0",
+ .hw_rev = ATH12K_HW_WCN7850_HW20,
+
+ .fw = {
+ .dir = "WCN7850/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 256 * 1024,
+ },
+
+ .max_radios = 1,
+ .single_pdev_only = true,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850,
+ .internal_sleep_clock = true,
+
+ .hw_ops = &wcn7850_ops,
+ .ring_mask = &ath12k_hw_ring_mask_wcn7850,
+ .regs = &wcn7850_regs,
+
+ .host_ce_config = ath12k_host_ce_config_wcn7850,
+ .ce_count = 9,
+ .target_ce_config = ath12k_target_ce_config_wlan_wcn7850,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_wcn7850,
+ .svc_to_ce_map_len = 14,
+
+ .hal_params = &ath12k_hw_hal_params_wcn7850,
+
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .num_rxdma_dst_ring = 1,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = false,
+ .supports_suspend = false,
+ .tcl_ring_retry = false,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = true,
+
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
+ .num_tcl_banks = 7,
+ .max_tx_ring = 3,
+
+ .mhi_config = &ath12k_mhi_config_wcn7850,
+
+ .wmi_init = ath12k_wmi_init_wcn7850,
+
+ .hal_ops = &hal_wcn7850_ops,
+ },
+ {
+ .name = "qcn9274 hw2.0",
+ .hw_rev = ATH12K_HW_QCN9274_HW20,
+ .fw = {
+ .dir = "QCN9274/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .ring_mask = &ath12k_hw_ring_mask_qcn9274,
+ .regs = &qcn9274_v2_regs,
+
+ .host_ce_config = ath12k_host_ce_config_qcn9274,
+ .ce_count = 16,
+ .target_ce_config = ath12k_target_ce_config_wlan_qcn9274,
+ .target_ce_count = 12,
+ .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274,
+ .svc_to_ce_map_len = 18,
+
+ .hal_params = &ath12k_hw_hal_params_qcn9274,
+
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = false,
+
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+ .mhi_config = &ath12k_mhi_config_qcn9274,
+
+ .wmi_init = ath12k_wmi_init_qcn9274,
+
+ .hal_ops = &hal_qcn9274_ops,
+ },
+};
+
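+/* Select the hw_params entry whose hw_rev matches the hardware revision
+ * detected during probe; probing fails if no entry matches.
+ */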
+int ath12k_hw_init(struct ath12k_base *ab)
+{
+ const struct ath12k_hw_params *hw_params = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath12k_hw_params); i++) {
+ hw_params = &ath12k_hw_params[i];
+
+ if (hw_params->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath12k_hw_params)) {
+ ath12k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->hw_params = hw_params;
+
+ ath12k_info(ab, "Hardware name: %s\n", ab->hw_params->name);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
new file mode 100644
index 000000000000..e3461004188b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HW_H
+#define ATH12K_HW_H
+
+#include <linux/mhi.h>
+
+#include "wmi.h"
+#include "hal.h"
+
+/* Target configuration defines */
+
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS (16 + 1)
+
+#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+
+/* Num of peers for Single Radio mode */
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS */
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS_SBS */
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+
+/* Max num of stations (per radio) */
+#define TARGET_NUM_STATIONS 512
+
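+/* Resolves via token pasting to one of the TARGET_NUM_PEERS_* values
+ * above, e.g. TARGET_NUM_PEERS(SINGLE) -> TARGET_NUM_PEERS_SINGLE.
+ */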
+#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
+ 4 * TARGET_NUM_VDEVS + 8)
+
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_OFFLD_PEERS 4
+#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
+
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_DECAP_MODE_RAW 0
+#define TARGET_DECAP_MODE_NATIVE_WIFI 1
+#define TARGET_DECAP_MODE_ETH 2
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 12
+#define TARGET_NUM_MCAST_TABLE_ELEMS 64
+#define TARGET_MCAST2UCAST_MODE 2
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (2500)
+#define TARGET_MAX_FRAG_ENTRIES 6
+#define TARGET_MAX_BCN_OFFLD 16
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 1
+#define TARGET_RX_BATCHMODE 1
+
+#define ATH12K_HW_MAX_QUEUES 4
+#define ATH12K_QUEUE_LEN 4096
+
+#define ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
+
+#define ATH12K_FW_DIR "ath12k"
+
+#define ATH12K_BOARD_MAGIC "QCA-ATH12K-BOARD"
+#define ATH12K_BOARD_API2_FILE "board-2.bin"
+#define ATH12K_DEFAULT_BOARD_FILE "board.bin"
+#define ATH12K_DEFAULT_CAL_FILE "caldata.bin"
+#define ATH12K_AMSS_FILE "amss.bin"
+#define ATH12K_M3_FILE "m3.bin"
+#define ATH12K_REGDB_FILE_NAME "regdb.bin"
+
+enum ath12k_hw_rate_cck {
+ ATH12K_HW_RATE_CCK_LP_11M = 0,
+ ATH12K_HW_RATE_CCK_LP_5_5M,
+ ATH12K_HW_RATE_CCK_LP_2M,
+ ATH12K_HW_RATE_CCK_LP_1M,
+ ATH12K_HW_RATE_CCK_SP_11M,
+ ATH12K_HW_RATE_CCK_SP_5_5M,
+ ATH12K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath12k_hw_rate_ofdm {
+ ATH12K_HW_RATE_OFDM_48M = 0,
+ ATH12K_HW_RATE_OFDM_24M,
+ ATH12K_HW_RATE_OFDM_12M,
+ ATH12K_HW_RATE_OFDM_6M,
+ ATH12K_HW_RATE_OFDM_54M,
+ ATH12K_HW_RATE_OFDM_36M,
+ ATH12K_HW_RATE_OFDM_18M,
+ ATH12K_HW_RATE_OFDM_9M,
+};
+
+enum ath12k_bus {
+ ATH12K_BUS_PCI,
+};
+
+#define ATH12K_EXT_IRQ_GRP_NUM_MAX 11
+
+struct hal_rx_desc;
+struct hal_tcl_data_cmd;
+struct htt_rx_ring_tlv_filter;
+enum hal_encrypt_type;
+
+struct ath12k_hw_ring_mask {
+ u8 tx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 reo_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 host2rxdma[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 tx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+};
+
+struct ath12k_hw_hal_params {
+ enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+ u32 wbm2sw_cc_enable;
+};
+
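+/* Static per-chip configuration; one entry per supported hardware
+ * revision, selected at probe time by ath12k_hw_init().
+ */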
+struct ath12k_hw_params {
+ const char *name;
+ u16 hw_rev;
+
+ struct {
+ const char *dir;
+ size_t board_size;
+ size_t cal_offset;
+ } fw;
+
+ u8 max_radios;
+ bool single_pdev_only:1;
+ u32 qmi_service_ins_id;
+ bool internal_sleep_clock:1;
+
+ const struct ath12k_hw_ops *hw_ops;
+ const struct ath12k_hw_ring_mask *ring_mask;
+ const struct ath12k_hw_regs *regs;
+
+ const struct ce_attr *host_ce_config;
+ u32 ce_count;
+ const struct ce_pipe_config *target_ce_config;
+ u32 target_ce_count;
+ const struct service_to_pipe *svc_to_ce_map;
+ u32 svc_to_ce_map_len;
+
+ const struct ath12k_hw_hal_params *hal_params;
+
+ bool rxdma1_enable:1;
+ int num_rxmda_per_pdev;
+ int num_rxdma_dst_ring;
+ bool rx_mac_buf_ring:1;
+ bool vdev_start_delay:1;
+
+ u16 interface_modes;
+ bool supports_monitor:1;
+
+ bool idle_ps:1;
+ bool download_calib:1;
+ bool supports_suspend:1;
+ bool tcl_ring_retry:1;
+ bool reoq_lut_support:1;
+ bool supports_shadow_regs:1;
+
+ u32 hal_desc_sz;
+ u32 num_tcl_banks;
+ u32 max_tx_ring;
+
+ const struct mhi_controller_config *mhi_config;
+
+ void (*wmi_init)(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config);
+
+ const struct hal_ops *hal_ops;
+};
+
+struct ath12k_hw_ops {
+ u8 (*get_hw_mac_from_pdev_id)(int pdev_id);
+ int (*mac_id_to_pdev_id)(const struct ath12k_hw_params *hw, int mac_id);
+ int (*mac_id_to_srng_id)(const struct ath12k_hw_params *hw, int mac_id);
+ int (*rxdma_ring_sel_config)(struct ath12k_base *ab);
+ u8 (*get_ring_selector)(struct sk_buff *skb);
+ bool (*dp_srng_is_tx_comp_ring)(int ring_num);
+};
+
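+/* The wrappers below treat the hw_ops hooks as optional and fall back to
+ * returning 0 when a chip does not implement them.
+ */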
+static inline
+int ath12k_hw_get_mac_from_pdev_id(const struct ath12k_hw_params *hw,
+ int pdev_idx)
+{
+ if (hw->hw_ops->get_hw_mac_from_pdev_id)
+ return hw->hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
+
+ return 0;
+}
+
+static inline int ath12k_hw_mac_id_to_pdev_id(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_pdev_id)
+ return hw->hw_ops->mac_id_to_pdev_id(hw, mac_id);
+
+ return 0;
+}
+
+static inline int ath12k_hw_mac_id_to_srng_id(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_srng_id)
+ return hw->hw_ops->mac_id_to_srng_id(hw, mac_id);
+
+ return 0;
+}
+
+struct ath12k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[];
+};
+
+enum ath12k_bd_ie_board_type {
+ ATH12K_BD_IE_BOARD_NAME = 0,
+ ATH12K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath12k_bd_ie_type {
+ /* contains sub IEs of enum ath12k_bd_ie_board_type */
+ ATH12K_BD_IE_BOARD = 0,
+ ATH12K_BD_IE_BOARD_EXT = 1,
+};
+
+struct ath12k_hw_regs {
+ u32 hal_tcl1_ring_id;
+ u32 hal_tcl1_ring_misc;
+ u32 hal_tcl1_ring_tp_addr_lsb;
+ u32 hal_tcl1_ring_tp_addr_msb;
+ u32 hal_tcl1_ring_consumer_int_setup_ix0;
+ u32 hal_tcl1_ring_consumer_int_setup_ix1;
+ u32 hal_tcl1_ring_msi1_base_lsb;
+ u32 hal_tcl1_ring_msi1_base_msb;
+ u32 hal_tcl1_ring_msi1_data;
+ u32 hal_tcl_ring_base_lsb;
+
+ u32 hal_tcl_status_ring_base_lsb;
+
+ u32 hal_wbm_idle_ring_base_lsb;
+ u32 hal_wbm_idle_ring_misc_addr;
+ u32 hal_wbm_r0_idle_list_cntl_addr;
+ u32 hal_wbm_r0_idle_list_size_addr;
+ u32 hal_wbm_scattered_ring_base_lsb;
+ u32 hal_wbm_scattered_ring_base_msb;
+ u32 hal_wbm_scattered_desc_head_info_ix0;
+ u32 hal_wbm_scattered_desc_head_info_ix1;
+ u32 hal_wbm_scattered_desc_tail_info_ix0;
+ u32 hal_wbm_scattered_desc_tail_info_ix1;
+ u32 hal_wbm_scattered_desc_ptr_hp_addr;
+
+ u32 hal_wbm_sw_release_ring_base_lsb;
+ u32 hal_wbm_sw1_release_ring_base_lsb;
+ u32 hal_wbm0_release_ring_base_lsb;
+ u32 hal_wbm1_release_ring_base_lsb;
+
+ u32 pcie_qserdes_sysclk_en_sel;
+ u32 pcie_pcs_osc_dtct_config_base;
+
+ u32 hal_ppe_rel_ring_base;
+
+ u32 hal_reo2_ring_base;
+ u32 hal_reo1_misc_ctrl_addr;
+ u32 hal_reo1_sw_cookie_cfg0;
+ u32 hal_reo1_sw_cookie_cfg1;
+ u32 hal_reo1_qdesc_lut_base0;
+ u32 hal_reo1_qdesc_lut_base1;
+ u32 hal_reo1_ring_base_lsb;
+ u32 hal_reo1_ring_base_msb;
+ u32 hal_reo1_ring_id;
+ u32 hal_reo1_ring_misc;
+ u32 hal_reo1_ring_hp_addr_lsb;
+ u32 hal_reo1_ring_hp_addr_msb;
+ u32 hal_reo1_ring_producer_int_setup;
+ u32 hal_reo1_ring_msi1_base_lsb;
+ u32 hal_reo1_ring_msi1_base_msb;
+ u32 hal_reo1_ring_msi1_data;
+ u32 hal_reo1_aging_thres_ix0;
+ u32 hal_reo1_aging_thres_ix1;
+ u32 hal_reo1_aging_thres_ix2;
+ u32 hal_reo1_aging_thres_ix3;
+
+ u32 hal_reo2_sw0_ring_base;
+
+ u32 hal_sw2reo_ring_base;
+ u32 hal_sw2reo1_ring_base;
+
+ u32 hal_reo_cmd_ring_base;
+
+ u32 hal_reo_status_ring_base;
+};
+
+int ath12k_hw_init(struct ath12k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
new file mode 100644
index 000000000000..bf7e5b6977b2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -0,0 +1,7038 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include "mac.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "peer.h"
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN6G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_6GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel ath12k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath12k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+};
+
+static const struct ieee80211_channel ath12k_6ghz_channels[] = {
+ CHAN6G(1, 5955, 0),
+ CHAN6G(5, 5975, 0),
+ CHAN6G(9, 5995, 0),
+ CHAN6G(13, 6015, 0),
+ CHAN6G(17, 6035, 0),
+ CHAN6G(21, 6055, 0),
+ CHAN6G(25, 6075, 0),
+ CHAN6G(29, 6095, 0),
+ CHAN6G(33, 6115, 0),
+ CHAN6G(37, 6135, 0),
+ CHAN6G(41, 6155, 0),
+ CHAN6G(45, 6175, 0),
+ CHAN6G(49, 6195, 0),
+ CHAN6G(53, 6215, 0),
+ CHAN6G(57, 6235, 0),
+ CHAN6G(61, 6255, 0),
+ CHAN6G(65, 6275, 0),
+ CHAN6G(69, 6295, 0),
+ CHAN6G(73, 6315, 0),
+ CHAN6G(77, 6335, 0),
+ CHAN6G(81, 6355, 0),
+ CHAN6G(85, 6375, 0),
+ CHAN6G(89, 6395, 0),
+ CHAN6G(93, 6415, 0),
+ CHAN6G(97, 6435, 0),
+ CHAN6G(101, 6455, 0),
+ CHAN6G(105, 6475, 0),
+ CHAN6G(109, 6495, 0),
+ CHAN6G(113, 6515, 0),
+ CHAN6G(117, 6535, 0),
+ CHAN6G(121, 6555, 0),
+ CHAN6G(125, 6575, 0),
+ CHAN6G(129, 6595, 0),
+ CHAN6G(133, 6615, 0),
+ CHAN6G(137, 6635, 0),
+ CHAN6G(141, 6655, 0),
+ CHAN6G(145, 6675, 0),
+ CHAN6G(149, 6695, 0),
+ CHAN6G(153, 6715, 0),
+ CHAN6G(157, 6735, 0),
+ CHAN6G(161, 6755, 0),
+ CHAN6G(165, 6775, 0),
+ CHAN6G(169, 6795, 0),
+ CHAN6G(173, 6815, 0),
+ CHAN6G(177, 6835, 0),
+ CHAN6G(181, 6855, 0),
+ CHAN6G(185, 6875, 0),
+ CHAN6G(189, 6895, 0),
+ CHAN6G(193, 6915, 0),
+ CHAN6G(197, 6935, 0),
+ CHAN6G(201, 6955, 0),
+ CHAN6G(205, 6975, 0),
+ CHAN6G(209, 6995, 0),
+ CHAN6G(213, 7015, 0),
+ CHAN6G(217, 7035, 0),
+ CHAN6G(221, 7055, 0),
+ CHAN6G(225, 7075, 0),
+ CHAN6G(229, 7095, 0),
+ CHAN6G(233, 7115, 0),
+};
+
+static struct ieee80211_rate ath12k_legacy_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH12K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH12K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH12K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH12K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH12K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH12K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH12K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH12K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH12K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH12K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH12K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH12K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH12K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH12K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH12K_HW_RATE_OFDM_54M },
+};
+
+static const int
+ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
+ [NL80211_BAND_2GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
+ },
+ [NL80211_BAND_5GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+ [NL80211_BAND_6GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+};
+
+const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = {
+ .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
+ .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
+ .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
+ .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_FP_CTRL_FILTER_FLASG3
+};
+
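+/* The first four entries of ath12k_legacy_rates are the CCK rates (2 GHz
+ * only); OFDM rates start at index 4, hence the offsets below.
+ */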
+#define ATH12K_MAC_FIRST_OFDM_RATE_IDX 4
+#define ath12k_g_rates ath12k_legacy_rates
+#define ath12k_g_rates_size (ARRAY_SIZE(ath12k_legacy_rates))
+#define ath12k_a_rates (ath12k_legacy_rates + 4)
+#define ath12k_a_rates_size (ARRAY_SIZE(ath12k_legacy_rates) - 4)
+
+#define ATH12K_MAC_SCAN_TIMEOUT_MSECS 200
+
+static const u32 ath12k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_11AX_HE20:
+ return "11ax-he20";
+ case MODE_11AX_HE40:
+ return "11ax-he40";
+ case MODE_11AX_HE80:
+ return "11ax-he80";
+ case MODE_11AX_HE80_80:
+ return "11ax-he80+80";
+ case MODE_11AX_HE160:
+ return "11ax-he160";
+ case MODE_11AX_HE20_2G:
+ return "11ax-he20-2g";
+ case MODE_11AX_HE40_2G:
+ return "11ax-he40-2g";
+ case MODE_11AX_HE80_2G:
+ return "11ax-he80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<unknown>";
+}
+
+enum rate_info_bw
+ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
+{
+ u8 ret = RATE_INFO_BW_20;
+
+ switch (bw) {
+ case ATH12K_BW_20:
+ ret = RATE_INFO_BW_20;
+ break;
+ case ATH12K_BW_40:
+ ret = RATE_INFO_BW_40;
+ break;
+ case ATH12K_BW_80:
+ ret = RATE_INFO_BW_80;
+ break;
+ case ATH12K_BW_160:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw)
+{
+ switch (bw) {
+ case RATE_INFO_BW_20:
+ return ATH12K_BW_20;
+ case RATE_INFO_BW_40:
+ return ATH12K_BW_40;
+ case RATE_INFO_BW_80:
+ return ATH12K_BW_80;
+ case RATE_INFO_BW_160:
+ return ATH12K_BW_160;
+ default:
+ return ATH12K_BW_20;
+ }
+}
+
+int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate)
+{
+ /* Default to OFDM rates */
+ int i = ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+ int max_rates_idx = ath12k_g_rates_size;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK) {
+ hw_rc &= ~ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK;
+ i = 0;
+ max_rates_idx = ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+ }
+
+ while (i < max_rates_idx) {
+ if (hw_rc == ath12k_legacy_rates[i].hw_value) {
+ *rateidx = i;
+ *rate = ath12k_legacy_rates[i].bitrate;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static u32
+ath12k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath12k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u8 ath12k_parse_mpdudensity(u8 mpdudensity)
+{
+/* From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
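+/* Clamp each NSS's TX MCS range in the peer's VHT MCS map to the
+ * user-configured per-NSS MCS mask.
+ */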
+}
+
+static int ath12k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static bool ath12k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath12k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
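+/* Convert a bitrate in 100 kbps units to a rate value in 500 kbps units,
+ * with BIT(7) marking CCK rates.
+ */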
+static u8 ath12k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath12k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+static void ath12k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif_iter *arvif_iter = data;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
+{
+ struct ath12k_vif_iter arvif_iter = {};
+ u32 flags;
+
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath12k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+ ath12k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
+ return NULL;
+ }
+
+ return arvif_iter.arvif;
+}
+
+struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath12k_pdev *pdev;
+ struct ath12k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id)
+{
+ int i;
+ struct ath12k_pdev *pdev;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
+ return pdev->ar;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
+{
+ int i;
+ struct ath12k_pdev *pdev;
+
+ if (ab->hw_params->single_pdev_only) {
+ pdev = rcu_dereference(ab->pdevs_active[0]);
+ return pdev ? pdev->ar : NULL;
+ }
+
+ if (WARN_ON(pdev_id > ab->num_radios))
+ return NULL;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+
+ if (pdev && pdev->pdev_id == pdev_id)
+ return pdev->ar;
+ }
+
+ return NULL;
+}
+
+static void ath12k_pdev_caps_update(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+
+ ar->max_tx_power = ab->target_caps.hw_max_tx_power;
+
+ /* FIXME: min_tx_power should come from ab->target_caps.hw_min_tx_power,
+ * but the value received in the service-ready event is the same as
+ * hw_max_tx_power, so keep ar->min_tx_power at 0 until this is fixed
+ * in firmware.
+ */
+ ar->min_tx_power = 0;
+
+ ar->txpower_limit_2g = ar->max_tx_power;
+ ar->txpower_limit_5g = ar->max_tx_power;
+ ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
+}
+
+static int ath12k_mac_txpower_recalc(struct ath12k *ar)
+{
+ struct ath12k_pdev *pdev = ar->pdev;
+ struct ath12k_vif *arvif;
+ int ret, txpower = -1;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->txpower <= 0)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ /* Firmware expects txpower in units of 0.5 dBm (2 units per dBm) */
+ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
+ ar->max_tx_power) * 2;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
+ txpower / 2);
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ ar->txpower_limit_2g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+ ret = ath12k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_2g = txpower;
+ }
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ ar->txpower_limit_5g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+ ret = ath12k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_5g = txpower;
+ }
+
+ return 0;
+
+fail:
+ ath12k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
+ txpower / 2, param, ret);
+ return ret;
+}
+
+static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ u32 vdev_param, rts_cts;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
+
+ /* Enable RTS/CTS protection for sw retries (when legacy stations
+ * are in BSS) or by default only for second rate series.
+ * TODO: Check if we need to enable CTS 2 Self in any case
+ */
+ rts_cts = WMI_USE_RTS_CTS;
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
+ else
+ rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
+
+ /* Need not send duplicate param value to firmware */
+ if (arvif->rtscts_prot_mode == rts_cts)
+ return 0;
+
+ arvif->rtscts_prot_mode = rts_cts;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rts_cts);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return ret;
+}
+
+static int ath12k_mac_set_kickout(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ ATH12K_KICKOUT_THRESHOLD,
+ ar->pdev->pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH12K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH12K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH12K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
+{
+ struct ath12k_peer *peer, *tmp;
+ struct ath12k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
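+/* Wait for firmware to answer a previously issued vdev start/stop
+ * request; vdev_setup_done is completed once the WMI response arrives.
+ */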
+static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH12K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
+{
+ int ret;
+
+ ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
+ vdev_id);
+ return 0;
+}
+
+static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *channel;
+ struct wmi_vdev_start_req_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ channel = chandef->chan;
+ arg.vdev_id = vdev_id;
+ arg.freq = channel->center_freq;
+ arg.band_center_freq1 = chandef->center_freq1;
+ arg.band_center_freq2 = chandef->center_freq2;
+ arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
+ arg.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.min_power = 0;
+ arg.max_power = channel->max_power;
+ arg.max_reg_power = channel->max_reg_power;
+ arg.max_antenna_gain = channel->max_antenna_gain;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath12k_wmi_vdev_start(ar, &arg, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ goto vdev_stop;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
+ vdev_id);
+ return 0;
+
+vdev_stop:
+ ret = ath12k_wmi_vdev_stop(ar, vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
+ vdev_id, ret);
+ return ret;
+}
+
+static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath12k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ret = ath12k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
+static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
+{
+ struct ath12k_pdev *pdev = ar->pdev;
+ struct ath12k_wmi_vdev_create_arg arg = {};
+ int bit, ret;
+ u8 tmp_addr[6] = {};
+ u16 nss;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->monitor_vdev_created)
+ return 0;
+
+ if (ar->ab->free_vdev_map == 0) {
+ ath12k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->ab->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ arg.if_id = ar->monitor_vdev_id;
+ arg.type = WMI_VDEV_TYPE_MONITOR;
+ arg.subtype = WMI_VDEV_SUBTYPE_NONE;
+ arg.pdev_id = pdev->pdev_id;
+ arg.if_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+
+ ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ ar->monitor_vdev_id = -1;
+ return ret;
+ }
+
+ nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ return ret;
+ }
+
+ ret = ath12k_mac_txpower_recalc(ar);
+ if (ret)
+ return ret;
+
+ ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
+ ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->num_created_vdevs++;
+ ar->monitor_vdev_created = true;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->monitor_vdev_created)
+ return 0;
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath12k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH12K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ } else {
+ ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+ ar->num_created_vdevs--;
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ }
+
+ return ret;
+}
+
+static void
+ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath12k_mac_monitor_start(struct ath12k *ar)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->monitor_started)
+ return 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath12k_mac_get_any_chandef_iter,
+ &chandef);
+ if (!chandef)
+ return 0;
+
+ ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
+ ath12k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+ ar->num_started_vdevs++;
+ ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);
+
+ return ret;
+}
+
+static int ath12k_mac_monitor_stop(struct ath12k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->monitor_started)
+ return 0;
+
+ ret = ath12k_mac_monitor_vdev_stop(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ar->monitor_started = false;
+ ar->num_started_vdevs--;
+ ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, true);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
+ return ret;
+}
+
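+/* mac80211 .config callback; currently only monitor mode enable/disable
+ * is handled here.
+ */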
+static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath12k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
+ if (ar->monitor_conf_enabled) {
+ if (ar->monitor_vdev_created)
+ goto exit;
+ ret = ath12k_mac_monitor_vdev_create(ar);
+ if (ret)
+ goto exit;
+ ret = ath12k_mac_monitor_start(ar);
+ if (ret)
+ goto err_mon_del;
+ } else {
+ if (!ar->monitor_vdev_created)
+ goto exit;
+ ret = ath12k_mac_monitor_stop(ar);
+ if (ret)
+ goto exit;
+ ath12k_mac_monitor_vdev_delete(ar);
+ }
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+
+err_mon_del:
+ ath12k_mac_monitor_vdev_delete(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
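+/* Fetch the beacon template from mac80211, record whether RSN/WPA IEs are
+ * present (used later when setting up peer security flags) and install
+ * the template in firmware.
+ */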
+static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ struct ieee80211_mgmt *mgmt;
+ u8 *ies;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (!bcn) {
+ ath12k_warn(ab, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
+ ies += sizeof(mgmt->u.beacon);
+
+ if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
+ arvif->rsnie_present = true;
+
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies, (skb_tail_pointer(bcn) - ies)))
+ arvif->wpaie_present = true;
+
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+
+ kfree_skb(bcn);
+
+ if (ret)
+ ath12k_warn(ab, "failed to submit beacon template command: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void ath12k_control_beaconing(struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ return;
+ }
+
+ /* Install the beacon template to the FW */
+ ret = ath12k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
+ ret);
+ return;
+ }
+
+ arvif->aid = 0;
+
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->cfg.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->peer_mac, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_associd = aid;
+ arg->auth_flag = true;
+ /* TODO: STA WAR in ath10k for listen interval required? */
+ arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_nss = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+
+ if (arvif->rsnie_present || arvif->wpaie_present) {
+ arg->need_ptk_4_way = true;
+ if (arvif->wpaie_present)
+ arg->need_gtk_2_way = true;
+ } else if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: Is keying this off the RSN/WPA IE the correct approach? */
+ if (rsnie || wpaie) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "%s: rsn ie found\n", __func__);
+ arg->need_ptk_4_way = true;
+ }
+
+ if (wpaie) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "%s: wpa ie found\n", __func__);
+ arg->need_gtk_2_way = true;
+ }
+
+ if (sta->mfp) {
+ /* TODO: Need to check if FW supports PMF? */
+ arg->is_pmf_enabled = true;
+ }
+
+ /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
+}
+
+static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->deflink.supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath12k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath12k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath12k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+
+ if (ath12k_peer_assoc_h_ht_masked(ht_mcs_mask))
+ return;
+
+ arg->ht_flag = true;
+
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath12k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->ldpc_flag = true;
+
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->bw_40 = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
+ }
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40))
+ arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
+
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /* This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->peer_mac,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_nss);
+}
+
+static int ath12k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u16
+ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath12k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u16 *vht_mcs_mask;
+ u16 tx_mcs_map;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath12k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->vht_flag = true;
+
+ /* TODO: similar flags required? */
+ arg->vht_capable = true;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->vht_ng_flag = true;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+
+ tx_mcs_map = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+ arg->tx_mcs_set = ath12k_peer_assoc_h_vht_limit(tx_mcs_map, vht_mcs_mask);
+
+ /* On the QCN9274 platform, VHT MCS rates 10 and 11 are enabled by
+ * default, but they are not part of the 11ac standard, so explicitly
+ * disable them in 11ac mode.
+ */
+ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
+ arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+
+ if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
+ IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ /* TODO: Check */
+ arg->tx_max_mcs_nss = 0xFF;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+
+ /* TODO: rxnss_override */
+}
+
+static void ath12k_peer_assoc_h_he(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ int i;
+ u8 ampdu_factor, rx_mcs_80 = 0, rx_mcs_160 = 0, max_nss;
+ u16 mcs_160_map, mcs_80_map;
+ bool support_160;
+ u16 v;
+
+ if (!he_cap->has_he)
+ return;
+
+ arg->he_flag = true;
+
+ support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);
+
+ /* The supported HE-MCS and NSS set of the peer is the intersection of
+ * peer and self HE caps.
+ */
+ mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+
+ if (support_160) {
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
+
+ if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ rx_mcs_160 = i + 1;
+ break;
+ }
+ }
+ }
+
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
+
+ if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ rx_mcs_80 = i + 1;
+ break;
+ }
+ }
+
+ if (support_160)
+ max_nss = min(rx_mcs_80, rx_mcs_160);
+ else
+ max_nss = rx_mcs_80;
+
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+
+ memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
+ sizeof(arg->peer_he_cap_macinfo));
+ memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
+ sizeof(arg->peer_he_cap_phyinfo));
+ arg->peer_he_ops = vif->bss_conf.he_oper.params;
+
+ /* the topmost byte is used to indicate BSS color info */
+ arg->peer_he_ops &= 0xffffff;
+
+ /* As per section 26.6.1 IEEE Std 802.11ax-2022, if the Max AMPDU
+ * Exponent Extension in HE cap is zero, use the arg->peer_max_mpdu
+ * as calculated while parsing VHT caps(if VHT caps is present)
+ * or HT caps (if VHT caps is not present).
+ *
+ * For non-zero value of Max AMPDU Exponent Extension in HE MAC caps,
+ * if a HE STA sends VHT cap and HE cap IE in assoc request then, use
+ * MAX_AMPDU_LEN_FACTOR as 20 to calculate max_ampdu length.
+ * If a HE STA that does not send VHT cap, but HE and HT cap in assoc
+ * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
+ * length.
+ */
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ if (ampdu_factor) {
+ if (sta->deflink.vht_cap.vht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ else if (sta->deflink.ht_cap.ht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ }
+
+ if (he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ int bit = 7;
+ int nss, ru;
+
+ arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK;
+ arg->peer_ppet.ru_bit_mask =
+ (he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+
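+ /* Unpack the 6-bit PPET16/PPET8 pair for every (NSS, RU)
+ * combination, reading the PPE thresholds field LSB first.
+ */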
+ for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u32 val = 0;
+ int i;
+
+ if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ for (i = 0; i < 6; i++) {
+ val >>= 1;
+ val |= ((he_cap->ppe_thres[bit / 8] >>
+ (bit % 8)) & 0x1) << 5;
+ bit++;
+ }
+ arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
+ val << (ru * 6);
+ }
+ }
+ }
+
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
+ arg->twt_responder = true;
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
+ arg->twt_requester = true;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ arg->peer_he_mcs_count++;
+ }
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ arg->peer_he_mcs_count++;
+ fallthrough;
+
+ default:
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ arg->peer_he_mcs_count++;
+ break;
+ }
+}
+
+static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ switch (smps) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ arg->static_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ arg->dynamic_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ arg->spatial_mux_flag = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->apsd_flag = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme) {
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, arg->qos_flag);
+}
+
+static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_wmi_ap_ps_arg arg;
+ u32 max_sp;
+ u32 uapsd;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arg.vdev_id = arvif->vdev_id;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ uapsd = 0;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ max_sp = 0;
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
+ arg.value = uapsd;
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ if (ret)
+ goto err;
+
+ arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
+ arg.value = max_sp;
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ if (ret)
+ goto err;
+
+ /* TODO: revisit during testing */
+ arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
+ arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ if (ret)
+ goto err;
+
+ arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
+ arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath12k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
+ arg.param, arvif->vdev_id, ret);
+ return ret;
+}
+
+static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
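+ /* CCK rates occupy the low bits of the 2 GHz rate bitmap; if any
+ * bit at or above the first OFDM rate index survives the shift,
+ * the station supports at least one OFDM rate.
+ */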
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->deflink.vht_cap.cap &
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* not sure if this is a valid case? */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return MODE_11AX_HE160;
+ else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return MODE_11AX_HE80_80;
+ /* not sure if this is a valid case? */
+ return MODE_11AX_HE160;
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AX_HE80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AX_HE40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AX_HE20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->deflink.he_cap.has_he) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AX_HE80_2G;
+ else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AX_HE40_2G;
+ else
+ phymode = MODE_11AX_HE20_2G;
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath12k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
+ /* Check HE first */
+ if (sta->deflink.he_cap.has_he) {
+ phymode = ath12k_mac_get_phymode_he(ar, sta);
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath12k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath12k_mac_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static void ath12k_peer_assoc_prepare(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg,
+ bool reassoc)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+ reinit_completion(&ar->peer_assoc_done);
+
+ arg->peer_new_assoc = !reassoc;
+ ath12k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_smps(sta, arg);
+
+ /* TODO: amsdu_disable req? */
+}
+
+static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath12k_smps_map))
+ return -EINVAL;
+
+ return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE,
+ ath12k_smps_map[smps]);
+}
+
+static void ath12k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ieee80211_sta *ap_sta;
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ath12k_peer *peer;
+ bool is_auth = false;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+
+ /* ap_sta is only valid while the RCU read lock is held; snapshot
+ * the HT caps needed for SMPS setup before dropping it.
+ */
+ ht_cap = ap_sta->deflink.ht_cap;
+
+ rcu_read_unlock();
+
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ return;
+ }
+
+ ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = vif->cfg.aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
+ if (peer && peer->is_authorized)
+ is_auth = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ /* Authorize BSS Peer */
+ if (is_auth) {
+ ret = ath12k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+ }
+
+ ret = ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &bss_conf->he_obss_pd);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ /* TODO: cancel connection_loss_work */
+}
+
+static int ath12k_mac_get_rate_hw_value(int bitrate)
+{
+ u32 preamble;
+ u16 hw_value;
+ int rate;
+ size_t i;
+
+ if (ath12k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
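+ /* Look up the hardware rate value for this legacy bitrate and pack
+ * it together with NSS 0 and the preamble type into a WMI rate code.
+ */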
+ for (i = 0; i < ARRAY_SIZE(ath12k_legacy_rates); i++) {
+ if (ath12k_legacy_rates[i].bitrate != bitrate)
+ continue;
+
+ hw_value = ath12k_legacy_rates[i].hw_value;
+ rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ return rate;
+ }
+
+ return -EINVAL;
+}
+
+static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath12k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
+}
+
+static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath12k *ar = arvif->ar;
+ struct sk_buff *tmpl;
+ int ret;
+ u32 interval;
+ bool unsol_bcast_probe_resp_enabled = false;
+
+ if (info->fils_discovery.max_interval) {
+ interval = info->fils_discovery.max_interval;
+
+ tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ if (tmpl)
+ ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else if (info->unsol_bcast_probe_resp_interval) {
+ unsol_bcast_probe_resp_enabled = true;
+ interval = info->unsol_bcast_probe_resp_interval;
+
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ arvif->vif);
+ if (tmpl)
+ ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else { /* Disable */
+ return ath12k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
+ }
+
+ if (!tmpl) {
+ ath12k_warn(ar->ab,
+ "mac vdev %i failed to retrieve %s template\n",
+ arvif->vdev_id, (unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" :
+ "FILS discovery"));
+ return -EPERM;
+ }
+ kfree_skb(tmpl);
+
+ if (!ret)
+ ret = ath12k_wmi_fils_discovery(ar, arvif->vdev_id, interval,
+ unsol_bcast_probe_resp_enabled);
+
+ return ret;
+}
+
+static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ u32 param_id, param_value;
+ enum nl80211_band band;
+ u32 vdev_param;
+ int mcast_rate;
+ u32 preamble;
+ u16 hw_value;
+ u16 bitrate;
+ int ret;
+ u8 rateidx;
+ u32 rate;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+
+ param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->beacon_interval);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Beacon interval: %d set for VDEV: %d\n",
+ arvif->beacon_interval, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
+ param_value = WMI_BEACON_STAGGERED_MODE;
+ ret = ath12k_wmi_pdev_set_param(ar, param_id,
+ param_value, ar->pdev->pdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Set staggered beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+
+ ret = ath12k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to update bcn template: %d\n",
+ ret);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->dtim_period);
+
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
+ arvif->vdev_id, ret);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "DTIM period: %d set for VDEV: %d\n",
+ arvif->dtim_period, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ ath12k_control_beaconing(arvif, info);
+
+ if (arvif->is_up && vif->bss_conf.he_support &&
+ vif->bss_conf.he_oper.params) {
+ /* TODO: Extend to support 1024 BA Bitmap size */
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+
+ param_id = WMI_VDEV_PARAM_HEOPS_0_31;
+ param_value = vif->bss_conf.he_oper.params;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n",
+ param_value, arvif->vdev_id, ret);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "he oper param: %x set for VDEV: %d\n",
+ param_value, arvif->vdev_id);
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ u32 cts_prot;
+
+ cts_prot = !!(info->use_cts_prot);
+ param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
+
+ if (arvif->is_started) {
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, cts_prot);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
+ cts_prot, arvif->vdev_id);
+ } else {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime;
+
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ param_id = WMI_VDEV_PARAM_SLOT_TIME;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, slottime);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Set slottime: %d for VDEV: %d\n",
+ slottime, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u32 preamble;
+
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ param_id = WMI_VDEV_PARAM_PREAMBLE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, preamble);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Set preamble: %d for VDEV: %d\n",
+ preamble, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc)
+ ath12k_bss_assoc(hw, vif, info);
+ else
+ ath12k_bss_disassoc(hw, vif);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ath12k_mac_txpower_recalc(ar);
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath12k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath12k_legacy_rates[rateidx].bitrate;
+ hw_value = ath12k_legacy_rates[rateidx].hw_value;
+
+ if (ath12k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath12k_mac_vif_chan(arvif->vif, &def))
+ ath12k_recalculate_mgmt_rate(ar, vif, &def);
+
+ if (changed & BSS_CHANGED_TWT) {
+ if (info->twt_requester || info->twt_responder)
+ ath12k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
+ else
+ ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &info->he_obss_pd);
+
+ if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = ath12k_wmi_obss_color_cfg_cmd(ar,
+ arvif->vdev_id,
+ info->he_bss_color.color,
+ ATH12K_BSS_COLOR_AP_PERIODS,
+ info->he_bss_color.enabled);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar,
+ arvif->vdev_id,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ ret = ath12k_wmi_obss_color_cfg_cmd(ar,
+ arvif->vdev_id,
+ 0,
+ ATH12K_BSS_COLOR_STA_PERIODS,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY ||
+ changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
+ ath12k_mac_fils_discovery(arvif, info);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+void __ath12k_mac_scan_finish(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH12K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
+ ieee80211_remain_on_channel_expired(ar->hw);
+ }
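+ /* RUNNING and ABORTING share the reset back to IDLE below */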
+ fallthrough;
+ case ATH12K_SCAN_STARTING:
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ cancel_delayed_work(&ar->scan.timeout);
+ complete(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath12k_mac_scan_finish(struct ath12k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath12k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_scan_stop(struct ath12k *ar)
+{
+ struct ath12k_wmi_scan_cancel_arg arg = {
+ .req_type = WLAN_SCAN_CANCEL_SINGLE,
+ .scan_id = ATH12K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* TODO: Fill other STOP Params */
+ arg.pdev_id = ar->pdev->pdev_id;
+
+ ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ar->ab,
+ "failed to receive scan abort comple: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to the transport
+ * pipe overflowing with data and/or it can recover on its own
+ * before the next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH12K_SCAN_IDLE)
+ __ath12k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath12k_scan_abort(struct ath12k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ /* This can happen if the timeout worker kicked in and called
+ * abort while scan completion was being processed.
+ */
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_ABORTING:
+ ath12k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ ar->scan.state = ATH12K_SCAN_ABORTING;
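+ /* ath12k_scan_stop() waits for the scan completion event and
+ * may sleep, so data_lock must be dropped around the call.
+ */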
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath12k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_start_scan(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (ret == 0) {
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return error code at
+ * this point. This is probably due to some issue in the
+ * firmware, but no need to wedge the driver due to that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH12K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct ath12k_wmi_scan_req_arg arg = {};
+ int ret;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH12K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ ath12k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH12K_SCAN_ID;
+
+ if (req->ie_len) {
+ arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
+ if (arg.extraie.ptr) {
+ arg.extraie.len = req->ie_len;
+ memcpy(arg.extraie.ptr, req->ie, req->ie_len);
+ }
+ }
+
+ if (req->n_ssids) {
+ arg.num_ssids = req->n_ssids;
+ for (i = 0; i < arg.num_ssids; i++)
+ arg.ssid[i] = req->ssids[i];
+ } else {
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->n_channels) {
+ arg.num_chan = req->n_channels;
+ for (i = 0; i < arg.num_chan; i++)
+ arg.chan_list[i] = req->channels[i]->center_freq;
+ }
+
+ ret = ath12k_start_scan(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ /* Add a margin to account for event/command processing */
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(arg.max_scan_time +
+ ATH12K_MAC_SCAN_TIMEOUT_MSECS));
+
+exit:
+ if (req->ie_len)
+ kfree(arg.extraie.ptr);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath12k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static int ath12k_install_key(struct ath12k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ int ret;
+ struct ath12k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 0;
+
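+ /* For key deletion only the key index is relevant; a zero-length,
+ * NULL key in the install command asks firmware to remove it.
+ */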
+ if (cmd == DISABLE_KEY) {
+ /* TODO: Check if FW expects value other than NONE for del */
+ /* arg.key_cipher = WMI_CIPHER_NONE; */
+ arg.key_len = 0;
+ arg.key_data = NULL;
+ goto install;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ /* TODO: Re-check if flag is valid */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ break;
+ default:
+ ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+ if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+
+install:
+ ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg);
+
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
+ return -ETIMEDOUT;
+
+ if (ether_addr_equal(macaddr, arvif->vif->addr))
+ arvif->key_cipher = key->cipher;
+
+ return ar->install_key_status ? -EINVAL : 0;
+}
+
+static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (!peer->keys[i])
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath12k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath12k_warn(ab, "failed to remove peer key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ab->base_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ return first_errno;
+}
+
+static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_peer *peer;
+ struct ath12k_sta *arsta;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ /* The peer should not disappear midway (unless FW goes awry) since
+ * we already hold conf_mutex. Just make sure it is still there now.
+ */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath12k_warn(ab, "cannot install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore
+ */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+
+ ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key);
+ if (ret) {
+ ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret);
+ goto exit;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY) {
+ peer->keys[key->keyidx] = key;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ peer->ucast_keyidx = key->keyidx;
+ peer->sec_type = ath12k_dp_tx_get_encrypt_type(key->cipher);
+ } else {
+ peer->mcast_keyidx = key->keyidx;
+ peer->sec_type_grp = ath12k_dp_tx_get_encrypt_type(key->cipher);
+ }
+ } else if (peer && cmd == DISABLE_KEY) {
+ peer->keys[key->keyidx] = NULL;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ peer->ucast_keyidx = 0;
+ else
+ peer->mcast_keyidx = 0;
+ } else if (!peer) {
+ /* impossible unless FW goes crazy */
+ ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+ }
+
+ if (sta) {
+ arsta = (struct ath12k_sta *)sta->drv_priv;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (cmd == SET_KEY)
+ arsta->pn_type = HAL_PN_TYPE_WPA;
+ else
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ default:
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int
+ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath12k *ar = arvif->ar;
+ u8 vht_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ nss = i + 1;
+ vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_VHT);
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to update STA %pM Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int ath12k_station_assoc(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_wmi_peer_assoc_arg peer_arg;
+ int ret;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ struct cfg80211_bitrate_mask *mask;
+ u8 num_vht_rates;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return -EPERM;
+
+ band = def.chan->band;
+ mask = &arvif->bitrate_mask;
+
+ ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return -ETIMEDOUT;
+ }
+
+ num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+
+ /* If single VHT rate is configured (by set_bitrate_mask()),
+ * peer_assoc will disable VHT. This is now enabled by a peer specific
+ * fixed param.
+ * Note that all other rates and NSS will be disabled for this peer.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ }
+
+ /* Re-assoc is run only to update supported rates for given station. It
+ * doesn't make much sense to reconfigure the peer completely.
+ */
+ if (reassoc)
+ return 0;
+
+ ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->deflink.ht_cap);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath12k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_station_disassoc(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath12k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath12k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void ath12k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath12k *ar;
+ struct ath12k_vif *arvif;
+ struct ath12k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 changed, bw, nss, smps;
+ int err, num_vht_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct ath12k_wmi_peer_assoc_arg peer_arg;
+
+ arsta = container_of(wk, struct ath12k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
+ ath12k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE, smps);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ mask = &arvif->bitrate_mask;
+ num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ /* Peer_assoc_prepare will reject VHT rates in the
+ * bitrate_mask if they are not available in range format and
+ * sets the VHT tx_rateset as unsupported, so setting multiple
+ * VHT MCS values (e.g. MCS 4,5,6) per peer is not supported here.
+ * A single rate in the VHT mask, however, can be set as a per-peer
+ * fixed rate. Even if any HT rates are configured in the bitrate
+ * mask, the device will not switch to those rates when a per-peer
+ * fixed rate is set.
+ * TODO: Check RATEMASK_CMDID to support auto rate selection
+ * across HT/VHT and for multiple VHT MCS support.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ } else {
+ /* If the peer is non-VHT or no fixed VHT rate
+ * is provided in the new bitrate mask we set the
+ * other rates using peer_assoc command.
+ */
+ ath12k_peer_assoc_prepare(ar, arvif->vif, sta,
+ &peer_arg, true);
+
+ err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err)
+ ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
+ ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
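+ /* The peer created on a STA vdev (i.e. the AP we associate with)
+ * does not consume a station slot; only TDLS peers do.
+ */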
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath12k_mac_station_add(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k_wmi_peer_create_arg peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath12k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_peer;
+ }
+ }
+
+ ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_peer;
+ }
+
+ if (ab->hw_params->vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath12k_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_peer;
+ }
+ }
+
+ return 0;
+
+free_peer:
+ ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath12k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
+ ret = ath12k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath12k_mac_dec_num_stations(arvif, sta);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath12k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath12k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+ s16 txpwr;
+
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->deflink.txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_USE_FIXED_PWR, txpwr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct ath12k_peer *peer;
+ u32 bw, smps;
+
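+ /* This callback runs in atomic context while WMI commands may
+ * sleep, so only record the new parameters under data_lock here
+ * and let ath12k_sta_rc_update_wk() push them to firmware.
+ */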
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
+ sta->deflink.smps_mode);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
+ sta->deflink.bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->deflink.rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->deflink.smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ default:
+ ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
+ sta->deflink.smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ u32 value;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
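+ /* Translate the mac80211 AC into the firmware's per-AC uAPSD
+ * delivery and trigger enable bits.
+ */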
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath12k_warn(ar->ab, "could not set rx wake param %d\n", ret);
+
+exit:
+ return ret;
+}
+
+static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+ p->txop = params->txop;
+
+ ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct ieee80211_sta_ht_cap
+ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+ u32 ar_vht_cap = ar->pdev->cap.vht_cap;
+
+ if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
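+ /* Extract the RX STBC stream count from the WMI HT caps and
+ * place it in the corresponding IEEE HT capability field.
+ */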
+ stbc = ar_ht_cap;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rx_chains; i++) {
+ if (rate_cap_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath12k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+ u32 vht_cap = ar->pdev->cap.vht_cap;
+ u32 vdev_param = WMI_VDEV_PARAM_TXBF;
+
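+ /* Build the TXBF vdev parameter from the pdev VHT caps: pack the
+ * beamformee STS count and the beamformer sounding dimensions,
+ * the latter clamped to the number of TX chains.
+ */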
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
+ nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+ }
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ sound_dim = vht_cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+ }
+
+ if (!value)
+ return 0;
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ return ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, value);
+}
+
+static void ath12k_set_vht_txbf_cap(struct ath12k *ar, u32 *vht_cap)
+{
+ bool subfer, subfee;
+ int sound_dim = 0;
+
+ subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
+ subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+
+ if (ar->num_tx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ subfer = false;
+ }
+
+ /* If SU Beamformer is not set, then disable MU Beamformer Capability */
+ if (!subfer)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ /* If SU Beamformee is not set, then disable MU Beamformee Capability */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ sound_dim = u32_get_bits(*vht_cap,
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ *vht_cap = u32_replace_bits(*vht_cap, 0,
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+
+ /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+
+ /* Enable Sounding Dimension Field only if SU BF is enabled */
+ if (subfer) {
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+
+ *vht_cap = u32_replace_bits(*vht_cap, sound_dim,
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ }
+
+ /* Use the STS advertised by FW unless SU Beamformee is not supported */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+}
+
+static struct ieee80211_sta_vht_cap
+ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask,
+ u32 rate_cap_rx_chainmask)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ u16 txmcs_map, rxmcs_map;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->pdev->cap.vht_cap;
+
+ ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);
+
+ /* TODO: Enable back VHT160 mode once association issues are fixed */
+ /* Disabling VHT160 and VHT80+80 modes */
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
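+ /* Each spatial stream takes two bits in the VHT MCS map: advertise
+ * MCS 0-9 for chains present in the rate cap chainmask and mark the
+ * remaining streams as not supported.
+ */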
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
+ txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
+ rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ if (rate_cap_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
+
+ return vht_cap;
+}
+
+static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
+ struct ath12k_pdev_cap *cap,
+ u32 *ht_cap_info)
+{
+ struct ieee80211_supported_band *band;
+ u32 rate_cap_tx_chainmask;
+ u32 rate_cap_rx_chainmask;
+ u32 ht_cap;
+
+ rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
+ rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ (ar->ab->hw_params->single_pdev_only ||
+ !ar->supports_6ghz)) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ band->vht_cap = ath12k_create_vht_cap(ar, rate_cap_tx_chainmask,
+ rate_cap_rx_chainmask);
+ }
+}
+
+static int ath12k_check_chain_mask(struct ath12k *ar, u32 ant, bool is_tx_ant)
+{
+ /* TODO: Check the requested chainmask against the supported
+ * chainmask table which is advertised in the extended_service_ready event
+ */
+
+ return 0;
+}
+
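+/* Pack the firmware PPE threshold info into the HE PPE thresholds field:
+ * NSS count (minus one) in bits 0-2, the RU index bitmask in bits 3-6,
+ * then one 6-bit PPET16/PPET8 pair per (NSS, RU) combination starting at
+ * bit 7, each pair emitted most-significant bit first.
+ */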
+static void ath12k_gen_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet,
+ u8 *he_ppet)
+{
+ int nss, ru;
+ u8 bit = 7;
+
+ he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (fw_ppet->ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+ for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u8 val;
+ int i;
+
+ if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
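+ /* swap the two 3-bit halves so the PPET16/PPET8 pair lands in
+ * the order the HE PPE thresholds field expects
+ */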
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
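+/* Mask out HE capabilities that do not apply to a mesh STA, e.g. TWT,
+ * MU cascading, UL MU-MIMO and MU beamformer related bits.
+ */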
+static void
+ath12k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
+{
+ u8 m;
+
+ m = IEEE80211_HE_MAC_CAP0_TWT_RES |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[0] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP2_TRS |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
+ IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
+ IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ he_cap_elem->mac_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ he_cap_elem->phy_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
+ he_cap_elem->phy_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ he_cap_elem->phy_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ he_cap_elem->phy_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+ he_cap_elem->phy_cap_info[6] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ he_cap_elem->phy_cap_info[7] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[8] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ he_cap_elem->phy_cap_info[9] &= ~m;
+}
+
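+/* The 6 GHz band carries no HT/VHT IEs, so synthesize the HE 6 GHz band
+ * capability element from the HT SMPS mode and the VHT A-MPDU/MPDU
+ * length limits advertised by firmware.
+ */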
+static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap,
+ struct ath12k_band_cap *bcap)
+{
+ u8 val;
+
+ bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE;
+ if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ bcap->he_6ghz_capa |=
+ u32_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ else
+ bcap->he_6ghz_capa |=
+ u32_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ val = u32_get_bits(pcap->vht_cap,
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+ bcap->he_6ghz_capa |=
+ u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ val = u32_get_bits(pcap->vht_cap,
+ IEEE80211_VHT_CAP_MAX_MPDU_MASK);
+ bcap->he_6ghz_capa |=
+ u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
+ bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+ if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
+ bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
+
+ return cpu_to_le16(bcap->he_6ghz_capa);
+}
+
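+/* Populate one iftype_data HE capability entry per supported interface
+ * type (station, AP and mesh point) from the per-band firmware caps and
+ * return the number of entries filled in.
+ */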
+static int ath12k_mac_copy_he_cap(struct ath12k *ar,
+ struct ath12k_pdev_cap *cap,
+ struct ieee80211_sband_iftype_data *data,
+ int band)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ath12k_band_cap *band_cap = &cap->band[band];
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+ memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
+ sizeof(he_cap_elem->mac_cap_info));
+ memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
+ sizeof(he_cap_elem->phy_cap_info));
+
+ he_cap_elem->mac_cap_info[1] &=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
+
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->phy_cap_info[3] &=
+ ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] &=
+ ~IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ ath12k_mac_filter_he_cap_mesh(he_cap_elem);
+ break;
+ }
+
+ he_cap->he_mcs_nss_supp.rx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+ ath12k_gen_ppe_thresh(&band_cap->he_ppet,
+ he_cap->ppe_thres);
+
+ if (band == NL80211_BAND_6GHZ) {
+ data[idx].he_6ghz_capa.capa =
+ ath12k_mac_setup_he_6ghz_cap(cap, band_cap);
+ }
+ idx++;
+ }
+
+ return idx;
+}
+
+static void ath12k_mac_setup_he_cap(struct ath12k *ar,
+ struct ath12k_pdev_cap *cap)
+{
+ struct ieee80211_supported_band *band;
+ int count;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ count = ath12k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_2GHZ],
+ NL80211_BAND_2GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ count = ath12k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_5GHZ],
+ NL80211_BAND_5GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ ar->supports_6ghz) {
+ count = ath12k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_6GHZ],
+ NL80211_BAND_6GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ];
+ band->n_iftype_data = count;
+ }
+}
+
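+/* Store the requested chainmasks and, if the radio is already up, push
+ * them to firmware and rebuild the HT/VHT/HE capabilities to match the
+ * new chain counts.
+ */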
+static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath12k_check_chain_mask(ar, tx_ant, true))
+ return -EINVAL;
+
+ if (ath12k_check_chain_mask(ar, rx_ant, false))
+ return -EINVAL;
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if (ar->state != ATH12K_STATE_ON &&
+ ar->state != ATH12K_STATE_RESTARTED)
+ return 0;
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ tx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ar->num_tx_chains = hweight32(tx_ant);
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ rx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ ar->num_rx_chains = hweight32(rx_ant);
+
+ /* Reload HT/VHT/HE capability */
+ ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+ ath12k_mac_setup_he_cap(ar, &ar->pdev->cap);
+
+ return 0;
+}
+
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct sk_buff *msdu = skb;
+ struct ieee80211_tx_info *info;
+ struct ath12k *ar = ctx;
+ struct ath12k_base *ab = ar->ab;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
+static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = ctx;
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct sk_buff *msdu = skb;
+ struct ath12k *ar = skb_cb->ar;
+ struct ath12k_base *ab = ar->ab;
+
+ if (skb_cb->vif == vif) {
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
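+/* DMA-map the management frame, park it in the txmgmt idr (keyed by
+ * buf_id so the tx completion event can find it) and hand it to firmware
+ * over WMI. For protected robust mgmt frames, tailroom is reserved for
+ * the MIC, which firmware is expected to fill in.
+ */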
+static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info;
+ dma_addr_t paddr;
+ int buf_id;
+ int ret;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
+ ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ if (buf_id < 0)
+ return -ENOSPC;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ }
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr)) {
+ ath12k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
+ ret = -EIO;
+ goto err_free_idr;
+ }
+
+ ATH12K_SKB_CB(skb)->paddr = paddr;
+
+ ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
+ goto err_unmap_buf;
+ }
+
+ return 0;
+
+err_unmap_buf:
+ dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+err_free_idr:
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ return ret;
+}
+
+static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
+ ieee80211_free_txskb(ar->hw, skb);
+}
+
+static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
+ struct ath12k_skb_cb *skb_cb;
+ struct ath12k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
+ skb_cb = ATH12K_SKB_CB(skb);
+ if (!skb_cb->vif) {
+ ath12k_warn(ar->ab, "no vif found for mgmt frame\n");
+ ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+
+ arvif = ath12k_vif_to_arvif(skb_cb->vif);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
+ arvif->is_started) {
+ ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
+ arvif->vdev_id, ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ }
+ } else {
+ ath12k_warn(ar->ab,
+ "dropping mgmt frame for vdev %d, is_started %d\n",
+ arvif->vdev_id,
+ arvif->is_started);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ }
+}
+
+static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ /* Drop probe responses when the pending management tx count has
+ * reached a certain threshold, so that other mgmt frames such as
+ * auth and assoc are sent on time and connections can be
+ * established successfully.
+ */
+ if (is_prb_rsp &&
+ atomic_read(&ar->num_pending_mgmt_tx) > ATH12K_PRB_RSP_DROP_THRESHOLD) {
+ ath12k_warn(ar->ab,
+ "dropping probe response as pending queue is almost full\n");
+ return -ENOSPC;
+ }
+
+ if (skb_queue_len(q) == ATH12K_TX_MGMT_NUM_PENDING_MAX) {
+ ath12k_warn(ar->ab, "mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
+ skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct ath12k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ u32 info_flags = info->flags;
+ bool is_prb_rsp;
+ int ret;
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ skb_cb->vif = vif;
+
+ if (key) {
+ skb_cb->cipher = key->cipher;
+ skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+ }
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+ ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ return;
+ }
+
+ ret = ath12k_dp_tx(ar, arvif, skb);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath12k_mac_drain_tx(struct ath12k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ ath12k_mgmt_over_wmi_tx_purge(ar);
+}
+
+static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
+{
+ /* TODO: Need to support new monitor mode */
+ return -ENOTSUPP;
+}
+
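+/* During recovery each radio bumps recovery_start_count; once every
+ * radio has checked in, recovery_start is completed and each radio then
+ * waits (bounded by a timeout) for the mac80211 reconfigure to finish.
+ */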
+static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
+{
+ int recovery_start_count;
+
+ if (!ab->is_reset)
+ return;
+
+ recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
+
+ if (recovery_start_count == ab->num_radios) {
+ complete(&ab->recovery_start);
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
+
+ wait_for_completion_timeout(&ab->reconfigure_complete,
+ ATH12K_RECONFIGURE_TIMEOUT_HZ);
+}
+
+static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev = ar->pdev;
+ int ret;
+
+ ath12k_mac_drain_tx(ar);
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH12K_STATE_OFF:
+ ar->state = ATH12K_STATE_ON;
+ break;
+ case ATH12K_STATE_RESTARTING:
+ ar->state = ATH12K_STATE_RESTARTED;
+ ath12k_mac_wait_reconfigure(ab);
+ break;
+ case ATH12K_STATE_RESTARTED:
+ case ATH12K_STATE_WEDGED:
+ case ATH12K_STATE_ON:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to enable PMF QOS: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+ pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ 0, pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ab, "failed to set ac override for ARP: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ab, "failed to offload radar detection: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath12k_err(ab, "failed to req ppdu stats: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: %d\n", ret);
+ goto err;
+ }
+
+ __ath12k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /* TODO: Do we need to enable ANI? */
+
+ ath12k_reg_update_chan_list(ar);
+
+ ar->num_started_vdevs = 0;
+ ar->num_created_vdevs = 0;
+ ar->num_peers = 0;
+ ar->allocated_vdev_map = 0;
+
+ /* Configure monitor status ring with default rx_filter to get rx status
+ * such as rssi, rx_duration.
+ */
+ ret = ath12k_mac_config_mon_status_default(ar, true);
+ if (ret && (ret != -ENOTSUPP)) {
+ ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
+ ret);
+ goto err;
+ }
+
+ if (ret == -ENOTSUPP)
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "monitor status config is not yet supported");
+
+ /* Configure the hash seed for hash-based REO dest ring selection */
+ ath12k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+
+ /* allow device to enter IMPS */
+ if (ab->hw_params->idle_ps) {
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ab, "failed to enable idle ps: %d\n", ret);
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+ &ab->pdevs[ar->pdev_idx]);
+
+ return 0;
+
+err:
+ ar->state = ATH12K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath12k *ar = hw->priv;
+ struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ int ret;
+
+ ath12k_mac_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_config_mon_status_default(ar, false);
+ if (ret && (ret != -ENOTSUPP))
+ ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+ ret);
+
+ clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ar->state = ATH12K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ list_del(&ppdu_stats->list);
+ kfree(ppdu_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+
+ synchronize_rcu();
+
+ atomic_set(&ar->num_pending_mgmt_tx, 0);
+}
+
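+/* Scan free_vdev_stats_id_map for the first unused stats id and claim
+ * it, falling back to ATH12K_INVAL_VDEV_STATS_ID when no id is free.
+ */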
+static u8
+ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
+{
+ struct ath12k_base *ab = arvif->ar->ab;
+ u8 vdev_stats_id = 0;
+
+ do {
+ if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
+ vdev_stats_id++;
+ if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+ vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
+ break;
+ }
+ } else {
+ ab->free_vdev_stats_id_map |= (1LL << vdev_stats_id);
+ break;
+ }
+ } while (vdev_stats_id);
+
+ arvif->vdev_stats_id = vdev_stats_id;
+ return vdev_stats_id;
+}
+
+static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
+ struct ath12k_wmi_vdev_create_arg *arg)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_pdev *pdev = ar->pdev;
+
+ arg->if_id = arvif->vdev_id;
+ arg->type = arvif->vdev_type;
+ arg->subtype = arvif->vdev_subtype;
+ arg->pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ ar->supports_6ghz) {
+ arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
+ arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
+ }
+
+ arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+}
+
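+/* Compose the WMI_VDEV_PARAM_SET_HEMU_MODE bitmap: SU beamformee is
+ * always enabled, SU beamformer and UL MU-MIMO mirror the HE PHY caps,
+ * and AP vdevs additionally get MU beamformer plus DL/UL MU-OFDMA while
+ * other vdev types get MU beamformee.
+ */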
+static u32
+ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
+{
+ struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+ struct ath12k_band_cap *cap_band = NULL;
+ u32 *hecap_phy_ptr = NULL;
+ u32 hemode;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ else
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+
+ hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
+
+ hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) |
+ u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr),
+ HE_MODE_SU_TX_BFER) |
+ u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr),
+ HE_MODE_UL_MUMIMO);
+
+ /* TODO: WDS and other modes */
+ if (viftype == NL80211_IFTYPE_AP) {
+ hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr),
+ HE_MODE_MU_TX_BFER) |
+ u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
+ u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
+ } else {
+ hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
+ }
+
+ return hemode;
+}
+
+static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
+ struct ath12k_vif *arvif)
+{
+ u32 param_id, param_value;
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
+ arvif->vdev_id, ret, param_value);
+ return ret;
+ }
+ param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+ param_value =
+ u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
+ u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
+ HE_TRIG_NONTRIG_SOUNDING_MODE);
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ u32 param_id, param_value;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
+ IEEE80211_OFFLOAD_DECAP_ENABLED);
+
+ if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+ arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+ arvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
+ else
+ arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, arvif->tx_encap_type);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+
+ param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
+ if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
+ param_value = ATH12K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH12K_HW_TXRX_RAW;
+ else
+ param_value = ATH12K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
+ }
+}
+
+static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
+ struct ath12k_wmi_peer_create_arg peer_param;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret;
+ int bit;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ TARGET_NUM_VDEVS);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+
+ /* Should we initialize any worker to handle connection loss indication
+ * from firmware in sta mode?
+ */
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ bit = __ffs64(ab->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+ fallthrough;
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ ar->monitor_vdev_id = bit;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ ab->free_vdev_map);
+
+ vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1);
+
+ ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+
+ ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to create WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->num_created_vdevs++;
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+ ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
+ ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_mac_op_update_vif_offload(hw, vif);
+
+ nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = vif->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
+ if (ret) {
+ ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath12k_mac_set_kickout(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ default:
+ break;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath12k_mac_txpower_recalc(ar);
+ if (ret)
+ goto err_peer_del;
+
+ param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ param_value = ar->hw->wiphy->rts_threshold;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath12k_dp_vdev_tx_attach(ar, arvif);
+
+ if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
+ ath12k_mac_monitor_vdev_create(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+
+err_peer_del:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ reinit_completion(&ar->peer_delete_done);
+
+ ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr,
+ arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
+ arvif->vdev_id, vif->addr);
+ goto err;
+ }
+
+ ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ goto err;
+
+ ar->num_peers--;
+ }
+
+err_vdev_del:
+ ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->num_created_vdevs--;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
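+/* Clear dangling skb_cb->vif pointers in the in-flight tx descriptors so
+ * the tx completion path does not dereference the vif being removed.
+ */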
+static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
+{
+ struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
+ struct ath12k_skb_cb *skb_cb;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
+ spin_lock_bh(&dp->tx_desc_lock[i]);
+
+ list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
+ list) {
+ skb = tx_desc_info->skb;
+ if (!skb)
+ continue;
+
+ skb_cb = ATH12K_SKB_CB(skb);
+ if (skb_cb->vif == vif)
+ skb_cb->vif = NULL;
+ }
+
+ spin_unlock_bh(&dp->tx_desc_lock[i]);
+ }
+}
+
+static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to delete WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH12K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath12k_warn(ab, "Timeout in receiving vdev delete response\n");
+ goto err_vdev_del;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ } else if (ar->monitor_vdev_created && !ar->monitor_started) {
+ ret = ath12k_mac_monitor_vdev_delete(ar);
+ }
+
+ ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
+ ar->num_created_vdevs--;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+
+err_vdev_del:
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_peer_cleanup(ar, arvif->vdev_id);
+
+ idr_for_each(&ar->txmgmt_idr,
+ ath12k_mac_vif_txmgmt_idr_remove, vif);
+
+ ath12k_mac_vif_unref(&ab->dp, vif);
+ ath12k_dp_tx_put_bank_profile(&ab->dp, arvif->bank_id);
+
+ /* Recalc txpower for remaining vdev */
+ ath12k_mac_txpower_recalc(ar);
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+ /* TODO: recalc traffic pause state based on the available vdevs */
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* FIXME: Has to be verified. */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath12k *ar = hw->priv;
+ bool reset_flag;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ /* For monitor mode */
+ reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
+
+ ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
+ if (!ret) {
+ if (!reset_flag)
+ set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ else
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ } else {
+ ath12k_warn(ar->ab,
+ "fail to set monitor filter: %d\n", ret);
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
+ changed_flags, *total_flags, reset_flag);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath12k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath12k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k *ar = hw->priv;
+ int ret = -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = ath12k_dp_rx_ampdu_start(ar, params);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = ath12k_dp_rx_ampdu_stop(ar, params);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
+ * Tx aggregation requests.
+ */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx add freq %u width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case of multiple channel contexts, populate rx_channel from
+ * Rx PPDU desc information.
+ */
+ ar->rx_channel = ctx->def.chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx remove freq %u width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case there is one more channel context left, populate
+ * rx_channel with the channel of that remaining channel context.
+ */
+ ar->rx_channel = NULL;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
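+/* Build the WMI vdev start/restart request from the channel definition
+ * (the tx power limits appear to be passed in 0.5 dBm units, hence the
+ * doubling), mark radar channels (for AP) and NO-IR channels passive,
+ * and block until the firmware vdev start response arrives.
+ */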
+static int
+ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct wmi_vdev_start_req_arg arg = {};
+ int he_support = arvif->vif->bss_conf.he_support;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.freq = chandef->chan->center_freq;
+ arg.band_center_freq1 = chandef->center_freq1;
+ arg.band_center_freq2 = chandef->center_freq2;
+ arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
+
+ arg.min_power = 0;
+ arg.max_power = chandef->chan->max_power * 2;
+ arg.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+ arg.passive = arg.chan_radar;
+
+ spin_lock_bh(&ab->base_lock);
+ arg.regdomain = ar->ab->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ /* TODO: Notify if secondary 80 MHz also needs radar detection */
+ if (he_support) {
+ ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
+ arg.vdev_id);
+ return ret;
+ }
+ }
+ }
+
+ arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.freq,
+ ath12k_mac_phymode_str(arg.mode));
+
+ ret = ath12k_wmi_vdev_start(ar, &arg, restart);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to %s WMI vdev %i\n",
+ restart ? "restart" : "start", arg.vdev_id);
+ return ret;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
+ arg.vdev_id, restart ? "restart" : "start", ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ /* Enable the CAC flag in the driver by checking the channel DFS CAC time,
+ * i.e. the dfs_cac_ms value, which is valid only for radar channels whose
+ * state is NL80211_DFS_USABLE, indicating CAC must be done before the
+ * channel can be used. This flag is used to drop rx packets during CAC.
+ */
+ /* TODO: Set the flag for other interface types as required */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
+ chandef->chan->dfs_cac_ms &&
+ chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+ set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "CAC Started in chan_freq %d for vdev %d\n",
+ arg.freq, arg.vdev_id);
+ }
+
+ ret = ath12k_mac_set_txbf_conf(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return 0;
+}
+
+static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath12k_mac_vdev_start_restart(arvif, chandef, false);
+}
+
+static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath12k_mac_vdev_start_restart(arvif, chandef, true);
+}
+
+struct ath12k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
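+/* The chanctx update is done in two passes: the cnt iterator counts the
+ * vifs bound to the given context so the vifs array can be sized, and
+ * the fill iterator then records each matching vif for the switch.
+ */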
+static void
+ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
+static void
+ath12k_mac_update_vif_chan(struct ath12k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif;
+ int ret;
+ int i;
+ bool monitor_vif = false;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ monitor_vif = true;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width);
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* All relevant vdevs are downed and associated channel resources
+ * should be available for the channel switch now.
+ */
+
+ /* TODO: Update ar->rx_channel */
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath12k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath12k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath12k_warn(ab, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* Restart the internal monitor vdev on new channel */
+ if (!monitor_vif && ar->monitor_vdev_created) {
+ if (!ath12k_mac_monitor_stop(ar))
+ ath12k_mac_monitor_start(ar);
+ }
+}
+
+static void
+ath12k_mac_update_active_vif_chan(struct ath12k *ar,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ return;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
+ if (!arg.vifs)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_change_chanctx_fill_iter,
+ &arg);
+
+ ath12k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+
+ kfree(arg.vifs);
+}
+
+static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx change freq %u width %d ptr %pK changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+ ath12k_mac_update_active_vif_chan(ar, ctx);
+
+ /* TODO: Recalc radar detection */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ if (WARN_ON(arvif->is_started))
+ return -EBUSY;
+
+ ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def);
+ if (ret) {
+ ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arvif->chanctx.def.chan->center_freq, ret);
+ return ret;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed put monitor up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
+static int
+ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+ struct ath12k_wmi_peer_create_arg param;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx assign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ /* for some targets bss peer must be created before vdev_start */
+ if (ab->hw_params->vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
+ memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
+ ret = 0;
+ goto out;
+ }
+
+ if (WARN_ON(arvif->is_started)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (ab->hw_params->vdev_start_delay &&
+ (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
+ param.vdev_id = arvif->vdev_id;
+ param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ param.peer_addr = ar->mac_addr;
+
+ ret = ath12k_peer_create(ar, arvif, NULL, &param);
+ if (ret) {
+ ath12k_warn(ab, "failed to create peer after vdev start delay: %d",
+ ret);
+ goto out;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath12k_mac_monitor_start(ar);
+ if (ret)
+ goto out;
+ arvif->is_started = true;
+ goto out;
+ }
+
+ ret = ath12k_mac_vdev_start(arvif, &ctx->def);
+ if (ret) {
+ ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto out;
+ }
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
+ ath12k_mac_monitor_start(ar);
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ if (ab->hw_params->vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
+ ath12k_peer_find_by_addr(ab, ar->mac_addr))
+ ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath12k_mac_monitor_stop(ar);
+ if (ret) {
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ arvif->is_started = false;
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ if (ab->hw_params->vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
+ ath12k_mac_monitor_stop(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath12k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
+{
+ struct ath12k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
+ param, arvif->vdev_id, value);
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
+ param, arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* mac80211 stores the RTS/fragmentation threshold per device, but the
+ * ath12k driver applies it to firmware per interface (vdev).
+ */
+static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath12k *ar = hw->priv;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+
+ return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+}
+
+static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ /* Even though there's a WMI vdev param for the fragmentation threshold,
+ * no known firmware actually implements it. Moreover it is not possible
+ * to leave frame fragmentation to mac80211 because firmware clears the
+ * "more fragments" bit in the frame control field, making it impossible
+ * for remote devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath12k *ar = hw->priv;
+ long time_left;
+
+ if (drop)
+ return;
+
+ time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
+ (atomic_read(&ar->dp.num_tx_pending) == 0),
+ ATH12K_FLUSH_TIMEOUT);
+ if (time_left == 0)
+ ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+}
+
+static int
+ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight16(mask->control[band].ht_mcs[i]);
+
+ return num_rates;
+}
+
+static bool
+ath12k_mac_has_single_legacy_rate(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+
+ num_rates = hweight32(mask->control[band].legacy);
+
+ if (ath12k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
+ return false;
+
+ if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
+ return false;
+
+ return num_rates == 1;
+}
+
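+/* Return true only when the HT and VHT MCS masks both enable the full
+ * rate set for the same contiguous run of spatial streams starting at
+ * NSS 1; *nss is then set to that stream count.
+ */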
+static bool
+ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
+
+ /* No need to consider legacy here. Basic rates are always present
+ * in bitrate mask
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath12k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
+static int
+ath12k_mac_get_single_legacy_rate(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u32 *rate, u8 *nss)
+{
+ int rate_idx;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (hweight32(mask->control[band].legacy) != 1)
+ return -EINVAL;
+
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
+ if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ)
+ rate_idx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath12k_legacy_rates[rate_idx].hw_value;
+ bitrate = ath12k_legacy_rates[rate_idx].bitrate;
+
+ if (ath12k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = ATH12K_HW_RATE_CODE(hw_rate, 0, preamble);
+
+ return 0;
+}
+
+static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc)
+{
+ struct ath12k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
+ arvif->vdev_id, rate, nss, sgi);
+
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, nss);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_LDPC;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, ldpc);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+ ldpc, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+ath12k_mac_vht_mcs_range_present(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
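+ /* Firmware can only represent "full range" VHT MCS maps per
+ * stream: 0x0 (no MCS), BIT(8) - 1 (MCS 0-7), BIT(9) - 1
+ * (MCS 0-8) and BIT(10) - 1 (MCS 0-9); anything else is not a
+ * contiguous range.
+ */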
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath12k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_vif *arvif = data;
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static void ath12k_mac_disable_peer_fixed_rate(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_vif *arvif = data;
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to disable peer fixed rate for STA %pM ret %d\n",
+ sta->addr, ret);
+}
+
+static int
+ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ struct ath12k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+ int num_rates;
+
+ if (ath12k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
+ * requires passing at least one of the used basic rates along with them.
+ * Fixed rate setting across different preambles(legacy, HT, VHT) is
+ * not supported by the FW. Hence use of FIXED_RATE vdev param is not
+ * suitable for setting single HT/VHT rates.
+ * But, there could be a single basic rate passed from userspace which
+ * can be done through the FIXED_RATE param.
+ */
+ if (ath12k_mac_has_single_legacy_rate(ar, band, mask)) {
+ ret = ath12k_mac_get_single_legacy_rate(ar, band, mask, &rate,
+ &nss);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath12k_mac_disable_peer_fixed_rate,
+ arvif);
+ } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min_t(u32, ar->num_tx_chains,
+ max(ath12k_mac_max_ht_nss(ht_mcs_mask),
+ ath12k_mac_max_vht_nss(vht_mcs_mask)));
+
+ /* If multiple rates across different preambles are given
+ * we can reconfigure this info with all peers using PEER_ASSOC
+ * command with the below exception cases.
+ * - Single VHT Rate : peer_assoc command accommodates only MCS
+ * range values i.e 0-7, 0-8, 0-9 for VHT. Though mac80211
+ * mandates passing basic rates along with HT/VHT rates, FW
+ * doesn't allow switching from VHT to Legacy. Hence instead of
+ * setting legacy and VHT rates using RATEMASK_CMD vdev cmd,
+ * we could set this VHT rate as peer fixed rate param, which
+ * will override FIXED rate and FW rate control algorithm.
+ * If single VHT rate is passed along with HT rates, we select
+ * the VHT rate as fixed rate for vht peers.
+ * - Multiple VHT Rates : When multiple VHT rates are given, this
+ * can be set using RATEMASK CMD which uses FW rate-ctl alg.
+ * TODO: Setting multiple VHT MCS and replacing peer_assoc with
+ * RATEMASK_CMDID can cover all use cases of setting rates
+ * across multiple preambles and rates within same type.
+ * But requires more validation of the command at this point.
+ */
+
+ num_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ if (!ath12k_mac_vht_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ /* TODO: Handle multiple VHT MCS values setting using
+ * RATEMASK CMD
+ */
+ ath12k_warn(ar->ab,
+ "Setting more than one MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath12k_mac_disable_peer_fixed_rate,
+ arvif);
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath12k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ int recovery_count;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH12K_STATE_RESTARTED) {
+ ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
+ ar->pdev->pdev_id);
+ ar->state = ATH12K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+
+ if (ab->is_reset) {
+ recovery_count = atomic_inc_return(&ab->recovery_count);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n",
+ recovery_count);
+ /* When there are multiple radios in an SOC,
+ * the recovery has to be done for each radio
+ */
+ if (recovery_count == ab->num_radios) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+ }
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
+ ar->rx_channel != channel)
+ return;
+
+ if (ar->scan.state != ATH12K_SCAN_IDLE) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "ignoring bss chan info req while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath12k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (ret == 0)
+ ath12k_warn(ar->ab, "bss channel survey timed out\n");
+}
+
+static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath12k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey;
+ int ret = 0;
+
+ if (idx >= ATH12K_NUM_CHANS)
+ return -ENOENT;
+
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
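+ /* The survey index is global across bands: consume the 2 GHz channel
+ * slots first, then index into the 5 GHz band with the remainder.
+ */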
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ sinfo->tx_duration = arsta->tx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ /* TODO: Use real NF instead of default one. */
+ sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+}
+
+static const struct ieee80211_ops ath12k_ops = {
+ .tx = ath12k_mac_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
+ .start = ath12k_mac_op_start,
+ .stop = ath12k_mac_op_stop,
+ .reconfig_complete = ath12k_mac_op_reconfig_complete,
+ .add_interface = ath12k_mac_op_add_interface,
+ .remove_interface = ath12k_mac_op_remove_interface,
+ .update_vif_offload = ath12k_mac_op_update_vif_offload,
+ .config = ath12k_mac_op_config,
+ .bss_info_changed = ath12k_mac_op_bss_info_changed,
+ .configure_filter = ath12k_mac_op_configure_filter,
+ .hw_scan = ath12k_mac_op_hw_scan,
+ .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
+ .set_key = ath12k_mac_op_set_key,
+ .sta_state = ath12k_mac_op_sta_state,
+ .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
+ .sta_rc_update = ath12k_mac_op_sta_rc_update,
+ .conf_tx = ath12k_mac_op_conf_tx,
+ .set_antenna = ath12k_mac_op_set_antenna,
+ .get_antenna = ath12k_mac_op_get_antenna,
+ .ampdu_action = ath12k_mac_op_ampdu_action,
+ .add_chanctx = ath12k_mac_op_add_chanctx,
+ .remove_chanctx = ath12k_mac_op_remove_chanctx,
+ .change_chanctx = ath12k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx,
+ .set_rts_threshold = ath12k_mac_op_set_rts_threshold,
+ .set_frag_threshold = ath12k_mac_op_set_frag_threshold,
+ .set_bitrate_mask = ath12k_mac_op_set_bitrate_mask,
+ .get_survey = ath12k_mac_op_get_survey,
+ .flush = ath12k_mac_op_flush,
+ .sta_statistics = ath12k_mac_op_sta_statistics,
+};
+
+static void ath12k_mac_update_ch_list(struct ath12k *ar,
+ struct ieee80211_supported_band *band,
+ u32 freq_low, u32 freq_high)
+{
+ int i;
+
+ if (!(freq_low && freq_high))
+ return;
+
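+ /* Disable every channel falling outside the frequency window
+ * reported in the regulatory capabilities for this pdev.
+ */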
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < freq_low ||
+ band->channels[i].center_freq > freq_high)
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
+static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
+{
+ struct ath12k_pdev *pdev = ar->pdev;
+ struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+
+ if (band == WMI_HOST_WLAN_2G_CAP)
+ return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
+
+ if (band == WMI_HOST_WLAN_5G_CAP)
+ return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
+
+ ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band);
+
+ return 0;
+}
+
+static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
+ u32 supported_bands)
+{
+ struct ieee80211_supported_band *band;
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ void *channels;
+ u32 phy_id;
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) +
+ ARRAY_SIZE(ath12k_5ghz_channels) +
+ ARRAY_SIZE(ath12k_6ghz_channels)) !=
+ ATH12K_NUM_CHANS);
+
+ reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+
+ if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ channels = kmemdup(ath12k_2ghz_channels,
+ sizeof(ath12k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->band = NL80211_BAND_2GHZ;
+ band->n_channels = ARRAY_SIZE(ath12k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath12k_g_rates_size;
+ band->bitrates = ath12k_g_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+
+ if (ar->ab->hw_params->single_pdev_only) {
+ phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
+ reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+ ath12k_mac_update_ch_list(ar, band,
+ reg_cap->low_2ghz_chan,
+ reg_cap->high_2ghz_chan);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
+ channels = kmemdup(ath12k_6ghz_channels,
+ sizeof(ath12k_6ghz_channels), GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ return -ENOMEM;
+ }
+
+ ar->supports_6ghz = true;
+ band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+ band->band = NL80211_BAND_6GHZ;
+ band->n_channels = ARRAY_SIZE(ath12k_6ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath12k_a_rates_size;
+ band->bitrates = ath12k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+ ath12k_mac_update_ch_list(ar, band,
+ reg_cap->low_5ghz_chan,
+ reg_cap->high_5ghz_chan);
+ }
+
+ if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
+ channels = kmemdup(ath12k_5ghz_channels,
+ sizeof(ath12k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+ return -ENOMEM;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->band = NL80211_BAND_5GHZ;
+ band->n_channels = ARRAY_SIZE(ath12k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath12k_a_rates_size;
+ band->bitrates = ath12k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+
+ if (ar->ab->hw_params->single_pdev_only) {
+ phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
+ ath12k_mac_update_ch_list(ar, band,
+ reg_cap->low_5ghz_chan,
+ reg_cap->high_5ghz_chan);
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_iface_combination *combinations;
+ struct ieee80211_iface_limit *limits;
+ int n_limits, max_interfaces;
+ bool ap, mesh;
+
+ ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
+
+ mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ if (ap || mesh) {
+ n_limits = 2;
+ max_interfaces = 16;
+ } else {
+ n_limits = 1;
+ max_interfaces = 1;
+ }
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits) {
+ kfree(combinations);
+ return -ENOMEM;
+ }
+
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
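+ /* STA always uses the first limit entry; AP and mesh share the
+ * second one.
+ */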
+ if (ap) {
+ limits[1].max = max_interfaces;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ }
+
+ if (mesh)
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = n_limits;
+ combinations[0].max_interfaces = max_interfaces;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+
+ ar->hw->wiphy->iface_combinations = combinations;
+ ar->hw->wiphy->n_iface_combinations = 1;
+
+ return 0;
+}
+
+static const u8 ath12k_if_types_ext_capa[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const u8 ath12k_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const u8 ath12k_if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
+ {
+ .extended_capabilities = ath12k_if_types_ext_capa,
+ .extended_capabilities_mask = ath12k_if_types_ext_capa,
+ .extended_capabilities_len = sizeof(ath12k_if_types_ext_capa),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ath12k_if_types_ext_capa_sta,
+ .extended_capabilities_mask = ath12k_if_types_ext_capa_sta,
+ .extended_capabilities_len =
+ sizeof(ath12k_if_types_ext_capa_sta),
+ }, {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = ath12k_if_types_ext_capa_ap,
+ .extended_capabilities_mask = ath12k_if_types_ext_capa_ap,
+ .extended_capabilities_len =
+ sizeof(ath12k_if_types_ext_capa_ap),
+ },
+};
+
+static void __ath12k_mac_unregister(struct ath12k *ar)
+{
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(ar->hw);
+
+ idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
+
+void ath12k_mac_unregister(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ __ath12k_mac_unregister(ar);
+ }
+}
+
+static int __ath12k_mac_register(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ int ret;
+ u32 ht_cap = 0;
+
+ ath12k_pdev_caps_update(ar);
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ab->dev);
+
+ ret = ath12k_mac_setup_channels_rates(ar,
+ cap->supported_bands);
+ if (ret)
+ goto err;
+
+ ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ ath12k_mac_setup_he_cap(ar, cap);
+
+ ret = ath12k_mac_setup_iface_combinations(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_free_channels;
+ }
+
+ ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
+ ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
+
+ ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes;
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+
+ if (ht_cap & WMI_HT_CAP_ENABLED) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(ar->hw, USES_RSS);
+ }
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ /* TODO: Check if HT capability advertised from firmware is different
+ * for each band for a dual band capable radio. It will be tricky to
+ * handle it when the HT capability is different for each band.
+ */
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ar->hw->queues = ATH12K_HW_MAX_QUEUES;
+ ar->hw->wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
+ ar->hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+
+ ar->hw->vif_data_size = sizeof(struct ath12k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath12k_sta);
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ar->hw->wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
+ ar->hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(ath12k_iftypes_ext_capa);
+
+ if (ar->supports_6ghz) {
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY);
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+ }
+
+ ath12k_reg_init(ar);
+
+ if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ }
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
+ if (!ab->hw_params->supports_monitor)
+ /* There's a race between calling ieee80211_register_hw()
+ * and here where the monitor mode is enabled for a little
+ * while. But that time is so short and in practice it doesn't make
+ * a difference in real life.
+ */
+ ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+
+ /* Apply the regd received during initialization */
+ ret = ath12k_regd_update(ar, true);
+ if (ret) {
+ ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
+
+ return 0;
+
+err_unregister_hw:
+ ieee80211_unregister_hw(ar->hw);
+
+err_free_if_combs:
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+err_free_channels:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
+err:
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+int ath12k_mac_register(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+ int ret;
+
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
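+ /* Use the per-pdev MAC address when firmware provides valid ones,
+ * otherwise derive unique per-radio addresses from the SOC MAC
+ * address by offsetting byte 4 with the radio index.
+ */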
+ if (ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ ar->mac_addr[4] += i;
+ }
+
+ ret = __ath12k_mac_register(ar);
+ if (ret)
+ goto err_cleanup;
+
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+ }
+
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = 320000;
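+ /* One bit per vdev across all radios; a set bit marks the vdev id
+ * as free for allocation.
+ */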
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ return 0;
+
+err_cleanup:
+ for (i = i - 1; i >= 0; i--) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ __ath12k_mac_unregister(ar);
+ }
+
+ return ret;
+}
+
+int ath12k_mac_allocate(struct ath12k_base *ab)
+{
+ struct ieee80211_hw *hw;
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int ret;
+ int i;
+
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops);
+ if (!hw) {
+ ath12k_warn(ab, "failed to allocate mac80211 hw device\n");
+ ret = -ENOMEM;
+ goto err_free_mac;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ab = ab;
+ ar->pdev = pdev;
+ ar->pdev_idx = i;
+ ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i);
+
+ ar->wmi = &ab->wmi_ab.wmi[i];
+ /* FIXME: wmi[0] is already initialized during attach,
+ * should we do this again?
+ */
+ ath12k_wmi_pdev_attach(ab, i);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+
+ pdev->ar = ar;
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ }
+
+ return 0;
+
+err_free_mac:
+ ath12k_mac_destroy(ab);
+
+ return ret;
+}
+
+void ath12k_mac_destroy(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ ieee80211_free_hw(ar->hw);
+ pdev->ar = NULL;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
new file mode 100644
index 000000000000..57f4295420bb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_MAC_H
+#define ATH12K_MAC_H
+
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+
+struct ath12k;
+struct ath12k_base;
+
+struct ath12k_generic_iter {
+ struct ath12k *ar;
+ int ret;
+};
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH12K_KICKOUT_THRESHOLD (20 * 16)
+
+/* Use insanely high numbers to make sure that the firmware keepalive
+ * implementation never kicks in; the same functionality is already handled
+ * in hostapd. Unit is seconds.
+ */
+#define ATH12K_KEEPALIVE_MIN_IDLE 3747
+#define ATH12K_KEEPALIVE_MAX_IDLE 3895
+#define ATH12K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+/* FIXME: should these be in ieee80211.h? */
+#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
+#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
+
+#define ATH12K_CHAN_WIDTH_NUM 8
+
+#define ATH12K_TX_POWER_MAX_VAL 70
+#define ATH12K_TX_POWER_MIN_VAL 0
+
+enum ath12k_supported_bw {
+ ATH12K_BW_20 = 0,
+ ATH12K_BW_40 = 1,
+ ATH12K_BW_80 = 2,
+ ATH12K_BW_160 = 3,
+};
+
+extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
+
+void ath12k_mac_destroy(struct ath12k_base *ab);
+void ath12k_mac_unregister(struct ath12k_base *ab);
+int ath12k_mac_register(struct ath12k_base *ab);
+int ath12k_mac_allocate(struct ath12k_base *ab);
+int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate);
+u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+
+void __ath12k_mac_scan_finish(struct ath12k *ar);
+void ath12k_mac_scan_finish(struct ath12k *ar);
+
+struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id);
+struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
+ u32 vdev_id);
+struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id);
+struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id);
+
+void ath12k_mac_drain_tx(struct ath12k *ar);
+void ath12k_mac_peer_cleanup_all(struct ath12k *ar);
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
+enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw);
+enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw);
+enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
new file mode 100644
index 000000000000..42f1140baa4f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "debug.h"
+#include "mhi.h"
+#include "pci.h"
+
+#define MHI_TIMEOUT_DEFAULT_MS 90000
+
+static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath12k_mhi_events_qcn9274[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .data_type = MHI_ER_CTRL,
+ .mode = MHI_DB_BRST_DISABLE,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+const struct mhi_controller_config ath12k_mhi_config_qcn9274 = {
+ .max_channels = 30,
+ .timeout_ms = 10000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath12k_mhi_channels_qcn9274),
+ .ch_cfg = ath12k_mhi_channels_qcn9274,
+ .num_events = ARRAY_SIZE(ath12k_mhi_events_qcn9274),
+ .event_cfg = ath12k_mhi_events_qcn9274,
+};
+
+static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath12k_mhi_events_wcn7850[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+const struct mhi_controller_config ath12k_mhi_config_wcn7850 = {
+ .max_channels = 128,
+ .timeout_ms = 2000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850),
+ .ch_cfg = ath12k_mhi_channels_wcn7850,
+ .num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850),
+ .event_cfg = ath12k_mhi_events_wcn7850,
+};
+
+void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab)
+{
+ u32 val;
+
+ val = ath12k_pci_read32(ab, MHISTATUS);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "MHISTATUS 0x%x\n", val);
+
+ /* On some targets MHISTATUS has been observed to have the SYSERR bit
+ * set after SOC_GLOBAL_RESET, so MHICTRL_RESET must be set to clear
+ * SYSERR.
+ */
+ ath12k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
+
+ mdelay(10);
+}
+
+static void ath12k_mhi_reset_txvecdb(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_TXVECDB, 0);
+}
+
+static void ath12k_mhi_reset_txvecstatus(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_TXVECSTATUS, 0);
+}
+
+static void ath12k_mhi_reset_rxvecdb(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_RXVECDB, 0);
+}
+
+static void ath12k_mhi_reset_rxvecstatus(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_RXVECSTATUS, 0);
+}
+
+void ath12k_mhi_clear_vector(struct ath12k_base *ab)
+{
+ ath12k_mhi_reset_txvecdb(ab);
+ ath12k_mhi_reset_txvecstatus(ab);
+ ath12k_mhi_reset_rxvecdb(ab);
+ ath12k_mhi_reset_rxvecstatus(ab);
+}
+
+static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ u32 user_base_data, base_vector;
+ int ret, num_vectors, i;
+ int *irq;
+
+ ret = ath12k_pci_get_user_msi_assignment(ab,
+ "MHI", &num_vectors,
+ &user_base_data, &base_vector);
+ if (ret)
+ return ret;
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "Number of assigned MSI for MHI is %d, base vector is %d\n",
+ num_vectors, base_vector);
+
+ irq = kcalloc(num_vectors, sizeof(*irq), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vectors; i++)
+ irq[i] = ath12k_pci_get_msi_irq(ab->dev,
+ base_vector + i);
+
+ ab_pci->mhi_ctrl->irq = irq;
+ ab_pci->mhi_ctrl->nr_irqs = num_vectors;
+
+ return 0;
+}
+
+static int ath12k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static void ath12k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static char *ath12k_mhi_op_callback_to_str(enum mhi_callback reason)
+{
+ switch (reason) {
+ case MHI_CB_IDLE:
+ return "MHI_CB_IDLE";
+ case MHI_CB_PENDING_DATA:
+ return "MHI_CB_PENDING_DATA";
+ case MHI_CB_LPM_ENTER:
+ return "MHI_CB_LPM_ENTER";
+ case MHI_CB_LPM_EXIT:
+ return "MHI_CB_LPM_EXIT";
+ case MHI_CB_EE_RDDM:
+ return "MHI_CB_EE_RDDM";
+ case MHI_CB_EE_MISSION_MODE:
+ return "MHI_CB_EE_MISSION_MODE";
+ case MHI_CB_SYS_ERROR:
+ return "MHI_CB_SYS_ERROR";
+ case MHI_CB_FATAL_ERROR:
+ return "MHI_CB_FATAL_ERROR";
+ case MHI_CB_BW_REQ:
+ return "MHI_CB_BW_REQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+ struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n",
+ ath12k_mhi_op_callback_to_str(cb));
+
+ switch (cb) {
+ case MHI_CB_SYS_ERROR:
+ ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
+ break;
+ case MHI_CB_EE_RDDM:
+ if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 *out)
+{
+ *out = readl(addr);
+
+ return 0;
+}
+
+static void ath12k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 val)
+{
+ writel(val, addr);
+}
+
+int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ struct mhi_controller *mhi_ctrl;
+ int ret;
+
+ mhi_ctrl = mhi_alloc_controller();
+ if (!mhi_ctrl)
+ return -ENOMEM;
+
+ ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+
+ ab_pci->mhi_ctrl = mhi_ctrl;
+ mhi_ctrl->cntrl_dev = ab->dev;
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ mhi_ctrl->regs = ab->mem;
+ mhi_ctrl->reg_len = ab->mem_len;
+
+ ret = ath12k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to get msi for mhi\n");
+ mhi_free_controller(mhi_ctrl);
+ return ret;
+ }
+
+ mhi_ctrl->iova_start = 0;
+ mhi_ctrl->iova_stop = 0xffffffff;
+ mhi_ctrl->sbl_size = SZ_512K;
+ mhi_ctrl->seg_len = SZ_512K;
+ mhi_ctrl->fbc_download = true;
+ mhi_ctrl->runtime_get = ath12k_mhi_op_runtime_get;
+ mhi_ctrl->runtime_put = ath12k_mhi_op_runtime_put;
+ mhi_ctrl->status_cb = ath12k_mhi_op_status_cb;
+ mhi_ctrl->read_reg = ath12k_mhi_op_read_reg;
+ mhi_ctrl->write_reg = ath12k_mhi_op_write_reg;
+
+ ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config);
+ if (ret) {
+ ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+ mhi_free_controller(mhi_ctrl);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath12k_mhi_unregister(struct ath12k_pci *ab_pci)
+{
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+
+ mhi_unregister_controller(mhi_ctrl);
+ kfree(mhi_ctrl->irq);
+ mhi_free_controller(mhi_ctrl);
+ ab_pci->mhi_ctrl = NULL;
+}
+
+static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case ATH12K_MHI_INIT:
+ return "INIT";
+ case ATH12K_MHI_DEINIT:
+ return "DEINIT";
+ case ATH12K_MHI_POWER_ON:
+ return "POWER_ON";
+ case ATH12K_MHI_POWER_OFF:
+ return "POWER_OFF";
+ case ATH12K_MHI_FORCE_POWER_OFF:
+ return "FORCE_POWER_OFF";
+ case ATH12K_MHI_SUSPEND:
+ return "SUSPEND";
+ case ATH12K_MHI_RESUME:
+ return "RESUME";
+ case ATH12K_MHI_TRIGGER_RDDM:
+ return "TRIGGER_RDDM";
+ case ATH12K_MHI_RDDM_DONE:
+ return "RDDM_DONE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci,
+ enum ath12k_mhi_state mhi_state)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+
+ switch (mhi_state) {
+ case ATH12K_MHI_INIT:
+ set_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_DEINIT:
+ clear_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_POWER_ON:
+ set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_FORCE_POWER_OFF:
+ clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
+ clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
+ clear_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_SUSPEND:
+ set_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_RESUME:
+ clear_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_TRIGGER_RDDM:
+ set_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_RDDM_DONE:
+ set_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state);
+ break;
+ default:
+ ath12k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
+ }
+}
+
+static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci,
+ enum ath12k_mhi_state mhi_state)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+
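+ /* Validate the requested transition against the current state bits,
+ * e.g. POWER_ON is only allowed after INIT and while not yet powered
+ * on; returning 0 means the transition is legal.
+ */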
+ switch (mhi_state) {
+ case ATH12K_MHI_INIT:
+ if (!test_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_DEINIT:
+ case ATH12K_MHI_POWER_ON:
+ if (test_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state) &&
+ !test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_FORCE_POWER_OFF:
+ if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_SUSPEND:
+ if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
+ !test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_RESUME:
+ if (test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_TRIGGER_RDDM:
+ if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
+ !test_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_RDDM_DONE:
+ return 0;
+ default:
+ ath12k_err(ab, "unhandled mhi state: %s(%d)\n",
+ ath12k_mhi_state_to_str(mhi_state), mhi_state);
+ }
+
+ ath12k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n",
+ ath12k_mhi_state_to_str(mhi_state), mhi_state,
+ ab_pci->mhi_state);
+
+ return -EINVAL;
+}
+
+static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
+ enum ath12k_mhi_state mhi_state)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ int ret;
+
+ ret = ath12k_mhi_check_state_bit(ab_pci, mhi_state);
+ if (ret)
+ goto out;
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "setting mhi state: %s(%d)\n",
+ ath12k_mhi_state_to_str(mhi_state), mhi_state);
+
+ switch (mhi_state) {
+ case ATH12K_MHI_INIT:
+ ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_DEINIT:
+ mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
+ ret = 0;
+ break;
+ case ATH12K_MHI_POWER_ON:
+ ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_POWER_OFF:
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+ ret = 0;
+ break;
+ case ATH12K_MHI_FORCE_POWER_OFF:
+ mhi_power_down(ab_pci->mhi_ctrl, false);
+ ret = 0;
+ break;
+ case ATH12K_MHI_SUSPEND:
+ ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_RESUME:
+ ret = mhi_pm_resume(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_TRIGGER_RDDM:
+ ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_RDDM_DONE:
+ break;
+ default:
+ ath12k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto out;
+
+ ath12k_mhi_set_state_bit(ab_pci, mhi_state);
+
+ return 0;
+
+out:
+ ath12k_err(ab, "failed to set mhi state: %s(%d)\n",
+ ath12k_mhi_state_to_str(mhi_state), mhi_state);
+ return ret;
+}
+
+int ath12k_mhi_start(struct ath12k_pci *ab_pci)
+{
+ int ret;
+
+ ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
+
+ ret = ath12k_mhi_set_state(ab_pci, ATH12K_MHI_INIT);
+ if (ret)
+ goto out;
+
+ ret = ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_ON);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+void ath12k_mhi_stop(struct ath12k_pci *ab_pci)
+{
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT);
+}
+
+void ath12k_mhi_suspend(struct ath12k_pci *ab_pci)
+{
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_SUSPEND);
+}
+
+void ath12k_mhi_resume(struct ath12k_pci *ab_pci)
+{
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME);
+}
diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h
new file mode 100644
index 000000000000..ebc23640ce7a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/mhi.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH12K_MHI_H
+#define _ATH12K_MHI_H
+
+#include "pci.h"
+
+#define PCIE_TXVECDB 0x360
+#define PCIE_TXVECSTATUS 0x368
+#define PCIE_RXVECDB 0x394
+#define PCIE_RXVECSTATUS 0x39C
+
+#define MHISTATUS 0x48
+#define MHICTRL 0x38
+#define MHICTRL_RESET_MASK 0x2
+
+enum ath12k_mhi_state {
+ ATH12K_MHI_INIT,
+ ATH12K_MHI_DEINIT,
+ ATH12K_MHI_POWER_ON,
+ ATH12K_MHI_POWER_OFF,
+ ATH12K_MHI_FORCE_POWER_OFF,
+ ATH12K_MHI_SUSPEND,
+ ATH12K_MHI_RESUME,
+ ATH12K_MHI_TRIGGER_RDDM,
+ ATH12K_MHI_RDDM,
+ ATH12K_MHI_RDDM_DONE,
+};
+
+extern const struct mhi_controller_config ath12k_mhi_config_qcn9274;
+extern const struct mhi_controller_config ath12k_mhi_config_wcn7850;
+
+int ath12k_mhi_start(struct ath12k_pci *ar_pci);
+void ath12k_mhi_stop(struct ath12k_pci *ar_pci);
+int ath12k_mhi_register(struct ath12k_pci *ar_pci);
+void ath12k_mhi_unregister(struct ath12k_pci *ar_pci);
+void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab);
+void ath12k_mhi_clear_vector(struct ath12k_base *ab);
+
+void ath12k_mhi_suspend(struct ath12k_pci *ar_pci);
+void ath12k_mhi_resume(struct ath12k_pci *ar_pci);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
new file mode 100644
index 000000000000..ae7f6083c9fc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -0,0 +1,1374 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "core.h"
+#include "hif.h"
+#include "mhi.h"
+#include "debug.h"
+
+#define ATH12K_PCI_BAR_NUM 0
+#define ATH12K_PCI_DMA_MASK 32
+
+#define ATH12K_PCI_IRQ_CE0_OFFSET 3
+
+#define WINDOW_ENABLE_BIT 0x40000000
+#define WINDOW_REG_ADDRESS 0x310c
+#define WINDOW_VALUE_MASK GENMASK(24, 19)
+#define WINDOW_START 0x80000
+#define WINDOW_RANGE_MASK GENMASK(18, 0)
+#define WINDOW_STATIC_MASK GENMASK(31, 6)
+
+#define TCSR_SOC_HW_VERSION 0x1B00000
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
+#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 4)
+
+/* BAR0 + 4k is always accessible, and no
+ * need to force wakeup.
+ * 4K - 32 = 0xFE0
+ */
+#define ACCESS_ALWAYS_OFF 0xFE0
+
+#define QCN9274_DEVICE_ID 0x1109
+#define WCN7850_DEVICE_ID 0x1107
+
+static const struct pci_device_id ath12k_pci_id_table[] = {
+ { PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);
+
+/* TODO: revisit IRQ mapping for new SRNG's */
+static const struct ath12k_msi_config ath12k_msi_config[] = {
+ {
+ .total_vectors = 16,
+ .total_users = 3,
+ .users = (struct ath12k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "DP", .num_vectors = 8, .base_vector = 8 },
+ },
+ },
+};
+
+static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
+ "bhi",
+ "mhi-er0",
+ "mhi-er1",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "ce12",
+ "ce13",
+ "ce14",
+ "ce15",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring4",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+
+ u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
+ u32 static_window;
+
+ lockdep_assert_held(&ab_pci->window_lock);
+
+ /* Preserve the static window configuration and reset only dynamic window */
+ static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
+ window |= static_window;
+
+ if (window != ab_pci->register_window) {
+ iowrite32(WINDOW_ENABLE_BIT | window,
+ ab->mem + WINDOW_REG_ADDRESS);
+ ioread32(ab->mem + WINDOW_REG_ADDRESS);
+ ab_pci->register_window = window;
+ }
+}
+
+static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
+{
+ u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
+ u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
+ u32 window;
+
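+ /* Pack the static windows into their 6-bit slots (CE at bits 6-11,
+ * UMAC at bits 12-17); bits 0-5 stay reserved for the dynamic window
+ * programmed by ath12k_pci_select_window().
+ */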
+ window = (umac_window << 12) | (ce_window << 6);
+
+ spin_lock_bh(&ab_pci->window_lock);
+ ab_pci->register_window = window;
+ spin_unlock_bh(&ab_pci->window_lock);
+
+ iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
+}
+
+static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
+ u32 offset)
+{
+ u32 window_start;
+
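+ /* (offset ^ base) < WINDOW_RANGE_MASK is a branch-free range check:
+ * it holds iff offset and base agree on all bits above bit 18,
+ * i.e. offset lies inside the window starting at base.
+ */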
+ /* If offset lies within DP register range, use 3rd window */
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
+ window_start = 3 * WINDOW_START;
+ /* If offset lies within CE register range, use 2nd window */
+ else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
+ window_start = 2 * WINDOW_START;
+ /* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
+ * use 0th window
+ */
+ else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
+ !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
+ window_start = 0;
+ else
+ window_start = WINDOW_START;
+
+ return window_start;
+}
+
+static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
+{
+ u32 val, delay;
+
+ val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+
+ val |= PCIE_SOC_GLOBAL_RESET_V;
+
+ ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ /* TODO: exact time to sleep is uncertain */
+ delay = 10;
+ mdelay(delay);
+
+ /* Toggle the V bit back, otherwise the SOC stays stuck in reset */
+ val &= ~PCIE_SOC_GLOBAL_RESET_V;
+
+ ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ mdelay(delay);
+
+ val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ if (val == 0xffffffff)
+ ath12k_warn(ab, "link down error during global reset\n");
+}
+
+static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
+{
+ u32 val;
+
+ /* read cookie */
+ val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);
+
+ val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
+
+ /* TODO: exact time to sleep is uncertain */
+ mdelay(10);
+
+ /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
+ * continuing warm path and entering dead loop.
+ */
+ ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
+ mdelay(10);
+
+ val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
+
+ /* WLAON_SOC_RESET_CAUSE_REG is a read-to-clear register; reading it
+ * clears it, preventing Q6 from entering a wrong code path.
+ */
+ val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
+}
+
+static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
+{
+ u32 val;
+ int i;
+
+ val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
+
+ /* PCIe link seems very unstable after the hot reset */
+ for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
+ if (val == 0xffffffff)
+ mdelay(5);
+
+ ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
+ val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);
+
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val |= GCC_GCC_PCIE_HOT_RST_VAL;
+ ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
+
+ mdelay(5);
+}
+
+static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
+{
+ /* This is a workaround (WAR) for a PCIe hot reset problem: the target
+ * keeps its interrupt asserted across the hot reset, so when SBL is
+ * downloaded again it unmasks interrupts, receives the stale one and
+ * crashes immediately. Clear all pending interrupts to avoid this.
+ */
+ ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
+}
+
+static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
+{
+ u32 val;
+
+ val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
+ val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
+ ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
+}
+
+static void ath12k_pci_force_wake(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
+ mdelay(5);
+}
+
+static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
+{
+ if (power_on) {
+ ath12k_pci_enable_ltssm(ab);
+ ath12k_pci_clear_all_intrs(ab);
+ ath12k_pci_set_wlaon_pwr_ctrl(ab);
+ }
+
+ ath12k_mhi_clear_vector(ab);
+ ath12k_pci_clear_dbg_registers(ab);
+ ath12k_pci_soc_global_reset(ab);
+ ath12k_mhi_set_mhictrl_reset(ab);
+}
+
+static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ }
+}
+
+static void ath12k_pci_free_irq(struct ath12k_base *ab)
+{
+ int i, irq_idx;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath12k_pci_free_ext_irq(ab);
+}
+
+static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
+ enable_irq(ab->irq_num[irq_idx]);
+}
+
+static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_pci_ce_irq_disable(ab, i);
+ }
+}
+
+static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
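+/* Bottom half for CE interrupts: service the copy engine and then
+ * re-enable the IRQ that was masked in the hard interrupt handler.
+ */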
+static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
+{
+ struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+
+ ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ce_pipe *ce_pipe = arg;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath12k_pci_ext_grp_disable(irq_grp);
+
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ }
+}
+
+static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
+{
+ int i, j, irq_idx;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
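+/* NAPI poll for a DP interrupt group: process SRNG work and re-enable the
+ * group's IRQs once the budget is no longer exhausted.
+ */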
+static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath12k_ext_irq_grp,
+ napi);
+ struct ath12k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath12k_pci_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ext_irq_grp *irq_grp = arg;
+
+ ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ ath12k_pci_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
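+/* Map the "DP" MSI user range onto the external interrupt groups, attach a
+ * NAPI instance per group and request the group IRQs in a disabled state.
+ */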
+static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
+{
+ int i, j, ret, num_vectors = 0;
+ u32 user_base_data = 0, base_vector = 0, base_idx;
+
+ base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
+ ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
+ &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath12k_pci_ext_grp_napi_poll);
+
+ if (ab->hw_params->ring_mask->tx[i] ||
+ ab->hw_params->ring_mask->rx[i] ||
+ ab->hw_params->ring_mask->rx_err[i] ||
+ ab->hw_params->ring_mask->rx_wbm_rel[i] ||
+ ab->hw_params->ring_mask->reo_status[i] ||
+ ab->hw_params->ring_mask->host2rxdma[i] ||
+ ab->hw_params->ring_mask->rx_mon_dest[i]) {
+ num_irq = 1;
+ }
+
+ irq_grp->num_irq = num_irq;
+ irq_grp->irqs[0] = base_idx + i;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+ int vector = (i % num_vectors) + base_vector;
+ int irq = ath12k_pci_get_msi_irq(ab->dev, vector);
+
+ ab->irq_num[irq_idx] = irq;
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "irq:%d group:%d\n", irq, i);
+
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+ ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
+ IRQF_SHARED,
+ "DP_EXT_IRQ", irq_grp);
+ if (ret) {
+ ath12k_err(ab, "failed request irq %d: %d\n",
+ vector, ret);
+ return ret;
+ }
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ }
+ }
+
+ return 0;
+}
+
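+/* Request one MSI per interrupt-enabled copy engine (pipes flagged with
+ * CE_ATTR_DIS_INTR are skipped) and then configure the external DP
+ * interrupt groups.
+ */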
+static int ath12k_pci_config_irq(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *ce_pipe;
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ unsigned int msi_data;
+ int irq, i, ret, irq_idx;
+
+ ret = ath12k_pci_get_user_msi_assignment(ab,
+ "CE", &msi_data_count,
+ &msi_data_start, &msi_irq_start);
+ if (ret)
+ return ret;
+
+ /* Configure CE irqs */
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
+ irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+
+ tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);
+
+ ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
+ IRQF_SHARED, irq_name[irq_idx],
+ ce_pipe);
+ if (ret) {
+ ath12k_err(ab, "failed to request irq %d: %d\n",
+ irq_idx, ret);
+ return ret;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+ msi_data_idx++;
+
+ ath12k_pci_ce_irq_disable(ab, i);
+ }
+
+ ret = ath12k_pci_ext_irq_config(ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
+{
+ struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce = ab->hw_params->target_ce_config;
+ cfg->tgt_ce_len = ab->hw_params->target_ce_count;
+
+ cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
+ cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
+ ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+}
+
+static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_pci_ce_irq_enable(ab, i);
+ }
+}
+
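+/* Toggle the MSI enable flag in the device's MSI capability directly,
+ * without freeing the vectors allocated for the device.
+ */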
+static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
+{
+ struct pci_dev *dev = ab_pci->pdev;
+ u16 control;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ else
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
+{
+ ath12k_pci_msi_config(ab_pci, true);
+}
+
+static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
+{
+ ath12k_pci_msi_config(ab_pci, false);
+}
+
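+/* Allocate exactly the number of MSI vectors the platform configuration
+ * requires and cache the endpoint base data used later to derive per-user
+ * vector assignments.
+ */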
+static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
+ struct msi_desc *msi_desc;
+ int num_vectors;
+ int ret;
+
+ num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ msi_config->total_vectors,
+ msi_config->total_vectors,
+ PCI_IRQ_MSI);
+ if (num_vectors != msi_config->total_vectors) {
+ ath12k_err(ab, "failed to get %d MSI vectors, only %d available",
+ msi_config->total_vectors, num_vectors);
+
+ if (num_vectors >= 0)
+ return -EINVAL;
+ else
+ return num_vectors;
+ }
+
+ ath12k_pci_msi_disable(ab_pci);
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath12k_err(ab, "msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto free_msi_vector;
+ }
+
+ ab_pci->msi_ep_base_data = msi_desc->msg.data;
+ if (msi_desc->pci.msi_attrib.is_64)
+ set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
+
+ return 0;
+
+free_msi_vector:
+ pci_free_irq_vectors(ab_pci->pdev);
+
+ return ret;
+}
+
+static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
+{
+ pci_free_irq_vectors(ab_pci->pdev);
+}
+
+static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ u16 device_id;
+ int ret = 0;
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ if (device_id != ab_pci->dev_id) {
+ ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
+ device_id, ab_pci->dev_id);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
+ if (ret) {
+ ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath12k_err(ab, "failed to enable pci device: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
+ if (ret) {
+ ath12k_err(ab, "failed to request pci region: %d\n", ret);
+ goto disable_device;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
+ if (ret) {
+ ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
+ ATH12K_PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ pci_set_master(pdev);
+
+ ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
+ ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
+ if (!ab->mem) {
+ ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
+ ret = -EIO;
+ goto clear_master;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
+ return 0;
+
+clear_master:
+ pci_clear_master(pdev);
+release_region:
+ pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ struct pci_dev *pci_dev = ab_pci->pdev;
+
+ pci_iounmap(pci_dev, ab->mem);
+ ab->mem = NULL;
+ pci_clear_master(pci_dev);
+ pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
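+/* Save the current link control word and turn off ASPM L0s/L1 so the link
+ * stays out of low-power states while firmware loads;
+ * ath12k_pci_aspm_restore() writes the saved value back later.
+ */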
+static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl);
+}
+
+static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
+{
+ ath12k_pci_ce_irqs_disable(ab);
+ ath12k_pci_sync_ce_irqs(ab);
+ ath12k_pci_kill_tasklets(ab);
+}
+
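+/* Translate an HTC service ID into its upload and download CE pipe numbers
+ * using the service-to-CE map from hw_params.
+ */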
+int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params->svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ return pci_irq_vector(pci_dev, vector);
+}
+
+int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
+ int idx;
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *user_base_data = msi_config->users[idx].base_vector
+ + ab_pci->msi_ep_base_data;
+ *base_vector = msi_config->users[idx].base_vector;
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+
+void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ struct pci_dev *pci_dev = to_pci_dev(ab->dev);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ msi_addr_lo);
+
+ if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ msi_addr_hi);
+ } else {
+ *msi_addr_hi = 0;
+ }
+}
+
+void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
+ u32 *msi_idx)
+{
+ u32 i, msi_data_idx;
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ if (ce_id == i)
+ break;
+
+ msi_data_idx++;
+ }
+ *msi_idx = msi_data_idx;
+}
+
+void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
+{
+ ath12k_pci_ce_irqs_enable(ab);
+}
+
+void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
+{
+ ath12k_pci_ce_irq_disable_sync(ab);
+}
+
+void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ napi_enable(&irq_grp->napi);
+ ath12k_pci_ext_grp_enable(irq_grp);
+ }
+}
+
+void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
+{
+ __ath12k_pci_ext_irq_disable(ab);
+ ath12k_pci_sync_ext_irqs(ab);
+}
+
+int ath12k_pci_hif_suspend(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ ath12k_mhi_suspend(ab_pci);
+
+ return 0;
+}
+
+int ath12k_pci_hif_resume(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ ath12k_mhi_resume(ab_pci);
+
+ return 0;
+}
+
+void ath12k_pci_stop(struct ath12k_base *ab)
+{
+ ath12k_pci_ce_irq_disable_sync(ab);
+ ath12k_ce_cleanup_pipes(ab);
+}
+
+int ath12k_pci_start(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+
+ ath12k_pci_aspm_restore(ab_pci);
+
+ ath12k_pci_ce_irqs_enable(ab);
+ ath12k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
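+/* Registers at or above WINDOW_START are reached through a sliding window
+ * in the BAR: pick the window base (dynamic windows are selected under
+ * window_lock) and then access the offset within it.
+ */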
+u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ u32 val, window_start;
+
+ /* Offsets beyond BAR + 4K - 32 may require MHI to be woken up
+ * before they can be accessed.
+ */
+ if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+
+ if (offset < WINDOW_START) {
+ val = ioread32(ab->mem + offset);
+ } else {
+ if (ab->static_window_map)
+ window_start = ath12k_pci_get_window_start(ab, offset);
+ else
+ window_start = WINDOW_START;
+
+ if (window_start == WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath12k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ if ((!window_start) &&
+ (offset >= PCI_MHIREGLEN_REG &&
+ offset <= PCI_MHI_REGION_END))
+ offset = offset - PCI_MHIREGLEN_REG;
+
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
+ }
+
+ if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+
+ return val;
+}
+
+void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ u32 window_start;
+
+ /* Offsets beyond BAR + 4K - 32 may require MHI to be woken up
+ * before they can be accessed.
+ */
+ if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+
+ if (offset < WINDOW_START) {
+ iowrite32(value, ab->mem + offset);
+ } else {
+ if (ab->static_window_map)
+ window_start = ath12k_pci_get_window_start(ab, offset);
+ else
+ window_start = WINDOW_START;
+
+ if (window_start == WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath12k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ if ((!window_start) &&
+ (offset >= PCI_MHIREGLEN_REG &&
+ offset <= PCI_MHI_REGION_END))
+ offset = offset - PCI_MHIREGLEN_REG;
+
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
+ }
+
+ if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+int ath12k_pci_power_up(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ int ret;
+
+ ab_pci->register_window = 0;
+ clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath12k_pci_sw_reset(ab_pci->ab, true);
+
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath12k_pci_aspm_disable(ab_pci);
+
+ ath12k_pci_msi_enable(ab_pci);
+
+ ret = ath12k_mhi_start(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to start mhi: %d\n", ret);
+ return ret;
+ }
+
+ if (ab->static_window_map)
+ ath12k_pci_select_static_window(ab_pci);
+
+ return 0;
+}
+
+void ath12k_pci_power_down(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ /* restore aspm in case firmware bootup fails */
+ ath12k_pci_aspm_restore(ab_pci);
+
+ ath12k_pci_force_wake(ab_pci->ab);
+ ath12k_pci_msi_disable(ab_pci);
+ ath12k_mhi_stop(ab_pci);
+ clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath12k_pci_sw_reset(ab_pci->ab, false);
+}
+
+static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
+ .start = ath12k_pci_start,
+ .stop = ath12k_pci_stop,
+ .read32 = ath12k_pci_read32,
+ .write32 = ath12k_pci_write32,
+ .power_down = ath12k_pci_power_down,
+ .power_up = ath12k_pci_power_up,
+ .suspend = ath12k_pci_hif_suspend,
+ .resume = ath12k_pci_hif_resume,
+ .irq_enable = ath12k_pci_ext_irq_enable,
+ .irq_disable = ath12k_pci_ext_irq_disable,
+ .get_msi_address = ath12k_pci_get_msi_address,
+ .get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
+ .map_service_to_pipe = ath12k_pci_map_service_to_pipe,
+ .ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
+ .ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
+ .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
+};
+
+static
+void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
+{
+ u32 soc_hw_version;
+
+ soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
+ soc_hw_version);
+ *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
+ soc_hw_version);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "pci tcsr_soc_hw_version major %d minor %d\n",
+ *major, *minor);
+}
+
+static int ath12k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ struct ath12k_base *ab;
+ struct ath12k_pci *ab_pci;
+ u32 soc_hw_version_major, soc_hw_version_minor;
+ int ret;
+
+ ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath12k base\n");
+ return -ENOMEM;
+ }
+
+ ab->dev = &pdev->dev;
+ pci_set_drvdata(pdev, ab);
+ ab_pci = ath12k_pci_priv(ab);
+ ab_pci->dev_id = pci_dev->device;
+ ab_pci->ab = ab;
+ ab_pci->pdev = pdev;
+ ab->hif.ops = &ath12k_pci_hif_ops;
+ spin_lock_init(&ab_pci->window_lock);
+
+ ret = ath12k_pci_claim(ab_pci, pdev);
+ if (ret) {
+ ath12k_err(ab, "failed to claim device: %d\n", ret);
+ goto err_free_core;
+ }
+
+ switch (pci_dev->device) {
+ case QCN9274_DEVICE_ID:
+ ab_pci->msi_config = &ath12k_msi_config[0];
+ ab->static_window_map = true;
+ ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ switch (soc_hw_version_major) {
+ case ATH12K_PCI_SOC_HW_VERSION_2:
+ ab->hw_rev = ATH12K_HW_QCN9274_HW20;
+ break;
+ case ATH12K_PCI_SOC_HW_VERSION_1:
+ ab->hw_rev = ATH12K_HW_QCN9274_HW10;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "Unknown hardware version found for QCN9274: 0x%x\n",
+ soc_hw_version_major);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+ break;
+ case WCN7850_DEVICE_ID:
+ ab_pci->msi_config = &ath12k_msi_config[0];
+ ab->static_window_map = false;
+ ab->hw_rev = ATH12K_HW_WCN7850_HW20;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ ret = ath12k_pci_msi_alloc(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to alloc msi: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath12k_core_pre_init(ab);
+ if (ret)
+ goto err_pci_msi_free;
+
+ ret = ath12k_mhi_register(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to register mhi: %d\n", ret);
+ goto err_pci_msi_free;
+ }
+
+ ret = ath12k_hal_srng_init(ab);
+ if (ret)
+ goto err_mhi_unregister;
+
+ ret = ath12k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath12k_pci_init_qmi_ce_config(ab);
+
+ ret = ath12k_pci_config_irq(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to config irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath12k_core_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init core: %d\n", ret);
+ goto err_free_irq;
+ }
+ return 0;
+
+err_free_irq:
+ ath12k_pci_free_irq(ab);
+
+err_ce_free:
+ ath12k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath12k_hal_srng_deinit(ab);
+
+err_mhi_unregister:
+ ath12k_mhi_unregister(ab_pci);
+
+err_pci_msi_free:
+ ath12k_pci_msi_free(ab_pci);
+
+err_pci_free_region:
+ ath12k_pci_free_region(ab_pci);
+
+err_free_core:
+ ath12k_core_free(ab);
+
+ return ret;
+}
+
+static void ath12k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath12k_base *ab = pci_get_drvdata(pdev);
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath12k_pci_power_down(ab);
+ ath12k_qmi_deinit_service(ab);
+ goto qmi_fail;
+ }
+
+ set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
+
+ cancel_work_sync(&ab->reset_work);
+ ath12k_core_deinit(ab);
+
+qmi_fail:
+ ath12k_mhi_unregister(ab_pci);
+
+ ath12k_pci_free_irq(ab);
+ ath12k_pci_msi_free(ab_pci);
+ ath12k_pci_free_region(ab_pci);
+
+ ath12k_hal_srng_deinit(ab);
+ ath12k_ce_free_pipes(ab);
+ ath12k_core_free(ab);
+}
+
+static void ath12k_pci_shutdown(struct pci_dev *pdev)
+{
+ struct ath12k_base *ab = pci_get_drvdata(pdev);
+
+ ath12k_pci_power_down(ab);
+}
+
+static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_suspend(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to suspend core: %d\n", ret);
+
+ return ret;
+}
+
+static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_resume(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to resume core: %d\n", ret);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
+ ath12k_pci_pm_suspend,
+ ath12k_pci_pm_resume);
+
+static struct pci_driver ath12k_pci_driver = {
+ .name = "ath12k_pci",
+ .id_table = ath12k_pci_id_table,
+ .probe = ath12k_pci_probe,
+ .remove = ath12k_pci_remove,
+ .shutdown = ath12k_pci_shutdown,
+ .driver.pm = &ath12k_pci_pm_ops,
+};
+
+static int ath12k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath12k_pci_driver);
+ if (ret) {
+ pr_err("failed to register ath12k pci driver: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(ath12k_pci_init);
+
+static void ath12k_pci_exit(void)
+{
+ pci_unregister_driver(&ath12k_pci_driver);
+}
+
+module_exit(ath12k_pci_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
new file mode 100644
index 000000000000..0d9e40ab31f2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_PCI_H
+#define ATH12K_PCI_H
+
+#include <linux/mhi.h>
+
+#include "core.h"
+
+#define PCIE_SOC_GLOBAL_RESET 0x3008
+#define PCIE_SOC_GLOBAL_RESET_V 1
+
+#define WLAON_WARM_SW_ENTRY 0x1f80504
+#define WLAON_SOC_RESET_CAUSE_REG 0x01f8060c
+
+#define PCIE_Q6_COOKIE_ADDR 0x01f80500
+#define PCIE_Q6_COOKIE_DATA 0xc0000000
+
+/* register to wake the UMAC from power collapse */
+#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x4040
+
+/* register used for handshake mechanism to validate UMAC is awake */
+#define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004
+
+#define PCIE_PCIE_PARF_LTSSM 0x1e081b0
+#define PARM_LTSSM_VALUE 0x111
+
+#define GCC_GCC_PCIE_HOT_RST 0x1e38338
+#define GCC_GCC_PCIE_HOT_RST_VAL 0x10
+
+#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228
+#define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2
+#define PCIE_INT_CLEAR_ALL 0xffffffff
+
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab) \
+ ((ab)->hw_params->regs->pcie_qserdes_sysclk_en_sel)
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff
+#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab) \
+ ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base)
+#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02
+#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab) \
+ ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base + 0x4)
+#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52
+#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab) \
+ ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base + 0xc)
+#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff
+#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff
+
+#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
+#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
+
+#define PCI_BAR_WINDOW0_BASE 0x1E00000
+#define PCI_BAR_WINDOW0_END 0x1E7FFFC
+#define PCI_SOC_RANGE_MASK 0x3FFF
+#define PCI_SOC_PCI_REG_BASE 0x1E04000
+#define PCI_SOC_PCI_REG_END 0x1E07FFC
+#define PCI_PARF_BASE 0x1E08000
+#define PCI_PARF_END 0x1E0BFFC
+#define PCI_MHIREGLEN_REG 0x1E0E100
+#define PCI_MHI_REGION_END 0x1E0EFFC
+#define QRTR_PCI_DOMAIN_NR_MASK GENMASK(7, 4)
+#define QRTR_PCI_BUS_NUMBER_MASK GENMASK(3, 0)
+
+#define ATH12K_PCI_SOC_HW_VERSION_1 1
+#define ATH12K_PCI_SOC_HW_VERSION_2 2
+
+struct ath12k_msi_user {
+ const char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct ath12k_msi_config {
+ int total_vectors;
+ int total_users;
+ const struct ath12k_msi_user *users;
+};
+
+enum ath12k_pci_flags {
+ ATH12K_PCI_FLAG_INIT_DONE,
+ ATH12K_PCI_FLAG_IS_MSI_64,
+ ATH12K_PCI_ASPM_RESTORE,
+};
+
+struct ath12k_pci {
+ struct pci_dev *pdev;
+ struct ath12k_base *ab;
+ u16 dev_id;
+ char amss_path[100];
+ u32 msi_ep_base_data;
+ struct mhi_controller *mhi_ctrl;
+ const struct ath12k_msi_config *msi_config;
+ unsigned long mhi_state;
+ u32 register_window;
+
+ /* protects register_window above */
+ spinlock_t window_lock;
+
+ /* enum ath12k_pci_flags */
+ unsigned long flags;
+ u16 link_ctl;
+};
+
+static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
+{
+ return (struct ath12k_pci *)ab->drv_priv;
+}
+
+int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector);
+void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value);
+u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset);
+int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
+ u32 *msi_idx);
+void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab);
+void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab);
+void ath12k_pci_ext_irq_enable(struct ath12k_base *ab);
+void ath12k_pci_ext_irq_disable(struct ath12k_base *ab);
+int ath12k_pci_hif_suspend(struct ath12k_base *ab);
+int ath12k_pci_hif_resume(struct ath12k_base *ab);
+void ath12k_pci_stop(struct ath12k_base *ab);
+int ath12k_pci_start(struct ath12k_base *ab);
+int ath12k_pci_power_up(struct ath12k_base *ab);
+void ath12k_pci_power_down(struct ath12k_base *ab);
+#endif /* ATH12K_PCI_H */
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
new file mode 100644
index 000000000000..19c0626fbff1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
+ const u8 *addr)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
+ u8 pdev_idx, const u8 *addr)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->pdev_idx != pdev_idx)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
+ const u8 *addr)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
+ int peer_id)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (peer_id == peer->peer_id)
+ return peer;
+
+ return NULL;
+}
+
+bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
+{
+ struct ath12k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return false;
+}
+
+struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
+ int ast_hash)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (ast_hash == peer->ast_hash)
+ return peer;
+
+ return NULL;
+}
+
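+/* Handle an HTT peer unmap event: drop the peer from the list and wake any
+ * waiters in ath12k_wait_for_peer_common().
+ */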
+void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
+{
+ struct ath12k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
+ peer_id);
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, peer_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ab->peer_mapping_wq);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
+{
+ struct ath12k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, vdev_id, mac_addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = vdev_id;
+ peer->peer_id = peer_id;
+ peer->ast_hash = ast_hash;
+ peer->hw_peer_id = hw_peer_id;
+ ether_addr_copy(peer->addr, mac_addr);
+ list_add(&peer->list, &ab->peers);
+ wake_up(&ab->peer_mapping_wq);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ vdev_id, mac_addr, peer_id);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
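+/* Wait up to three seconds for the firmware peer map/unmap event that
+ * brings the peer into the expected state; a crash flush also ends the
+ * wait.
+ */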
+static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
+ ret = wait_event_timeout(ab->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ab->base_lock);
+ mapped = !!ath12k_peer_find(ab, vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
+ }), 3 * HZ);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
+{
+ struct ath12k_peer *peer, *tmp;
+ struct ath12k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
+{
+ return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
+}
+
+int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
+ const u8 *addr)
+{
+ int ret;
+ unsigned long time_left;
+
+ ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed wait for peer deleted");
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_delete_done,
+ 3 * HZ);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->peer_delete_done);
+
+ ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ vdev_id, addr, ret);
+ return ret;
+ }
+
+ ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
+{
+ return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
+}
+
+int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_create_arg *arg)
+{
+ struct ath12k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->num_peers > (ar->max_num_peers - 1)) {
+ ath12k_warn(ar->ab,
+ "failed to create peer due to insufficient peer entry resource in firmware\n");
+ return -ENOBUFS;
+ }
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
+ if (peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send peer create vdev_id %d ret %d\n",
+ arg->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
+ arg->peer_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+ arg->peer_addr, arg->vdev_id);
+
+ reinit_completion(&ar->peer_delete_done);
+
+ ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
+ arg->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
+ arg->vdev_id, arg->peer_addr);
+ return ret;
+ }
+
+ ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
+ arg->peer_addr);
+ if (ret)
+ return ret;
+
+ return -ENOENT;
+ }
+
+ peer->pdev_idx = ar->pdev_idx;
+ peer->sta = sta;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ arvif->ast_hash = peer->ast_hash;
+ arvif->ast_idx = peer->hw_peer_id;
+ }
+
+ peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
+ ar->num_peers++;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
new file mode 100644
index 000000000000..b296dc0e2f67
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_PEER_H
+#define ATH12K_PEER_H
+
+#include "dp_rx.h"
+
+struct ppdu_user_delayba {
+ u16 sw_peer_id;
+ u32 info0;
+ u16 ru_end;
+ u16 ru_start;
+ u32 info1;
+ u32 rate_flags;
+ u32 resp_rate_flags;
+};
+
+struct ath12k_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ int peer_id;
+ u16 ast_hash;
+ u8 pdev_idx;
+ u16 hw_peer_id;
+
+ /* protected by ab->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ struct ath12k_dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+
+ /* Info used in MMIC verification of
+ * RX fragments
+ */
+ struct crypto_shash *tfm_mmic;
+ u8 mcast_keyidx;
+ u8 ucast_keyidx;
+ u16 sec_type;
+ u16 sec_type_grp;
+ struct ppdu_user_delayba ppdu_stats_delayba;
+ bool delayba_flag;
+ bool is_authorized;
+};
+
+void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
+void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
+struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
+ const u8 *addr);
+struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id);
+void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id);
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr);
+int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_create_arg *arg);
+int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
+ const u8 *addr);
+bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
+struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
+
+#endif /* ATH12K_PEER_H */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
new file mode 100644
index 000000000000..979a63f2e2ab
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -0,0 +1,3087 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/elf.h>
+
+#include "qmi.h"
+#include "core.h"
+#include "debug.h"
+#include <linux/of.h>
+#include <linux/firmware.h>
+
+#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
+#define HOST_CSTATE_BIT 0x04
+#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
+#define ATH12K_QMI_MAX_CHUNK_SIZE 2097152
+
+static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
+ num_local_links),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
+ hw_link_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
+ valid_mlo_link_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_duration_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_duraiton),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ platform_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ platform_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ ddr_range_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_host_ddr_range),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ ddr_range),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x20,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ host_build_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_host_build_type),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x20,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ host_build_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x21,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_capable_valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x21,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_capable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_chip_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_chip_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x23,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_group_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x23,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_group_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x24,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ max_mlo_peer_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x24,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ max_mlo_peer),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x25,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_num_chips_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x25,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_num_chips),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x26,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_CHIPS_V01,
+ .elem_size = sizeof(struct wlfw_host_mlo_chip_info_s_v01),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x26,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_chip_info),
+ .ei_array = wlfw_host_mlo_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x27,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ feature_list_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x27,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ feature_list),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
+ .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_dev_mem_info_s_v01,
+ start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_dev_mem_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = qmi_wlanfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_caldata_read_timeout_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_caldata_read_timeout),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_caps_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_caps),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ rd_card_chain_cap_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ rd_card_chain_cap),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ dev_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_dev_mem_info_s_v01),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, dev_mem),
+ .ei_array = qmi_wlanfw_dev_mem_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3),
+ .ei_array = qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
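+/* Fill in the MLO (multi-link operation) capabilities advertised to
+ * firmware: a single chip (chip_id 0, group_id 0) with two local links.
+ * These values describe the only topology supported here.
+ */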
+static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+{
+ req->mlo_capable_valid = 1;
+ req->mlo_capable = 1;
+ req->mlo_chip_id_valid = 1;
+ req->mlo_chip_id = 0;
+ req->mlo_group_id_valid = 1;
+ req->mlo_group_id = 0;
+ req->max_mlo_peer_valid = 1;
+ /* The maximum peer number generally won't change for a given
+ * device, but it needs to be kept in sync with the host driver.
+ */
+ req->max_mlo_peer = 32;
+ req->mlo_num_chips_valid = 1;
+ req->mlo_num_chips = 1;
+ req->mlo_chip_info_valid = 1;
+ req->mlo_chip_info[0].chip_id = 0;
+ req->mlo_chip_info[0].num_local_links = 2;
+ req->mlo_chip_info[0].hw_link_id[0] = 0;
+ req->mlo_chip_info[0].hw_link_id[1] = 1;
+ req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
+ req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
+}
+
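+/* Send the host capability request: advertise which features the host
+ * supports (BDF download, M3 firmware, calibration state, memory mode)
+ * and, on platforms using the internal sleep clock, the sleep clock
+ * selection and the MLO parameters filled in above.
+ */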
+static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_host_cap_req_msg_v01 req;
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.num_clients_valid = 1;
+ req.num_clients = 1;
+ req.mem_cfg_mode = ab->qmi.target_mem_mode;
+ req.mem_cfg_mode_valid = 1;
+ req.bdf_support_valid = 1;
+ req.bdf_support = 1;
+
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+
+ req.cal_done_valid = 1;
+ req.cal_done = ab->qmi.cal_done;
+
+ req.feature_list_valid = 1;
+ req.feature_list = BIT(CNSS_QDSS_CFG_MISS_V01);
+
+ /* BRINGUP: here we are piggybacking a lot of stuff using
+ * internal_sleep_clock, should it be split?
+ */
+ if (ab->hw_params->internal_sleep_clock) {
+ req.nm_modem_valid = 1;
+
+ /* Notify firmware that this is a non-Qualcomm platform. */
+ req.nm_modem |= HOST_CSTATE_BIT;
+
+ /* Notify firmware about the sleep clock selection;
+ * nm_modem bit[1] is used for this purpose. The host driver
+ * on non-Qualcomm platforms should select the internal sleep
+ * clock.
+ */
+ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
+ req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+
+ ath12k_host_cap_parse_mlo(&req);
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_HOST_CAP_REQ_V01,
+ QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "Host capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
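+/* Register for the firmware indications the host wants to receive:
+ * fw ready, request mem, fw mem ready, cal done and fw init done.
+ */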
+static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_ind_register_req_msg_v01 *req;
+ struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
+ struct qmi_handle *handle = &ab->qmi.handle;
+ struct qmi_txn txn;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto resp_out;
+ }
+
+ req->client_id_valid = 1;
+ req->client_id = QMI_WLANFW_CLIENT_ID;
+ req->fw_ready_enable_valid = 1;
+ req->fw_ready_enable = 1;
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ req->cal_done_enable_valid = 1;
+ req->cal_done_enable = 1;
+ req->fw_init_done_enable_valid = 1;
+ req->fw_init_done_enable = 1;
+
+ req->pin_connect_result_enable_valid = 0;
+ req->pin_connect_result_enable = 0;
+
+ ret = qmi_txn_init(handle, &txn,
+ qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_IND_REGISTER_REQ_V01,
+ QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_ind_register_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to register fw indication %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(resp);
+resp_out:
+ kfree(req);
+ return ret;
+}
+
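+/* Answer the firmware's memory request: either report the allocated
+ * segments (address, size, type) or, when allocation was deferred,
+ * send an empty segment list so firmware retries with smaller chunks.
+ */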
+static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0, i;
+ bool delayed;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ /* Some targets by default request one block of large, contiguous
+ * DMA memory which is hard to allocate from the kernel. In that
+ * case the host returns a failure to the firmware, and the
+ * firmware then requests multiple blocks of small chunk-size
+ * memory instead.
+ */
+ if (ab->qmi.target_mem_delayed) {
+ delayed = true;
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
+ ab->qmi.mem_seg_count);
+ memset(req, 0, sizeof(*req));
+ } else {
+ delayed = false;
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+ for (i = 0; i < req->mem_seg_len; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "qmi req mem_seg[%d] %pad %u %u\n", i,
+ &ab->qmi.target_mem[i].paddr,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].type);
+ }
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_RESPOND_MEM_REQ_V01,
+ QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed memory request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* An error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
+ ath12k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ kfree(req);
+ return ret;
+}
+
+static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ if (!ab->qmi.target_mem[i].v.addr)
+ continue;
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].v.addr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].v.addr = NULL;
+ }
+}
+
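+/* Allocate DMA-coherent memory for every segment firmware asked for.
+ * If a chunk larger than ATH12K_QMI_MAX_CHUNK_SIZE cannot be allocated,
+ * free everything and set target_mem_delayed so that the subsequent
+ * respond_mem message makes firmware re-request smaller chunks.
+ */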
+static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
+{
+ int i;
+ struct target_mem_chunk *chunk;
+
+ ab->qmi.target_mem_delayed = false;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ chunk = &ab->qmi.target_mem[i];
+
+ /* Allocate memory only for the regions whose functionality is
+ * supported on the host. For an unsupported memory region the
+ * host does not allocate memory and assigns NULL; firmware
+ * handles this without crashing.
+ */
+ switch (chunk->type) {
+ case HOST_DDR_REGION_TYPE:
+ case M3_DUMP_REGION_TYPE:
+ case PAGEABLE_MEM_REGION_TYPE:
+ case CALDB_MEM_REGION_TYPE:
+ chunk->v.addr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!chunk->v.addr) {
+ if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
+ ab->qmi.target_mem_delayed = true;
+ ath12k_warn(ab,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath12k_qmi_free_target_mem_chunk(ab);
+ return 0;
+ }
+ ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
+ chunk->type, chunk->size);
+ return -ENOMEM;
+ }
+ break;
+ default:
+ ath12k_warn(ab, "memory type %u not supported\n",
+ chunk->type);
+ chunk->paddr = 0;
+ chunk->v.addr = NULL;
+ break;
+ }
+ }
+ return 0;
+}
+
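+/* Query target capabilities (chip, board and SoC IDs, firmware version
+ * and build id, optional device memory ranges and EEPROM caldata
+ * support) and cache them in ab->qmi.target for later use.
+ */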
+static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_cap_req_msg_v01 req;
+ struct qmi_wlanfw_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
+ int ret = 0;
+ int i;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_CAP_REQ_V01,
+ QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed target cap request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.chip_info_valid) {
+ ab->qmi.target.chip_id = resp.chip_info.chip_id;
+ ab->qmi.target.chip_family = resp.chip_info.chip_family;
+ }
+
+ if (resp.board_info_valid)
+ ab->qmi.target.board_id = resp.board_info.board_id;
+ else
+ ab->qmi.target.board_id = board_id;
+
+ if (resp.soc_info_valid)
+ ab->qmi.target.soc_id = resp.soc_info.soc_id;
+
+ if (resp.fw_version_info_valid) {
+ ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
+ strscpy(ab->qmi.target.fw_build_timestamp,
+ resp.fw_version_info.fw_build_timestamp,
+ sizeof(ab->qmi.target.fw_build_timestamp));
+ }
+
+ if (resp.fw_build_id_valid)
+ strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ sizeof(ab->qmi.target.fw_build_id));
+
+ if (resp.dev_mem_info_valid) {
+ for (i = 0; i < ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) {
+ ab->qmi.dev_mem[i].start =
+ resp.dev_mem[i].start;
+ ab->qmi.dev_mem[i].size =
+ resp.dev_mem[i].size;
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "devmem [%d] start ox%llx size %llu\n", i,
+ ab->qmi.dev_mem[i].start,
+ ab->qmi.dev_mem[i].size);
+ }
+ }
+
+ if (resp.eeprom_caldata_read_timeout_valid) {
+ ab->qmi.target.eeprom_caldata = resp.eeprom_caldata_read_timeout;
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi cal data supported from eeprom\n");
+ }
+
+ ath12k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
+ ab->qmi.target.chip_id, ab->qmi.target.chip_family,
+ ab->qmi.target.board_id, ab->qmi.target.soc_id);
+
+ ath12k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
+ ab->qmi.target.fw_version,
+ ab->qmi.target.fw_build_timestamp,
+ ab->qmi.target.fw_build_id);
+
+out:
+ return ret;
+}
+
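+/* Stream a file (board data, regdb or calibration data) to firmware in
+ * segments of at most QMI_WLANFW_MAX_DATA_SIZE_V01 bytes; the last
+ * segment carries end = 1. For EEPROM caldata no payload is sent and
+ * firmware reads the data itself.
+ */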
+static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
+ const u8 *data, u32 len, u8 type)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ const u8 *temp = data;
+ int ret;
+ u32 remaining = len;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memset(&resp, 0, sizeof(resp));
+
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->bdf_type = type;
+ req->bdf_type_valid = 1;
+ req->end_valid = 1;
+ req->end = 0;
+
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
+ req->data_valid = 0;
+ req->end = 1;
+ req->data_len = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ memcpy(req->data, temp, req->data_len);
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf download req fixed addr type %d\n",
+ type);
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
+ remaining = 0;
+ } else {
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "qmi bdf download request remaining %i\n",
+ remaining);
+ }
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+ enum ath12k_qmi_bdf_type type)
+{
+ struct device *dev = ab->dev;
+ char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
+ struct ath12k_board_data bd;
+ u32 fw_size, file_type;
+ int ret = 0;
+ const u8 *tmp;
+
+ memset(&bd, 0, sizeof(bd));
+
+ switch (type) {
+ case ATH12K_QMI_BDF_TYPE_ELF:
+ ret = ath12k_core_fetch_bdf(ab, &bd);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to load bdf:\n");
+ goto out;
+ }
+
+ if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
+ type = ATH12K_QMI_BDF_TYPE_ELF;
+ else
+ type = ATH12K_QMI_BDF_TYPE_BIN;
+
+ break;
+ case ATH12K_QMI_BDF_TYPE_REGDB:
+ ret = ath12k_core_fetch_board_data_api_1(ab, &bd,
+ ATH12K_REGDB_FILE_NAME);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to load regdb bin:\n");
+ goto out;
+ }
+ break;
+ case ATH12K_QMI_BDF_TYPE_CALIBRATION:
+
+ if (ab->qmi.target.eeprom_caldata) {
+ file_type = ATH12K_QMI_FILE_TYPE_EEPROM;
+ tmp = filename;
+ fw_size = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ file_type = ATH12K_QMI_FILE_TYPE_CALDATA;
+
+ /* cal-<bus>-<id>.bin */
+ snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath12k_bus_str(ab->hif.bus), dev_name(dev));
+ fw_entry = ath12k_core_firmware_request(ab, filename);
+ if (!IS_ERR(fw_entry))
+ goto success;
+
+ fw_entry = ath12k_core_firmware_request(ab,
+ ATH12K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ ret = PTR_ERR(fw_entry);
+ ath12k_warn(ab,
+ "qmi failed to load CAL data file:%s\n",
+ filename);
+ goto out;
+ }
+
+success:
+ fw_size = min_t(u32, ab->hw_params->fw.board_size,
+ fw_entry->size);
+ tmp = fw_entry->data;
+ }
+ ret = ath12k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to load caldata\n");
+ goto out_qmi_cal;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi caldata downloaded: type: %u\n",
+ file_type);
+
+out_qmi_cal:
+ if (!ab->qmi.target.eeprom_caldata)
+ release_firmware(fw_entry);
+ return ret;
+ default:
+ ath12k_warn(ab, "unknown file type for load %d", type);
+ goto out;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf_type %d\n", type);
+
+ fw_size = min_t(u32, ab->hw_params->fw.board_size, bd.len);
+
+ ret = ath12k_qmi_load_file_target_mem(ab, bd.data, fw_size, type);
+ if (ret < 0)
+ ath12k_warn(ab, "qmi failed to load bdf file\n");
+
+out:
+ ath12k_core_free_bdf(ab, &bd);
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi BDF download sequence completed\n");
+
+ return ret;
+}
+
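+/* Load the M3 firmware image into a DMA-coherent buffer; its address
+ * and size are handed to the target via the M3 info request below.
+ */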
+static int ath12k_qmi_m3_load(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+ if (m3_mem->vaddr || m3_mem->size)
+ return 0;
+
+ fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
+ path, sizeof(path));
+ ath12k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_mem->vaddr = dma_alloc_coherent(ab->dev,
+ fw->size, &m3_mem->paddr,
+ GFP_KERNEL);
+ if (!m3_mem->vaddr) {
+ ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
+ fw->size);
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(m3_mem->vaddr, fw->data, fw->size);
+ m3_mem->size = fw->size;
+ release_firmware(fw);
+
+ return 0;
+}
+
+static void ath12k_qmi_m3_free(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+}
+
+static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ struct qmi_wlanfw_m3_info_req_msg_v01 req;
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = ath12k_qmi_m3_load(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to load m3 firmware: %d", ret);
+ return ret;
+ }
+
+ req.addr = m3_mem->paddr;
+ req.size = m3_mem->size;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_M3_INFO_REQ_V01,
+ QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed M3 information request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
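+/* Switch firmware into the requested WLAN mode. An -ENETRESET while
+ * sending ATH12K_FIRMWARE_MODE_OFF only means the WLFW service is
+ * already gone, so it is treated as success.
+ */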
+static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
+ u32 mode)
+{
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_MODE_REQ_V01,
+ QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ if (mode == ATH12K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
+ ath12k_warn(ab, "WLFW service is dis-connected\n");
+ return 0;
+ }
+ ath12k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
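+/* Push the host's copy engine (CE) pipe configuration, the service to
+ * CE pipe map and, if supported, the shadow register v3 table to
+ * firmware before the WLAN mode is switched on.
+ */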
+static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct ce_pipe_config *ce_cfg;
+ struct service_to_pipe *svc_cfg;
+ struct qmi_txn txn = {};
+ int ret = 0, pipe_num;
+
+ ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
+ svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->host_version_valid = 1;
+ strscpy(req->host_version, ATH12K_HOST_VERSION_STRING,
+ sizeof(req->host_version));
+
+ req->tgt_cfg_valid = 1;
+ /* This is the number of CE configurations */
+ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
+ for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
+ req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
+ req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
+ req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
+ req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
+ req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ /* This is the number of service-to-CE-pipe configurations */
+ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
+ for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
+ req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
+ req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
+ req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ }
+
+ /* set shadow v3 configuration */
+ if (ab->hw_params->supports_shadow_regs) {
+ req->shadow_reg_v3_valid = 1;
+ req->shadow_reg_v3_len = min_t(u32,
+ ab->qmi.ce_cfg.shadow_reg_v3_len,
+ QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01);
+ memcpy(&req->shadow_reg_v3, ab->qmi.ce_cfg.shadow_reg_v3,
+ sizeof(u32) * req->shadow_reg_v3_len);
+ } else {
+ req->shadow_reg_v3_valid = 0;
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_CFG_REQ_V01,
+ QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed wlan config request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
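+/* Entry points used by the core: ath12k_qmi_firmware_start() pushes the
+ * wlan config and turns the requested mode on,
+ * ath12k_qmi_firmware_stop() switches the firmware off.
+ */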
+void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan mode off\n");
+ return;
+ }
+}
+
+int ath12k_qmi_firmware_start(struct ath12k_base *ab,
+ u32 mode)
+{
+ int ret;
+
+ ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_qmi_wlanfw_mode_send(ab, mode);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
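+/* QMI message and server callbacks only queue a driver event here;
+ * the actual processing happens in ath12k_qmi_driver_event_work().
+ */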
+static int
+ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
+ enum ath12k_qmi_event_type type,
+ void *data)
+{
+ struct ath12k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_respond_fw_mem_request(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_request_target_cap(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to req target capabilities:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_REGDB);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to load regdb file:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_ELF);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to load board data file:%d\n", ret);
+ return ret;
+ }
+
+ if (ab->hw_params->download_calib) {
+ ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_CALIBRATION);
+ if (ret < 0)
+ ath12k_warn(ab, "qmi failed to load calibrated data :%d\n", ret);
+ }
+
+ ret = ath12k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send m3 info req:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void ath12k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+ const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
+ int i, ret;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware request memory request\n");
+
+ if (msg->mem_seg_len == 0 ||
+ msg->mem_seg_len > ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) {
+ ath12k_warn(ab, "invalid memory segment length: %u\n",
+ msg->mem_seg_len);
+ return;
+ }
+
+ ab->qmi.mem_seg_count = msg->mem_seg_len;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
+ ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi mem seg type %d size %d\n",
+ msg->mem_seg[i].type, msg->mem_seg[i].size);
+ }
+
+ ret = ath12k_qmi_alloc_target_mem_chunk(ab);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
+ ret);
+ return;
+ }
+
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL);
+}
+
+static void ath12k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware memory ready indication\n");
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_MEM_READY, NULL);
+}
+
+static void ath12k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware ready\n");
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_READY, NULL);
+}
+
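+/* Indication handlers for the WLFW service; each message is decoded
+ * with the matching _ei table before the callback runs.
+ */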
+static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
+ .fn = ath12k_qmi_msg_mem_request_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
+ .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
+ .fn = ath12k_qmi_msg_mem_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
+ .fn = ath12k_qmi_msg_fw_ready_cb,
+ },
+};
+
+static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
+ sizeof(*sq), 0);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to connect to remote service %d\n", ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw qmi service connected\n");
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return ret;
+}
+
+static void ath12k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw del server\n");
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static const struct qmi_ops ath12k_qmi_ops = {
+ .new_server = ath12k_qmi_ops_new_server,
+ .del_server = ath12k_qmi_ops_del_server,
+};
+
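+/* Deferred handler for the events posted from the QMI callbacks via
+ * ath12k_qmi_driver_event_post(). The list is drained under event_lock,
+ * but the lock is dropped around each event since the handlers below may
+ * sleep (firmware downloads, QMI transactions).
+ */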
+static void ath12k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
+ event_work);
+ struct ath12k_qmi_driver_event *event;
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath12k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ if (test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)) {
+ kfree(event);
+ return;
+ }
+
+ switch (event->type) {
+ case ATH12K_QMI_EVENT_SERVER_ARRIVE:
+ ret = ath12k_qmi_event_server_arrive(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH12K_QMI_EVENT_SERVER_EXIT:
+ set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+ break;
+ case ATH12K_QMI_EVENT_REQUEST_MEM:
+ ret = ath12k_qmi_event_mem_request(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH12K_QMI_EVENT_FW_MEM_READY:
+ ret = ath12k_qmi_event_load_bdf(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH12K_QMI_EVENT_FW_READY:
+ clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ ath12k_hal_dump_srng_stats(ab);
+ queue_work(ab->workqueue, &ab->restart_work);
+ break;
+ }
+
+ clear_bit(ATH12K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+ ath12k_core_qmi_firmware_ready(ab);
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
+ default:
+ ath12k_warn(ab, "invalid event type: %d", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
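+/* Bring-up entry point: register the QMI handle with the message handlers
+ * and server ops above, create the event workqueue, and add a lookup for
+ * the WLANFW service so that ath12k_qmi_ops_new_server() is invoked once
+ * the firmware's QMI server appears on the QRTR bus.
+ */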
+int ath12k_qmi_init_service(struct ath12k_base *ab)
+{
+ int ret;
+
+ memset(&ab->qmi.target, 0, sizeof(struct target_info));
+ memset(&ab->qmi.target_mem, 0, sizeof(ab->qmi.target_mem));
+ ab->qmi.ab = ab;
+
+ ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT;
+ ret = qmi_handle_init(&ab->qmi.handle, ATH12K_QMI_RESP_LEN_MAX,
+ &ath12k_qmi_ops, ath12k_qmi_msg_handlers);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to initialize qmi handle\n");
+ return ret;
+ }
+
+ ab->qmi.event_wq = alloc_workqueue("ath12k_qmi_driver_event",
+ WQ_UNBOUND, 1);
+ if (!ab->qmi.event_wq) {
+ ath12k_err(ab, "failed to allocate workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&ab->qmi.event_list);
+ spin_lock_init(&ab->qmi.event_lock);
+ INIT_WORK(&ab->qmi.event_work, ath12k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&ab->qmi.handle, ATH12K_QMI_WLFW_SERVICE_ID_V01,
+ ATH12K_QMI_WLFW_SERVICE_VERS_V01,
+ ab->qmi.service_ins_id);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to add qmi lookup\n");
+ destroy_workqueue(ab->qmi.event_wq);
+ return ret;
+ }
+
+ return ret;
+}
+
+void ath12k_qmi_deinit_service(struct ath12k_base *ab)
+{
+ qmi_handle_release(&ab->qmi.handle);
+ cancel_work_sync(&ab->qmi.event_work);
+ destroy_workqueue(ab->qmi.event_wq);
+ ath12k_qmi_m3_free(ab);
+ ath12k_qmi_free_target_mem_chunk(ab);
+}
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
new file mode 100644
index 000000000000..ad87f19903db
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_QMI_H
+#define ATH12K_QMI_H
+
+#include <linux/mutex.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define ATH12K_HOST_VERSION_STRING "WIN"
+#define ATH12K_QMI_WLANFW_TIMEOUT_MS 10000
+#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64
+#define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000
+#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
+#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07
+#define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45
+#define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850 0x1
+
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274 0x07
+#define ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
+#define ATH12K_QMI_RESP_LEN_MAX 8192
+#define ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
+#define ATH12K_QMI_CALDB_SIZE 0x480000
+#define ATH12K_QMI_BDF_EXT_STR_LENGTH 0x20
+#define ATH12K_QMI_FW_MEM_REQ_SEGMENT_CNT 3
+#define ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01 4
+#define ATH12K_QMI_DEVMEM_CMEM_INDEX 0
+
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_FW_READY_IND_V01 0x0038
+
+#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
+#define ATH12K_FIRMWARE_MODE_OFF 4
+#define ATH12K_QMI_TARGET_MEM_MODE_DEFAULT 0
+
+#define ATH12K_BOARD_ID_DEFAULT 0xFF
+
+struct ath12k_base;
+
+enum ath12k_qmi_file_type {
+ ATH12K_QMI_FILE_TYPE_BDF_GOLDEN = 0,
+ ATH12K_QMI_FILE_TYPE_CALDATA = 2,
+ ATH12K_QMI_FILE_TYPE_EEPROM = 3,
+ ATH12K_QMI_MAX_FILE_TYPE = 4,
+};
+
+enum ath12k_qmi_bdf_type {
+ ATH12K_QMI_BDF_TYPE_BIN = 0,
+ ATH12K_QMI_BDF_TYPE_ELF = 1,
+ ATH12K_QMI_BDF_TYPE_REGDB = 4,
+ ATH12K_QMI_BDF_TYPE_CALIBRATION = 5,
+};
+
+enum ath12k_qmi_event_type {
+ ATH12K_QMI_EVENT_SERVER_ARRIVE,
+ ATH12K_QMI_EVENT_SERVER_EXIT,
+ ATH12K_QMI_EVENT_REQUEST_MEM,
+ ATH12K_QMI_EVENT_FW_MEM_READY,
+ ATH12K_QMI_EVENT_FW_READY,
+ ATH12K_QMI_EVENT_REGISTER_DRIVER,
+ ATH12K_QMI_EVENT_UNREGISTER_DRIVER,
+ ATH12K_QMI_EVENT_RECOVERY,
+ ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
+ ATH12K_QMI_EVENT_POWER_UP,
+ ATH12K_QMI_EVENT_POWER_DOWN,
+ ATH12K_QMI_EVENT_MAX,
+};
+
+struct ath12k_qmi_driver_event {
+ struct list_head list;
+ enum ath12k_qmi_event_type type;
+ void *data;
+};
+
+struct ath12k_qmi_ce_cfg {
+ const struct ce_pipe_config *tgt_ce;
+ int tgt_ce_len;
+ const struct service_to_pipe *svc_to_ce_map;
+ int svc_to_ce_map_len;
+ const u8 *shadow_reg;
+ int shadow_reg_len;
+ u32 *shadow_reg_v3;
+ int shadow_reg_v3_len;
+};
+
+struct ath12k_qmi_event_msg {
+ struct list_head list;
+ enum ath12k_qmi_event_type type;
+};
+
+struct target_mem_chunk {
+ u32 size;
+ u32 type;
+ dma_addr_t paddr;
+ union {
+ void __iomem *ioaddr;
+ void *addr;
+ } v;
+};
+
+struct target_info {
+ u32 chip_id;
+ u32 chip_family;
+ u32 board_id;
+ u32 soc_id;
+ u32 fw_version;
+ u32 eeprom_caldata;
+ char fw_build_timestamp[ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+ char fw_build_id[ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ char bdf_ext[ATH12K_QMI_BDF_EXT_STR_LENGTH];
+};
+
+struct m3_mem_region {
+ u32 size;
+ dma_addr_t paddr;
+ void *vaddr;
+};
+
+struct dev_mem_info {
+ u64 start;
+ u64 size;
+};
+
+struct ath12k_qmi {
+ struct ath12k_base *ab;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ struct ath12k_qmi_ce_cfg ce_cfg;
+ struct target_mem_chunk target_mem[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ u32 mem_seg_count;
+ u32 target_mem_mode;
+ bool target_mem_delayed;
+ u8 cal_done;
+ struct target_info target;
+ struct m3_mem_region m3_mem;
+ unsigned int service_ins_id;
+ struct dev_mem_info dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01];
+};
+
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261
+#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 64
+#define QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01 3
+
+struct qmi_wlanfw_host_ddr_range {
+ u64 start;
+ u64 size;
+};
+
+enum ath12k_qmi_target_mem {
+ HOST_DDR_REGION_TYPE = 0x1,
+ BDF_MEM_REGION_TYPE = 0x2,
+ M3_DUMP_REGION_TYPE = 0x3,
+ CALDB_MEM_REGION_TYPE = 0x4,
+ PAGEABLE_MEM_REGION_TYPE = 0x9,
+};
+
+enum qmi_wlanfw_host_build_type {
+ WLANFW_HOST_BUILD_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_HOST_BUILD_TYPE_UNSPECIFIED_V01 = 0,
+ QMI_WLANFW_HOST_BUILD_TYPE_PRIMARY_V01 = 1,
+ QMI_WLANFW_HOST_BUILD_TYPE_SECONDARY_V01 = 2,
+ WLANFW_HOST_BUILD_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+#define QMI_WLFW_MAX_NUM_MLO_CHIPS_V01 3
+#define QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01 2
+
+struct wlfw_host_mlo_chip_info_s_v01 {
+ u8 chip_id;
+ u8 num_local_links;
+ u8 hw_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01];
+ u8 valid_mlo_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01];
+};
+
+enum ath12k_qmi_cnss_feature {
+ CNSS_FEATURE_MIN_ENUM_VAL_V01 = INT_MIN,
+ CNSS_QDSS_CFG_MISS_V01 = 3,
+ CNSS_MAX_FEATURE_V01 = 64,
+ CNSS_FEATURE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
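+/* Host capability request. As with the other request structures below,
+ * every optional member is paired with a *_valid flag and is only encoded
+ * into the wire message when that flag is set, e.g. (illustrative):
+ *
+ * req.mlo_capable_valid = 1;
+ * req.mlo_capable = 1;
+ */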
+struct qmi_wlanfw_host_cap_req_msg_v01 {
+ u8 num_clients_valid;
+ u32 num_clients;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+ u8 cal_duration_valid;
+ u16 cal_duraiton;
+ u8 platform_name_valid;
+ char platform_name[QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 + 1];
+ u8 ddr_range_valid;
+ struct qmi_wlanfw_host_ddr_range ddr_range[QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01];
+ u8 host_build_type_valid;
+ enum qmi_wlanfw_host_build_type host_build_type;
+ u8 mlo_capable_valid;
+ u8 mlo_capable;
+ u8 mlo_chip_id_valid;
+ u16 mlo_chip_id;
+ u8 mlo_group_id_valid;
+ u8 mlo_group_id;
+ u8 max_mlo_peer_valid;
+ u16 max_mlo_peer;
+ u8 mlo_num_chips_valid;
+ u8 mlo_num_chips;
+ u8 mlo_chip_info_valid;
+ struct wlfw_host_mlo_chip_info_s_v01 mlo_chip_info[QMI_WLFW_MAX_NUM_MLO_CHIPS_V01];
+ u8 feature_list_valid;
+ u64 feature_list;
+};
+
+struct qmi_wlanfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
+#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020
+#define QMI_WLANFW_CLIENT_ID 0x4b4e454c
+
+struct qmi_wlanfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
+};
+
+struct qmi_wlanfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
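+/* Firmware memory handshake: firmware raises the REQUEST_MEM indication
+ * listing the segments it needs, the host allocates them (see
+ * ath12k_qmi_alloc_target_mem_chunk()) and answers with a RESPOND_MEM
+ * request carrying the DMA addresses, and firmware then signals
+ * FW_MEM_READY.
+ */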
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1824
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 888
+#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2
+#define QMI_WLANFW_MAX_STR_LEN_V01 16
+
+struct qmi_wlanfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+enum qmi_wlanfw_mem_type_enum_v01 {
+ WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLANFW_MEM_BDF_V01 = 2,
+ QMI_WLANFW_MEM_M3_V01 = 3,
+ QMI_WLANFW_MEM_CAL_V01 = 4,
+ QMI_WLANFW_MEM_DPD_V01 = 5,
+ WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_mem_seg_s_v01 {
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct qmi_wlanfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
+struct qmi_wlanfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+
+enum qmi_wlanfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01 {
+ u32 addr;
+};
+
+struct qmi_wlanfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct qmi_wlanfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct qmi_wlanfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct qmi_wlanfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct qmi_wlanfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct qmi_wlanfw_dev_mem_info_s_v01 {
+ u64 start;
+ u64 size;
+};
+
+enum qmi_wlanfw_cal_temp_id_enum_v01 {
+ QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
+ QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
+};
+
+enum qmi_wlanfw_rd_card_chain_cap_v01 {
+ WLFW_RD_CARD_CHAIN_CAP_MIN_VAL_V01 = INT_MIN,
+ WLFW_RD_CARD_CHAIN_CAP_UNSPECIFIED_V01 = 0,
+ WLFW_RD_CARD_CHAIN_CAP_1x1_V01 = 1,
+ WLFW_RD_CARD_CHAIN_CAP_2x2_V01 = 2,
+ WLFW_RD_CARD_CHAIN_CAP_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct qmi_wlanfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct qmi_wlanfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+ u8 voltage_mv_valid;
+ u32 voltage_mv;
+ u8 time_freq_hz_valid;
+ u32 time_freq_hz;
+ u8 otp_version_valid;
+ u32 otp_version;
+ u8 eeprom_caldata_read_timeout_valid;
+ u32 eeprom_caldata_read_timeout;
+ u8 fw_caps_valid;
+ u64 fw_caps;
+ u8 rd_card_chain_cap_valid;
+ enum qmi_wlanfw_rd_card_chain_cap_v01 rd_card_chain_cap;
+ u8 dev_mem_info_valid;
+ struct qmi_wlanfw_dev_mem_info_s_v01 dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01];
+};
+
+struct qmi_wlanfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025
+/* TODO: Need to check with MCL and FW team that data can be pointer and
+ * can be last element in structure
+ */
+struct qmi_wlanfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+struct qmi_wlanfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C
+
+struct qmi_wlanfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+struct qmi_wlanfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
+#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
+#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLANFW_MAX_NUM_CE_V01 12
+#define QMI_WLANFW_MAX_NUM_SVC_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01 60
+
+struct qmi_wlanfw_wlan_mode_req_msg_v01 {
+ u32 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
+ tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
+ svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct qmi_wlanfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v3_valid;
+ u32 shadow_reg_v3_len;
+ struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01
+ shadow_reg_v3[QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01];
+};
+
+struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+int ath12k_qmi_firmware_start(struct ath12k_base *ab,
+ u32 mode);
+void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
+void ath12k_qmi_event_work(struct work_struct *work);
+void ath12k_qmi_msg_recv_work(struct work_struct *work);
+void ath12k_qmi_deinit_service(struct ath12k_base *ab);
+int ath12k_qmi_init_service(struct ath12k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
new file mode 100644
index 000000000000..6ede91ebc8e1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/rtnetlink.h>
+#include "core.h"
+#include "debug.h"
+
+/* World regdom to be used in case default regd from fw is unavailable */
+#define ATH12K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
+#define ATH12K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH12K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+
+#define ETSI_WEATHER_RADAR_BAND_LOW 5590
+#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
+#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
+
+static const struct ieee80211_regdomain ath12k_world_regd = {
+ .n_reg_rules = 3,
+ .alpha2 = "00",
+ .reg_rules = {
+ ATH12K_2GHZ_CH01_11,
+ ATH12K_5GHZ_5150_5350,
+ ATH12K_5GHZ_5725_5850,
+ }
+};
+
+static bool ath12k_regdom_changes(struct ath12k *ar, char *alpha2)
+{
+ const struct ieee80211_regdomain *regd;
+
+ regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+ /* This can happen during wiphy registration where the previous
+ * user request is received before we update the regd received
+ * from firmware.
+ */
+ if (!regd)
+ return true;
+
+ return memcmp(regd->alpha2, alpha2, 2) != 0;
+}
+
+static void
+ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath12k_wmi_init_country_arg arg;
+ struct ath12k *ar = hw->priv;
+ int ret;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+
+ /* Currently supporting only General User Hints. Cell-based user
+ * hints are to be handled later. Hints from other sources like
+ * core and beacons are not expected for self-managed wiphys.
+ */
+ if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
+ ath12k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
+ return;
+ }
+
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "Country Setting is not allowed\n");
+ return;
+ }
+
+ if (!ath12k_regdom_changes(ar, request->alpha2)) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Country is already set\n");
+ return;
+ }
+
+ /* Set the country code to the firmware and wait for
+ * the WMI_REG_CHAN_LIST_CC EVENT for updating the
+ * reg info
+ */
+ arg.flags = ALPHA_IS_SET;
+ memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
+ arg.cc_info.alpha2[2] = 0;
+
+ ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+}
+
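+/* Push the current channel list to firmware: walk the wiphy bands twice,
+ * first to count the enabled channels and size the WMI argument, then to
+ * fill one ath12k_wmi_channel_arg per channel before sending the scan
+ * channel list command.
+ */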
+int ath12k_reg_update_chan_list(struct ath12k *ar)
+{
+ struct ieee80211_supported_band **bands;
+ struct ath12k_wmi_scan_chan_list_arg *arg;
+ struct ieee80211_channel *channel;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ath12k_wmi_channel_arg *ch;
+ enum nl80211_band band;
+ int num_channels = 0;
+ int i, ret;
+
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ num_channels++;
+ }
+ }
+
+ if (WARN_ON(!num_channels))
+ return -EINVAL;
+
+ arg = kzalloc(struct_size(arg, channel, num_channels), GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+
+ arg->pdev_id = ar->pdev->pdev_id;
+ arg->nallchans = num_channels;
+
+ ch = arg->channel;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ /* TODO: Set to true/false based on some condition? */
+ ch->allow_ht = true;
+ ch->allow_vht = true;
+ ch->allow_he = true;
+
+ ch->dfs_set =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+ ch->is_chan_passive = !!(channel->flags &
+ IEEE80211_CHAN_NO_IR);
+ ch->is_chan_passive |= ch->dfs_set;
+ ch->mhz = channel->center_freq;
+ ch->cfreq1 = channel->center_freq;
+ ch->minpower = 0;
+ ch->maxpower = channel->max_power * 2;
+ ch->maxregpower = channel->max_reg_power * 2;
+ ch->antennamax = channel->max_antenna_gain * 2;
+
+ /* TODO: Use appropriate phymodes */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->phy_mode = MODE_11G;
+ else
+ ch->phy_mode = MODE_11A;
+
+ if (channel->band == NL80211_BAND_6GHZ &&
+ cfg80211_channel_is_psc(channel))
+ ch->psc_channel = true;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ i, arg->nallchans,
+ ch->mhz, ch->maxpower, ch->maxregpower,
+ ch->antennamax, ch->phy_mode);
+
+ ch++;
+ /* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
+ * set_agile, reg_class_idx
+ */
+ }
+ }
+
+ ret = ath12k_wmi_send_scan_chan_list_cmd(ar, arg);
+ kfree(arg);
+
+ return ret;
+}
+
+static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
+ struct ieee80211_regdomain *regd_copy)
+{
+ u8 i;
+
+ /* The caller should have checked error conditions */
+ memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
+
+ for (i = 0; i < regd_orig->n_reg_rules; i++)
+ memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+}
+
+int ath12k_regd_update(struct ath12k *ar, bool init)
+{
+ struct ieee80211_regdomain *regd, *regd_copy = NULL;
+ int ret, regd_len, pdev_id;
+ struct ath12k_base *ab;
+
+ ab = ar->ab;
+ pdev_id = ar->pdev_idx;
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (init) {
+ /* Apply the regd received during init through
+ * WMI_REG_CHAN_LIST_CC event. In case of failure to
+ * receive the regd, initialize with a default world
+ * regulatory.
+ */
+ if (ab->default_regd[pdev_id]) {
+ regd = ab->default_regd[pdev_id];
+ } else {
+ ath12k_warn(ab,
+ "failed to receive default regd during init\n");
+ regd = (struct ieee80211_regdomain *)&ath12k_world_regd;
+ }
+ } else {
+ regd = ab->new_regd[pdev_id];
+ }
+
+ if (!regd) {
+ ret = -EINVAL;
+ spin_unlock_bh(&ab->base_lock);
+ goto err;
+ }
+
+ regd_len = sizeof(*regd) + (regd->n_reg_rules *
+ sizeof(struct ieee80211_reg_rule));
+
+ regd_copy = kzalloc(regd_len, GFP_ATOMIC);
+ if (regd_copy)
+ ath12k_copy_regd(regd, regd_copy);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!regd_copy) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rtnl_lock();
+ wiphy_lock(ar->hw->wiphy);
+ ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
+ wiphy_unlock(ar->hw->wiphy);
+ rtnl_unlock();
+
+ kfree(regd_copy);
+
+ if (ret)
+ goto err;
+
+ if (ar->state == ATH12K_STATE_ON) {
+ ret = ath12k_reg_update_chan_list(ar);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ ath12k_warn(ab, "failed to perform regd update : %d\n", ret);
+ return ret;
+}
+
+static enum nl80211_dfs_regions
+ath12k_map_fw_dfs_region(enum ath12k_dfs_region dfs_region)
+{
+ switch (dfs_region) {
+ case ATH12K_DFS_REG_FCC:
+ case ATH12K_DFS_REG_CN:
+ return NL80211_DFS_FCC;
+ case ATH12K_DFS_REG_ETSI:
+ case ATH12K_DFS_REG_KR:
+ return NL80211_DFS_ETSI;
+ case ATH12K_DFS_REG_MKK:
+ case ATH12K_DFS_REG_MKK_N:
+ return NL80211_DFS_JP;
+ default:
+ return NL80211_DFS_UNSET;
+ }
+}
+
+static u32 ath12k_map_fw_reg_flags(u16 reg_flags)
+{
+ u32 flags = 0;
+
+ if (reg_flags & REGULATORY_CHAN_NO_IR)
+ flags = NL80211_RRF_NO_IR;
+
+ if (reg_flags & REGULATORY_CHAN_RADAR)
+ flags |= NL80211_RRF_DFS;
+
+ if (reg_flags & REGULATORY_CHAN_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (reg_flags & REGULATORY_CHAN_NO_HT40)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
+
+static bool
+ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ if ((start_freq1 >= start_freq2 &&
+ start_freq1 < end_freq2) ||
+ (start_freq2 > start_freq1 &&
+ start_freq2 < end_freq1))
+ return true;
+
+ /* TODO: Should we restrict intersection feasibility
+ * based on min bandwidth of the intersected region also,
+ * say the intersected rule should have a min bandwidth
+ * of 20MHz?
+ */
+
+ return false;
+}
+
+static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2,
+ struct ieee80211_reg_rule *new_rule)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+ u32 freq_diff, max_bw;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
+ start_freq2);
+ new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
+
+ freq_diff = new_rule->freq_range.end_freq_khz -
+ new_rule->freq_range.start_freq_khz;
+ max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
+ rule2->freq_range.max_bandwidth_khz);
+ new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
+
+ new_rule->power_rule.max_antenna_gain =
+ min_t(u32, rule1->power_rule.max_antenna_gain,
+ rule2->power_rule.max_antenna_gain);
+
+ new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
+ rule2->power_rule.max_eirp);
+
+ /* Use the flags of both the rules */
+ new_rule->flags = rule1->flags | rule2->flags;
+
+ /* To be safe, let's use the max cac timeout of both rules */
+ new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
+ rule2->dfs_cac_ms);
+}
+
+static struct ieee80211_regdomain *
+ath12k_regd_intersect(struct ieee80211_regdomain *default_regd,
+ struct ieee80211_regdomain *curr_regd)
+{
+ u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
+ struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ u8 i, j, k;
+
+ num_old_regd_rules = default_regd->n_reg_rules;
+ num_curr_regd_rules = curr_regd->n_reg_rules;
+ num_new_regd_rules = 0;
+
+ /* Find the number of intersecting rules to allocate new regd memory */
+ for (i = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath12k_reg_can_intersect(old_rule, curr_rule))
+ num_new_regd_rules++;
+ }
+ }
+
+ if (!num_new_regd_rules)
+ return NULL;
+
+ new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
+ sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+
+ if (!new_regd)
+ return NULL;
+
+ /* We set the new country and dfs region directly and only trim
+ * the freq, power, antenna gain by intersecting with the
+ * default regdomain. Also MAX of the dfs cac timeout is selected.
+ */
+ new_regd->n_reg_rules = num_new_regd_rules;
+ memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
+ new_regd->dfs_region = curr_regd->dfs_region;
+ new_rule = new_regd->reg_rules;
+
+ for (i = 0, k = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath12k_reg_can_intersect(old_rule, curr_rule))
+ ath12k_reg_intersect_rules(old_rule, curr_rule,
+ (new_rule + k++));
+ }
+ }
+ return new_regd;
+}
+
+static const char *
+ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_FCC:
+ return "FCC";
+ case NL80211_DFS_ETSI:
+ return "ETSI";
+ case NL80211_DFS_JP:
+ return "JP";
+ default:
+ return "UNSET";
+ }
+}
+
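+/* Clamp a rule's bandwidth to both the rule's span and the firmware
+ * limit, then snap it down to a supported channel width. For example, a
+ * 5490 - 5590 MHz span with max_bw 160 gives min(100, 160) = 100, which
+ * snaps to 80.
+ */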
+static u16
+ath12k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
+{
+ u16 bw;
+
+ bw = end_freq - start_freq;
+ bw = min_t(u16, bw, max_bw);
+
+ if (bw >= 80 && bw < 160)
+ bw = 80;
+ else if (bw >= 40 && bw < 80)
+ bw = 40;
+ else if (bw < 40)
+ bw = 20;
+
+ return bw;
+}
+
+static void
+ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
+ u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
+ u32 reg_flags)
+{
+ reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
+ reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
+ reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
+ reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
+ reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->flags = reg_flags;
+}
+
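+/* Split an ETSI DFS rule that overlaps the 5590 - 5650 MHz weather radar
+ * band into up to three rules so that only the overlapping part carries
+ * the 600 s CAC timeout, e.g. a 5470 - 5725 MHz rule becomes 5470 - 5590,
+ * 5590 - 5650 (dfs_cac_ms = 600000) and 5650 - 5725.
+ */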
+static void
+ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
+ struct ieee80211_regdomain *regd,
+ struct ath12k_reg_rule *reg_rule,
+ u8 *rule_idx, u32 flags, u16 max_bw)
+{
+ u32 end_freq;
+ u16 bw;
+ u8 i;
+
+ i = *rule_idx;
+
+ bw = ath12k_reg_adjust_bw(reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
+
+ ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
+ end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
+ else
+ end_freq = reg_rule->end_freq;
+
+ bw = ath12k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ max_bw);
+
+ i++;
+
+ ath12k_reg_update_rule(regd->reg_rules + i,
+ ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
+
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (end_freq == reg_rule->end_freq) {
+ regd->n_reg_rules--;
+ *rule_idx = i;
+ return;
+ }
+
+ bw = ath12k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, max_bw);
+
+ i++;
+
+ ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ *rule_idx = i;
+}
+
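+/* Build an ieee80211_regdomain from the firmware reg rules. Rules are
+ * consumed in the order firmware sends them (2 GHz, then 5 GHz, then the
+ * 6 GHz indoor-AP set), NL80211_RRF_AUTO_BW is forced on the 5/6 GHz
+ * rules, and ETSI rules overlapping the weather radar band are split by
+ * the helper above. With 'intersect' set, the result is further trimmed
+ * against the default regdomain via ath12k_regd_intersect().
+ */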
+struct ieee80211_regdomain *
+ath12k_reg_build_regd(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info, bool intersect)
+{
+ struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+ struct ath12k_reg_rule *reg_rule;
+ u8 i = 0, j = 0, k = 0;
+ u8 num_rules;
+ u16 max_bw;
+ u32 flags;
+ char alpha2[3];
+
+ num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
+
+ /* FIXME: Currently taking reg rules for 6G only from Indoor AP mode list.
+ * This can be updated to choose the combination dynamically based on AP
+ * type and client type, after complete 6G regulatory support is added.
+ */
+ if (reg_info->is_ext_reg_event)
+ num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP];
+
+ if (!num_rules)
+ goto ret;
+
+ /* Add max additional rules to accommodate weather radar band */
+ if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI)
+ num_rules += 2;
+
+ tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ (num_rules * sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+ if (!tmp_regd)
+ goto ret;
+
+ memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ alpha2[2] = '\0';
+ tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
+
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
+ alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region),
+ reg_info->dfs_region, num_rules);
+ /* Update reg_rules[] below. Firmware is expected to
+ * send these rules in order(2G rules first and then 5G)
+ */
+ for (; i < num_rules; i++) {
+ if (reg_info->num_2g_reg_rules &&
+ (i < reg_info->num_2g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_2g_ptr + i;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_2g);
+ flags = 0;
+ } else if (reg_info->num_5g_reg_rules &&
+ (j < reg_info->num_5g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_5g_ptr + j++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_5g);
+
+ /* FW doesn't pass NL80211_RRF_AUTO_BW flag for
+ * BW Auto correction, we can enable this by default
+ * for all 5G rules here. The regulatory core performs
+ * BW correction if required and applies flags as
+ * per other BW rule flags we pass from here
+ */
+ flags = NL80211_RRF_AUTO_BW;
+ } else if (reg_info->is_ext_reg_event &&
+ reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] &&
+ (k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) {
+ reg_rule = reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP] + k++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]);
+ flags = NL80211_RRF_AUTO_BW;
+ } else {
+ break;
+ }
+
+ flags |= ath12k_map_fw_reg_flags(reg_rule->flags);
+
+ ath12k_reg_update_rule(tmp_regd->reg_rules + i,
+ reg_rule->start_freq,
+ reg_rule->end_freq, max_bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ /* Update dfs cac timeout if the dfs domain is ETSI and the
+ * new rule covers weather radar band.
+ * Default value of '0' corresponds to 60s timeout, so no
+ * need to update that for other rules.
+ */
+ if (flags & NL80211_RRF_DFS &&
+ reg_info->dfs_region == ATH12K_DFS_REG_ETSI &&
+ (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
+ reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
+ ath12k_reg_update_weather_radar_band(ab, tmp_regd,
+ reg_rule, &i,
+ flags, max_bw);
+ continue;
+ }
+
+ if (reg_info->is_ext_reg_event) {
+ ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags, reg_rule->psd_flag, reg_rule->psd_eirp);
+ } else {
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+ }
+
+ tmp_regd->n_reg_rules = i;
+
+ if (intersect) {
+ default_regd = ab->default_regd[reg_info->phy_id];
+
+ /* Get a new regd by intersecting the received regd with
+ * our default regd.
+ */
+ new_regd = ath12k_regd_intersect(default_regd, tmp_regd);
+ kfree(tmp_regd);
+ if (!new_regd) {
+ ath12k_warn(ab, "Unable to create intersected regdomain\n");
+ goto ret;
+ }
+ } else {
+ new_regd = tmp_regd;
+ }
+
+ret:
+ return new_regd;
+}
+
+void ath12k_regd_update_work(struct work_struct *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ regd_update_work);
+ int ret;
+
+ ret = ath12k_regd_update(ar, false);
+ if (ret) {
+ /* Firmware has already moved to the new regd. We need
+ * to maintain channel consistency across FW, Host driver
+ * and userspace. Hence as a fallback mechanism we can set
+ * the prev or default country code to the firmware.
+ */
+ /* TODO: Implement Fallback Mechanism */
+ }
+}
+
+void ath12k_reg_init(struct ath12k *ar)
+{
+ ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->reg_notifier = ath12k_reg_notifier;
+}
+
+void ath12k_reg_free(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->max_radios; i++) {
+ kfree(ab->default_regd[i]);
+ kfree(ab->new_regd[i]);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
new file mode 100644
index 000000000000..56d009a47234
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_REG_H
+#define ATH12K_REG_H
+
+#include <linux/kernel.h>
+#include <net/regulatory.h>
+
+struct ath12k_base;
+struct ath12k;
+
+/* DFS regdomains supported by Firmware */
+enum ath12k_dfs_region {
+ ATH12K_DFS_REG_UNSET,
+ ATH12K_DFS_REG_FCC,
+ ATH12K_DFS_REG_ETSI,
+ ATH12K_DFS_REG_MKK,
+ ATH12K_DFS_REG_CN,
+ ATH12K_DFS_REG_KR,
+ ATH12K_DFS_REG_MKK_N,
+ ATH12K_DFS_REG_UNDEF,
+};
+
+enum ath12k_reg_cc_code {
+ REG_SET_CC_STATUS_PASS = 0,
+ REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ REG_INIT_ALPHA2_NOT_FOUND = 2,
+ REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ REG_SET_CC_STATUS_NO_MEMORY = 4,
+ REG_SET_CC_STATUS_FAIL = 5,
+};
+
+struct ath12k_reg_rule {
+ u16 start_freq;
+ u16 end_freq;
+ u16 max_bw;
+ u8 reg_power;
+ u8 ant_gain;
+ u16 flags;
+ bool psd_flag;
+ u16 psd_eirp;
+};
+
+struct ath12k_reg_info {
+ enum ath12k_reg_cc_code status_code;
+ u8 num_phy;
+ u8 phy_id;
+ u16 reg_dmn_pair;
+ u16 ctry_code;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ u32 dfs_region;
+ u32 phybitmap;
+ bool is_ext_reg_event;
+ u32 min_bw_2g;
+ u32 max_bw_2g;
+ u32 min_bw_5g;
+ u32 max_bw_5g;
+ u32 num_2g_reg_rules;
+ u32 num_5g_reg_rules;
+ struct ath12k_reg_rule *reg_rules_2g_ptr;
+ struct ath12k_reg_rule *reg_rules_5g_ptr;
+ enum wmi_reg_6g_client_type client_type;
+ bool rnr_tpe_usable;
+ bool unspecified_ap_usable;
+ /* TODO: All 6G related info can be stored only for required
+ * combination instead of all types, to optimize memory usage.
+ */
+ u8 domain_code_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u8 domain_code_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 domain_code_6g_super_id;
+ u32 min_bw_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 max_bw_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 min_bw_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 max_bw_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ struct ath12k_reg_rule *reg_rules_6g_ap_ptr[WMI_REG_CURRENT_MAX_AP_TYPE];
+ struct ath12k_reg_rule *reg_rules_6g_client_ptr
+ [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+};
+
+void ath12k_reg_init(struct ath12k *ar);
+void ath12k_reg_free(struct ath12k_base *ab);
+void ath12k_regd_update_work(struct work_struct *work);
+struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info,
+ bool intersect);
+int ath12k_regd_update(struct ath12k *ar, bool init);
+int ath12k_reg_update_chan_list(struct ath12k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
new file mode 100644
index 000000000000..5feaff6450ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -0,0 +1,1441 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_RX_DESC_H
+#define ATH12K_RX_DESC_H
+
+enum rx_desc_decap_type {
+ RX_DESC_DECAP_TYPE_RAW,
+ RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+ RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+ RX_DESC_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decrypt_status_code {
+ RX_DESC_DECRYPT_STATUS_CODE_OK,
+ RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
+ RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR,
+ RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_OTHER,
+};
+
+#define RX_MPDU_START_INFO0_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO0_LMAC_PEER_ID_MSB GENMASK(6, 5)
+#define RX_MPDU_START_INFO0_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO0_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO0_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO0_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO0_RXDMA0_SRC_RING_SEL GENMASK(13, 11)
+#define RX_MPDU_START_INFO0_RXDMA0_DST_RING_SEL GENMASK(16, 14)
+#define RX_MPDU_START_INFO0_MCAST_ECHO_DROP_EN BIT(17)
+#define RX_MPDU_START_INFO0_WDS_LEARN_DETECT_EN BIT(18)
+#define RX_MPDU_START_INFO0_INTRA_BSS_CHECK_EN BIT(19)
+#define RX_MPDU_START_INFO0_USE_PPE BIT(20)
+#define RX_MPDU_START_INFO0_PPE_ROUTING_EN BIT(21)
+
+#define RX_MPDU_START_INFO1_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO1_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO1_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO1_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO2_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO2_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO2_MESH_STA GENMASK(9, 8)
+#define RX_MPDU_START_INFO2_BSSID_HIT BIT(10)
+#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(14, 11)
+#define RX_MPDU_START_INFO2_TID GENMASK(18, 15)
+
+#define RX_MPDU_START_INFO3_RXPCU_MPDU_FLTR GENMASK(1, 0)
+#define RX_MPDU_START_INFO3_SW_FRAME_GRP_ID GENMASK(8, 2)
+#define RX_MPDU_START_INFO3_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO3_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO3_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO3_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO3_AST_LOOKUP_VALID BIT(13)
+#define RX_MPDU_START_INFO3_RANGING BIT(14)
+
+#define RX_MPDU_START_INFO4_MPDU_FCTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO4_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO4_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO4_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO4_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO4_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO4_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO4_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO4_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO4_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO4_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO4_TO_DS BIT(17)
+#define RX_MPDU_START_INFO4_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO4_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO4_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO5_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO5_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO5_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO5_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO5_BAR_FRAME BIT(29)
+#define RX_MPDU_START_INFO5_RAW_MPDU BIT(30)
+
+#define RX_MPDU_START_INFO6_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO6_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO6_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO6_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO6_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO6_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO6_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO6_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO6_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO6_EOSP BIT(24)
+#define RX_MPDU_START_INFO6_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO6_ORDER BIT(26)
+#define RX_MPDU_START_INFO6_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO6_DIRECTED BIT(29)
+#define RX_MPDU_START_INFO6_AMSDU_PRESENT BIT(30)
+
+#define RX_MPDU_START_INFO7_VDEV_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO7_SERVICE_CODE GENMASK(16, 8)
+#define RX_MPDU_START_INFO7_PRIORITY_VALID BIT(17)
+#define RX_MPDU_START_INFO7_SRC_INFO GENMASK(29, 18)
+
+#define RX_MPDU_START_INFO8_AUTH_TO_SEND_WDS BIT(0)
+
+struct rx_mpdu_start_qcn9274 {
+ __le32 info0;
+ __le32 reo_queue_desc_lo;
+ __le32 info1;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info3;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+ __le32 info7;
+ u8 multi_link_addr1[ETH_ALEN];
+ u8 multi_link_addr2[ETH_ALEN];
+ __le32 info8;
+ __le32 res0;
+ __le32 res1;
+} __packed;
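+
+/* The info0..info8 words above are little-endian bitfields; an
+ * illustrative decode of the TID using the masks defined earlier,
+ * assuming a struct rx_mpdu_start_qcn9274 *mpdu_start:
+ *
+ * tid = le32_get_bits(mpdu_start->info2, RX_MPDU_START_INFO2_TID);
+ */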
+
+/* rx_mpdu_start
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall push
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * lmac_peer_id_msb
+ *
+ * If use_flow_id_toeplitz_clfy is set and lmac_peer_id_msb
+ * is 2'b00, Rx OLE uses a REO destination indication of {1'b1,
+ * hash[3:0]} using the chosen Toeplitz hash from Common Parser
+ * if flow search fails.
+ * If use_flow_id_toeplitz_clfy is set and lmac_peer_id_msb
+ * is not 2'b00, Rx OLE uses a REO destination indication of
+ * {lmac_peer_id_msb, hash[2:0]} using the chosen Toeplitz
+ * hash from Common Parser if flow search fails.
+ *
+ * use_flow_id_toeplitz_clfy
+ * Indication to Rx OLE to enable REO destination routing based
+ * on the chosen Toeplitz hash from Common Parser, in case
+ * flow search fails
+ *
+ * pkt_selection_fp_ucast_data
+ * Filter pass Unicast data frame (matching rxpcu_filter_pass
+ * and sw_frame_group_Unicast_data) routing selection
+ *
+ * pkt_selection_fp_mcast_data
+ * Filter pass Multicast data frame (matching rxpcu_filter_pass
+ * and sw_frame_group_Multicast_data) routing selection
+ *
+ * pkt_selection_fp_ctrl_bar
+ * Filter pass BAR frame (matching rxpcu_filter_pass
+ * and sw_frame_group_ctrl_1000) routing selection
+ *
+ * rxdma0_src_ring_selection
+ * Field only valid when the corresponding pkt_selection_fp_...
+ * bit is set for the received frame type
+ *
+ * rxdma0_dst_ring_selection
+ * Field only valid when the corresponding pkt_selection_fp_...
+ * bit is set for the received frame type
+ *
+ * mcast_echo_drop_enable
+ * If set, for multicast packets, multicast echo check (i.e.
+ * SA search with mcast_echo_check = 1) shall be performed
+ * by RXOLE, and any multicast echo packets should be indicated
+ * to RXDMA for release to WBM
+ *
+ * wds_learning_detect_en
+ * If set, WDS learning detection based on SA search and notification
+ * to FW (using RXDMA0 status ring) is enabled and the "timestamp"
+ * field in address search failure cache-only entry should
+ * be used to avoid multiple WDS learning notifications.
+ *
+ * intrabss_check_en
+ * If set, intra-BSS routing detection is enabled
+ *
+ * use_ppe
+ * Indicates to RXDMA to ignore the REO_destination_indication
+ * and use a programmed value corresponding to the REO2PPE
+ * ring
+ * This override to REO2PPE for packets requiring multiple
+ * buffers shall be disabled based on an RXDMA configuration,
+ * as PPE may not support such packets.
+ *
+ * Supported only in full AP chips, not in client/soft
+ * chips
+ *
+ * ppe_routing_enable
+ * Global enable/disable bit for routing to PPE, used to disable
+ * PPE routing even if RXOLE CCE or flow search indicate 'Use_PPE'
+ * This is set by SW for peers which are being handled by a
+ * host SW/accelerator subsystem that also handles packet
+ * buffer management for WiFi-to-PPE routing.
+ *
+ * This is cleared by SW for peers which are being handled
+ * by a different subsystem, completely disabling WiFi-to-PPE
+ * routing for such peers.
+ *
+ * rx_reo_queue_desc_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * rx_reo_queue_desc_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link
+ * descriptor belongs.
+ *
+ * pre_delim_err_warning
+ * Indicates that a delimiter FCS error was found in between the
+ * previous MPDU and this MPDU. Note that this is just a warning,
+ * and does not mean that this MPDU is corrupted in any way. If
+ * it is, there will be other errors indicated such as FCS or
+ * decrypt errors.
+ *
+ * first_delim_err
+ * Indicates that the first delimiter had a FCS failure.
+ *
+ * pn
+ * The PN number.
+ *
+ * epd_en
+ * Field only valid when AST_based_lookup_valid == 1.
+ * In case of ndp or phy_err or AST_based_lookup_valid == 0,
+ * this field will be set to 0.
+ * If set to one, use EPD instead of LPD.
+ * In case of ndp or phy_err, this field will never be set.
+ *
+ * all_frames_shall_be_encrypted
+ * In case of ndp or phy_err or AST_based_lookup_valid == 0,
+ * this field will be set to 0
+ *
+ * When set, all frames (data only ?) shall be encrypted. If
+ * not, RX CRYPTO shall set an error flag.
+ *
+ *
+ * encrypt_type
+ * In case of ndp or phy_err or AST_based_lookup_valid == 0,
+ * this field will be set to 0
+ *
+ * Indicates type of decrypt cipher used (as defined in the
+ * peer entry)
+ *
+ * wep_key_width_for_variable_key
+ *
+ * Field only valid when key_type is set to wep_varied_width.
+ *
+ * mesh_sta
+ *
+ * bssid_hit
+ * When set, the BSSID of the incoming frame matched one of
+ * the 8 BSSID register values
+ * bssid_number
+ * Field only valid when bssid_hit is set.
+ * This number indicates which one out of the 8 BSSID register
+ * values matched the incoming frame
+ *
+ * tid
+ * Field only valid when mpdu_qos_control_valid is set
+ * The TID field in the QoS control field
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * ndp_frame
+ * When set, the received frame was an NDP frame, and thus
+ * there will be no MPDU data.
+ * phy_err
+ * When set, a PHY error was received before MAC received any
+ * data, and thus there will be no MPDU data.
+ *
+ * phy_err_during_mpdu_header
+ * When set, a PHY error was received before MAC received the
+ * complete MPDU header which was needed for proper decoding
+ *
+ * protocol_version_err
+ * Set when RXPCU detected a version error in the Frame control
+ * field
+ *
+ * ast_based_lookup_valid
+ * When set, AST based lookup for this frame has found a valid
+ * result.
+ *
+ * ranging
+ * When set, a ranging NDPA or a ranging NDP was received.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ast_index
+ *
+ * This field indicates the index of the AST entry corresponding
+ * to this MPDU. It is provided by the GSE module instantiated
+ * in RXPCU.
+ * A value of 0xFFFF indicates an invalid AST index, meaning
+ * that no AST entry was found or no AST search was performed.
+ *
+ * sw_peer_id
+ * In case of ndp or phy_err or AST_based_lookup_valid == 0,
+ * this field will be set to 0
+ * This field indicates a unique peer identifier. It is set
+ * equal to field 'sw_peer_id' from the AST entry
+ *
+ * frame_control_valid
+ * When set, the field Mpdu_Frame_control_field has valid information
+ *
+ * frame_duration_valid
+ * When set, the field Mpdu_duration_field has valid information
+ *
+ * mac_addr_ad1..4_valid
+ * When set, the fields mac_addr_adx_..... have valid information
+ *
+ * mpdu_seq_ctrl_valid
+ *
+ * When set, the fields mpdu_sequence_control_field and mpdu_sequence_number
+ * have valid information.
+ * For MPDUs without a sequence control field, this field will
+ * not be set.
+ *
+ * mpdu_qos_ctrl_valid, mpdu_ht_ctrl_valid
+ *
+ * When set, the fields mpdu_qos_control_field and mpdu_ht_control have
+ * valid information. For MPDUs without a QoS/HT control field, this
+ * field will not be set.
+ *
+ * frame_encryption_info_valid
+ *
+ * When set, the encryption related info fields, like IV and
+ * PN are valid
+ * For MPDUs that are not encrypted, this will not be set.
+ *
+ * mpdu_fragment_number
+ * Field only valid when Mpdu_sequence_control_valid is set
+ * AND Fragment_flag is set. The fragment number from the 802.11 header.
+ *
+ * more_fragment_flag
+ * The More Fragment bit setting from the MPDU header of the
+ * received frame.
+ *
+ * fr_ds
+ * Field only valid when Mpdu_frame_control_valid is set.
+ * Set if the from DS bit is set in the frame control.
+ *
+ * to_ds
+ * Field only valid when Mpdu_frame_control_valid is set.
+ * Set if the to DS bit is set in the frame control.
+ *
+ * encrypted
+ * Field only valid when Mpdu_frame_control_valid is set.
+ * Protected bit from the frame control.
+ *
+ * mpdu_retry
+ * Field only valid when Mpdu_frame_control_valid is set.
+ * Retry bit from the frame control. Only valid when first_msdu is set.
+ *
+ * mpdu_sequence_number
+ * Field only valid when Mpdu_sequence_control_valid is set.
+ * The sequence number from the 802.11 header.
+ *
+ * key_id
+ * Field only valid when Frame_encryption_info_valid is set.
+ * The key ID octet from the IV.
+ *
+ * new_peer_entry
+ * Set if a new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY
+ * doesn't follow, so the RX DECRYPTION module either uses the old
+ * peer entry or does not decrypt.
+ *
+ * decrypt_needed
+ * When RXPCU sets bit 'ast_index_not_found' or 'ast_index_timeout',
+ * RXPCU will also ensure that this bit is NOT set. CRYPTO for that
+ * reason only needs to evaluate this bit and none of the other ones.
+ *
+ * decap_type
+ * Used by the OLE during decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * rx_insert_vlan_c_tag_padding
+ * rx_insert_vlan_s_tag_padding
+ * Insert 4 byte of all zeros as VLAN tag or double VLAN tag if
+ * the rx payload does not have VLAN.
+ *
+ * strip_vlan_c_tag_decap
+ * strip_vlan_s_tag_decap
+ * Strip VLAN or double VLAN during decapsulation.
+ *
+ * pre_delim_count
+ * The number of delimiters before this MPDU. Note that this
+ * number is cleared at PPDU start. If this MPDU is the first
+ * received MPDU in the PPDU and this MPDU gets filtered-in,
+ * this field will indicate the number of delimiters located
+ * after the last MPDU in the previous PPDU.
+ *
+ * If this MPDU is located after the first received MPDU in
+ * an PPDU, this field will indicate the number of delimiters
+ * located between the previous MPDU and this MPDU.
+ *
+ * ampdu_flag
+ * Received frame was part of an A-MPDU.
+ *
+ * bar_frame
+ * Received frame is a BAR frame
+ *
+ * raw_mpdu
+ * Set when no 802.11 to nwifi/ethernet hdr conversion is done
+ *
+ * mpdu_length
+ * MPDU length before decapsulation.
+ *
+ * first_mpdu
+ * Indicates the first MPDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is not an
+ * A-MPDU frame but a stand-alone MPDU. Interior MPDUs in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ * entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if the packet is a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * fragment_flag
+ * Fragment indication
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only
+ * set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ * U-APSD trigger frame
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit in the address search entry cleared.
+ *
+ * amsdu_present
+ * AMSDU present
+ *
+ * mpdu_frame_control_field
+ * Frame control field in header. Only valid when the field is marked valid.
+ *
+ * mpdu_duration_field
+ * Duration field in header. Only valid when the field is marked valid.
+ *
+ * mac_addr_adx
+ * MAC addresses in the received frame. Only valid when corresponding
+ * address valid bit is set
+ *
+ * mpdu_qos_control_field, mpdu_ht_control_field
+ * QoS/HT control fields from header. Valid only when corresponding fields
+ * are marked valid
+ *
+ * vdev_id
+ * Virtual device associated with this peer
+ * RXOLE uses this to determine intra-BSS routing.
+ *
+ * service_code
+ * Opaque service code between PPE and Wi-Fi
+ * This field gets passed on by REO to PPE in the EDMA descriptor
+ * ('REO_TO_PPE_RING').
+ *
+ * priority_valid
+ * This field gets passed on by REO to PPE in the EDMA descriptor
+ * ('REO_TO_PPE_RING').
+ *
+ * src_info
+ * Source (virtual) device/interface info associated with
+ * this peer.
+ * This field gets passed on by REO to PPE in the EDMA descriptor
+ * ('REO_TO_PPE_RING').
+ *
+ * multi_link_addr_ad1_ad2_valid
+ * If set, Rx OLE shall convert Address1 and Address2 of received
+ * data frames to multi-link addresses during decapsulation to eth/nwifi
+ *
+ * multi_link_addr_ad1,ad2
+ * Multi-link receiver address1,2. Only valid when corresponding
+ * valid bit is set
+ *
+ * authorize_to_send_wds
+ * If not set, RXDMA shall perform error-routing for WDS packets
+ * as the sender is not authorized and might misuse WDS frame
+ * format to inject packets with arbitrary DA/SA.
+ *
+ */
+
+enum rx_msdu_start_pkt_type {
+ RX_MSDU_START_PKT_TYPE_11A,
+ RX_MSDU_START_PKT_TYPE_11B,
+ RX_MSDU_START_PKT_TYPE_11N,
+ RX_MSDU_START_PKT_TYPE_11AC,
+ RX_MSDU_START_PKT_TYPE_11AX,
+};
+
+enum rx_msdu_start_sgi {
+ RX_MSDU_START_SGI_0_8_US,
+ RX_MSDU_START_SGI_0_4_US,
+ RX_MSDU_START_SGI_1_6_US,
+ RX_MSDU_START_SGI_3_2_US,
+};
+
+enum rx_msdu_start_recv_bw {
+ RX_MSDU_START_RECV_BW_20MHZ,
+ RX_MSDU_START_RECV_BW_40MHZ,
+ RX_MSDU_START_RECV_BW_80MHZ,
+ RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+enum rx_msdu_start_reception_type {
+ RX_MSDU_START_RECEPTION_TYPE_SU,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+#define RX_MSDU_END_INFO1_REPORTED_MPDU_LENGTH GENMASK(13, 0)
+
+#define RX_MSDU_END_INFO2_CCE_SUPER_RULE GENMASK(13, 8)
+#define RX_MSDU_END_INFO2_CCND_TRUNCATE BIT(14)
+#define RX_MSDU_END_INFO2_CCND_CCE_DIS BIT(15)
+
+#define RX_MSDU_END_INFO3_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO3_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO3_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO3_SA_OFFSET_VALID BIT(13)
+
+#define RX_MSDU_END_INFO4_TCP_FLAG GENMASK(8, 0)
+#define RX_MSDU_END_INFO4_LRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO5_SA_IDX_TIMEOUT BIT(0)
+#define RX_MSDU_END_INFO5_DA_IDX_TIMEOUT BIT(1)
+#define RX_MSDU_END_INFO5_TO_DS BIT(2)
+#define RX_MSDU_END_INFO5_TID GENMASK(6, 3)
+#define RX_MSDU_END_INFO5_SA_IS_VALID BIT(7)
+#define RX_MSDU_END_INFO5_DA_IS_VALID BIT(8)
+#define RX_MSDU_END_INFO5_DA_IS_MCBC BIT(9)
+#define RX_MSDU_END_INFO5_L3_HDR_PADDING GENMASK(11, 10)
+#define RX_MSDU_END_INFO5_FIRST_MSDU BIT(12)
+#define RX_MSDU_END_INFO5_LAST_MSDU BIT(13)
+#define RX_MSDU_END_INFO5_FROM_DS BIT(14)
+#define RX_MSDU_END_INFO5_IP_CHKSUM_FAIL_COPY BIT(15)
+
+#define RX_MSDU_END_INFO6_MSDU_DROP BIT(0)
+#define RX_MSDU_END_INFO6_REO_DEST_IND GENMASK(5, 1)
+#define RX_MSDU_END_INFO6_FLOW_IDX GENMASK(25, 6)
+#define RX_MSDU_END_INFO6_USE_PPE BIT(26)
+#define RX_MSDU_END_INFO6_MESH_STA GENMASK(28, 27)
+#define RX_MSDU_END_INFO6_VLAN_CTAG_STRIPPED BIT(29)
+#define RX_MSDU_END_INFO6_VLAN_STAG_STRIPPED BIT(30)
+#define RX_MSDU_END_INFO6_FRAGMENT_FLAG BIT(31)
+
+#define RX_MSDU_END_INFO7_AGGR_COUNT GENMASK(7, 0)
+#define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8)
+#define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9)
+#define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10)
+#define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11)
+#define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12)
+#define RX_MSDU_END_INFO7_FLOW_IDX_INVALID BIT(13)
+#define RX_MSDU_END_INFO7_CCE_MATCH BIT(14)
+#define RX_MSDU_END_INFO7_AMSDU_PARSER_ERR BIT(15)
+
+#define RX_MSDU_END_INFO8_KEY_ID GENMASK(7, 0)
+
+#define RX_MSDU_END_INFO9_SERVICE_CODE GENMASK(14, 6)
+#define RX_MSDU_END_INFO9_PRIORITY_VALID BIT(15)
+#define RX_MSDU_END_INFO9_INRA_BSS BIT(16)
+#define RX_MSDU_END_INFO9_DEST_CHIP_ID GENMASK(18, 17)
+#define RX_MSDU_END_INFO9_MCAST_ECHO BIT(19)
+#define RX_MSDU_END_INFO9_WDS_LEARN_EVENT BIT(20)
+#define RX_MSDU_END_INFO9_WDS_ROAM_EVENT BIT(21)
+#define RX_MSDU_END_INFO9_WDS_KEEP_ALIVE_EVENT BIT(22)
+
+#define RX_MSDU_END_INFO10_MSDU_LENGTH GENMASK(13, 0)
+#define RX_MSDU_END_INFO10_STBC BIT(14)
+#define RX_MSDU_END_INFO10_IPSEC_ESP BIT(15)
+#define RX_MSDU_END_INFO10_L3_OFFSET GENMASK(22, 16)
+#define RX_MSDU_END_INFO10_IPSEC_AH BIT(23)
+#define RX_MSDU_END_INFO10_L4_OFFSET GENMASK(31, 24)
+
+#define RX_MSDU_END_INFO11_MSDU_NUMBER GENMASK(7, 0)
+#define RX_MSDU_END_INFO11_DECAP_FORMAT GENMASK(9, 8)
+#define RX_MSDU_END_INFO11_IPV4 BIT(10)
+#define RX_MSDU_END_INFO11_IPV6 BIT(11)
+#define RX_MSDU_END_INFO11_TCP BIT(12)
+#define RX_MSDU_END_INFO11_UDP BIT(13)
+#define RX_MSDU_END_INFO11_IP_FRAG BIT(14)
+#define RX_MSDU_END_INFO11_TCP_ONLY_ACK BIT(15)
+#define RX_MSDU_END_INFO11_DA_IS_BCAST_MCAST BIT(16)
+#define RX_MSDU_END_INFO11_SEL_TOEPLITZ_HASH GENMASK(18, 17)
+#define RX_MSDU_END_INFO11_IP_FIXED_HDR_VALID BIT(19)
+#define RX_MSDU_END_INFO11_IP_EXTN_HDR_VALID BIT(20)
+#define RX_MSDU_END_INFO11_IP_TCP_UDP_HDR_VALID BIT(21)
+#define RX_MSDU_END_INFO11_MESH_CTRL_PRESENT BIT(22)
+#define RX_MSDU_END_INFO11_LDPC BIT(23)
+#define RX_MSDU_END_INFO11_IP4_IP6_NXT_HDR GENMASK(31, 24)
+
+#define RX_MSDU_END_INFO12_USER_RSSI GENMASK(7, 0)
+#define RX_MSDU_END_INFO12_PKT_TYPE GENMASK(11, 8)
+#define RX_MSDU_END_INFO12_SGI GENMASK(13, 12)
+#define RX_MSDU_END_INFO12_RATE_MCS GENMASK(17, 14)
+#define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18)
+#define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21)
+#define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24)
+#define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31)
+
+#define RX_MSDU_END_INFO13_FIRST_MPDU BIT(0)
+#define RX_MSDU_END_INFO13_MCAST_BCAST BIT(2)
+#define RX_MSDU_END_INFO13_AST_IDX_NOT_FOUND BIT(3)
+#define RX_MSDU_END_INFO13_AST_IDX_TIMEDOUT BIT(4)
+#define RX_MSDU_END_INFO13_POWER_MGMT BIT(5)
+#define RX_MSDU_END_INFO13_NON_QOS BIT(6)
+#define RX_MSDU_END_INFO13_NULL_DATA BIT(7)
+#define RX_MSDU_END_INFO13_MGMT_TYPE BIT(8)
+#define RX_MSDU_END_INFO13_CTRL_TYPE BIT(9)
+#define RX_MSDU_END_INFO13_MORE_DATA BIT(10)
+#define RX_MSDU_END_INFO13_EOSP BIT(11)
+#define RX_MSDU_END_INFO13_A_MSDU_ERROR BIT(12)
+#define RX_MSDU_END_INFO13_ORDER BIT(14)
+#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15)
+#define RX_MSDU_END_INFO13_OVERFLOW_ERR BIT(16)
+#define RX_MSDU_END_INFO13_MSDU_LEN_ERR BIT(17)
+#define RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL BIT(18)
+#define RX_MSDU_END_INFO13_IP_CKSUM_FAIL BIT(19)
+#define RX_MSDU_END_INFO13_SA_IDX_INVALID BIT(20)
+#define RX_MSDU_END_INFO13_DA_IDX_INVALID BIT(21)
+#define RX_MSDU_END_INFO13_AMSDU_ADDR_MISMATCH BIT(22)
+#define RX_MSDU_END_INFO13_RX_IN_TX_DECRYPT_BYP BIT(23)
+#define RX_MSDU_END_INFO13_ENCRYPT_REQUIRED BIT(24)
+#define RX_MSDU_END_INFO13_DIRECTED BIT(25)
+#define RX_MSDU_END_INFO13_BUFFER_FRAGMENT BIT(26)
+#define RX_MSDU_END_INFO13_MPDU_LEN_ERR BIT(27)
+#define RX_MSDU_END_INFO13_TKIP_MIC_ERR BIT(28)
+#define RX_MSDU_END_INFO13_DECRYPT_ERR BIT(29)
+#define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30)
+#define RX_MSDU_END_INFO13_FCS_ERR BIT(31)
+
+#define RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE GENMASK(12, 10)
+#define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED BIT(13)
+#define RX_MSDU_END_INFO14_MSDU_DONE BIT(31)
+
+struct rx_msdu_end_qcn9274 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 info1;
+ __le16 info2;
+ __le16 cumulative_l3_checksum;
+ __le32 rule_indication0;
+ __le32 ipv6_options_crc;
+ __le16 info3;
+ __le16 l3_type;
+ __le32 rule_indication1;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info4;
+ __le16 window_size;
+ __le16 sa_sw_peer_id;
+ __le16 info5;
+ __le16 sa_idx;
+ __le16 da_idx_or_sw_peer_id;
+ __le32 info6;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 tcp_udp_cksum;
+ __le16 info7;
+ __le16 cumulative_ip_length;
+ __le32 info8;
+ __le32 info9;
+ __le32 info10;
+ __le32 info11;
+ __le16 vlan_ctag_ci;
+ __le16 vlan_stag_ci;
+ __le32 peer_meta_data;
+ __le32 info12;
+ __le32 flow_id_toeplitz;
+ __le32 ppdu_start_timestamp_63_32;
+ __le32 phy_meta_data;
+ __le32 ppdu_start_timestamp_31_0;
+ __le32 toeplitz_hash_2_or_4;
+ __le16 res0;
+ __le16 sa_15_0;
+ __le32 sa_47_16;
+ __le32 info13;
+ __le32 info14;
+} __packed;
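+
+/* Illustrative sketch, not part of this patch: the info words above are
+ * little-endian, so a consumer would typically extract fields with the
+ * bitfield helpers from <linux/bitfield.h>, e.g. for a hypothetical
+ * descriptor pointer 'desc':
+ *
+ *   u8 tid = le16_get_bits(desc->info5, RX_MSDU_END_INFO5_TID);
+ *   u32 msdu_len = le32_get_bits(desc->info10,
+ *                                RX_MSDU_END_INFO10_MSDU_LENGTH);
+ */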
+
+/* rx_msdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ip_hdr_cksum
+ * This can include the IP header checksum or the pseudo
+ * header checksum used by TCP/UDP checksum.
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when first_msdu is
+ * set. This field is taken directly from the length field of the
+ * A-MPDU delimiter or the preamble length field for non-A-MPDU
+ * frames.
+ *
+ * cce_super_rule
+ * Indicates the super filter rule.
+ *
+ * cce_classify_not_done_truncate
+ * Classification failed due to truncated frame.
+ *
+ * cce_classify_not_done_cce_dis
+ * Classification failed due to CCE global disable
+ *
+ * cumulative_l3_checksum
+ * FISA: IP header checksum including the total MSDU length
+ * that is part of this flow aggregated so far, reported if
+ * 'RXOLE_R0_FISA_CTRL.CHKSUM_CUM_IP_LEN_EN' is set.
+ *
+ * rule_indication
+ * Bitmap indicating which of the rules have matched.
+ *
+ * ipv6_options_crc
+ * 32-bit CRC computed over the IPv6 extension headers.
+ *
+ * da_offset
+ * Offset into MSDU buffer for DA.
+ *
+ * sa_offset
+ * Offset into MSDU buffer for SA.
+ *
+ * da_offset_valid
+ * da_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when DA is compressed.
+ *
+ * sa_offset_valid
+ * sa_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when SA is compressed.
+ *
+ * l3_type
+ * The 16-bit type value indicating the type of the L3 layer,
+ * extracted from LLC/SNAP, set to zero if SNAP is not
+ * available.
+ *
+ * tcp_seq_number
+ * TCP sequence number.
+ *
+ * tcp_ack_number
+ * TCP acknowledge number.
+ *
+ * tcp_flag
+ * TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}.
+ *
+ * lro_eligible
+ * Computed out of TCP and IP fields to indicate that this
+ * MSDU is eligible for LRO.
+ *
+ * window_size
+ * TCP receive window size.
+ *
+ * sa_sw_peer_id
+ * sw_peer_id from the address search entry corresponding to the
+ * source address of the MSDU.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to the
+ * expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due to
+ * the expiring of the search timer.
+ *
+ * to_ds
+ * Set if the to DS bit is set in the frame control.
+ *
+ * tid
+ * TID field in the QoS control field
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ * Field only valid if da_is_valid is set. Indicates the DA address
+ * was a multicast or broadcast address.
+ *
+ * l3_header_padding
+ * Number of bytes padded to make sure that the L3 header will
+ * always start on a Dword boundary.
+ *
+ * first_msdu
+ * Indicates the first MSDU of an A-MSDU. If both first_msdu and
+ * last_msdu are set in the MSDU then this is a non-aggregated MSDU
+ * frame: a normal MPDU. Interior MSDUs in an A-MSDU shall have both
+ * first_msdu and last_msdu bits set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is only
+ * valid when last_msdu is set.
+ *
+ * fr_ds
+ * Set if the from DS bit is set in the frame control.
+ *
+ * ip_chksum_fail_copy
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx
+ * The offset in the address table which matches the MAC source
+ * address.
+ *
+ * da_idx_or_sw_peer_id
+ * Based on a register configuration in RXOLE, this field will
+ * contain:
+ * The offset in the address table which matches the MAC destination
+ * address
+ * OR:
+ * sw_peer_id from the address search entry corresponding to
+ * the destination address of the MSDU
+ *
+ * msdu_drop
+ * REO shall drop this MSDU and not forward it to any other ring.
+ *
+ * reo_destination_indication
+ * The ID of the REO exit ring where the MSDU frame shall be pushed
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * flow_idx
+ * Flow table index.
+ *
+ * use_ppe
+ * Indicates to RXDMA to ignore the REO_destination_indication
+ * and use a programmed value corresponding to the REO2PPE
+ * ring
+ *
+ * mesh_sta
+ * When set, this is a Mesh (11s) STA.
+ *
+ * vlan_ctag_stripped
+ * Set by RXOLE if it stripped 4-bytes of C-VLAN Tag from the
+ * packet
+ *
+ * vlan_stag_stripped
+ * Set by RXOLE if it stripped 4-bytes of S-VLAN Tag from the
+ * packet
+ *
+ * fragment_flag
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame control
+ * or the fragment number is not zero. Only set when first_msdu
+ * is set.
+ *
+ * fse_metadata
+ * FSE related meta data.
+ *
+ * cce_metadata
+ * CCE related meta data.
+ *
+ * tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * aggregation_count
+ * Number of MSDUs aggregated so far.
+ *
+ * flow_aggregation_continuation
+ * To indicate that this MSDU can be aggregated with
+ * the previous packet with the same flow id
+ *
+ * fisa_timeout
+ * To indicate that the aggregation has restarted for
+ * this flow due to timeout
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus all the
+ * rest of the MSDUs will not be scattered and will not be
+ * decapsulated but will be DMA'ed in RAW format as a single MSDU.
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * cce_match
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * amsdu_parser_error
+ * A-MSDU could not be properly de-aggregated.
+ *
+ * cumulative_ip_length
+ * Total MSDU length that is part of this flow aggregated
+ * so far
+ *
+ * key_id
+ * The key ID octet from the IV. Only valid when first_msdu is set.
+ *
+ * service_code
+ * Opaque service code between PPE and Wi-Fi
+ *
+ * priority_valid
+ * This field gets passed on by REO to PPE in the EDMA descriptor
+ *
+ * intra_bss
+ * This packet needs intra-BSS routing by SW as the 'vdev_id'
+ * for the destination is the same as the 'vdev_id' (from
+ * 'RX_MPDU_PCU_START') on which this MSDU was received.
+ *
+ * dest_chip_id
+ * If intra_bss is set, copied by RXOLE from 'ADDR_SEARCH_ENTRY'
+ * to support intra-BSS routing with multi-chip multi-link
+ * operation. This indicates into which chip's TCL the packet
+ * should be queued.
+ *
+ * multicast_echo
+ * If set, this packet is a multicast echo, i.e. the DA is
+ * multicast and Rx OLE SA search with mcast_echo_check = 1
+ * passed. RXDMA should release such packets to WBM.
+ *
+ * wds_learning_event
+ * If set, this packet has an SA search failure with WDS learning
+ * enabled for the peer. RXOLE should route this TLV to the
+ * RXDMA0 status ring to notify FW.
+ *
+ * wds_roaming_event
+ * If set, this packet's SA 'Sw_peer_id' mismatches the 'Sw_peer_id'
+ * of the peer through which the packet was received, indicating
+ * the SA node has roamed. RXOLE should route this TLV to
+ * the RXDMA0 status ring to notify FW.
+ *
+ * wds_keep_alive_event
+ * If set, the AST timestamp for this packet's SA is older
+ * than the current timestamp by more than a threshold programmed
+ * in RXOLE. RXOLE should route this TLV to the RXDMA0 status
+ * ring to notify FW to keep the AST entry for the SA alive.
+ *
+ * msdu_length
+ * MSDU length in bytes after decapsulation.
+ * This field is still valid for MPDU frames without an A-MSDU.
+ * It still represents the MSDU length after decapsulation.
+ *
+ * stbc
+ * When set, use STBC transmission rates.
+ *
+ * ipsec_esp
+ * Set if IPv4/v6 packet is using IPsec ESP.
+ *
+ * l3_offset
+ * Depending upon mode bit, this field either indicates the
+ * L3 offset in bytes from the start of the RX_HEADER or the IP
+ * offset in bytes from the start of the packet after
+ * decapsulation. The latter is only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ipsec_ah
+ * Set if IPv4/v6 packet is using IPsec AH
+ *
+ * l4_offset
+ * Depending upon the mode bit, this field either indicates the
+ * L4 offset in bytes from the start of RX_HEADER (only valid
+ * if either ipv4_proto or ipv6_proto is set to 1) or indicates
+ * the offset in bytes to the start of the TCP or UDP header from
+ * the start of the IP header after decapsulation (only valid if
+ * tcp_proto or udp_proto is set). The value 0 indicates that
+ * the offset is longer than 127 bytes.
+ *
+ * msdu_number
+ * Indicates the MSDU number within an MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ * MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_type
+ * Indicates the format after decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set indicates that this is a fragmented
+ * IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * da_is_bcast_mcast
+ * The destination address is broadcast or multicast.
+ *
+ * toeplitz_hash
+ * Actual chosen Hash.
+ * 0 - Toeplitz hash of 2-tuple (IP source address, IP
+ * destination address)
+ * 1 - Toeplitz hash of 4-tuple (IP source address,
+ * IP destination address, L4 (TCP/UDP) source port,
+ * L4 (TCP/UDP) destination port)
+ * 2 - Toeplitz of flow_id
+ * 3 - Zero is used
+ *
+ * ip_fixed_header_valid
+ * Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed
+ * fully within first 256 bytes of the packet
+ *
+ * ip_extn_header_valid
+ * IPv4/IPv6 header, including IPv4 options and
+ * recognizable extension headers, parsed fully within the first
+ * 256 bytes of the packet.
+ *
+ * tcp_udp_header_valid
+ * Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP
+ * header parsed fully within first 256 bytes of the packet
+ *
+ * mesh_control_present
+ * When set, this MSDU includes the 'Mesh Control' field
+ *
+ * ldpc
+ * When set, the frame was received using LDPC coding.
+ *
+ * ip4_protocol_ip6_next_header
+ * For IPv4, this is the 8-bit protocol field. For IPv6 this
+ * is the 8-bit next_header field.
+ *
+ * vlan_ctag_ci
+ * 2 bytes of C-VLAN Tag Control Information from WHO_L2_LLC
+ *
+ * vlan_stag_ci
+ * 2 bytes of S-VLAN Tag Control Information from WHO_L2_LLC
+ * in case of double VLAN
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * user_rssi
+ * RSSI for this user
+ *
+ * pkt_type
+ * Values are defined in enum %RX_MSDU_START_PKT_TYPE_*.
+ *
+ * sgi
+ * Field only valid when pkt type is HT, VHT or HE. Values are
+ * defined in enum %RX_MSDU_START_SGI_*.
+ *
+ * rate_mcs
+ * MCS Rate used.
+ *
+ * receive_bandwidth
+ * Full receive bandwidth. Values are defined in enum
+ * %RX_MSDU_START_RECV_BW_*.
+ *
+ * reception_type
+ * Indicates what type of reception this is. Values are defined
+ * in enum %RX_MSDU_START_RECEPTION_TYPE_*.
+ *
+ * mimo_ss_bitmap
+ * Field only valid when
+ * Reception_type is RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or
+ * RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO.
+ *
+ * Bitmap, with each bit indicating if the related spatial
+ * stream is used for this STA
+ *
+ * LSB related to SS 0
+ *
+ * 0 - spatial stream not used for this reception
+ * 1 - spatial stream used for this reception
+ *
+ * msdu_done_copy
+ * If set indicates that the RX packet data, RX header data,
+ * RX PPDU start descriptor, RX MPDU start/end descriptor,
+ * RX MSDU start/end descriptors and RX Attention descriptor
+ * are all valid. This bit is in the last 64 bits of the
+ * descriptor, expected to be subscribed in future hardware.
+ *
+ * flow_id_toeplitz
+ * Toeplitz hash of 5-tuple
+ * {IP source address, IP destination address, IP source port, IP
+ * destination port, L4 protocol} in case of non-IPSec.
+ *
+ * In case of IPSec - Toeplitz hash of 4-tuple
+ * {IP source address, IP destination address, SPI, L4 protocol}
+ *
+ * The relevant Toeplitz key registers are provided in RxOLE's
+ * instance of common parser module. These registers are separate
+ * from the Toeplitz keys used by ASE/FSE modules inside RxOLE.
+ * The actual value will be passed on from common parser module
+ * to RxOLE in one of the WHO_* TLVs.
+ *
+ * ppdu_start_timestamp
+ * Timestamp that indicates when the PPDU that contained this MPDU
+ * started on the medium.
+ *
+ * phy_meta_data
+ * SW programmed Meta data provided by the PHY. Can be used for SW
+ * to indicate the channel the device is on.
+ *
+ * toeplitz_hash_2_or_4
+ * Controlled by multiple RxOLE registers for TCP/UDP over
+ * IPv4/IPv6 - Either, Toeplitz hash computed over 2-tuple
+ * IPv4 or IPv6 src/dest addresses is reported; or, Toeplitz
+ * hash computed over 4-tuple IPv4 or IPv6 src/dest addresses
+ * and src/dest ports is reported. The Flow_id_toeplitz hash
+ * can also be reported here. Usually the hash reported here
+ * is the one used for hash-based REO routing (see use_flow_id_toeplitz_clfy
+ * in 'RXPT_CLASSIFY_INFO').
+ *
+ * sa
+ * Source MAC address
+ *
+ * first_mpdu
+ * Indicates the first MPDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is not an
+ * A-MPDU frame but a stand-alone MPDU. Interior MPDUs in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ * entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if the packet is a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * a_msdu_error
+ * Set if the number of MSDUs in the A-MSDU is above a threshold or
+ * if the size of an MSDU is invalid. This receive buffer will
+ * contain all of the remaining MSDUs in this MPDU w/o decapsulation.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only
+ * set when first_msdu is set.
+ *
+ * wifi_parser_error
+ * Indicates that the WiFi frame has a parser error.
+ *
+ * overflow_err
+ * RXPCU Receive FIFO ran out of space to receive the full MPDU.
+ * Therefore this MPDU is terminated early and is thus corrupted.
+ *
+ * This MPDU will not be ACKed.
+ *
+ * RXPCU might still be able to correctly receive the following
+ * MPDUs in the PPDU if enough fifo space became available in time.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extracted from the LLC/SNAP does not match the
+ * MSDU length.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * amsdu_addr_mismatch
+ * Indicates that an A-MSDU with 'from DS = 0' had an SA mismatching
+ * TA, or an A-MSDU with 'to DS = 0' had a DA mismatching RA.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is busy
+ * with TX packet processing.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was pre-maturely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values
+ * are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ *
+ */
+
+/* TODO: Move to the compact TLV approach.
+ * By default these TLVs are not aligned to a 128-bit boundary.
+ * Need to remove unused qwords and make them compact/aligned.
+ */
+struct hal_rx_desc_qcn9274 {
+ struct rx_msdu_end_qcn9274 msdu_end;
+ struct rx_mpdu_start_qcn9274 mpdu_start;
+ u8 msdu_payload[];
+} __packed;
+
+#define RX_BE_PADDING0_BYTES 8
+#define RX_BE_PADDING1_BYTES 8
+
+#define HAL_RX_BE_PKT_HDR_TLV_LEN 112
+
+struct rx_pkt_hdr_tlv {
+ __le64 tag;
+ __le64 phy_ppdu_id;
+ u8 rx_pkt_hdr[HAL_RX_BE_PKT_HDR_TLV_LEN];
+};
+
+struct hal_rx_desc_wcn7850 {
+ __le64 msdu_end_tag;
+ struct rx_msdu_end_qcn9274 msdu_end;
+ u8 rx_padding0[RX_BE_PADDING0_BYTES];
+ __le64 mpdu_start_tag;
+ struct rx_mpdu_start_qcn9274 mpdu_start;
+ struct rx_pkt_hdr_tlv pkt_hdr_tlv;
+ u8 msdu_payload[];
+};
+
+struct hal_rx_desc {
+ union {
+ struct hal_rx_desc_qcn9274 qcn9274;
+ struct hal_rx_desc_wcn7850 wcn7850;
+ } u;
+} __packed;
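+
+/* Sketch (hypothetical selection logic, for illustration only): callers
+ * are expected to pick the union variant matching the chip, for example
+ * keyed off a per-hw descriptor size:
+ *
+ *   if (hal_desc_sz == sizeof(struct hal_rx_desc_wcn7850))
+ *           msdu_end = &desc->u.wcn7850.msdu_end;
+ *   else
+ *           msdu_end = &desc->u.qcn9274.msdu_end;
+ */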
+
+#define MAX_USER_POS 8
+#define MAX_MU_GROUP_ID 64
+#define MAX_MU_GROUP_SHOW 16
+#define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
+
+#define HAL_RX_RU_ALLOC_TYPE_MAX 6
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+
+#endif /* ATH12K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/trace.c b/drivers/net/wireless/ath/ath12k/trace.c
new file mode 100644
index 000000000000..0d0edf4204b7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h
new file mode 100644
index 000000000000..f72096684b74
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/trace.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH12K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH12K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath12k
+
+TRACE_EVENT(ath12k_htt_pktlog,
+ TP_PROTO(struct ath12k *ar, const void *buf, u16 buf_len,
+ u32 pktlog_checksum),
+
+ TP_ARGS(ar, buf, buf_len, pktlog_checksum),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, buf_len)
+ __field(u32, pktlog_checksum)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->buf_len = buf_len;
+ __entry->pktlog_checksum = pktlog_checksum;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %u pktlog_checksum %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len,
+ __entry->pktlog_checksum
+ )
+);
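+
+/* A call site would look like (sketch):
+ *
+ *   trace_ath12k_htt_pktlog(ar, skb->data, skb->len, checksum);
+ *
+ * When CONFIG_ATH12K_TRACING is not set this compiles down to the empty
+ * inline stub generated by the TRACE_EVENT redefinition above.
+ */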
+
+TRACE_EVENT(ath12k_htt_ppdu_stats,
+ TP_PROTO(struct ath12k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __field(u32, info)
+ __field(u32, sync_tstmp_lo_us)
+ __field(u32, sync_tstmp_hi_us)
+ __field(u32, mlo_offset_lo)
+ __field(u32, mlo_offset_hi)
+ __field(u32, mlo_offset_clks)
+ __field(u32, mlo_comp_clks)
+ __field(u32, mlo_comp_timer)
+ __dynamic_array(u8, ppdu, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ __entry->info = ar->pdev->timestamp.info;
+ __entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_lo_us;
+ __entry->sync_tstmp_hi_us = ar->pdev->timestamp.sync_timestamp_hi_us;
+ __entry->mlo_offset_lo = ar->pdev->timestamp.mlo_offset_lo;
+ __entry->mlo_offset_hi = ar->pdev->timestamp.mlo_offset_hi;
+ __entry->mlo_offset_clks = ar->pdev->timestamp.mlo_offset_clks;
+ __entry->mlo_comp_clks = ar->pdev->timestamp.mlo_comp_clks;
+ __entry->mlo_comp_timer = ar->pdev->timestamp.mlo_comp_timer;
+ memcpy(__get_dynamic_array(ppdu), data, len);
+ ),
+
+ TP_printk(
+ "%s %s ppdu len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath12k_htt_rxdesc,
+ TP_PROTO(struct ath12k *ar, const void *data, size_t type, size_t len),
+
+ TP_ARGS(ar, data, type, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __field(u16, type)
+ __field(u32, info)
+ __field(u32, sync_tstmp_lo_us)
+ __field(u32, sync_tstmp_hi_us)
+ __field(u32, mlo_offset_lo)
+ __field(u32, mlo_offset_hi)
+ __field(u32, mlo_offset_clks)
+ __field(u32, mlo_comp_clks)
+ __field(u32, mlo_comp_timer)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ __entry->type = type;
+ __entry->info = ar->pdev->timestamp.info;
+ __entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_lo_us;
+ __entry->sync_tstmp_hi_us = ar->pdev->timestamp.sync_timestamp_hi_us;
+ __entry->mlo_offset_lo = ar->pdev->timestamp.mlo_offset_lo;
+ __entry->mlo_offset_hi = ar->pdev->timestamp.mlo_offset_hi;
+ __entry->mlo_offset_clks = ar->pdev->timestamp.mlo_offset_clks;
+ __entry->mlo_comp_clks = ar->pdev->timestamp.mlo_comp_clks;
+ __entry->mlo_comp_timer = ar->pdev->timestamp.mlo_comp_timer;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
new file mode 100644
index 000000000000..f6df14149531
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -0,0 +1,6600 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "peer.h"
+
+struct ath12k_wmi_svc_ready_parse {
+ bool wmi_svc_bitmap_done;
+};
+
+struct ath12k_wmi_dma_ring_caps_parse {
+ struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
+ u32 n_dma_ring_caps;
+};
+
+struct ath12k_wmi_service_ext_arg {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct ath12k_wmi_ppe_threshold_arg ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 num_hw_modes;
+ u32 num_phy;
+};
+
+struct ath12k_wmi_svc_rdy_ext_parse {
+ struct ath12k_wmi_service_ext_arg arg;
+ const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
+ const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
+ u32 n_hw_mode_caps;
+ u32 tot_phy_id;
+ struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
+ struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
+ u32 n_mac_phy_caps;
+ const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
+ const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
+ u32 n_ext_hal_reg_caps;
+ struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
+ bool hw_mode_done;
+ bool mac_phy_done;
+ bool ext_hal_reg_done;
+ bool mac_phy_chainmask_combo_done;
+ bool mac_phy_chainmask_cap_done;
+ bool oem_dma_ring_cap_done;
+ bool dma_ring_cap_done;
+};
+
+struct ath12k_wmi_svc_rdy_ext2_parse {
+ struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
+ bool dma_ring_cap_done;
+};
+
+struct ath12k_wmi_rdy_parse {
+ u32 num_extra_mac_addr;
+};
+
+struct ath12k_wmi_dma_buf_release_arg {
+ struct ath12k_wmi_dma_buf_release_fixed_params fixed;
+ const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
+ const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
+ u32 num_buf_entry;
+ u32 num_meta;
+ bool buf_entry_done;
+ bool meta_data_done;
+};
+
+struct ath12k_wmi_tlv_policy {
+ size_t min_len;
+};
+
+struct wmi_tlv_mgmt_rx_parse {
+ const struct ath12k_wmi_mgmt_rx_params *fixed;
+ const u8 *frame_buf;
+ bool frame_buf_done;
+};
+
+static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
+ [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
+ [WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
+ [WMI_TAG_SERVICE_READY_EVENT] = {
+ .min_len = sizeof(struct wmi_service_ready_event) },
+ [WMI_TAG_SERVICE_READY_EXT_EVENT] = {
+ .min_len = sizeof(struct wmi_service_ready_ext_event) },
+ [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
+ .min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
+ [WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
+ .min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
+ [WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_start_resp_event) },
+ [WMI_TAG_PEER_DELETE_RESP_EVENT] = {
+ .min_len = sizeof(struct wmi_peer_delete_resp_event) },
+ [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
+ .min_len = sizeof(struct wmi_bcn_tx_status_event) },
+ [WMI_TAG_VDEV_STOPPED_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_stopped_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
+ .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
+ [WMI_TAG_MGMT_RX_HDR] = {
+ .min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
+ [WMI_TAG_MGMT_TX_COMPL_EVENT] = {
+ .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
+ [WMI_TAG_SCAN_EVENT] = {
+ .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
+ .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TAG_ROAM_EVENT] = {
+ .min_len = sizeof(struct wmi_roam_event) },
+ [WMI_TAG_CHAN_INFO_EVENT] = {
+ .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
+ .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
+ [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
+ [WMI_TAG_READY_EVENT] = {
+ .min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
+ [WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
+ .min_len = sizeof(struct wmi_service_available_event) },
+ [WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
+ .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+ [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
+ .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
+ [WMI_TAG_HOST_SWFDA_EVENT] = {
+ .min_len = sizeof(struct wmi_fils_discovery_event) },
+ [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
+ .min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
+ [WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+};
+
+static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
+{
+ return le32_encode_bits(cmd, WMI_TLV_TAG) |
+ le32_encode_bits(len, WMI_TLV_LEN);
+}
+
+static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
+{
+ return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
+}
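+
+/* Usage sketch (hypothetical command struct and tag): the first word of
+ * every WMI command TLV is built with these helpers, e.g.
+ *
+ *   cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FOO_CMD,
+ *                                            sizeof(*cmd));
+ *
+ * where the encoded length excludes the TLV header itself.
+ */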
+
+void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config)
+{
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+
+ if (ab->num_radios == 2) {
+ config->num_peers = TARGET_NUM_PEERS(DBS);
+ config->num_tids = TARGET_NUM_TIDS(DBS);
+ } else if (ab->num_radios == 3) {
+ config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
+ config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config->num_peers = TARGET_NUM_PEERS(SINGLE);
+ config->num_tids = TARGET_NUM_TIDS(SINGLE);
+ }
+ config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+ if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+ config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+ else
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config->rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+ /* Indicates host supports peer map v3 and unmap v2 support */
+ config->peer_map_unmap_version = 0x32;
+ config->twt_ap_pdev_count = ab->num_radios;
+ config->twt_ap_sta_count = 1000;
+}
+
+void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config)
+{
+ config->num_vdevs = 4;
+ config->num_peers = 16;
+ config->num_tids = 32;
+
+ config->num_offload_peers = 3;
+ config->num_offload_reorder_buffs = 3;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = 0;
+ config->num_mcast_table_elems = 0;
+ config->mcast2ucast_mode = 0;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = 0;
+ config->dma_burst_size = 0;
+ config->rx_skip_defrag_timeout_dup_detection_check = 0;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = 2;
+ config->num_msdu_desc = 0x400;
+ config->beacon_tx_offload_max_vdev = 2;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+
+ config->peer_map_unmap_version = 0x1;
+ config->use_pdev_id = 1;
+ config->max_frag_entries = 0xa;
+ config->num_tdls_vdevs = 0x1;
+ config->num_tdls_conn_table_entries = 8;
+ config->num_multicast_filter_entries = 0x20;
+ config->num_wow_filters = 0x16;
+ config->num_keep_alive_pattern = 0;
+}
+
+#define PRIMAP(_hw_mode_) \
+ [_hw_mode_] = _hw_mode_##_PRI
+
+static const int ath12k_hw_mode_pri_map[] = {
+ PRIMAP(WMI_HOST_HW_MODE_SINGLE),
+ PRIMAP(WMI_HOST_HW_MODE_DBS),
+ PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
+ PRIMAP(WMI_HOST_HW_MODE_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+ /* keep last */
+ PRIMAP(WMI_HOST_HW_MODE_MAX),
+};
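+
+/* For reference, PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
+ *
+ *   [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI
+ *
+ * i.e. the table maps each hw mode enum value to its priority constant.
+ */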
+
+static int
+ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
+ tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
+ ath12k_wmi_tlv_policies[tlv_tag].min_len &&
+ ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ ath12k_wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
+
+static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+static const void **
+ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
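+
+/* Typical caller pattern (sketch, hypothetical event tag): parse an event
+ * skb into a tag-indexed table, look up the fixed parameters (NULL if the
+ * tag was absent), then free the table:
+ *
+ *   const void **tb;
+ *
+ *   tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ *   if (IS_ERR(tb))
+ *           return PTR_ERR(tb);
+ *   ev = tb[WMI_TAG_FOO_EVENT];
+ *   ...
+ *   kfree(tb);
+ */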
+
+static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+
+ if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
+ return -ENOMEM;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab;
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ wait_event_timeout(wmi_sc->tx_credits_wq, ({
+ ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+
+ if (ret == -EAGAIN)
+ ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
+
+ return ret;
+}
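+
+/* Sender sketch (hypothetical command id): on error the caller still owns
+ * the skb and must free it, e.g.
+ *
+ *   ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FOO_CMDID);
+ *   if (ret) {
+ *           ath12k_warn(ab, "failed to send WMI_FOO_CMDID\n");
+ *           dev_kfree_skb(skb);
+ *   }
+ */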
+
+static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
+ const void *ptr,
+ struct ath12k_wmi_service_ext_arg *arg)
+{
+ const struct wmi_service_ready_ext_event *ev = ptr;
+ int i;
+
+ if (!ev)
+ return -EINVAL;
+
+ /* Move this to host based bitmap */
+ arg->default_conc_scan_config_bits =
+ le32_to_cpu(ev->default_conc_scan_config_bits);
+ arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
+ arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
+ arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
+ arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
+ arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
+ arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);
+
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ arg->ppet.ppet16_ppet8_ru3_ru0[i] =
+ le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);
+
+ return 0;
+}
+
+static int
+ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
+ struct ath12k_wmi_svc_rdy_ext_parse *svc,
+ u8 hw_mode_id, u8 phy_id,
+ struct ath12k_pdev *pdev)
+{
+ const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
+ const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
+ const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
+ const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
+ struct ath12k_band_cap *cap_band;
+ struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+ u32 phy_map;
+ u32 hw_idx, phy_idx = 0;
+ int i;
+
+ if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
+ return -EINVAL;
+
+ for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
+ if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
+ break;
+
+ phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
+ phy_idx = fls(phy_map);
+ }
+
+ if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
+ return -EINVAL;
+
+ phy_idx += phy_id;
+ if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
+ return -EINVAL;
+
+ mac_caps = wmi_mac_phy_caps + phy_idx;
+
+ pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
+ pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
+
+ /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
+ * band to band for a single radio, need to see how this should be
+ * handled.
+ */
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+ pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
+ pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
+ } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+ pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
+ pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
+ pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
+ pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
+ pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
+ } else {
+ return -EINVAL;
+ }
+
+ /* tx/rx chainmask reported from fw depends on the actual hw chains used.
+ * For example, for 4x4 capable macphys, the first 4 chains can be used for
+ * the first mac and the remaining 4 chains for the second mac or vice-versa.
+ * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
+ * will be advertised for second mac or vice-versa. Compute the shift value
+ * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
+ * mac80211.
+ */
+ pdev_cap->tx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
+ pdev_cap->rx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
+
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
+ cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
+ cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
+ cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
+ cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
+ cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
+ for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
+ cap_band->he_cap_phy_info[i] =
+ le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);
+
+ cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
+ cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);
+
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
+ le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
+ }
+
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
+ cap_band->max_bw_supported =
+ le32_to_cpu(mac_caps->max_bw_supported_5g);
+ cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
+ cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
+ cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
+ cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
+ for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
+ cap_band->he_cap_phy_info[i] =
+ le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
+
+ cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
+ cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
+
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
+ le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
+
+ cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+ cap_band->max_bw_supported =
+ le32_to_cpu(mac_caps->max_bw_supported_5g);
+ cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
+ cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
+ cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
+ cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
+ for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
+ cap_band->he_cap_phy_info[i] =
+ le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
+
+ cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
+ cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
+
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
+ le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
+ }
+
+ return 0;
+}
+
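+/* Extract the per-phy HAL regulatory capabilities (eeprom regdomain and
+ * the allowed 2/5 GHz frequency ranges) advertised in the ext service
+ * ready data.
+ */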
+static int
+ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
+ const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
+ const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
+ u8 phy_idx,
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
+{
+ const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
+
+ if (!reg_caps || !ext_caps)
+ return -EINVAL;
+
+ if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
+ return -EINVAL;
+
+ ext_reg_cap = &ext_caps[phy_idx];
+
+ param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
+ param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
+ param->eeprom_reg_domain_ext =
+ le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
+ param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
+ param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
+ /* check if param->wireless_mode is needed */
+ param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
+ param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
+ param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
+ param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
+
+ return 0;
+}
+
+static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
+ const void *evt_buf,
+ struct ath12k_wmi_target_cap_arg *cap)
+{
+ const struct wmi_service_ready_event *ev = evt_buf;
+
+ if (!ev) {
+ ath12k_err(ab, "%s: failed due to NULL event buffer\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cap->phy_capability = le32_to_cpu(ev->phy_capability);
+ cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
+ cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
+ cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
+ cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
+ cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
+ cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
+ cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
+ cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
+ cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
+ cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
+ cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
+ cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
+ cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
+ cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
+ cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
+ cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);
+
+ return 0;
+}
+
+/* Save the wmi_service_bitmap into a linear bitmap. The WMI services in
+ * the service ready event are advertised in b0-b3 (the 4 LSBs) of each
+ * 4-byte word.
+ */
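+/* For example, assuming WMI_SERVICE_BITS_IN_SIZE32 is 4 (matching the
+ * b0-b3 layout above), service bit j is found at bit (j % 4) of
+ * wmi_svc_bm[j / 4] and linearised into bit j of svc_map.
+ */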
+static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
+ const u32 *wmi_svc_bm)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
+ do {
+ if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, wmi->wmi_ab->svc_map);
+ } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
+ }
+}
+
+static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_ready_parse *svc_ready = data;
+ struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
+ u16 expect_len;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EVENT:
+ if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
+ return -EINVAL;
+ break;
+
+ case WMI_TAG_ARRAY_UINT32:
+ if (!svc_ready->wmi_svc_bitmap_done) {
+ expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
+ if (len < expect_len) {
+ ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);
+
+ svc_ready->wmi_svc_bitmap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
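+/* WMI_SERVICE_READY_EVENTID handler: iterate the TLVs, recording the
+ * target capabilities and the first service bitmap seen.
+ */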
+static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k_wmi_svc_ready_parse svc_ready = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_svc_rdy_parse,
+ &svc_ready);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
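+/* Allocate a zeroed skb for a WMI command: reserve HTC headroom and pad
+ * the requested length up to a 4-byte boundary as the TLV format expects.
+ */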
+struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len)
+{
+ struct sk_buff *skb;
+ struct ath12k_base *ab = wmi_sc->ab;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath12k_warn(ab, "unaligned WMI skb data\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
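+/* Queue a management frame for transmission. The frame buffer is already
+ * DMA-mapped (paddr taken from ATH12K_SKB_CB); at most
+ * WMI_MGMT_SEND_DOWNLD_LEN bytes are copied inline into the TLV, while
+ * frame_len carries the full length so the firmware can fetch the
+ * remainder via the DMA address.
+ */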
+int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_mgmt_send_cmd *cmd;
+ struct wmi_tlv *frame_tlv;
+ struct sk_buff *skb;
+ u32 buf_len;
+ int ret, len;
+
+ buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
+
+ len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_send_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->desc_id = cpu_to_le32(buf_id);
+ cmd->chanfreq = 0;
+ cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
+ cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
+ cmd->frame_len = cpu_to_le32(frame->len);
+ cmd->buf_len = cpu_to_le32(buf_len);
+ cmd->tx_params_valid = 0;
+
+ frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
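+/* Create a vdev. The command is a fixed struct followed by a
+ * WMI_TAG_ARRAY_STRUCT holding one txrx-stream config per supported band
+ * (WMI_NUM_SUPPORTED_BAND_MAX entries: 2 GHz and 5 GHz).
+ */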
+int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
+ struct ath12k_wmi_vdev_create_arg *args)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+ struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
+ struct wmi_tlv *tlv;
+ int ret, len;
+ void *ptr;
+
+ /* This can be optimized by sending the tx/rx chain configuration
+ * only for the supported bands instead of always sending it for
+ * both bands.
+ */
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(args->if_id);
+ cmd->vdev_type = cpu_to_le32(args->type);
+ cmd->vdev_subtype = cpu_to_le32(args->subtype);
+ cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
+ cmd->pdev_id = cpu_to_le32(args->pdev_id);
+ cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ptr = skb->data + sizeof(*cmd);
+ len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+
+ ptr += TLV_HDR_SIZE;
+ txrx_streams = ptr;
+ len = sizeof(*txrx_streams);
+ txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
+ len);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->supported_tx_streams =
+ args->chains[NL80211_BAND_2GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ args->chains[NL80211_BAND_2GHZ].rx;
+
+ txrx_streams++;
+ txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
+ len);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->supported_tx_streams =
+ args->chains[NL80211_BAND_5GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ args->chains[NL80211_BAND_5GHZ].rx;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
+ args->if_id, args->type, args->subtype,
+ macaddr, args->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to submit WMI_VDEV_CREATE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
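+/* Encode a channel for the firmware: centre frequencies and phy mode in
+ * 'info' along with the WMI_CHAN_INFO_* flag bits, and the regulatory
+ * power limits in reg_info_1/reg_info_2.
+ */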
+static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
+ struct wmi_vdev_start_req_arg *arg)
+{
+ memset(chan, 0, sizeof(*chan));
+
+ chan->mhz = cpu_to_le32(arg->freq);
+ chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
+ if (arg->mode == MODE_11AC_VHT80_80)
+ chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
+ else
+ chan->band_center_freq2 = 0;
+
+ chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
+ if (arg->passive)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
+ if (arg->allow_ibss)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
+ if (arg->allow_ht)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
+ if (arg->allow_vht)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
+ if (arg->allow_he)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
+ if (arg->ht40plus)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
+ if (arg->chan_radar)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
+ if (arg->freq2_radar)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
+
+ chan->reg_info_1 = le32_encode_bits(arg->max_power,
+ WMI_CHAN_REG_INFO1_MAX_PWR) |
+ le32_encode_bits(arg->max_reg_power,
+ WMI_CHAN_REG_INFO1_MAX_REG_PWR);
+
+ chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
+ WMI_CHAN_REG_INFO2_ANT_MAX) |
+ le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
+}
+
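+/* Start or restart a vdev on a channel. SSID, hidden-SSID and PMF flags
+ * are only programmed on a fresh start; the trailing zero-length
+ * WMI_TAG_ARRAY_STRUCT is a placeholder for P2P NoA descriptors.
+ */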
+int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ struct ath12k_wmi_channel_params *chan;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return -EINVAL;
+
+ len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
+ cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
+ cmd->dtim_period = cpu_to_le32(arg->dtim_period);
+ cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
+ cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
+ cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
+ cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
+ cmd->regdomain = cpu_to_le32(arg->regdomain);
+ cmd->he_ops = cpu_to_le32(arg->he_ops);
+
+ if (!restart) {
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+ if (arg->hidden_ssid)
+ cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
+ if (arg->pmf_enabled)
+ cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
+ }
+
+ cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
+
+ ptr = skb->data + sizeof(*cmd);
+ chan = ptr;
+
+ ath12k_wmi_put_wmi_channel(chan, arg);
+
+ chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
+ sizeof(*chan));
+ ptr += sizeof(*chan);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
+ restart ? "restart" : "start", arg->vdev_id,
+ arg->freq, arg->mode);
+
+ if (restart)
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_RESTART_REQUEST_CMDID);
+ else
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_START_REQUEST_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
+ restart ? "restart" : "start");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = cpu_to_le32(aid);
+
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
+ struct ath12k_wmi_peer_create_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
+ cmd->peer_type = cpu_to_le32(arg->peer_type);
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI peer create vdev_id %d peer_addr %pM\n",
+ arg->vdev_id, arg->peer_addr);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
+ const u8 *peer_addr, u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
+ struct ath12k_wmi_pdev_set_regdomain_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ sizeof(*cmd));
+
+ cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
+ cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
+ cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
+ cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
+ cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
+ cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
+ arg->current_rd_in_use, arg->current_rd_2g,
+ arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
+ sizeof(*cmd));
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->param_id = cpu_to_le32(param_id);
+ cmd->param_value = cpu_to_le32(param_val);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_val);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
+ u8 peer_addr[ETH_ALEN],
+ u32 peer_tid_bitmap,
+ u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ vdev_id, peer_addr, peer_tid_bitmap);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size)
+{
+ struct wmi_peer_reorder_queue_setup_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->tid = cpu_to_le32(tid);
+ cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
+ cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
+ cmd->queue_no = cpu_to_le32(tid);
+ cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
+ cmd->ba_window_size = cpu_to_le32(ba_window_size);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
+ addr, vdev_id, tid);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
+ struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_reorder_queue_remove_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
+ arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+ cmd->param_id = cpu_to_le32(param_id);
+ cmd->param_value = cpu_to_le32(param_value);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev set param %d pdev id %d value %d\n",
+ param_id, pdev_id, param_value);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_set_ps_mode_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = cpu_to_le32(enable);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev set psmode %d vdev id %d\n",
+ enable, vdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
+ u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
+ sizeof(*cmd));
+
+ cmd->suspend_opt = cpu_to_le32(suspend_opt);
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev suspend pdev_id %d\n", pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_resume_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_resume_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev resume pdev id %d\n", pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+/* TODO: FW support for this command is not available yet.
+ * It can be tested once the command and the corresponding
+ * event are implemented in FW.
+ */
+int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
+ enum wmi_bss_chan_info_req_type type)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_bss_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ sizeof(*cmd));
+ cmd->req_type = cpu_to_le32(type);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI bss chan info req type %d\n", type);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
+ struct ath12k_wmi_ap_ps_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->param = cpu_to_le32(arg->param);
+ cmd->value = cpu_to_le32(arg->value);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
+ arg->vdev_id, peer_addr, arg->param, arg->value);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
+ u32 param, u32 param_value)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->param = cpu_to_le32(param);
+ cmd->value = cpu_to_le32(param_value);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI set sta ps vdev_id %d param %d value %d\n",
+ vdev_id, param, param_value);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
+ len);
+
+ cmd->type = cpu_to_le32(type);
+ cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->param_id = cpu_to_le32(param_id);
+ cmd->param_value = cpu_to_le32(param_value);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_get_pdev_temperature_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_bcn_offload_ctrl_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
+ vdev_id, bcn_ctrl_op);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
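+/* Pass the beacon template to the firmware for offloaded beaconing. The
+ * TIM and (ext) CSA countdown offsets come from the mac80211 mutable
+ * offsets so that the firmware can keep those IEs updated in place.
+ */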
+int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_bcn_tmpl_cmd *cmd;
+ struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(bcn->len, 4);
+
+ len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
+ cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
+ cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
+ cmd->buf_len = cpu_to_le32(bcn->len);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ bcn_prb_info = ptr;
+ len = sizeof(*bcn_prb_info);
+ bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
+ len);
+ bcn_prb_info->caps = 0;
+ bcn_prb_info->erp = 0;
+
+ ptr += sizeof(*bcn_prb_info);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
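+/* Install a cipher key for a peer. The key material follows the fixed
+ * struct as a WMI_TAG_ARRAY_BYTE TLV padded to 4 bytes; key_len carries
+ * the real, unpadded length.
+ */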
+int ath12k_wmi_vdev_install_key(struct ath12k *ar,
+ struct wmi_vdev_install_key_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len, key_len_aligned;
+
+ /* The WMI_TAG_ARRAY_BYTE TLV needs to be aligned to 4 bytes; the
+ * actual key length is specified in cmd->key_len.
+ */
+ key_len_aligned = roundup(arg->key_len, 4);
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ cmd->key_idx = cpu_to_le32(arg->key_idx);
+ cmd->key_flags = cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = cpu_to_le32(arg->key_cipher);
+ cmd->key_len = cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->key_rsc_counter)
+ cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
+ memcpy(tlv->value, arg->key_data, arg->key_len);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ struct ath12k_wmi_peer_assoc_arg *arg,
+ bool hw_crypto_disabled)
+{
+ cmd->peer_flags = 0;
+
+ if (arg->is_wme_set) {
+ if (arg->qos_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
+ if (arg->apsd_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
+ if (arg->ht_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
+ if (arg->bw_40)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
+ if (arg->bw_80)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
+ if (arg->bw_160)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
+
+ /* Typically if STBC is enabled for VHT it should be enabled
+ * for HT as well.
+ */
+ if (arg->stbc_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
+
+ /* Typically if LDPC is enabled for VHT it should be enabled
+ * for HT as well.
+ */
+ if (arg->ldpc_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
+
+ if (arg->static_mimops_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
+ if (arg->dynamic_mimops_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
+ if (arg->spatial_mux_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
+ if (arg->vht_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
+ if (arg->he_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
+ if (arg->twt_requester)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
+ if (arg->twt_responder)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
+ }
+
+ /* Suppress authorization for all AUTH modes that need 4-way handshake
+ * (during re-association).
+ * Authorization will be done for these modes on key installation.
+ */
+ if (arg->auth_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
+ if (arg->need_ptk_4_way) {
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
+ if (!hw_crypto_disabled)
+ cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
+ }
+ if (arg->need_gtk_2_way)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
+ /* Safe mode bypasses the 4-way handshake */
+ if (arg->safe_mode_enabled)
+ cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
+ WMI_PEER_NEED_GTK_2_WAY));
+
+ if (arg->is_pmf_enabled)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
+
+ /* Disable AMSDU for station transmit, if the user configures it.
+ * Likewise disable AMSDU for AP transmit to 11n stations:
+ * if (arg->amsdu_disable) - add once FW support is available.
+ */
+
+ /* The target asserts if a node is marked HT while all MCS rates are
+ * set to 0. Mark the node as non-HT if all MCS rates were disabled
+ * through iwpriv.
+ */
+ if (arg->peer_ht_rates.num_rates == 0)
+ cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
+}
+
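+/* Send the (re)association parameters for a peer. TLV layout: fixed cmd,
+ * byte array of legacy rates, byte array of HT rates, one VHT rate-set
+ * struct, then an array of HE rate-set structs (peer_he_mcs_count
+ * entries).
+ */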
+int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_assoc_complete_cmd *cmd;
+ struct ath12k_wmi_vht_rate_set_params *mcs;
+ struct ath12k_wmi_he_rate_set_params *he_mcs;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 peer_legacy_rates_align;
+ u32 peer_ht_rates_align;
+ int i, ret, len;
+
+ peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
+ sizeof(u32));
+ peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
+ sizeof(u32));
+
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
+ TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
+ sizeof(*mcs) + TLV_HDR_SIZE +
+ (sizeof(*he_mcs) * arg->peer_he_mcs_count);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+
+ cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
+ cmd->peer_associd = cpu_to_le32(arg->peer_associd);
+
+ ath12k_wmi_copy_peer_flags(cmd, arg,
+ test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
+ &ar->ab->dev_flags));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
+
+ cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
+ cmd->peer_caps = cpu_to_le32(arg->peer_caps);
+ cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
+ cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
+ cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
+ cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
+ cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
+ cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
+
+ /* Update 11ax capabilities */
+ cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
+ cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
+ cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
+ cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
+ cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
+ for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
+ cmd->peer_he_cap_phy[i] =
+ cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
+ cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
+ cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
+ cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
+
+ /* Update peer legacy rate information */
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
+
+ ptr += TLV_HDR_SIZE;
+
+ cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ memcpy(ptr, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ /* Update peer HT rate information */
+ ptr += peer_legacy_rates_align;
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
+ ptr += TLV_HDR_SIZE;
+ cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
+ memcpy(ptr, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ /* VHT Rates */
+ ptr += peer_ht_rates_align;
+
+ mcs = ptr;
+
+ mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
+ sizeof(*mcs));
+
+ cmd->peer_nss = cpu_to_le32(arg->peer_nss);
+
+ /* Update bandwidth-NSS mapping */
+ cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
+
+ if (arg->vht_capable) {
+ mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
+ mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
+ mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
+ mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
+ }
+
+ /* HE Rates */
+ cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
+ cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
+
+ ptr += sizeof(*mcs);
+
+ len = arg->peer_he_mcs_count * sizeof(*he_mcs);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+ ptr += TLV_HDR_SIZE;
+
+ /* Loop through the HE rate set */
+ for (i = 0; i < arg->peer_he_mcs_count; i++) {
+ he_mcs = ptr;
+ he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
+ sizeof(*he_mcs));
+
+ he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
+ he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
+ ptr += sizeof(*he_mcs);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+ cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
+ cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
+ cmd->peer_listen_intval, cmd->peer_ht_caps,
+ cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
+ cmd->peer_mpdu_density,
+ cmd->peer_vht_caps, cmd->peer_he_cap_info,
+ cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
+ cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
+ cmd->peer_he_cap_phy[2],
+ cmd->peer_bw_rxnss_override);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PEER_ASSOC_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
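+/* Fill in commonly used scan defaults (dwell/rest times, subscribed scan
+ * events, broadcast BSSID). Callers override individual fields before
+ * sending the scan request.
+ */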
+void ath12k_wmi_start_scan_init(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_active_2g = 0;
+ arg->dwell_time_passive = 150;
+ arg->dwell_time_active_6g = 40;
+ arg->dwell_time_passive_6g = 30;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+ WMI_SCAN_EVENT_COMPLETED |
+ WMI_SCAN_EVENT_BSS_CHANNEL |
+ WMI_SCAN_EVENT_FOREIGN_CHAN |
+ WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->num_bssid = 1;
+
+ /* Fill bssid_list[0] with 0xff, otherwise the BSSID and RA will be
+ * zeros in the probe request.
+ */
+ eth_broadcast_addr(arg->bssid_list[0].addr);
+}
+
+static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ /* Scan events subscription */
+ if (arg->scan_ev_started)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
+ if (arg->scan_ev_completed)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
+ if (arg->scan_ev_bss_chan)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
+ if (arg->scan_ev_foreign_chan)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
+ if (arg->scan_ev_dequeued)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
+ if (arg->scan_ev_preempted)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
+ if (arg->scan_ev_start_failed)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
+ if (arg->scan_ev_restarted)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
+ if (arg->scan_ev_foreign_chn_exit)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
+ if (arg->scan_ev_suspended)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
+ if (arg->scan_ev_resumed)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
+
+ /* Set scan control flags */
+ cmd->scan_ctrl_flags = 0;
+ if (arg->scan_f_passive)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
+ if (arg->scan_f_strict_passive_pch)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
+ if (arg->scan_f_promisc_mode)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
+ if (arg->scan_f_capture_phy_err)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
+ if (arg->scan_f_half_rate)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
+ if (arg->scan_f_quarter_rate)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
+ if (arg->scan_f_cck_rates)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
+ if (arg->scan_f_ofdm_rates)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
+ if (arg->scan_f_chan_stat_evnt)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
+ if (arg->scan_f_filter_prb_req)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+ if (arg->scan_f_bcast_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
+ if (arg->scan_f_offchan_mgmt_tx)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
+ if (arg->scan_f_offchan_data_tx)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
+ if (arg->scan_f_force_active_dfs_chn)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
+ if (arg->scan_f_add_tpc_ie_in_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
+ if (arg->scan_f_add_ds_ie_in_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
+ if (arg->scan_f_add_spoofed_mac_in_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
+ if (arg->scan_f_add_rand_seq_in_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ if (arg->scan_f_en_ie_whitelist_in_probe)
+ cmd->scan_ctrl_flags |=
+ cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
+
+ cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
+ WMI_SCAN_DWELL_MODE_MASK);
+}
+
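+/* Build and send WMI_START_SCAN_CMDID. The fixed struct is followed by
+ * TLVs for the channel list, SSIDs, BSSIDs and extra IEs (emitted even
+ * when empty), plus optional short-SSID/BSSID hint arrays.
+ */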
+int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_start_scan_cmd *cmd;
+ struct ath12k_wmi_ssid_params *ssid = NULL;
+ struct ath12k_wmi_mac_addr_params *bssid;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *tmp_ptr;
+ u8 extraie_len_with_pad = 0;
+ struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
+ struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
+
+ len = sizeof(*cmd);
+
+ len += TLV_HDR_SIZE;
+ if (arg->num_chan)
+ len += arg->num_chan * sizeof(u32);
+
+ len += TLV_HDR_SIZE;
+ if (arg->num_ssids)
+ len += arg->num_ssids * sizeof(*ssid);
+
+ len += TLV_HDR_SIZE;
+ if (arg->num_bssid)
+ len += sizeof(*bssid) * arg->num_bssid;
+
+ len += TLV_HDR_SIZE;
+ if (arg->extraie.len)
+ extraie_len_with_pad =
+ roundup(arg->extraie.len, sizeof(u32));
+ len += extraie_len_with_pad;
+
+ if (arg->num_hint_bssid)
+ len += TLV_HDR_SIZE +
+ arg->num_hint_bssid * sizeof(*hint_bssid);
+
+ if (arg->num_hint_s_ssid)
+ len += TLV_HDR_SIZE +
+ arg->num_hint_s_ssid * sizeof(*s_ssid);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
+ sizeof(*cmd));
+
+ cmd->scan_id = cpu_to_le32(arg->scan_id);
+ cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->scan_priority = cpu_to_le32(arg->scan_priority);
+ cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
+
+ ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
+
+ cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
+ cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
+ cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
+ cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
+ cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
+ cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
+ cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
+ cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
+ cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
+ cmd->idle_time = cpu_to_le32(arg->idle_time);
+ cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
+ cmd->probe_delay = cpu_to_le32(arg->probe_delay);
+ cmd->burst_duration = cpu_to_le32(arg->burst_duration);
+ cmd->num_chan = cpu_to_le32(arg->num_chan);
+ cmd->num_bssid = cpu_to_le32(arg->num_bssid);
+ cmd->num_ssids = cpu_to_le32(arg->num_ssids);
+ cmd->ie_len = cpu_to_le32(arg->extraie.len);
+ cmd->n_probes = cpu_to_le32(arg->n_probes);
+
+ ptr += sizeof(*cmd);
+
+ len = arg->num_chan * sizeof(u32);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
+ ptr += TLV_HDR_SIZE;
+ tmp_ptr = (u32 *)ptr;
+
+ memcpy(tmp_ptr, arg->chan_list, len);
+
+ ptr += len;
+
+ len = arg->num_ssids * sizeof(*ssid);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
+
+ ptr += TLV_HDR_SIZE;
+
+ if (arg->num_ssids) {
+ ssid = ptr;
+ for (i = 0; i < arg->num_ssids; ++i) {
+ ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
+ memcpy(ssid->ssid, arg->ssid[i].ssid,
+ arg->ssid[i].ssid_len);
+ ssid++;
+ }
+ }
+
+ ptr += (arg->num_ssids * sizeof(*ssid));
+ len = arg->num_bssid * sizeof(*bssid);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
+
+ ptr += TLV_HDR_SIZE;
+ bssid = ptr;
+
+ if (arg->num_bssid) {
+ for (i = 0; i < arg->num_bssid; ++i) {
+ ether_addr_copy(bssid->addr,
+ arg->bssid_list[i].addr);
+ bssid++;
+ }
+ }
+
+ ptr += arg->num_bssid * sizeof(*bssid);
+
+ len = extraie_len_with_pad;
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
+ ptr += TLV_HDR_SIZE;
+
+ if (arg->extraie.len)
+ memcpy(ptr, arg->extraie.ptr,
+ arg->extraie.len);
+
+ ptr += extraie_len_with_pad;
+
+ if (arg->num_hint_s_ssid) {
+ len = arg->num_hint_s_ssid * sizeof(*s_ssid);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
+ ptr += TLV_HDR_SIZE;
+ s_ssid = ptr;
+ for (i = 0; i < arg->num_hint_s_ssid; ++i) {
+ s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
+ s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
+ s_ssid++;
+ }
+ ptr += len;
+ }
+
+ if (arg->num_hint_bssid) {
+ len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
+ ptr += TLV_HDR_SIZE;
+ hint_bssid = ptr;
+ for (i = 0; i < arg->num_hint_bssid; ++i) {
+ hint_bssid->freq_flags =
+ arg->hint_bssid[i].freq_flags;
+ ether_addr_copy(&hint_bssid->bssid.addr[0],
+ &arg->hint_bssid[i].bssid.addr[0]);
+ hint_bssid++;
+ }
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_START_SCAN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_cancel_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->requestor = cpu_to_le32(arg->requester);
+ cmd->scan_id = cpu_to_le32(arg->scan_id);
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
+ /* stop the scan with the corresponding scan_id */
+ if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
+ /* Cancelling all scans */
+ cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
+ } else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
+ /* Cancelling VAP scans */
+ cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
+ } else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
+ /* Cancelling specific scan */
+ cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
+ } else {
+ ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
+ arg->req_type);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_STOP_SCAN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
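+/* Send the scan channel list, split into chunks that fit max_msg_len;
+ * every chunk after the first sets the append flag so the firmware
+ * extends the existing list instead of replacing it.
+ */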
+int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_chan_list_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct ath12k_wmi_channel_params *chan_info;
+ struct ath12k_wmi_channel_arg *channel_arg;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
+ __le32 *reg1, *reg2;
+
+ channel_arg = &arg->channel[0];
+ while (arg->nallchans) {
+ len = sizeof(*cmd) + TLV_HDR_SIZE;
+ max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
+ sizeof(*chan_info);
+
+ num_send_chans = min(arg->nallchans, max_chan_limit);
+
+ arg->nallchans -= num_send_chans;
+ len += sizeof(*chan_info) * num_send_chans;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
+ cmd->num_scan_chans = cpu_to_le32(num_send_chans);
+ if (num_sends)
+ cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI no. of chan = %d len = %d pdev_id = %d num_sends = %d\n",
+ num_send_chans, len, arg->pdev_id, num_sends);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ len = sizeof(*chan_info) * num_send_chans;
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ len);
+ ptr += TLV_HDR_SIZE;
+
+ for (i = 0; i < num_send_chans; ++i) {
+ chan_info = ptr;
+ memset(chan_info, 0, sizeof(*chan_info));
+ len = sizeof(*chan_info);
+ chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
+ len);
+
+ reg1 = &chan_info->reg_info_1;
+ reg2 = &chan_info->reg_info_2;
+ chan_info->mhz = cpu_to_le32(channel_arg->mhz);
+ chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
+ chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
+
+ if (channel_arg->is_chan_passive)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
+ if (channel_arg->allow_he)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
+ else if (channel_arg->allow_vht)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
+ else if (channel_arg->allow_ht)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
+ if (channel_arg->half_rate)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
+ if (channel_arg->quarter_rate)
+ chan_info->info |=
+ cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
+
+ if (channel_arg->psc_channel)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
+
+ chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
+ WMI_CHAN_INFO_MODE);
+ *reg1 |= le32_encode_bits(channel_arg->minpower,
+ WMI_CHAN_REG_INFO1_MIN_PWR);
+ *reg1 |= le32_encode_bits(channel_arg->maxpower,
+ WMI_CHAN_REG_INFO1_MAX_PWR);
+ *reg1 |= le32_encode_bits(channel_arg->maxregpower,
+ WMI_CHAN_REG_INFO1_MAX_REG_PWR);
+ *reg1 |= le32_encode_bits(channel_arg->reg_class_id,
+ WMI_CHAN_REG_INFO1_REG_CLS);
+ *reg2 |= le32_encode_bits(channel_arg->antennamax,
+ WMI_CHAN_REG_INFO2_ANT_MAX);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
+ i, channel_arg->mhz, le32_to_cpu(chan_info->info));
+
+ ptr += sizeof(*chan_info);
+
+ channel_arg++;
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ num_sends++;
+ }
+
+ return 0;
+}
+
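+ /* One fixed-size command carries the WMM parameters for all four
+ * access categories (BE, BK, VI, VO), each as its own sub-TLV.
+ */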
+int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_set_wmm_params_cmd *cmd;
+ struct wmi_wmm_params *wmm_param;
+ struct wmi_wmm_params_arg *wmi_wmm_arg;
+ struct sk_buff *skb;
+ int ret, ac;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->wmm_param_type = 0;
+
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ switch (ac) {
+ case WME_AC_BE:
+ wmi_wmm_arg = &param->ac_be;
+ break;
+ case WME_AC_BK:
+ wmi_wmm_arg = &param->ac_bk;
+ break;
+ case WME_AC_VI:
+ wmi_wmm_arg = &param->ac_vi;
+ break;
+ case WME_AC_VO:
+ wmi_wmm_arg = &param->ac_vo;
+ break;
+ }
+
+ wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
+ wmm_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ sizeof(*wmm_param));
+
+ wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
+ wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
+ wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
+ wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
+ wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
+ wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ ac, wmi_wmm_arg->aifs, wmi_wmm_arg->cwmin,
+ wmi_wmm_arg->cwmax, wmi_wmm_arg->txop,
+ wmi_wmm_arg->acm, wmi_wmm_arg->no_ack);
+ }
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
+ u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_dfs_phyerr_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = cpu_to_le32(tid);
+ cmd->initiator = cpu_to_le32(initiator);
+ cmd->reasoncode = cpu_to_le32(reason);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_DELBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = cpu_to_le32(tid);
+ cmd->statuscode = cpu_to_le32(status);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = cpu_to_le32(tid);
+ cmd->buffersize = cpu_to_le32(buf_size);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
+ struct ath12k_wmi_init_country_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_init_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_country_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ switch (arg->flags) {
+ case ALPHA_IS_SET:
+ cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
+ memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
+ break;
+ case CC_IS_SET:
+ cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
+ cmd->cc_info.country_code =
+ cpu_to_le32(arg->cc_info.country_code);
+ break;
+ case REGDMN_IS_SET:
+ cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
+ cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_SET_INIT_COUNTRY_CMDID);
+
+out:
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
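+ /* Enable TWT on a pdev using the ATH12K_TWT_DEF_* defaults for the
+ * congestion/interference thresholds and the slot bookkeeping; only
+ * the pdev id comes from the caller.
+ */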
+int
+ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
+ len);
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+ cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
+ cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
+ cmd->congestion_thresh_setup =
+ cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
+ cmd->congestion_thresh_teardown =
+ cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
+ cmd->congestion_thresh_critical =
+ cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
+ cmd->interference_thresh_teardown =
+ cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
+ cmd->interference_thresh_setup =
+ cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
+ cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
+ cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
+ cmd->no_of_bcast_mcast_slots =
+ cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
+ cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
+ cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
+ cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
+ cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
+ cmd->remove_sta_slot_interval =
+ cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
+ /* TODO add MBSSID support */
+ cmd->mbss_support = 0;
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_disable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
+ len);
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_DISABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
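+ /* Program the HE OBSS packet detect (spatial reuse) parameters for a
+ * vdev. The min/max offsets are signed values, hence the
+ * a_cpu_to_sle32() conversions.
+ */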
+int
+ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_spatial_reuse_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ len);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->enable = cpu_to_le32(he_obss_pd->enable);
+ cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
+ cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_color_collision_cfg_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ len);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
+ cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
+ cmd->current_bss_color = cpu_to_le32(bss_color);
+ cmd->detection_period_ms = cpu_to_le32(period);
+ cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
+ cmd->free_slot_expiry_time_ms = 0;
+ cmd->flags = 0;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
+ le32_to_cpu(cmd->vdev_id), le32_to_cpu(cmd->evt_type),
+ le32_to_cpu(cmd->current_bss_color),
+ le32_to_cpu(cmd->detection_period_ms),
+ le32_to_cpu(cmd->scan_period_ms));
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
+ bool enable)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_bss_color_change_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ len);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->enable = enable ? cpu_to_le32(1) : 0;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi_send_bss_color_change_enable id %d enable %d\n",
+ le32_to_cpu(cmd->vdev_id), le32_to_cpu(cmd->enable));
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
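+ /* Pass a FILS discovery frame template to the firmware. The template
+ * bytes travel in a byte-array TLV whose length is padded up to a
+ * 4-byte boundary.
+ */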
+int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct sk_buff *tmpl)
+{
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len;
+ struct wmi_fils_discovery_tmpl_cmd *cmd;
+
+ aligned_len = roundup(tmpl->len, 4);
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev %i set FILS discovery template\n", vdev_id);
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->buf_len = cpu_to_le32(tmpl->len);
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+ memcpy(tlv->value, tmpl->data, tmpl->len);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "WMI vdev %i failed to send FILS discovery template command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct sk_buff *tmpl)
+{
+ struct wmi_probe_tmpl_cmd *cmd;
+ struct ath12k_wmi_bcn_prb_info_params *probe_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(tmpl->len, 4);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev %i set probe response template\n", vdev_id);
+
+ len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->buf_len = cpu_to_le32(tmpl->len);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ probe_info = ptr;
+ len = sizeof(*probe_info);
+ probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
+ len);
+ probe_info->caps = 0;
+ probe_info->erp = 0;
+
+ ptr += sizeof(*probe_info);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+ memcpy(tlv->value, tmpl->data, tmpl->len);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "WMI vdev %i failed to send probe response template command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
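+ /* The same command configures either FILS discovery or unsolicited
+ * broadcast probe response transmission at the given interval;
+ * cmd->config selects which of the two the firmware enables.
+ */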
+int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
+ bool unsol_bcast_probe_resp_enabled)
+{
+ struct sk_buff *skb;
+ int ret, len;
+ struct wmi_fils_discovery_cmd *cmd;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev %i set %s interval to %u TU\n",
+ vdev_id, unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" : "FILS discovery",
+ interval);
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_fils_discovery_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
+ len);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->interval = cpu_to_le32(interval);
+ cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "WMI vdev %i failed to send FILS discovery enable/disable command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+static void
+ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
+ struct ath12k_wmi_pdev_band_arg *arg)
+{
+ u8 i;
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
+ struct ath12k_pdev *pdev;
+
+ for (i = 0; i < soc->num_radios; i++) {
+ pdev = &soc->pdevs[i];
+ hal_reg_cap = &soc->hal_reg_cap[i];
+ arg[i].pdev_id = pdev->pdev_id;
+
+ switch (pdev->cap.supported_bands) {
+ case WMI_HOST_WLAN_2G_5G_CAP:
+ arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ case WMI_HOST_WLAN_2G_CAP:
+ arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
+ break;
+ case WMI_HOST_WLAN_5G_CAP:
+ arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
+ arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
+ struct ath12k_wmi_resource_config_arg *tg_cfg)
+{
+ wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
+ wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
+ wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
+ wmi_cfg->num_offload_reorder_buffs =
+ cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
+ wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
+ wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
+ wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
+ wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
+ wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
+ wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
+ wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
+ wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
+ wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
+ wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
+ wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
+ wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
+ wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
+ wmi_cfg->roam_offload_max_ap_profiles =
+ cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
+ wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
+ wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
+ wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
+ wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
+ wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
+ wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
+ wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
+ wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
+ cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
+ wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
+ wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
+ wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
+ wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
+ wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
+ wmi_cfg->num_tdls_conn_table_entries =
+ cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
+ wmi_cfg->beacon_tx_offload_max_vdev =
+ cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
+ wmi_cfg->num_multicast_filter_entries =
+ cpu_to_le32(tg_cfg->num_multicast_filter_entries);
+ wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
+ wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
+ wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
+ wmi_cfg->max_tdls_concurrent_sleep_sta =
+ cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
+ wmi_cfg->max_tdls_concurrent_buffer_sta =
+ cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
+ wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
+ wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
+ wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
+ wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
+ wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
+ wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
+ wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
+ wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
+ wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
+ wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
+ wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
+ wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
+ wmi_cfg->host_service_flags =
+ cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+}
+
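+ /* Layout of the init command buffer built below (the hw-mode section
+ * is present only when hw_mode_id != WMI_HOST_HW_MODE_MAX):
+ *
+ * wmi_init_cmd
+ * ath12k_wmi_resource_config_params
+ * TLV hdr + host mem chunk array (array length may be zero)
+ * [ pdev_set_hw_mode cmd + TLV hdr + band_to_mac array ]
+ */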
+static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
+ struct ath12k_wmi_init_cmd_arg *arg)
+{
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct sk_buff *skb;
+ struct wmi_init_cmd *cmd;
+ struct ath12k_wmi_resource_config_params *cfg;
+ struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
+ struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
+ struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
+ struct wmi_tlv *tlv;
+ size_t len;
+ int ret;
+ void *ptr;
+ u32 hw_mode_len = 0;
+ u16 idx;
+
+ if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
+ hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
+ (arg->num_band_to_mac * sizeof(*band_to_mac));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
+ (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
+ sizeof(*cmd));
+
+ ptr = skb->data + sizeof(*cmd);
+ cfg = ptr;
+
+ ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
+
+ cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
+ sizeof(*cfg));
+
+ ptr += sizeof(*cfg);
+ host_mem_chunks = ptr + TLV_HDR_SIZE;
+ len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
+
+ for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
+ host_mem_chunks[idx].tlv_header =
+ ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ len);
+
+ host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
+ host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
+ host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
+ arg->mem_chunks[idx].req_id,
+ (u64)arg->mem_chunks[idx].paddr,
+ arg->mem_chunks[idx].len);
+ }
+ cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
+ len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
+
+ /* Host memory chunk array TLV header; the array length is zero
+ * when num_mem_chunks is zero.
+ */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+ ptr += TLV_HDR_SIZE + len;
+
+ if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
+ hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
+ hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ sizeof(*hw_mode));
+
+ hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
+ hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
+
+ ptr += sizeof(*hw_mode);
+
+ len = arg->num_band_to_mac * sizeof(*band_to_mac);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+
+ ptr += TLV_HDR_SIZE;
+ len = sizeof(*band_to_mac);
+
+ for (idx = 0; idx < arg->num_band_to_mac; idx++) {
+ band_to_mac = (void *)ptr;
+
+ band_to_mac->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
+ len);
+ band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
+ band_to_mac->start_freq =
+ cpu_to_le32(arg->band_to_mac[idx].start_freq);
+ band_to_mac->end_freq =
+ cpu_to_le32(arg->band_to_mac[idx].end_freq);
+ ptr += sizeof(*band_to_mac);
+ }
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
+ int pdev_id)
+{
+ struct ath12k_wmi_pdev_lro_config_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
+ sizeof(*cmd));
+
+ get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
+ get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
+
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send lro cfg req wmi cmd\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
+ enum wmi_host_hw_mode_config_type mode)
+{
+ struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
+ struct sk_buff *skb;
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ int len;
+ int ret;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->hw_mode_index = cpu_to_le32(mode);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_cmd_init(struct ath12k_base *ab)
+{
+ struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab;
+ struct ath12k_wmi_init_cmd_arg arg = {};
+
+ ab->hw_params->wmi_init(ab, &arg.res_cfg);
+
+ arg.num_mem_chunks = wmi_sc->num_mem_chunks;
+ arg.hw_mode_id = wmi_sc->preferred_hw_mode;
+ arg.mem_chunks = wmi_sc->mem_chunks;
+
+ if (ab->hw_params->single_pdev_only)
+ arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
+
+ arg.num_band_to_mac = ab->num_radios;
+ ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
+
+ return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg);
+}
+
+int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
+ struct ath12k_wmi_vdev_spectral_conf_arg *arg)
+{
+ struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->scan_count = cpu_to_le32(arg->scan_count);
+ cmd->scan_period = cpu_to_le32(arg->scan_period);
+ cmd->scan_priority = cpu_to_le32(arg->scan_priority);
+ cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
+ cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
+ cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
+ cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
+ cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
+ cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
+ cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
+ cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
+ cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
+ cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
+ cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
+ cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
+ cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
+ cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
+ cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI spectral scan config cmd vdev_id 0x%x\n",
+ arg->vdev_id);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send spectral scan config wmi cmd\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->trigger_cmd = cpu_to_le32(trigger);
+ cmd->enable_cmd = cpu_to_le32(enable);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI spectral enable cmd vdev id 0x%x\n",
+ vdev_id);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb,
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send spectral enable wmi cmd\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
+ struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
+{
+ struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
+ cmd->module_id = cpu_to_le32(arg->module_id);
+ cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
+ cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
+ cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
+ cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
+ cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
+ cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
+ cmd->num_elems = cpu_to_le32(arg->num_elems);
+ cmd->buf_size = cpu_to_le32(arg->buf_size);
+ cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
+ cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
+ arg->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send dma ring cfg req wmi cmd\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
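+ /* Parsers for the DMA ring buffer release event: a fixed params TLV
+ * is followed by two WMI_TAG_ARRAY_STRUCT TLVs, first the buffer
+ * entries and then the spectral metadata entries; both counts are
+ * validated against the fixed part.
+ */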
+static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_buf_release_arg *arg = data;
+
+ if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
+ return -EPROTO;
+
+ if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
+ return -ENOBUFS;
+
+ arg->num_buf_entry++;
+ return 0;
+}
+
+static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_buf_release_arg *arg = data;
+
+ if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
+ return -EPROTO;
+
+ if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
+ return -ENOBUFS;
+
+ arg->num_meta++;
+
+ return 0;
+}
+
+static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_buf_release_arg *arg = data;
+ const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
+ u32 pdev_id;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_DMA_BUF_RELEASE:
+ fixed = ptr;
+ arg->fixed = *fixed;
+ pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
+ arg->fixed.pdev_id = cpu_to_le32(pdev_id);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!arg->buf_entry_done) {
+ arg->num_buf_entry = 0;
+ arg->buf_entry = ptr;
+
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_dma_buf_entry_parse,
+ arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ arg->buf_entry_done = true;
+ } else if (!arg->meta_data_done) {
+ arg->num_meta = 0;
+ arg->meta_data = ptr;
+
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_dma_buf_meta_parse,
+ arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ arg->meta_data_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_dma_buf_release_arg arg = {};
+ struct ath12k_dbring_buf_release_event param;
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_dma_buf_parse,
+ &arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
+ return;
+ }
+
+ param.fixed = arg.fixed;
+ param.buf_entry = arg.buf_entry;
+ param.num_buf_entry = arg.num_buf_entry;
+ param.meta_data = arg.meta_data;
+ param.num_meta = arg.num_meta;
+
+ ret = ath12k_dbring_buffer_release_event(ab, &param);
+ if (ret) {
+ ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
+ return;
+ }
+}
+
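+ /* Count the advertised hw mode capabilities and accumulate the total
+ * phy count. fls() of phy_id_map yields the per-mode phy count on the
+ * assumption that phy ids are assigned contiguously from bit 0.
+ */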
+static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
+ u32 phy_map = 0;
+
+ if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
+ return -ENOBUFS;
+
+ hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
+ hw_mode_id);
+ svc_rdy_ext->n_hw_mode_caps++;
+
+ phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
+ svc_rdy_ext->tot_phy_id += fls(phy_map);
+
+ return 0;
+}
+
+static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
+ enum wmi_host_hw_mode_config_type mode, pref;
+ u32 i;
+ int ret;
+
+ svc_rdy_ext->n_hw_mode_caps = 0;
+ svc_rdy_ext->hw_mode_caps = ptr;
+
+ ret = ath12k_wmi_tlv_iter(soc, ptr, len,
+ ath12k_wmi_hw_mode_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
+ hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
+ pref = soc->wmi_ab.preferred_hw_mode;
+
+ if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
+ svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
+ soc->wmi_ab.preferred_hw_mode = mode;
+ }
+ }
+
+ ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
+ soc->wmi_ab.preferred_hw_mode);
+ if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+ return -ENOBUFS;
+
+ len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
+ if (!svc_rdy_ext->n_mac_phy_caps) {
+ svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
+ sizeof(*svc_rdy_ext->mac_phy_caps),
+ GFP_ATOMIC);
+ if (!svc_rdy_ext->mac_phy_caps)
+ return -ENOMEM;
+ }
+
+ memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
+ svc_rdy_ext->n_mac_phy_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
+ return -ENOBUFS;
+
+ svc_rdy_ext->n_ext_hal_reg_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
+ int ret;
+ u32 i;
+
+ svc_rdy_ext->n_ext_hal_reg_caps = 0;
+ svc_rdy_ext->ext_hal_reg_caps = ptr;
+ ret = ath12k_wmi_tlv_iter(soc, ptr, len,
+ ath12k_wmi_ext_hal_reg_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
+ ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->ext_hal_reg_caps, i,
+ &reg_cap);
+ if (ret) {
+ ath12k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
+ soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
+ }
+ return 0;
+}
+
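+ /* Walk the preferred hw mode's phy_id_map bit by bit and pull the
+ * mac/phy capability of each phy into a pdev. single_pdev_only
+ * targets fold every phy's capability into pdev 0.
+ */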
+static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
+ u16 len, const void *ptr,
+ void *data)
+{
+ struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
+ u32 phy_id_map;
+ int pdev_index = 0;
+ int ret;
+
+ svc_rdy_ext->soc_hal_reg_caps = ptr;
+ svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
+
+ soc->num_radios = 0;
+ phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
+
+ while (phy_id_map && soc->num_radios < MAX_RADIOS) {
+ ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+ svc_rdy_ext,
+ hw_mode_id, soc->num_radios,
+ &soc->pdevs[pdev_index]);
+ if (ret) {
+ ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
+ soc->num_radios);
+ return ret;
+ }
+
+ soc->num_radios++;
+
+ /* For single_pdev_only targets,
+ * save mac_phy capability in the same pdev
+ */
+ if (soc->hw_params->single_pdev_only)
+ pdev_index = 0;
+ else
+ pdev_index = soc->num_radios;
+
+ /* TODO: mac_phy_cap prints */
+ phy_id_map >>= 1;
+ }
+
+ if (soc->hw_params->single_pdev_only) {
+ soc->num_radios = 1;
+ soc->pdevs[0].pdev_id = 0;
+ }
+
+ return 0;
+}
+
+static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_ring_caps_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
+ return -EPROTO;
+
+ parse->n_dma_ring_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
+ u32 num_cap)
+{
+ size_t sz;
+ void *ptr;
+
+ sz = num_cap * sizeof(struct ath12k_dbring_cap);
+ ptr = kzalloc(sz, GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ ab->db_caps = ptr;
+ ab->num_db_cap = num_cap;
+
+ return 0;
+}
+
+static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
+{
+ kfree(ab->db_caps);
+ ab->db_caps = NULL;
+}
+
+static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
+ struct ath12k_wmi_dma_ring_caps_params *dma_caps;
+ struct ath12k_dbring_cap *dir_buff_caps;
+ int ret;
+ u32 i;
+
+ dma_caps_parse->n_dma_ring_caps = 0;
+ dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_dma_ring_caps_parse,
+ dma_caps_parse);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
+ return ret;
+ }
+
+ if (!dma_caps_parse->n_dma_ring_caps)
+ return 0;
+
+ if (ab->num_db_cap) {
+ ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
+ return 0;
+ }
+
+ ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
+ if (ret)
+ return ret;
+
+ dir_buff_caps = ab->db_caps;
+ for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
+ if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
+ ath12k_warn(ab, "Invalid module id %d\n",
+ le32_to_cpu(dma_caps[i].module_id));
+ ret = -EINVAL;
+ goto free_dir_buff;
+ }
+
+ dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
+ dir_buff_caps[i].pdev_id =
+ DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
+ dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
+ dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
+ dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
+ }
+
+ return 0;
+
+free_dir_buff:
+ ath12k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
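+ /* Top-level parser for SERVICE_READY_EXT. Several arrays arrive as
+ * back-to-back WMI_TAG_ARRAY_STRUCT TLVs with no distinguishing tag,
+ * so the *_done flags rely on the firmware emitting them in a fixed
+ * order: hw mode caps, mac/phy caps, ext HAL reg caps, chainmask
+ * combo and chainmask cap arrays, OEM DMA ring caps and finally DMA
+ * ring caps.
+ */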
+static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EXT_EVENT:
+ ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
+ &svc_rdy_ext->arg);
+ if (ret) {
+ ath12k_warn(ab, "unable to extract ext params\n");
+ return ret;
+ }
+ break;
+
+ case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
+ svc_rdy_ext->hw_caps = ptr;
+ svc_rdy_ext->arg.num_hw_modes =
+ le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
+ break;
+
+ case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
+ ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+ break;
+
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!svc_rdy_ext->hw_mode_done) {
+ ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->hw_mode_done = true;
+ } else if (!svc_rdy_ext->mac_phy_done) {
+ svc_rdy_ext->n_mac_phy_caps = 0;
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_mac_phy_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ svc_rdy_ext->mac_phy_done = true;
+ } else if (!svc_rdy_ext->ext_hal_reg_done) {
+ ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->ext_hal_reg_done = true;
+ } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
+ svc_rdy_ext->mac_phy_chainmask_combo_done = true;
+ } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
+ svc_rdy_ext->mac_phy_chainmask_cap_done = true;
+ } else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
+ svc_rdy_ext->oem_dma_ring_cap_done = true;
+ } else if (!svc_rdy_ext->dma_ring_cap_done) {
+ ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
+ &svc_rdy_ext->dma_caps_parse);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->dma_ring_cap_done = true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_svc_rdy_ext_parse,
+ &svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+ goto err;
+ }
+
+ if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
+ complete(&ab->wmi_ab.service_ready);
+
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return 0;
+
+err:
+ kfree(svc_rdy_ext.mac_phy_caps);
+ ath12k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!parse->dma_ring_cap_done) {
+ ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
+ &parse->dma_caps_parse);
+ if (ret)
+ return ret;
+
+ parse->dma_ring_cap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_svc_rdy_ext2_parse,
+ &svc_rdy_ext2);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
+ goto err;
+ }
+
+ complete(&ab->wmi_ab.service_ready);
+
+ return 0;
+
+err:
+ ath12k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+ const void **tb;
+ const struct wmi_vdev_start_resp_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch vdev start resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_rsp = *ev;
+
+ kfree(tb);
+ return 0;
+}
+
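+ /* Unpack an array of WMI ext reg rules into the host representation,
+ * extracting frequency range, bandwidth, power and PSD values from
+ * their packed little-endian bitfields.
+ */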
+static struct ath12k_reg_rule
+*create_ext_reg_rules_from_wmi(u32 num_reg_rules,
+ struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
+{
+ struct ath12k_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
+ GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ le32_get_bits(wmi_reg_rule[count].freq_info,
+ REG_RULE_START_FREQ);
+ reg_rule_ptr[count].end_freq =
+ le32_get_bits(wmi_reg_rule[count].freq_info,
+ REG_RULE_END_FREQ);
+ reg_rule_ptr[count].max_bw =
+ le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_MAX_BW);
+ reg_rule_ptr[count].reg_power =
+ le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_REG_PWR);
+ reg_rule_ptr[count].ant_gain =
+ le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_ANT_GAIN);
+ reg_rule_ptr[count].flags =
+ le32_get_bits(wmi_reg_rule[count].flag_info,
+ REG_RULE_FLAGS);
+ reg_rule_ptr[count].psd_flag =
+ le32_get_bits(wmi_reg_rule[count].psd_power_info,
+ REG_RULE_PSD_INFO);
+ reg_rule_ptr[count].psd_eirp =
+ le32_get_bits(wmi_reg_rule[count].psd_power_info,
+ REG_RULE_PSD_EIRP);
+ }
+
+ return reg_rule_ptr;
+}
+
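+ /* Extract ath12k_reg_info from a REG_CHAN_LIST_CC_EXT event. The rule
+ * counts sit in the fixed part; the rules themselves follow as one
+ * flat array consumed in order: 2G, 5G, then the 6G AP rules per power
+ * mode, then the 6G client rules per power mode and client type.
+ */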
+static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ struct ath12k_reg_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_ext_event *ev;
+ struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
+ u32 num_2g_reg_rules, num_5g_reg_rules;
+ u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 total_reg_rules = 0;
+ int ret, i, j;
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
+ reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
+ reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
+ le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
+ reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
+ le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
+ reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
+ le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
+ le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
+ reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
+ le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
+ reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
+ le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
+ }
+
+ num_2g_reg_rules = reg_info->num_2g_reg_rules;
+ total_reg_rules += num_2g_reg_rules;
+ num_5g_reg_rules = reg_info->num_5g_reg_rules;
+ total_reg_rules += num_5g_reg_rules;
+
+ if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
+ ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
+ num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
+
+ if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
+ ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
+ i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ total_reg_rules += num_6g_reg_rules_ap[i];
+ }
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
+ reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
+ total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
+
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
+ reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
+ total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
+
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
+ reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
+ total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
+
+ if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
+ ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
+ i);
+ kfree(tb);
+ return -EINVAL;
+ }
+ }
+
+ if (!total_reg_rules) {
+ ath12k_warn(ab, "No reg rules available\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
+
+ /* FIXME: Currently the FW also includes the 6G reg rules in the 5G
+ * rule list for country US. Having the same 6G reg rule in both the
+ * 5G and 6G rule lists makes the intersect check true, so the same
+ * rules show up multiple times in the iw output. Work around this by
+ * not parsing the 6G rules from the 5G reg rule list; this hack can
+ * be removed once the FW stops including 6G rules in the 5G list.
+ */
+ if (memcmp(reg_info->alpha2, "US", 2) == 0) {
+ reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
+ num_5g_reg_rules = reg_info->num_5g_reg_rules;
+ }
+
+ reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
+ reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
+ reg_info->num_phy = le32_to_cpu(ev->num_phy);
+ reg_info->phy_id = le32_to_cpu(ev->phy_id);
+ reg_info->ctry_code = le32_to_cpu(ev->country_id);
+ reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
+
+ switch (le32_to_cpu(ev->status_code)) {
+ case WMI_REG_SET_CC_STATUS_PASS:
+ reg_info->status_code = REG_SET_CC_STATUS_PASS;
+ break;
+ case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
+ reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
+ break;
+ case WMI_REG_INIT_ALPHA2_NOT_FOUND:
+ reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
+ break;
+ case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
+ reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
+ break;
+ case WMI_REG_SET_CC_STATUS_NO_MEMORY:
+ reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
+ break;
+ case WMI_REG_SET_CC_STATUS_FAIL:
+ reg_info->status_code = REG_SET_CC_STATUS_FAIL;
+ break;
+ }
+
+ reg_info->is_ext_reg_event = true;
+
+ reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
+ reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
+ reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
+ reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
+ reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
+ reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
+ reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
+ reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
+ reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
+ reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
+ le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
+ reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
+ le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
+ reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
+ le32_to_cpu(ev->min_bw_6g_client_sp[i]);
+ reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
+ le32_to_cpu(ev->max_bw_6g_client_sp[i]);
+ reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
+ le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
+ reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
+ le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d\n",
+ __func__, reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2g, reg_info->max_bw_2g,
+ reg_info->min_bw_5g, reg_info->max_bw_5g);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "num_2g_reg_rules %d num_5g_reg_rules %d",
+ num_2g_reg_rules, num_5g_reg_rules);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
+ num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
+ num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
+ num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
+ num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
+ num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
+
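+ /* The reg rule arrays are laid out back to back after the fixed
+ * event struct and its TLV header: 2g rules, 5g rules, the 6g AP
+ * rules per power mode (LPI, SP, VLP) and finally the 6g client
+ * rules per power mode and client type. ext_wmi_reg_rule walks
+ * through them in that order below.
+ */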
+ ext_wmi_reg_rule =
+ (struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
+ + sizeof(*ev)
+ + sizeof(struct wmi_tlv));
+
+ if (num_2g_reg_rules) {
+ reg_info->reg_rules_2g_ptr =
+ create_ext_reg_rules_from_wmi(num_2g_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_2g_ptr) {
+ kfree(tb);
+ ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (num_5g_reg_rules) {
+ ext_wmi_reg_rule += num_2g_reg_rules;
+ reg_info->reg_rules_5g_ptr =
+ create_ext_reg_rules_from_wmi(num_5g_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_5g_ptr) {
+ kfree(tb);
+ ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ ext_wmi_reg_rule += num_5g_reg_rules;
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ reg_info->reg_rules_6g_ap_ptr[i] =
+ create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_6g_ap_ptr[i]) {
+ kfree(tb);
+ ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n");
+ return -ENOMEM;
+ }
+
+ ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
+ }
+
+ for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->reg_rules_6g_client_ptr[j][i] =
+ create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
+ kfree(tb);
+ ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n");
+ return -ENOMEM;
+ }
+
+ ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
+ }
+ }
+
+ reg_info->client_type = le32_to_cpu(ev->client_type);
+ reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
+ reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
+ reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
+ le32_to_cpu(ev->domain_code_6g_ap_lpi);
+ reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
+ le32_to_cpu(ev->domain_code_6g_ap_sp);
+ reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
+ le32_to_cpu(ev->domain_code_6g_ap_vlp);
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
+ le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
+ reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
+ le32_to_cpu(ev->domain_code_6g_client_sp[i]);
+ reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
+ le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
+ }
+
+ reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
+ reg_info->client_type, reg_info->domain_code_6g_super_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+ const void **tb;
+ const struct wmi_peer_delete_resp_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch peer delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+
+ peer_del_resp->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer_del_resp->peer_macaddr.addr,
+ ev->peer_macaddr.addr);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_delete_resp_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch vdev delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = le32_to_cpu(ev->vdev_id);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
+ u32 len, u32 *vdev_id,
+ u32 *tx_status)
+{
+ const void **tb;
+ const struct wmi_bcn_tx_status_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch bcn tx status ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = le32_to_cpu(ev->vdev_id);
+ *tx_status = le32_to_cpu(ev->tx_status);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_stopped_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch vdev stop ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = le32_to_cpu(ev->vdev_id);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_mgmt_rx_parse *parse = data;
+
+ switch (tag) {
+ case WMI_TAG_MGMT_RX_HDR:
+ parse->fixed = ptr;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ if (!parse->frame_buf_done) {
+ parse->frame_buf = ptr;
+ parse->frame_buf_done = true;
+ }
+ break;
+ }
+ return 0;
+}
+
+static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ struct ath12k_wmi_mgmt_rx_arg *hdr)
+{
+ struct wmi_tlv_mgmt_rx_parse parse = { };
+ const struct ath12k_wmi_mgmt_rx_params *ev;
+ const u8 *frame;
+ int i, ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_tlv_mgmt_rx_parse,
+ &parse);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
+ return ret;
+ }
+
+ ev = parse.fixed;
+ frame = parse.frame_buf;
+
+ if (!ev || !frame) {
+ ath12k_warn(ab, "failed to fetch mgmt rx hdr");
+ return -EPROTO;
+ }
+
+ hdr->pdev_id = le32_to_cpu(ev->pdev_id);
+ hdr->chan_freq = le32_to_cpu(ev->chan_freq);
+ hdr->channel = le32_to_cpu(ev->channel);
+ hdr->snr = le32_to_cpu(ev->snr);
+ hdr->rate = le32_to_cpu(ev->rate);
+ hdr->phy_mode = le32_to_cpu(ev->phy_mode);
+ hdr->buf_len = le32_to_cpu(ev->buf_len);
+ hdr->status = le32_to_cpu(ev->status);
+ hdr->flags = le32_to_cpu(ev->flags);
+ hdr->rssi = a_sle32_to_cpu(ev->rssi);
+ hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
+
+ for (i = 0; i < ATH_MAX_ANTENNA; i++)
+ hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
+
+ if (skb->len < (frame - skb->data) + hdr->buf_len) {
+ ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
+ return -EPROTO;
+ }
+
+ /* shift the sk_buff to point to `frame` */
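+ /* skb_trim() resets the tail to the start of the buffer, the
+ * first skb_put() advances the tail up to `frame`, skb_pull()
+ * then moves skb->data to the same offset, and the final
+ * skb_put() exposes exactly buf_len bytes of frame payload.
+ */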
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, hdr->buf_len);
+
+ return 0;
+}
+
+static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
+ u32 status)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath12k_skb_cb *skb_cb;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_find(&ar->txmgmt_idr, desc_id);
+
+ if (!msdu) {
+ ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
+ desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ return -ENOENT;
+ }
+
+ idr_remove(&ar->txmgmt_idr, desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+ dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
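+ /* a zero status means the firmware acked the frame; report
+ * TX_STAT_ACK only for frames that expected an ack
+ */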
+ if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ /* WARN if we receive this event without having done any mgmt tx */
+ if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
+ WARN_ON_ONCE(1);
+
+ return 0;
+}
+
+static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ struct wmi_mgmt_tx_compl_event *param)
+{
+ const void **tb;
+ const struct wmi_mgmt_tx_compl_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ param->pdev_id = ev->pdev_id;
+ param->desc_id = ev->desc_id;
+ param->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath12k_wmi_event_scan_started(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_STARTING:
+ ar->scan.state = ATH12K_SCAN_RUNNING;
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath12k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_STARTING:
+ /* One suspected reason scan can be completed while starting is
+ * if firmware fails to deliver all scan events to the host,
+ * e.g. when transport pipe is full. This has been observed
+ * with spectral scan phyerr events starving wmi transport
+ * pipe. In such a case the "scan completed" event should be (and
+ * is) ignored by the host as it may be just firmware's scan
+ * state machine recovering.
+ */
+ ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ __ath12k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_STARTING:
+ ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_STARTING:
+ ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ break;
+ }
+}
+
+static const char *
+ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
+
+static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_scan_event *scan_evt_param)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_SCAN_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch scan ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ scan_evt_param->event_type = ev->event_type;
+ scan_evt_param->reason = ev->reason;
+ scan_evt_param->channel_freq = ev->channel_freq;
+ scan_evt_param->scan_req_id = ev->scan_req_id;
+ scan_evt_param->scan_id = ev->scan_id;
+ scan_evt_param->vdev_id = ev->vdev_id;
+ scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_sta_kickout_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch peer sta kickout ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_roam_event *roam_ev)
+{
+ const void **tb;
+ const struct wmi_roam_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_ROAM_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch roam ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ roam_ev->vdev_id = ev->vdev_id;
+ roam_ev->reason = ev->reason;
+ roam_ev->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
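+ /* Map a channel center frequency to a flat index across all
+ * bands registered with mac80211, in band order. If the
+ * frequency is not found the index one past the last channel is
+ * returned; callers bounds-check the result against ar->survey.
+ */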
+static int freq_to_idx(struct ath12k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_chan_info_event *ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ch_info_ev->err_code = ev->err_code;
+ ch_info_ev->freq = ev->freq;
+ ch_info_ev->cmd_flags = ev->cmd_flags;
+ ch_info_ev->noise_floor = ev->noise_floor;
+ ch_info_ev->rx_clear_count = ev->rx_clear_count;
+ ch_info_ev->cycle_count = ev->cycle_count;
+ ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ ch_info_ev->rx_frame_count = ev->rx_frame_count;
+ ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
+ ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
+ ch_info_ev->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_pdev_bss_chan_info_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ bss_ch_info_ev->pdev_id = ev->pdev_id;
+ bss_ch_info_ev->freq = ev->freq;
+ bss_ch_info_ev->noise_floor = ev->noise_floor;
+ bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
+ bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
+ bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
+ bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
+ bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
+ bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
+ bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
+ bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
+ bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
+ bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_install_key_complete_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_install_key_compl_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch vdev install key compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = le32_to_cpu(ev->vdev_id);
+ arg->macaddr = ev->peer_macaddr.addr;
+ arg->key_idx = le32_to_cpu(ev->key_idx);
+ arg->key_flags = le32_to_cpu(ev->key_flags);
+ arg->status = le32_to_cpu(ev->status);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
+{
+ const void **tb;
+ const struct wmi_peer_assoc_conf_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch peer assoc conf ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
+ peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_pdev_temperature_event *ev)
+{
+ const void **tb;
+ const struct wmi_pdev_temperature_event *temp_ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ temp_ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+ if (!temp_ev) {
+ ath12k_warn(ab, "failed to fetch pdev temp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* copy the event out so the caller's struct is populated;
+ * reassigning only the pointer parameter would leave it zeroed
+ */
+ ev->temp = temp_ev->temp;
+ ev->pdev_id = temp_ev->pdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
+{
+ /* try to send pending beacons first. they take priority */
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+}
+
+static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
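+ /* the world regulatory domain is advertised with alpha2 "00" */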
+static bool ath12k_reg_is_world_alpha(char *alpha)
+{
+ return alpha[0] == '0' && alpha[1] == '0';
+}
+
+static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k_reg_info *reg_info = NULL;
+ struct ieee80211_regdomain *regd = NULL;
+ bool intersect = false;
+ int ret = 0, pdev_idx, i, j;
+ struct ath12k *ar;
+
+ reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+ if (!reg_info) {
+ ret = -ENOMEM;
+ goto fallback;
+ }
+
+ ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
+
+ if (ret) {
+ ath12k_warn(ab, "failed to extract regulatory info from received event\n");
+ goto fallback;
+ }
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* On failure to set the requested country, the fw retains the
+ * current regd. Print an error and return from here.
+ */
+ ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ goto mem_free;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback.
+ */
+ if (ab->hw_params->single_pdev_only &&
+ pdev_idx < ab->hw_params->num_rxmda_per_pdev)
+ goto mem_free;
+ else
+ goto fallback;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp(ab->default_regd[pdev_idx]->alpha2,
+ reg_info->alpha2, 2))
+ goto mem_free;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e. a default regd was already set during initialization
+ * and the regd coming from this event has a valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath12k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ regd = ath12k_reg_build_regd(ab, reg_info, intersect);
+ if (!regd) {
+ ath12k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ /* Once the mac is registered, ar is valid and all CC events
+ * from the fw are currently considered to be triggered by user
+ * requests. Free the previously built regd before assigning the
+ * newly generated one to ar; kfree handles a NULL pointer
+ * itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ ieee80211_queue_work(ar->hw, &ar->regd_update_work);
+ } else {
+ /* Multiple events for the same *ar are not expected, but we
+ * can still clear any previously stored default_regd if this
+ * event arrives for the same radio by mistake; kfree handles a
+ * NULL pointer itself.
+ */
+ kfree(ab->default_regd[pdev_idx]);
+ /* This regd would be applied during mac registration */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock(&ab->base_lock);
+
+ goto mem_free;
+
+fallback:
+ /* Fall back to the older reg (by sending the previous country
+ * setting again) if the fw has succeeded and we failed to process
+ * it here. The regdomain should be uniform across driver and fw.
+ * Since the fw has processed the command and returned success, we
+ * expect this function to succeed as well. If it doesn't, the
+ * country needs to be reverted at the fw and the old
+ * SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but still should also be handled */
+ WARN_ON(1);
+mem_free:
+ if (reg_info) {
+ kfree(reg_info->reg_rules_2g_ptr);
+ kfree(reg_info->reg_rules_5g_ptr);
+ if (reg_info->is_ext_reg_event) {
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
+ kfree(reg_info->reg_rules_6g_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
+ kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
+ }
+ kfree(reg_info);
+ }
+ return ret;
+}
+
+static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_rdy_parse *rdy_parse = data;
+ struct wmi_ready_event fixed_param;
+ struct ath12k_wmi_mac_addr_params *addr_list;
+ struct ath12k_pdev *pdev;
+ u32 num_mac_addr;
+ int i;
+
+ switch (tag) {
+ case WMI_TAG_READY_EVENT:
+ memset(&fixed_param, 0, sizeof(fixed_param));
+ memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
+ min_t(u16, sizeof(fixed_param), len));
+ ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
+ rdy_parse->num_extra_mac_addr =
+ le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
+
+ ether_addr_copy(ab->mac_addr,
+ fixed_param.ready_event_min.mac_addr.addr);
+ ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
+ ab->wmi_ready = true;
+ break;
+ case WMI_TAG_ARRAY_FIXED_STRUCT:
+ addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
+ num_mac_addr = rdy_parse->num_extra_mac_addr;
+
+ if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
+ break;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+ }
+ ab->pdevs_macaddr_valid = true;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k_wmi_rdy_parse rdy_parse = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_rdy_parse, &rdy_parse);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ complete(&ab->wmi_ab.unified_ready);
+ return 0;
+}
+
+static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_event peer_del_resp;
+ struct ath12k *ar;
+
+ if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
+ ath12k_warn(ab, "failed to extract peer delete resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
+ peer_del_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->peer_delete_done);
+ rcu_read_unlock();
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
+ peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
+}
+
+static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k *ar;
+ u32 vdev_id = 0;
+
+ if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
+ ath12k_warn(ab, "failed to extract vdev delete resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_delete_done);
+
+ rcu_read_unlock();
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
+ vdev_id);
+}
+
+static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
+{
+ switch (vdev_resp_status) {
+ case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
+ return "invalid vdev id";
+ case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
+ return "not supported";
+ case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
+ return "dfs violation";
+ case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
+ return "invalid regdomain";
+ default:
+ return "unknown";
+ }
+}
+
+static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_resp_event vdev_start_resp;
+ struct ath12k *ar;
+ u32 status;
+
+ if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
+ ath12k_warn(ab, "failed to extract vdev start resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
+ vdev_start_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->last_wmi_vdev_start_status = 0;
+
+ status = le32_to_cpu(vdev_start_resp.status);
+
+ if (WARN_ON_ONCE(status)) {
+ ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
+ status, ath12k_wmi_vdev_resp_print(status));
+ ar->last_wmi_vdev_start_status = status;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
+ vdev_start_resp.vdev_id);
+}
+
+static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ u32 vdev_id, tx_status;
+
+ if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
+ &vdev_id, &tx_status) != 0) {
+ ath12k_warn(ab, "failed to extract bcn tx status");
+ return;
+ }
+}
+
+static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k *ar;
+ u32 vdev_id = 0;
+
+ if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
+ ath12k_warn(ab, "failed to extract vdev stopped event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
+}
+
+static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
+ struct ath12k *ar;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ struct ieee80211_supported_band *sband;
+
+ if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
+ ath12k_warn(ab, "failed to extract mgmt rx event");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ memset(status, 0, sizeof(*status));
+
+ ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
+ rx_ev.status);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
+
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
+ rx_ev.pdev_id);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
+ WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
+ status->band = NL80211_BAND_6GHZ;
+ } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.phy_mode == MODE_11B &&
+ (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
+
+ sband = &ar->mac.sbands[status->band];
+
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+ status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates, split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ /* In case of PMF, FW delivers decrypted frames with Protected Bit set
+ * including group privacy action frames.
+ */
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_robust_mgmt_frame(skb)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ /* TODO: Pending handle beacon implementation
+ *if (ieee80211_is_beacon(hdr->frame_control))
+ * ath12k_mac_handle_beacon(ar, skb);
+ */
+
+ ath12k_dbg(ab, ATH12K_DBG_MGMT,
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath12k_dbg(ab, ATH12K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct ath12k *ar;
+
+ if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
+ ath12k_warn(ab, "failed to extract mgmt tx compl event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
+ tx_compl_param.pdev_id);
+ goto exit;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
+ le32_to_cpu(tx_compl_param.status));
+
+ ath12k_dbg(ab, ATH12K_DBG_MGMT,
+ "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
+ tx_compl_param.pdev_id, tx_compl_param.desc_id,
+ tx_compl_param.status);
+
+exit:
+ rcu_read_unlock();
+}
+
+static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH12K_SCAN_ABORTING &&
+ ar->scan.vdev_id == vdev_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k *ar;
+ struct wmi_scan_event scan_ev = {0};
+
+ if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
+ ath12k_warn(ab, "failed to extract scan event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* In case the scan was cancelled, e.g. during interface teardown,
+ * the interface will not be found among the active interfaces.
+ * In such scenarios, instead iterate over the active pdevs to
+ * find the 'ar' whose scan is ABORTING and whose aborting scan's
+ * vdev id matches this event's info.
+ */
+ if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
+ le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED)
+ ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id));
+ else
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
+
+ if (!ar) {
+ ath12k_warn(ab, "Received scan event for unknown vdev");
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
+ le32_to_cpu(scan_ev.reason)),
+ le32_to_cpu(scan_ev.event_type),
+ le32_to_cpu(scan_ev.reason),
+ le32_to_cpu(scan_ev.channel_freq),
+ le32_to_cpu(scan_ev.scan_req_id),
+ le32_to_cpu(scan_ev.scan_id),
+ le32_to_cpu(scan_ev.vdev_id),
+ ath12k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (le32_to_cpu(scan_ev.event_type)) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath12k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath12k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath12k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath12k_warn(ab, "received scan start failure event\n");
+ ath12k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_read_unlock();
+}
+
+static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_kickout_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath12k_peer *peer;
+ struct ath12k *ar;
+
+ if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
+ ath12k_warn(ab, "failed to extract peer sta kickout event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
+
+ if (!peer) {
+ ath12k_warn(ab, "peer not found %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
+ peer->vdev_id);
+ goto exit;
+ }
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arg.mac_addr, NULL);
+ if (!sta) {
+ ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
+ arg.mac_addr);
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_roam_event roam_ev = {};
+ struct ath12k *ar;
+
+ if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
+ ath12k_warn(ab, "failed to extract roam event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in roam ev %d",
+ roam_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
+ ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+
+ switch (le32_to_cpu(roam_ev.reason)) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ /* TODO: Pending beacon miss and connection_loss_work
+ * implementation
+ * ath12k_mac_handle_beacon_miss(ar, vdev_id);
+ */
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_chan_info_event ch_info_ev = {0};
+ struct ath12k *ar;
+ struct survey_info *survey;
+ int idx;
+ /* HW channel counters frequency value in hertz */
+ u32 cc_freq_hz = ab->cc_freq_hz;
+
+ if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ ath12k_warn(ab, "failed to extract chan info event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
+ ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
+ ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
+ ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
+ ch_info_ev.mac_clk_mhz);
+
+ if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in chan info ev %d",
+ ch_info_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_STARTING:
+ ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ /* If the FW provides the MAC clock frequency in MHz, override the
+ * initialized HW channel counters frequency value.
+ */
+ if (ch_info_ev.mac_clk_mhz)
+ cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
+ survey = &ar->survey[idx];
+ memset(survey, 0, sizeof(*survey));
+ survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
+ survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+ survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
+ survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
+ cc_freq_hz);
+ }
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void
+ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
+ struct survey_info *survey;
+ struct ath12k *ar;
+ u32 cc_freq_hz = ab->cc_freq_hz;
+ u64 busy, total, tx, rx, rx_bss;
+ int idx;
+
+ if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
+ ath12k_warn(ab, "failed to extract pdev bss chan info event");
+ return;
+ }
+
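+ /* each counter arrives as two 32-bit halves; stitch them back
+ * into 64-bit cycle counts
+ */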
+ busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
+
+ total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.cycle_count_low);
+
+ tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
+
+ rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
+
+ rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
+ bss_ch_info_ev.noise_floor, busy, total,
+ tx, rx, rx_bss);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
+
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
+ bss_ch_info_ev.pdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ bss_ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor);
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+
+ rcu_read_unlock();
+}
+
+static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct ath12k *ar;
+
+ if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
+ ath12k_warn(ab, "failed to extract install key compl event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
+ install_key_compl.key_idx, install_key_compl.key_flags,
+ install_key_compl.macaddr, install_key_compl.status);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
+ install_key_compl.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->install_key_status = 0;
+
+ if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
+ ath12k_warn(ab, "install key failed for %pM status %d\n",
+ install_key_compl.macaddr, install_key_compl.status);
+ ar->install_key_status = install_key_compl.status;
+ }
+
+ complete(&ar->install_key_done);
+ rcu_read_unlock();
+}
+
+static void ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_service_available_event *ev;
+ int ret;
+ int i, j;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch svc available ev");
+ kfree(tb);
+ return;
+ }
+
+ /* TODO: Use wmi_service_segment_offset information to get the service
+ * especially when more services are advertised in multiple service
+ * available events.
+ */
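+ /* Each 32-bit word i of the segment bitmap covers the 32
+ * extended service ids starting at WMI_MAX_SERVICE + i * 32; a
+ * set bit at position (j % 32) marks service id j as available.
+ */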
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
+ ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+
+ kfree(tb);
+}
+
+static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct ath12k *ar;
+
+ if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
+ ath12k_warn(ab, "failed to extract peer assoc conf event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "peer assoc conf ev vdev id %d macaddr %pM\n",
+ peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
+
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
+ peer_assoc_conf.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->peer_assoc_done);
+ rcu_read_unlock();
+}
+
+static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+}
+
+/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
+ * is not part of the BDF CTL (conformance test limits) table entries.
+ */
+static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_ctl_failsafe_chk_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
+ kfree(tb);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev ctl failsafe check ev status %d\n",
+ ev->ctl_failsafe_status);
+
+ /* If ctl_failsafe_status is set to 1, the FW caps the transmit power
+ * at 10 dBm; otherwise the CTL power entry in the BDF is used.
+ */
+ if (ev->ctl_failsafe_status != 0)
+ ath12k_warn(ab, "pdev ctl failsafe failure status %d",
+ ev->ctl_failsafe_status);
+
+ kfree(tb);
+}
+
+static void
+ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
+ const struct ath12k_wmi_pdev_csa_event *ev,
+ const u32 *vdev_ids)
+{
+ int i;
+ struct ath12k_vif *arvif;
+
+ /* Finish CSA once the switch count reaches zero */
+ if (ev->current_switch_count)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
+
+ if (!arvif) {
+ ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
+ vdev_ids[i]);
+ continue;
+ }
+
+ if (arvif->is_up && arvif->vif->bss_conf.csa_active)
+ ieee80211_csa_finish(arvif->vif);
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct ath12k_wmi_pdev_csa_event *ev;
+ const u32 *vdev_ids;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
+ vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
+
+ if (!ev || !vdev_ids) {
+ ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
+ kfree(tb);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev csa switch count %d for pdev %d, num_vdevs %d",
+ ev->current_switch_count, ev->pdev_id,
+ ev->num_vdevs);
+
+ ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
+
+ kfree(tb);
+}
+
+static void
+ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct ath12k_wmi_pdev_radar_event *ev;
+ struct ath12k *ar;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
+
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
+ kfree(tb);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
+ ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+
+ if (!ar) {
+ ath12k_warn(ab, "radar detected in invalid pdev %d\n",
+ ev->pdev_id);
+ goto exit;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
+ ev->pdev_id);
+
+ if (ar->dfs_block_radar_events)
+ ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw);
+
+exit:
+ kfree(tb);
+}
+
+static void
+ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k *ar;
+ struct wmi_pdev_temperature_event ev = {0};
+
+ if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ ath12k_warn(ab, "failed to extract pdev temperature event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+ return;
+ }
+}
+
+static void ath12k_fils_discovery_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_fils_discovery_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab,
+ "failed to parse FILS discovery event tlv %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch FILS discovery event\n");
+ kfree(tb);
+ return;
+ }
+
+ ath12k_warn(ab,
+ "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
+ ev->vdev_id, ev->fils_tt, ev->tbtt);
+
+ kfree(tb);
+}
+
+static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_probe_resp_tx_status_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab,
+ "failed to parse probe response transmission status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab,
+ "failed to fetch probe response transmission status event");
+ kfree(tb);
+ return;
+ }
+
+ if (ev->tx_status)
+ ath12k_warn(ab,
+ "Probe response transmission failed for vdev_id %u, status %u\n",
+ ev->vdev_id, ev->tx_status);
+
+ kfree(tb);
+}
+
+static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
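+ /* on the rx path the cmd_id field of the WMI header carries the
+ * event id
+ */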
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ switch (id) {
+ /* Process all the WMI events here */
+ case WMI_SERVICE_READY_EVENTID:
+ ath12k_service_ready_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT_EVENTID:
+ ath12k_service_ready_ext_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT2_EVENTID:
+ ath12k_service_ready_ext2_event(ab, skb);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
+ ath12k_reg_chan_list_event(ab, skb);
+ break;
+ case WMI_READY_EVENTID:
+ ath12k_ready_event(ab, skb);
+ break;
+ case WMI_PEER_DELETE_RESP_EVENTID:
+ ath12k_peer_delete_resp_event(ab, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath12k_vdev_start_resp_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath12k_bcn_tx_status_event(ab, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath12k_vdev_stopped_event(ab, skb);
+ break;
+ case WMI_MGMT_RX_EVENTID:
+ ath12k_mgmt_rx_event(ab, skb);
+ /* mgmt_rx_event() owns the skb now! */
+ return;
+ case WMI_MGMT_TX_COMPLETION_EVENTID:
+ ath12k_mgmt_tx_compl_event(ab, skb);
+ break;
+ case WMI_SCAN_EVENTID:
+ ath12k_scan_event(ab, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath12k_peer_sta_kickout_event(ab, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath12k_roam_event(ab, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath12k_chan_info_event(ab, skb);
+ break;
+ case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath12k_pdev_bss_chan_info_event(ab, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath12k_vdev_install_key_compl_event(ab, skb);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath12k_service_available_event(ab, skb);
+ break;
+ case WMI_PEER_ASSOC_CONF_EVENTID:
+ ath12k_peer_assoc_conf_event(ab, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath12k_update_stats_event(ab, skb);
+ break;
+ case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
+ ath12k_pdev_ctl_failsafe_check_event(ab, skb);
+ break;
+ case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
+ ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
+ break;
+ case WMI_PDEV_TEMPERATURE_EVENTID:
+ ath12k_wmi_pdev_temperature_event(ab, skb);
+ break;
+ case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
+ ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
+ break;
+ case WMI_HOST_FILS_DISCOVERY_EVENTID:
+ ath12k_fils_discovery_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
+ ath12k_probe_resp_tx_status_event(ab, skb);
+ break;
+ /* add Unsupported events here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_TWT_ENABLE_EVENTID:
+ case WMI_TWT_DISABLE_EVENTID:
+ case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
+ ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
+ break;
+ case WMI_VDEV_DELETE_RESP_EVENTID:
+ ath12k_vdev_delete_resp_event(ab, skb);
+ break;
+ /* TODO: Add remaining events */
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
+ u32 pdev_idx)
+{
+ int status;
+ u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+ struct ath12k_htc_svc_conn_req conn_req = {};
+ struct ath12k_htc_svc_conn_resp conn_resp = {};
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
+ conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = svc_id[pdev_idx];
+
+ status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
+ ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
+ ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+
+ return 0;
+}
+
+static int
+ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
+ struct wmi_unit_test_cmd ut_cmd,
+ u32 *test_args)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_unit_test_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 *ut_cmd_args;
+ int buf_len, arg_len;
+ int ret;
+ int i;
+
+ arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
+ buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_unit_test_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
+ sizeof(ut_cmd));
+
+ cmd->vdev_id = ut_cmd.vdev_id;
+ cmd->module_id = ut_cmd.module_id;
+ cmd->num_args = ut_cmd.num_args;
+ cmd->diag_token = ut_cmd.diag_token;
+
+ ptr = skb->data + sizeof(ut_cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
+
+ ptr += TLV_HDR_SIZE;
+
+ ut_cmd_args = ptr;
+ for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
+ ut_cmd_args[i] = test_args[i];
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI unit test : module %d vdev %d n_args %d token %d\n",
+ cmd->module_id, cmd->vdev_id, cmd->num_args,
+ cmd->diag_token);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_simulate_radar(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ u32 dfs_args[DFS_MAX_TEST_ARGS];
+ struct wmi_unit_test_cmd wmi_ut;
+ bool arvif_found = false;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arvif_found = true;
+ break;
+ }
+ }
+
+ if (!arvif_found)
+ return -EINVAL;
+
+ dfs_args[DFS_TEST_CMDID] = 0;
+ dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
+ /* Currently we can pass segment_id (b0 - b1), chirp (b2) and
+ * freq offset (b3 - b10) to the unit test. For simulation
+ * purposes these can be set to 0, which is valid.
+ */
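+ /* A hypothetical non-zero encoding (illustration only, assuming the
+ * bit layout above and the helpers from <linux/bitfield.h>):
+ *
+ * dfs_args[DFS_TEST_RADAR_PARAM] =
+ * u32_encode_bits(segment_id, GENMASK(1, 0)) |
+ * u32_encode_bits(chirp, BIT(2)) |
+ * u32_encode_bits(freq_offset, GENMASK(10, 3));
+ */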
+ dfs_args[DFS_TEST_RADAR_PARAM] = 0;
+
+ wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
+ wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
+ wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
+ wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
+
+ return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
+}
+
+int ath12k_wmi_connect(struct ath12k_base *ab)
+{
+ u32 i;
+ u8 wmi_ep_count;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > ab->hw_params->max_radios)
+ return -EINVAL;
+
+ for (i = 0; i < wmi_ep_count; i++)
+ ath12k_connect_pdev_htc_service(ab, i);
+
+ return 0;
+}
+
+static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
+{
+ if (WARN_ON(pdev_id >= MAX_RADIOS))
+ return;
+
+ /* TODO: Deinit any pdev specific wmi resource */
+}
+
+int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
+ u8 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi_handle;
+
+ if (pdev_id >= ab->hw_params->max_radios)
+ return -EINVAL;
+
+ wmi_handle = &ab->wmi_ab.wmi[pdev_id];
+
+ wmi_handle->wmi_ab = &ab->wmi_ab;
+
+ ab->wmi_ab.ab = ab;
+ /* TODO: Init remaining resource specific to pdev */
+
+ return 0;
+}
+
+int ath12k_wmi_attach(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_pdev_attach(ab, 0);
+ if (ret)
+ return ret;
+
+ ab->wmi_ab.ab = ab;
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+
+ /* It's overwritten when the service ready ext event is handled */
+ if (ab->hw_params->single_pdev_only)
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
+
+ /* TODO: Init remaining wmi soc resources required */
+ init_completion(&ab->wmi_ab.service_ready);
+ init_completion(&ab->wmi_ab.unified_ready);
+
+ return 0;
+}
+
+void ath12k_wmi_detach(struct ath12k_base *ab)
+{
+ int i;
+
+ /* TODO: Deinit wmi resource specific to SOC as required */
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++)
+ ath12k_wmi_pdev_detach(ab, i);
+
+ ath12k_wmi_free_dbring_caps(ab);
+}
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
new file mode 100644
index 000000000000..84e3fb918e43
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -0,0 +1,4803 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_WMI_H
+#define ATH12K_WMI_H
+
+#include <net/mac80211.h>
+#include "htc.h"
+
+/* Naming conventions for structures:
+ *
+ * _cmd means that this is a firmware command sent from host to firmware.
+ *
+ * _event means that this is a firmware event sent from firmware to host.
+ *
+ * _params is a structure which is embedded either into _cmd or _event (or
+ * both), it is not sent individually.
+ *
+ * _arg is used inside the host, the firmware does not see that at all.
+ */
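+
+/* For example (hypothetical names, for illustration only):
+ *
+ * struct wmi_vdev_up_cmd - a command the host sends to firmware
+ * struct wmi_vdev_stopped_event - an event firmware sends to the host
+ * struct ath12k_wmi_channel_params - embedded inside a _cmd or _event
+ * struct wmi_vdev_start_req_arg - a host-internal representation
+ */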
+
+struct ath12k_base;
+struct ath12k;
+
+/* There is no signed version of __le32, so as a temporary solution we
+ * define our own version. The idea is from fs/ntfs/endian.h.
+ *
+ * Use an a_ prefix so that it doesn't conflict if proper support is
+ * ever added to linux/types.h.
+ */
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+ return (__force a_sle32)cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+ return le32_to_cpu((__force __le32)val);
+}
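+
+/* Usage sketch (illustration only): a signed value such as a negative
+ * TX power round-trips through the wire format:
+ *
+ * a_sle32 min_power = a_cpu_to_sle32(-10);
+ * s32 host_val = a_sle32_to_cpu(min_power);
+ *
+ * host_val is -10 again on both little- and big-endian hosts.
+ */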
+
+/* Defines to set the packet extension (PET) values, which can be 0, 8 or 16 usec */
+#define MAX_HE_NSS 8
+#define MAX_HE_MODULATION 8
+#define MAX_HE_RU 4
+#define HE_MODULATION_NONE 7
+#define HE_PET_0_USEC 0
+#define HE_PET_8_USEC 1
+#define HE_PET_16_USEC 2
+
+#define WMI_MAX_CHAINS 8
+
+#define WMI_MAX_NUM_SS MAX_HE_NSS
+#define WMI_MAX_NUM_RU MAX_HE_RU
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+struct wmi_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
+
+struct wmi_tlv {
+ __le32 header;
+ u8 value[];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define TLV_HDR_SIZE sizeof_field(struct wmi_tlv, header)
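+
+/* A TLV header carries the tag in bits 31:16 and the value length in
+ * bytes (excluding the header itself) in bits 15:0. A sketch of
+ * building one, assuming le32_encode_bits() from <linux/bitfield.h>:
+ *
+ * tlv->header = le32_encode_bits(tag, WMI_TLV_TAG) |
+ * le32_encode_bits(len, WMI_TLV_LEN);
+ */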
+
+#define WMI_CMD_HDR_CMD_ID GENMASK(23, 0)
+#define WMI_MAX_MEM_REQS 32
+#define ATH12K_MAX_HW_LISTEN_INTERVAL 5
+
+#define WMI_HOST_RC_DS_FLAG 0x01
+#define WMI_HOST_RC_CW40_FLAG 0x02
+#define WMI_HOST_RC_SGI_FLAG 0x04
+#define WMI_HOST_RC_HT_FLAG 0x08
+#define WMI_HOST_RC_RTSCTS_FLAG 0x10
+#define WMI_HOST_RC_TX_STBC_FLAG 0x20
+#define WMI_HOST_RC_RX_STBC_FLAG 0xC0
+#define WMI_HOST_RC_RX_STBC_FLAG_S 6
+#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100
+#define WMI_HOST_RC_TS_FLAG 0x200
+#define WMI_HOST_RC_UAPSD_FLAG 0x400
+
+#define WMI_HT_CAP_ENABLED 0x0001
+#define WMI_HT_CAP_HT20_SGI 0x0002
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004
+#define WMI_HT_CAP_TX_STBC 0x0008
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000
+#define WMI_HT_CAP_TX_LDPC 0x2000
+#define WMI_HT_CAP_IBF_BFER 0x4000
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_HT_CAP_RX_STBC_1SS 0x0010
+#define WMI_HT_CAP_RX_STBC_2SS 0x0020
+#define WMI_HT_CAP_RX_STBC_3SS 0x0030
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100
+#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200
+#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+#define WLAN_SCAN_MAX_HINT_S_SSID 10
+#define WLAN_SCAN_MAX_HINT_BSSID 10
+#define MAX_RNR_BSS 5
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
+
+#define WMI_BA_MODE_BUFFER_SIZE_256 3
+
+/* HW mode config type replicated from FW header
+ * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
+ * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
+ * one in 2G and another in 5G.
+ * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
+ * same band; no tx allowed.
+ * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
+ * Support for both PHYs within one band is planned
+ * for 5G only (as indicated in WMI_MAC_PHY_CAPABILITIES),
+ * but could be extended to other bands in the future.
+ * The separation of the band between the two PHYs needs
+ * to be communicated separately.
+ * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
+ * as in WMI_HW_MODE_SBS, and 3rd on the other band
+ * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHYs, with one PHY capable of both 2G and
+ * 5G. It can support SBS (5G + 5G) OR DBS (5G + 2G).
+ * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
+ */
+enum wmi_host_hw_mode_config_type {
+ WMI_HOST_HW_MODE_SINGLE = 0,
+ WMI_HOST_HW_MODE_DBS = 1,
+ WMI_HOST_HW_MODE_SBS_PASSIVE = 2,
+ WMI_HOST_HW_MODE_SBS = 3,
+ WMI_HOST_HW_MODE_DBS_SBS = 4,
+ WMI_HOST_HW_MODE_DBS_OR_SBS = 5,
+
+ /* keep last */
+ WMI_HOST_HW_MODE_MAX
+};
+
+/* HW mode priority values used to detect the preferred HW mode
+ * on the available modes.
+ */
+enum wmi_host_hw_mode_priority {
+ WMI_HOST_HW_MODE_DBS_SBS_PRI,
+ WMI_HOST_HW_MODE_DBS_PRI,
+ WMI_HOST_HW_MODE_DBS_OR_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PASSIVE_PRI,
+ WMI_HOST_HW_MODE_SINGLE_PRI,
+
+ /* keep last the lowest priority */
+ WMI_HOST_HW_MODE_MAX_PRI
+};
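+
+/* Selection sketch (illustration only): lower enum values mean higher
+ * priority, so a host scanning the firmware-reported modes can keep
+ * the preferred one with a simple minimum search:
+ *
+ * if (mode_pri < best_pri) {
+ * best_pri = mode_pri;
+ * preferred_hw_mode = mode;
+ * }
+ */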
+
+enum WMI_HOST_WLAN_BAND {
+ WMI_HOST_WLAN_2G_CAP = 1,
+ WMI_HOST_WLAN_5G_CAP = 2,
+ WMI_HOST_WLAN_2G_5G_CAP = 3,
+};
+
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV = 0x4,
+ WMI_GRP_VDEV = 0x5,
+ WMI_GRP_PEER = 0x6,
+ WMI_GRP_MGMT = 0x7,
+ WMI_GRP_BA_NEG = 0x8,
+ WMI_GRP_STA_PS = 0x9,
+ WMI_GRP_DFS = 0xa,
+ WMI_GRP_ROAM = 0xb,
+ WMI_GRP_OFL_SCAN = 0xc,
+ WMI_GRP_P2P = 0xd,
+ WMI_GRP_AP_PS = 0xe,
+ WMI_GRP_RATE_CTRL = 0xf,
+ WMI_GRP_PROFILE = 0x10,
+ WMI_GRP_SUSPEND = 0x11,
+ WMI_GRP_BCN_FILTER = 0x12,
+ WMI_GRP_WOW = 0x13,
+ WMI_GRP_RTT = 0x14,
+ WMI_GRP_SPECTRAL = 0x15,
+ WMI_GRP_STATS = 0x16,
+ WMI_GRP_ARP_NS_OFL = 0x17,
+ WMI_GRP_NLO_OFL = 0x18,
+ WMI_GRP_GTK_OFL = 0x19,
+ WMI_GRP_CSA_OFL = 0x1a,
+ WMI_GRP_CHATTER = 0x1b,
+ WMI_GRP_TID_ADDBA = 0x1c,
+ WMI_GRP_MISC = 0x1d,
+ WMI_GRP_GPIO = 0x1e,
+ WMI_GRP_FWTEST = 0x1f,
+ WMI_GRP_TDLS = 0x20,
+ WMI_GRP_RESMGR = 0x21,
+ WMI_GRP_STA_SMPS = 0x22,
+ WMI_GRP_WLAN_HB = 0x23,
+ WMI_GRP_RMC = 0x24,
+ WMI_GRP_MHF_OFL = 0x25,
+ WMI_GRP_LOCATION_SCAN = 0x26,
+ WMI_GRP_OEM = 0x27,
+ WMI_GRP_NAN = 0x28,
+ WMI_GRP_COEX = 0x29,
+ WMI_GRP_OBSS_OFL = 0x2a,
+ WMI_GRP_LPI = 0x2b,
+ WMI_GRP_EXTSCAN = 0x2c,
+ WMI_GRP_DHCP_OFL = 0x2d,
+ WMI_GRP_IPA = 0x2e,
+ WMI_GRP_MDNS_OFL = 0x2f,
+ WMI_GRP_SAP_OFL = 0x30,
+ WMI_GRP_OCB = 0x31,
+ WMI_GRP_SOC = 0x32,
+ WMI_GRP_PKT_FILTER = 0x33,
+ WMI_GRP_MAWC = 0x34,
+ WMI_GRP_PMF_OFFLOAD = 0x35,
+ WMI_GRP_BPF_OFFLOAD = 0x36,
+ WMI_GRP_NAN_DATA = 0x37,
+ WMI_GRP_PROTOTYPE = 0x38,
+ WMI_GRP_MONITOR = 0x39,
+ WMI_GRP_REGULATORY = 0x3a,
+ WMI_GRP_HW_DATA_FILTER = 0x3b,
+ WMI_GRP_WLM = 0x3c,
+ WMI_GRP_11K_OFFLOAD = 0x3d,
+ WMI_GRP_TWT = 0x3e,
+ WMI_GRP_MOTION_DET = 0x3f,
+ WMI_GRP_SPATIAL_REUSE = 0x40,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
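+
+/* Each group owns a 4096-wide ID space: the first command/event of a
+ * group is ((grp_id) << 12) | 0x1 and later entries follow by enum
+ * auto-increment. For example:
+ *
+ * WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN)
+ * = (0x3 << 12) | 0x1 = 0x3001
+ * WMI_STOP_SCAN_CMDID = 0x3002, and so on.
+ */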
+
+enum wmi_tlv_cmd_id {
+ WMI_CMD_UNSUPPORTED = 0,
+ WMI_INIT_CMDID = 0x1,
+ WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_PDEV_DUMP_CMDID,
+ WMI_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_PDEV_FIPS_CMDID,
+ WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_PDEV_GET_TPC_CMDID,
+ WMI_MIB_STATS_ENABLE_CMDID,
+ WMI_PDEV_SET_PCL_CMDID,
+ WMI_PDEV_SET_HW_MODE_CMDID,
+ WMI_PDEV_SET_MAC_CONFIG_CMDID,
+ WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+ WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+ WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+ WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+ WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+ WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+ WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+ WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+ WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+ WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+ WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+ WMI_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_VDEV_WMM_ADDTS_CMDID,
+ WMI_VDEV_WMM_DELTS_CMDID,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_VDEV_PLMREQ_START_CMDID,
+ WMI_VDEV_PLMREQ_STOP_CMDID,
+ WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+ WMI_VDEV_SET_IE_CMDID,
+ WMI_VDEV_RATEMASK_CMDID,
+ WMI_VDEV_ATF_REQUEST_CMDID,
+ WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_VDEV_SET_QUIET_MODE_CMDID,
+ WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+ WMI_PEER_INFO_REQ_CMDID,
+ WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+ WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_PEER_ATF_REQUEST_CMDID,
+ WMI_PEER_BWF_REQUEST_CMDID,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+ WMI_MGMT_TX_SEND_CMDID,
+ WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+ WMI_PDEV_SEND_FD_CMDID,
+ WMI_BCN_OFFLOAD_CTRL_CMDID,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+ WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+ WMI_FILS_DISCOVERY_TMPL_CMDID,
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+ WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+ WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+ WMI_ROAM_CHAN_LIST,
+ WMI_ROAM_SCAN_CMD,
+ WMI_ROAM_SYNCH_COMPLETE,
+ WMI_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_ROAM_INVOKE_CMDID,
+ WMI_ROAM_FILTER_CMDID,
+ WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+ WMI_ROAM_CONFIGURE_MAWC_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_PER_CONFIG_CMDID,
+ WMI_ROAM_BTM_CONFIG_CMDID,
+ WMI_ENABLE_FILS_CMDID,
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_AP_PS_EGAP_PARAM_CMDID,
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+ WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_EXTWOW_ENABLE_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+ WMI_WOW_UDP_SVC_OFLD_CMDID,
+ WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+ WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+ WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_REQUEST_STATS_EXT_CMDID,
+ WMI_REQUEST_LINK_STATS_CMDID,
+ WMI_START_LINK_STATS_CMDID,
+ WMI_CLEAR_LINK_STATS_CMDID,
+ WMI_GET_FW_MEM_DUMP_CMDID,
+ WMI_DEBUG_MESG_FLUSH_CMDID,
+ WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_REQUEST_WLAN_STATS_CMDID,
+ WMI_REQUEST_RCPI_CMDID,
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+ WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_APFIND_CMDID,
+ WMI_PASSPOINT_LIST_CONFIG_CMDID,
+ WMI_NLO_CONFIGURE_MAWC_CMDID,
+ WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_STA_KEEPALIVE_CMDID,
+ WMI_BA_REQ_SSN_CMDID,
+ WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+ WMI_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_THERMAL_MGMT_CMDID,
+ WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_OCB_SET_SCHED_CMDID,
+ WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+ WMI_LRO_CONFIG_CMDID,
+ WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+ WMI_VDEV_WISA_CMDID,
+ WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_THERM_THROT_SET_CONF_CMDID,
+ WMI_RUNTIME_DPD_RECAL_CMDID,
+ WMI_GET_TPC_POWER_CMDID,
+ WMI_IDLE_TRIGGER_MONITOR_CMDID,
+ WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+ WMI_TXBF_CMDID,
+ WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+ WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_UNIT_TEST_CMDID,
+ WMI_FWTEST_CMDID,
+ WMI_QBOOST_CFG_CMDID,
+ WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_TDLS_PEER_UPDATE_CMDID,
+ WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+ WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_STA_SMPS_PARAM_CMDID,
+ WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+ WMI_HB_SET_TCP_PARAMS_CMDID,
+ WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_HB_SET_UDP_PARAMS_CMDID,
+ WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_RMC_CONFIG_CMDID,
+ WMI_RMC_SET_MANUAL_LEADER_CMDID,
+ WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+ WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_DISABLE_CMDID,
+ WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_REQUEST_CMDID,
+ WMI_LPI_OEM_REQ_CMDID,
+ WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_CHAN_AVOID_UPDATE_CMDID,
+ WMI_COEX_CONFIG_CMDID,
+ WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
+ WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+ WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
+ WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_START_SCAN_CMDID,
+ WMI_LPI_STOP_SCAN_CMDID,
+ WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_STOP_CMDID,
+ WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+ WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+ WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+ WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_MDNS_SET_FQDN_CMDID,
+ WMI_MDNS_SET_RESPONSE_CMDID,
+ WMI_MDNS_GET_STATS_CMDID,
+ WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+ WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_SET_UTC_TIME_CMDID,
+ WMI_OCB_START_TIMING_ADVERT_CMDID,
+ WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+ WMI_OCB_GET_TSF_TIMER_CMDID,
+ WMI_DCC_GET_STATS_CMDID,
+ WMI_DCC_CLEAR_STATS_CMDID,
+ WMI_DCC_UPDATE_NDL_CMDID,
+ WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_SET_HW_MODE_CMDID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+ WMI_SOC_SET_ANTENNA_MODE_CMDID,
+ WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+ WMI_PACKET_FILTER_ENABLE_CMDID,
+ WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+ WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_GET_VDEV_STATS_CMDID,
+ WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+ WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+ WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_SCAN_START_CMDID,
+ WMI_11D_SCAN_STOP_CMDID,
+ WMI_SET_INIT_COUNTRY_CMDID,
+ WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_REQ_CMDID,
+ WMI_NDP_RESPONDER_REQ_CMDID,
+ WMI_NDP_END_REQ_CMDID,
+ WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+ WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+ WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+enum wmi_tlv_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+ WMI_PDEV_DUMP_EVENTID,
+ WMI_TX_PAUSE_EVENTID,
+ WMI_DFS_RADAR_EVENTID,
+ WMI_PDEV_L1SS_TRACK_EVENTID,
+ WMI_PDEV_TEMPERATURE_EVENTID,
+ WMI_SERVICE_READY_EXT_EVENTID,
+ WMI_PDEV_FIPS_EVENTID,
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_PDEV_TPC_EVENTID,
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+ WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+ WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+ WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+ WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+ WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+ WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+ WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+ WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID,
+ WMI_PDEV_RAP_INFO_EVENTID,
+ WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
+ WMI_SERVICE_READY_EXT2_EVENTID,
+ WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_VDEV_TSF_REPORT_EVENTID,
+ WMI_VDEV_DELETE_RESP_EVENTID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_INFO_EVENTID,
+ WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_PEER_STATE_EVENTID,
+ WMI_PEER_ASSOC_CONF_EVENTID,
+ WMI_PEER_DELETE_RESP_EVENTID,
+ WMI_PEER_RATECODE_LIST_EVENTID,
+ WMI_WDS_PEER_EVENTID,
+ WMI_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_PEER_ANTDIV_INFO_EVENTID,
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_MGMT_TX_COMPLETION_EVENTID,
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+ WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
+ WMI_HOST_FILS_DISCOVERY_EVENTID,
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_BA_RSP_SSN_EVENTID,
+ WMI_AGGR_STATE_TRIG_EVENTID,
+ WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+ WMI_ROAM_SYNCH_EVENTID,
+ WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_NOA_EVENTID,
+ WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+ WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_WOW_INITIAL_WAKEUP_EVENTID,
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+ WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_IFACE_LINK_STATS_EVENTID,
+ WMI_PEER_LINK_STATS_EVENTID,
+ WMI_RADIO_LINK_STATS_EVENTID,
+ WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_INST_RSSI_STATS_EVENTID,
+ WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_REPORT_STATS_EVENTID,
+ WMI_UPDATE_RCPI_EVENTID,
+ WMI_PEER_STATS_INFO_EVENTID,
+ WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_APFIND_EVENTID,
+ WMI_PASSPOINT_MATCH_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+ WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+ WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_THERMAL_MGMT_EVENTID,
+ WMI_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_DIAG_EVENTID,
+ WMI_OCB_SET_SCHED_EVENTID,
+ WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+ WMI_RSSI_BREACH_EVENTID,
+ WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ WMI_PDEV_UTF_SCPC_EVENTID,
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+ WMI_PKGID_EVENTID,
+ WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_UPLOADH_EVENTID,
+ WMI_CAPTUREH_EVENTID,
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_RESULT_EVENTID,
+ WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_OEM_ERROR_REPORT_EVENTID,
+ WMI_OEM_RESPONSE_EVENTID,
+ WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+ WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+ WMI_NAN_STARTED_CLUSTER_EVENTID,
+ WMI_NAN_JOINED_CLUSTER_EVENTID,
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_STATUS_EVENTID,
+ WMI_LPI_HANDOFF_EVENTID,
+ WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_OPERATION_EVENTID,
+ WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+ WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_DCC_GET_STATS_RESP_EVENTID,
+ WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+ WMI_DCC_STATS_EVENTID,
+ WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+ WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_VDEV_STATS_INFO_EVENTID,
+ WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_NEW_COUNTRY_EVENTID,
+ WMI_REG_CHAN_LIST_CC_EXT_EVENTID,
+ WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_RSP_EVENTID,
+ WMI_NDP_RESPONDER_RSP_EVENTID,
+ WMI_NDP_END_RSP_EVENTID,
+ WMI_NDP_INDICATION_EVENTID,
+ WMI_NDP_CONFIRM_EVENTID,
+ WMI_NDP_END_INDICATION_EVENTID,
+
+ WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_EVENTID,
+ WMI_TWT_ADD_DIALOG_EVENTID,
+ WMI_TWT_DEL_DIALOG_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_EVENTID,
+ WMI_TWT_RESUME_DIALOG_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PMF_QOS,
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_PDEV_PARAM_DCS,
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ WMI_PDEV_PARAM_PROXY_STA,
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_PDEV_PARAM_BURST_DUR,
+ WMI_PDEV_PARAM_BURST_ENABLE,
+ WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_PDEV_PARAM_L1SS_TRACK,
+ WMI_PDEV_PARAM_HYST_EN,
+ WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_PDEV_PARAM_LED_SYS_STATE,
+ WMI_PDEV_PARAM_LED_ENABLE,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_CTS_CBW,
+ WMI_PDEV_PARAM_WNTS_CONFIG,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+ WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+ WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+ WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+ WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+ WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_PDEV_PARAM_AGGR_BURST,
+ WMI_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_PDEV_PARAM_RX_FILTER,
+ WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_PDEV_PARAM_EN_STATS,
+ WMI_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_PDEV_PARAM_NOISE_DETECTION,
+ WMI_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_PDEV_PARAM_DPD_ENABLE,
+ WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_PDEV_PARAM_ANT_PLZN,
+ WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_PDEV_PARAM_PDEV_RESET,
+ WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+ WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+ WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+ WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+ WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+ WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ WMI_VDEV_PARAM_PREAMBLE,
+ WMI_VDEV_PARAM_SWBA_TIME,
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_VDEV_PARAM_WDS,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ WMI_VDEV_PARAM_CHWIDTH,
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_VDEV_PARAM_MGMT_RATE,
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ WMI_VDEV_PARAM_FIXED_RATE,
+ WMI_VDEV_PARAM_SGI,
+ WMI_VDEV_PARAM_LDPC,
+ WMI_VDEV_PARAM_TX_STBC,
+ WMI_VDEV_PARAM_RX_STBC,
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ WMI_VDEV_PARAM_NSS,
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_VDEV_PARAM_TXBF,
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_VDEV_PARAM_ENABLE_RMC,
+ WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_VDEV_PARAM_MAX_RATE,
+ WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+ WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+ WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+ WMI_VDEV_PARAM_DISCONNECT_TH,
+ WMI_VDEV_PARAM_RTSCTS_RATE,
+ WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+ WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+ WMI_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_VDEV_PARAM_MFPTEST_SET,
+ WMI_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_VDEV_PARAM_VHT_SGIMASK,
+ WMI_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_VDEV_PARAM_PROXY_STA,
+ WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+ WMI_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_VDEV_PARAM_SENSOR_AP,
+ WMI_VDEV_PARAM_BEACON_RATE,
+ WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_VDEV_PARAM_STA_KICKOUT,
+ WMI_VDEV_PARAM_CAPABILITIES,
+ WMI_VDEV_PARAM_TSF_INCREMENT,
+ WMI_VDEV_PARAM_AMPDU_PER_AC,
+ WMI_VDEV_PARAM_RX_FILTER,
+ WMI_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_VDEV_PARAM_HE_DCM,
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+ WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+ WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+ WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
+ WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+ WMI_VDEV_PARAM_BSS_COLOR,
+ WMI_VDEV_PARAM_SET_HEMU_MODE,
+ WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+};
+
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+ WMI_PEER_160MHZ = 0x40000000,
+ WMI_PEER_SAFEMODE_EN = 0x80000000,
+};
+
+/* Enum list of TLV tags for each parameter structure type. */
+enum wmi_tlv_tag {
+ WMI_TAG_LAST_RESERVED = 15,
+ WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_BYTE,
+ WMI_TAG_ARRAY_STRUCT,
+ WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_LAST_ARRAY_ENUM = 31,
+ WMI_TAG_SERVICE_READY_EVENT,
+ WMI_TAG_HAL_REG_CAPABILITIES,
+ WMI_TAG_WLAN_HOST_MEM_REQ,
+ WMI_TAG_READY_EVENT,
+ WMI_TAG_SCAN_EVENT,
+ WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+ WMI_TAG_CHAN_INFO_EVENT,
+ WMI_TAG_COMB_PHYERR_RX_HDR,
+ WMI_TAG_VDEV_START_RESPONSE_EVENT,
+ WMI_TAG_VDEV_STOPPED_EVENT,
+ WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TAG_PEER_STA_KICKOUT_EVENT,
+ WMI_TAG_MGMT_RX_HDR,
+ WMI_TAG_TBTT_OFFSET_EVENT,
+ WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+ WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TAG_ROAM_EVENT,
+ WMI_TAG_WOW_EVENT_INFO,
+ WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TAG_RTT_EVENT_HEADER,
+ WMI_TAG_RTT_ERROR_REPORT_EVENT,
+ WMI_TAG_RTT_MEAS_EVENT,
+ WMI_TAG_ECHO_EVENT,
+ WMI_TAG_FTM_INTG_EVENT,
+ WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TAG_GPIO_INPUT_EVENT,
+ WMI_TAG_CSA_EVENT,
+ WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TAG_IGTK_INFO,
+ WMI_TAG_DCS_INTERFERENCE_EVENT,
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_PROFILE_CTX_T,
+ WMI_TAG_WLAN_PROFILE_T,
+ WMI_TAG_PDEV_QVIT_EVENT,
+ WMI_TAG_HOST_SWBA_EVENT,
+ WMI_TAG_TIM_INFO,
+ WMI_TAG_P2P_NOA_INFO,
+ WMI_TAG_STATS_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGE_DESC,
+ WMI_TAG_GTK_REKEY_FAIL_EVENT,
+ WMI_TAG_INIT_CMD,
+ WMI_TAG_RESOURCE_CONFIG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TAG_START_SCAN_CMD,
+ WMI_TAG_STOP_SCAN_CMD,
+ WMI_TAG_SCAN_CHAN_LIST_CMD,
+ WMI_TAG_CHANNEL,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TAG_PDEV_SET_PARAM_CMD,
+ WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_WMM_PARAMS,
+ WMI_TAG_PDEV_SET_QUIET_CMD,
+ WMI_TAG_VDEV_CREATE_CMD,
+ WMI_TAG_VDEV_DELETE_CMD,
+ WMI_TAG_VDEV_START_REQUEST_CMD,
+ WMI_TAG_P2P_NOA_DESCRIPTOR,
+ WMI_TAG_P2P_GO_SET_BEACON_IE,
+ WMI_TAG_GTK_OFFLOAD_CMD,
+ WMI_TAG_VDEV_UP_CMD,
+ WMI_TAG_VDEV_STOP_CMD,
+ WMI_TAG_VDEV_DOWN_CMD,
+ WMI_TAG_VDEV_SET_PARAM_CMD,
+ WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ WMI_TAG_PEER_CREATE_CMD,
+ WMI_TAG_PEER_DELETE_CMD,
+ WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ WMI_TAG_PEER_SET_PARAM_CMD,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TAG_VHT_RATE_SET,
+ WMI_TAG_BCN_TMPL_CMD,
+ WMI_TAG_PRB_TMPL_CMD,
+ WMI_TAG_BCN_PRB_INFO,
+ WMI_TAG_PEER_TID_ADDBA_CMD,
+ WMI_TAG_PEER_TID_DELBA_CMD,
+ WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+ WMI_TAG_ROAM_SCAN_MODE,
+ WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TAG_ROAM_SCAN_PERIOD,
+ WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TAG_PDEV_SUSPEND_CMD,
+ WMI_TAG_PDEV_RESUME_CMD,
+ WMI_TAG_ADD_BCN_FILTER_CMD,
+ WMI_TAG_RMV_BCN_FILTER_CMD,
+ WMI_TAG_WOW_ENABLE_CMD,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TAG_ARP_OFFLOAD_TUPLE,
+ WMI_TAG_NS_OFFLOAD_TUPLE,
+ WMI_TAG_FTM_INTG_CMD,
+ WMI_TAG_STA_KEEPALIVE_CMD,
+ WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TAG_AP_PS_PEER_CMD,
+ WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TAG_WOW_DEL_PATTERN_CMD,
+ WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ WMI_TAG_RTT_MEASREQ_HEAD,
+ WMI_TAG_RTT_MEASREQ_BODY,
+ WMI_TAG_RTT_TSF_CMD,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TAG_REQUEST_STATS_CMD,
+ WMI_TAG_NLO_CONFIG_CMD,
+ WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+ WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TAG_CHATTER_SET_MODE_CMD,
+ WMI_TAG_ECHO_CMD,
+ WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TAG_FORCE_FW_HANG_CMD,
+ WMI_TAG_GPIO_CONFIG_CMD,
+ WMI_TAG_GPIO_OUTPUT_CMD,
+ WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TAG_BCN_TX_HDR,
+ WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+ WMI_TAG_MGMT_TX_HDR,
+ WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ WMI_TAG_ADDBA_SEND_CMD,
+ WMI_TAG_DELBA_SEND_CMD,
+ WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ WMI_TAG_SEND_SINGLEAMSDU_CMD,
+ WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TAG_PDEV_SET_HT_IE_CMD,
+ WMI_TAG_PDEV_SET_VHT_IE_CMD,
+ WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TAG_PEER_MCAST_GROUP_CMD,
+ WMI_TAG_ROAM_AP_PROFILE,
+ WMI_TAG_AP_PROFILE,
+ WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TAG_PDEV_DFS_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_DISABLE_CMD,
+ WMI_TAG_WOW_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_BITMAP_PATTERN_T,
+ WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+ WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TAG_TXBF_CMD,
+ WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+ WMI_TAG_NLO_EVENT,
+ WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TAG_UPLOAD_H_HDR,
+ WMI_TAG_CAPTURE_H_EVENT_HDR,
+ WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TAG_VDEV_WMM_ADDTS_CMD,
+ WMI_TAG_VDEV_WMM_DELTS_CMD,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_TDLS_SET_STATE_CMD,
+ WMI_TAG_TDLS_PEER_UPDATE_CMD,
+ WMI_TAG_TDLS_PEER_EVENT,
+ WMI_TAG_TDLS_PEER_CAPABILITIES,
+ WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TAG_ROAM_CHAN_LIST,
+ WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD,
+ WMI_TAG_BA_RSP_SSN_EVENT,
+ WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TAG_P2P_SET_OPPPS_CMD,
+ WMI_TAG_P2P_SET_NOA_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TAG_STA_SMPS_PARAM_CMD,
+ WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TAG_P2P_NOA_EVENT,
+ WMI_TAG_HB_SET_ENABLE_CMD,
+ WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+ WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+ WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TAG_HB_IND_EVENT,
+ WMI_TAG_TX_PAUSE_EVENT,
+ WMI_TAG_RFKILL_EVENT,
+ WMI_TAG_DFS_RADAR_EVENT,
+ WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+ WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+ WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+ WMI_TAG_VDEV_PLMREQ_START_CMD,
+ WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+ WMI_TAG_THERMAL_MGMT_CMD,
+ WMI_TAG_THERMAL_MGMT_EVENT,
+ WMI_TAG_PEER_INFO_REQ_CMD,
+ WMI_TAG_PEER_INFO_EVENT,
+ WMI_TAG_PEER_INFO,
+ WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TAG_RMC_SET_MODE_CMD,
+ WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TAG_RMC_CONFIG_CMD,
+ WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_NAN_CMD_PARAM,
+ WMI_TAG_NAN_EVENT_HDR,
+ WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+ WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TAG_AGGR_STATE_TRIG_EVENT,
+ WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TAG_ROAM_SCAN_CMD,
+ WMI_TAG_REQ_STATS_EXT_CMD,
+ WMI_TAG_STATS_EXT_EVENT,
+ WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+ WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+ WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+ WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+ WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+ WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+ WMI_TAG_START_LINK_STATS_CMD,
+ WMI_TAG_CLEAR_LINK_STATS_CMD,
+ WMI_TAG_REQUEST_LINK_STATS_CMD,
+ WMI_TAG_IFACE_LINK_STATS_EVENT,
+ WMI_TAG_RADIO_LINK_STATS_EVENT,
+ WMI_TAG_PEER_STATS_EVENT,
+ WMI_TAG_CHANNEL_STATS,
+ WMI_TAG_RADIO_LINK_STATS,
+ WMI_TAG_RATE_STATS,
+ WMI_TAG_PEER_LINK_STATS,
+ WMI_TAG_WMM_AC_STATS,
+ WMI_TAG_IFACE_LINK_STATS,
+ WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TAG_LPI_START_SCAN_CMD,
+ WMI_TAG_LPI_STOP_SCAN_CMD,
+ WMI_TAG_LPI_RESULT_EVENT,
+ WMI_TAG_PEER_STATE_EVENT,
+ WMI_TAG_EXTSCAN_BUCKET_CMD,
+ WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TAG_EXTSCAN_START_CMD,
+ WMI_TAG_EXTSCAN_STOP_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_OPERATION_EVENT,
+ WMI_TAG_EXTSCAN_START_STOP_EVENT,
+ WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TAG_UNIT_TEST_CMD,
+ WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_SYNCH_EVENT,
+ WMI_TAG_ROAM_SYNCH_COMPLETE,
+ WMI_TAG_EXTWOW_ENABLE_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TAG_LPI_STATUS_EVENT,
+ WMI_TAG_LPI_HANDOFF_EVENT,
+ WMI_TAG_VDEV_RATE_STATS_EVENT,
+ WMI_TAG_VDEV_RATE_HT_INFO,
+ WMI_TAG_RIC_REQUEST,
+ WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TAG_PDEV_TEMPERATURE_EVENT,
+ WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_RIC_TSPEC,
+ WMI_TAG_TPC_CHAINMASK_CONFIG,
+ WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TAG_KEY_MATERIAL,
+ WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TAG_SET_LED_FLASHING_CMD,
+ WMI_TAG_MDNS_OFFLOAD_CMD,
+ WMI_TAG_MDNS_SET_FQDN_CMD,
+ WMI_TAG_MDNS_SET_RESP_CMD,
+ WMI_TAG_MDNS_GET_STATS_CMD,
+ WMI_TAG_MDNS_STATS_EVENT,
+ WMI_TAG_ROAM_INVOKE_CMD,
+ WMI_TAG_PDEV_RESUME_EVENT,
+ WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TAG_SAP_OFL_ENABLE_CMD,
+ WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+ WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+ WMI_TAG_APFIND_CMD_PARAM,
+ WMI_TAG_APFIND_EVENT_HDR,
+ WMI_TAG_OCB_SET_SCHED_CMD,
+ WMI_TAG_OCB_SET_SCHED_EVENT,
+ WMI_TAG_OCB_SET_CONFIG_CMD,
+ WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TAG_OCB_SET_UTC_TIME_CMD,
+ WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TAG_DCC_GET_STATS_CMD,
+ WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+ WMI_TAG_DCC_CLEAR_STATS_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TAG_DCC_STATS_EVENT,
+ WMI_TAG_OCB_CHANNEL,
+ WMI_TAG_OCB_SCHEDULE_ELEMENT,
+ WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TAG_DCC_NDL_CHAN,
+ WMI_TAG_QOS_PARAMETER,
+ WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TAG_ROAM_FILTER,
+ WMI_TAG_PASSPOINT_CONFIG_CMD,
+ WMI_TAG_PASSPOINT_EVENT_HDR,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TAG_VDEV_TSF_REPORT_EVENT,
+ WMI_TAG_GET_FW_MEM_DUMP,
+ WMI_TAG_UPDATE_FW_MEM_DUMP,
+ WMI_TAG_FW_MEM_DUMP_PARAMS,
+ WMI_TAG_DEBUG_MESG_FLUSH,
+ WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TAG_VDEV_SET_IE_CMD,
+ WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TAG_RSSI_BREACH_EVENT,
+ WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+ WMI_TAG_SOC_SET_PCL_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_VDEV_TXRX_STREAMS,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+ WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+ WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+ WMI_TAG_PACKET_FILTER_CONFIG,
+ WMI_TAG_PACKET_FILTER_ENABLE,
+ WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TAG_MGMT_TX_SEND_CMD,
+ WMI_TAG_MGMT_TX_COMPL_EVENT,
+ WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TAG_LRO_INFO_CMD,
+ WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TAG_SERVICE_READY_EXT_EVENT,
+ WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TAG_PEER_ASSOC_CONF_EVENT,
+ WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+ WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+ WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TAG_SCPC_EVENT,
+ WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TAG_BPF_GET_CAPABILITY_CMD,
+ WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+ WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+ WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_VDEV_DELETE_RESP_EVENT,
+ WMI_TAG_PEER_DELETE_RESP_EVENT,
+ WMI_TAG_ROAM_DENSE_THRES_PARAM,
+ WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TAG_VDEV_CONFIG_RATEMASK,
+ WMI_TAG_PDEV_FIPS_CMD,
+ WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TAG_FWTEST_SET_PARAM_CMD,
+ WMI_TAG_PEER_ATF_REQUEST,
+ WMI_TAG_VDEV_ATF_REQUEST,
+ WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TAG_INST_RSSI_STATS_RESP,
+ WMI_TAG_MED_UTIL_REPORT_EVENT,
+ WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TAG_WDS_ADDR_EVENT,
+ WMI_TAG_PEER_RATECODE_LIST_EVENT,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TAG_PDEV_TPC_EVENT,
+ WMI_TAG_ANI_OFDM_EVENT,
+ WMI_TAG_ANI_CCK_EVENT,
+ WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TAG_PDEV_FIPS_EVENT,
+ WMI_TAG_ATF_PEER_INFO,
+ WMI_TAG_PDEV_GET_TPC_CMD,
+ WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TAG_QBOOST_CFG_CMD,
+ WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TAG_PEER_MCS_RATE_INFO,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TAG_MU_REPORT_TOTAL_MU,
+ WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_ROAM_SET_MBO,
+ WMI_TAG_MIB_STATS_ENABLE_CMD,
+ WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TAG_NDI_GET_CAP_REQ,
+ WMI_TAG_NDP_INITIATOR_REQ,
+ WMI_TAG_NDP_RESPONDER_REQ,
+ WMI_TAG_NDP_END_REQ,
+ WMI_TAG_NDI_CAP_RSP_EVENT,
+ WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+ WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+ WMI_TAG_NDP_END_RSP_EVENT,
+ WMI_TAG_NDP_INDICATION_EVENT,
+ WMI_TAG_NDP_CONFIRM_EVENT,
+ WMI_TAG_NDP_END_INDICATION_EVENT,
+ WMI_TAG_VDEV_SET_QUIET_CMD,
+ WMI_TAG_PDEV_SET_PCL_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_COEX_CONFIG_CMD,
+ WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TAG_MAC_PHY_CAPABILITIES,
+ WMI_TAG_HW_MODE_CAPABILITIES,
+ WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+ WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+ WMI_TAG_VDEV_WISA_CMD,
+ WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TAG_NDP_END_RSP_PER_NDI,
+ WMI_TAG_PEER_BWF_REQUEST,
+ WMI_TAG_BWF_PEER_INFO,
+ WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TAG_RMC_SET_LEADER_CMD,
+ WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+ WMI_TAG_PER_CHAIN_RSSI_STATS,
+ WMI_TAG_RSSI_STATS,
+ WMI_TAG_P2P_LO_START_CMD,
+ WMI_TAG_P2P_LO_STOP_CMD,
+ WMI_TAG_P2P_LO_STOPPED_EVENT,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+ WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TAG_TLV_BUF_LEN_PARAM,
+ WMI_TAG_SERVICE_AVAILABLE_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO,
+ WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TAG_MNT_FILTER_CMD,
+ WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+ WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TAG_CHAN_CCA_STATS,
+ WMI_TAG_PEER_SIGNAL_STATS,
+ WMI_TAG_TX_STATS,
+ WMI_TAG_PEER_AC_TX_STATS,
+ WMI_TAG_RX_STATS,
+ WMI_TAG_PEER_AC_RX_STATS,
+ WMI_TAG_REPORT_STATS_EVENT,
+ WMI_TAG_CHAN_CCA_STATS_THRESH,
+ WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+ WMI_TAG_TX_STATS_THRESH,
+ WMI_TAG_RX_STATS_THRESH,
+ WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TAG_REQUEST_WLAN_STATS_CMD,
+ WMI_TAG_RX_AGGR_FAILURE_EVENT,
+ WMI_TAG_RX_AGGR_FAILURE_INFO,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TAG_PDEV_BAND_TO_MAC,
+ WMI_TAG_TBTT_OFFSET_INFO,
+ WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+ WMI_TAG_SAR_LIMITS_CMD,
+ WMI_TAG_SAR_LIMIT_CMD_ROW,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TAG_VENDOR_OUI,
+ WMI_TAG_REQUEST_RCPI_CMD,
+ WMI_TAG_UPDATE_RCPI_EVENT,
+ WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TAG_PEER_STATS_INFO,
+ WMI_TAG_PEER_STATS_INFO_EVENT,
+ WMI_TAG_PKGID_EVENT,
+ WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ WMI_TAG_REGULATORY_RULE_STRUCT,
+ WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+ WMI_TAG_11D_SCAN_START_CMD,
+ WMI_TAG_11D_SCAN_STOP_CMD,
+ WMI_TAG_11D_NEW_COUNTRY_EVENT,
+ WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TAG_RADIO_CHAN_STATS,
+ WMI_TAG_RADIO_CHAN_STATS_EVENT,
+ WMI_TAG_ROAM_PER_CONFIG,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TAG_HW_DATA_FILTER_CMD,
+ WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TAG_IFACE_OFFLOAD_STATS,
+ WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_RSSI_CTL_EXT,
+ WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+ WMI_TAG_VDEV_GET_TX_POWER_CMD,
+ WMI_TAG_VDEV_TX_POWER_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TAG_TX_SEND_PARAMS,
+ WMI_TAG_HE_RATE_SET,
+ WMI_TAG_CONGESTION_STATS,
+ WMI_TAG_SET_INIT_COUNTRY_CMD,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+ WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TAG_THERM_THROT_STATS_EVENT,
+ WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+ WMI_TAG_OEM_DMA_RING_CFG_REQ,
+ WMI_TAG_OEM_DMA_RING_CFG_RSP,
+ WMI_TAG_OEM_INDIRECT_DATA,
+ WMI_TAG_OEM_DMA_BUF_RELEASE,
+ WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+ WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+ WMI_TAG_UNIT_TEST_EVENT,
+ WMI_TAG_ROAM_FILS_OFFLOAD,
+ WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TAG_PMK_CACHE,
+ WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TAG_ROAM_FILS_SYNCH,
+ WMI_TAG_GTK_OFFLOAD_EXTENDED,
+ WMI_TAG_ROAM_BG_SCAN_ROAMING,
+ WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TAG_OIC_PING_HANDOFF_EVENT,
+ WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+ WMI_TAG_BTM_CONFIG,
+ WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+ WMI_TAG_WLM_CONFIG_CMD,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TAG_ROAM_CND_SCORING_PARAM,
+ WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TAG_VENDOR_OUI_EXT,
+ WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TAG_FD_SEND_FROM_HOST_CMD,
+ WMI_TAG_ENABLE_FILS_CMD,
+ WMI_TAG_HOST_SWFDA_EVENT,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+ WMI_TAG_STATS_PERIOD,
+ WMI_TAG_NDL_SCHEDULE_UPDATE,
+ WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+ WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+ WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+ WMI_TAG_SAR2_RESULT_EVENT,
+ WMI_TAG_SAR_CAPABILITIES,
+ WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+ WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+ WMI_TAG_DMA_RING_CAPABILITIES,
+ WMI_TAG_DMA_RING_CFG_REQ,
+ WMI_TAG_DMA_RING_CFG_RSP,
+ WMI_TAG_DMA_BUF_RELEASE,
+ WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_SAR_GET_LIMITS_CMD,
+ WMI_TAG_SAR_GET_LIMITS_EVENT,
+ WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+ WMI_TAG_OFFLOAD_11K_REPORT,
+ WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+ WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+ WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+ WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+ WMI_TAG_PDEV_GET_NFCAL_POWER,
+ WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+ WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+ WMI_TAG_TWT_ENABLE_CMD,
+ WMI_TAG_TWT_DISABLE_CMD,
+ WMI_TAG_TWT_ADD_DIALOG_CMD,
+ WMI_TAG_TWT_DEL_DIALOG_CMD,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD,
+ WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+ WMI_TAG_ROAM_SCAN_STATS_EVENT,
+ WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+ WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+ WMI_TAG_GET_TPC_POWER_CMD,
+ WMI_TAG_GET_TPC_POWER_EVENT,
+ WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+ WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_EVENT,
+ WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+ WMI_TAG_NDP_TRANSPORT_IP,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ WMI_TAG_ESP_ESTIMATE_EVENT,
+ WMI_TAG_NAN_HOST_CONFIG,
+ WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD,
+ WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+ WMI_TAG_CHAN_WIDTH_PEER_LIST,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+ WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+ WMI_TAG_PEER_EXTD2_STATS,
+ WMI_TAG_HPCS_PULSE_START_CMD,
+ WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+ WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+ WMI_TAG_NAN_EVENT_INFO,
+ WMI_TAG_NDP_CHANNEL_INFO,
+ WMI_TAG_NDP_CMD,
+ WMI_TAG_NDP_EVENT,
+ /* TODO add all the missing cmds */
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
+ WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
+ WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
+ WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_MAX
+};
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD = 1,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD = 2,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD = 3,
+ WMI_TLV_SERVICE_STA_PWRSAVE = 4,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE = 5,
+ WMI_TLV_SERVICE_AP_UAPSD = 6,
+ WMI_TLV_SERVICE_AP_DFS = 7,
+ WMI_TLV_SERVICE_11AC = 8,
+ WMI_TLV_SERVICE_BLOCKACK = 9,
+ WMI_TLV_SERVICE_PHYERR = 10,
+ WMI_TLV_SERVICE_BCN_FILTER = 11,
+ WMI_TLV_SERVICE_RTT = 12,
+ WMI_TLV_SERVICE_WOW = 13,
+ WMI_TLV_SERVICE_RATECTRL_CACHE = 14,
+ WMI_TLV_SERVICE_IRAM_TIDS = 15,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD = 16,
+ WMI_TLV_SERVICE_NLO = 17,
+ WMI_TLV_SERVICE_GTK_OFFLOAD = 18,
+ WMI_TLV_SERVICE_SCAN_SCH = 19,
+ WMI_TLV_SERVICE_CSA_OFFLOAD = 20,
+ WMI_TLV_SERVICE_CHATTER = 21,
+ WMI_TLV_SERVICE_COEX_FREQAVOID = 22,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE = 23,
+ WMI_TLV_SERVICE_FORCE_FW_HANG = 24,
+ WMI_TLV_SERVICE_GPIO = 25,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+ WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE = 29,
+ WMI_TLV_SERVICE_TX_ENCAP = 30,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+ WMI_TLV_SERVICE_EARLY_RX = 32,
+ WMI_TLV_SERVICE_STA_SMPS = 33,
+ WMI_TLV_SERVICE_FWTEST = 34,
+ WMI_TLV_SERVICE_STA_WMMAC = 35,
+ WMI_TLV_SERVICE_TDLS = 36,
+ WMI_TLV_SERVICE_BURST = 37,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS = 39,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT = 40,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+ WMI_TLV_SERVICE_WLAN_HB = 42,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+ WMI_TLV_SERVICE_BATCH_SCAN = 44,
+ WMI_TLV_SERVICE_QPOWER = 45,
+ WMI_TLV_SERVICE_PLMREQ = 46,
+ WMI_TLV_SERVICE_THERMAL_MGMT = 47,
+ WMI_TLV_SERVICE_RMC = 48,
+ WMI_TLV_SERVICE_MHF_OFFLOAD = 49,
+ WMI_TLV_SERVICE_COEX_SAR = 50,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+ WMI_TLV_SERVICE_NAN = 52,
+ WMI_TLV_SERVICE_L1SS_STAT = 53,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED = 54,
+ WMI_TLV_SERVICE_OBSS_SCAN = 55,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN = 56,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE = 59,
+ WMI_TLV_SERVICE_LPASS = 60,
+ WMI_TLV_SERVICE_EXTSCAN = 61,
+ WMI_TLV_SERVICE_D0WOW = 62,
+ WMI_TLV_SERVICE_HSOFFLOAD = 63,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD = 64,
+ WMI_TLV_SERVICE_RX_FULL_REORDER = 65,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD = 66,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD = 68,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD = 69,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+ WMI_TLV_SERVICE_OCB = 71,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74,
+ WMI_TLV_SERVICE_MGMT_TX_HTT = 75,
+ WMI_TLV_SERVICE_MGMT_TX_WMI = 76,
+ WMI_TLV_SERVICE_EXT_MSG = 77,
+ WMI_TLV_SERVICE_MAWC = 78,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79,
+ WMI_TLV_SERVICE_EGAP = 80,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83,
+ WMI_TLV_SERVICE_ATF = 84,
+ WMI_TLV_SERVICE_COEX_GPIO = 85,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH = 89,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90,
+ WMI_TLV_SERVICE_BPF_OFFLOAD = 91,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+ WMI_TLV_SERVICE_NAN_DATA = 96,
+ WMI_TLV_SERVICE_NAN_RTT = 97,
+ WMI_TLV_SERVICE_11AX = 98,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+ WMI_TLV_SERVICE_MESH_11S = 103,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER = 105,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+ WMI_TLV_SERVICE_RCPI_SUPPORT = 114,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115,
+ WMI_TLV_SERVICE_PEER_STATS_INFO = 116,
+ WMI_TLV_SERVICE_REGULATORY_DB = 117,
+ WMI_TLV_SERVICE_11D_OFFLOAD = 118,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING = 119,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120,
+ WMI_TLV_SERVICE_PKT_ROUTING = 121,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123,
+ WMI_TLV_SERVICE_8SS_TX_BFEE = 124,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125,
+ WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+
+ WMI_MAX_SERVICE = 128,
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130,
+ WMI_TLV_SERVICE_FILS_SUPPORT = 131,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133,
+ WMI_TLV_SERVICE_MAWC_SUPPORT = 134,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139,
+ WMI_TLV_SERVICE_THERM_THROT = 140,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+ WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
+ WMI_TLV_SERVICE_FETCH_TX_PN = 181,
+ WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
+ WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
+ WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
+ WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
+ WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
+ WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
+ WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
+ WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
+ WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
+ WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
+ WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
+ WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
+ WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
+ WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
+ WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
+ WMI_TLV_SERVICE_VOW_ENABLE = 197,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
+ WMI_TLV_SERVICE_BROADCAST_TWT = 199,
+ WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
+ WMI_TLV_SERVICE_PS_TDCC = 201,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
+ WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
+ WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
+ WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
+ WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
+ WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
+ WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
+ WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
+ WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
+ WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
+ WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
+ WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
+ WMI_TLV_SERVICE_EXT2_MSG = 220,
+
+ WMI_MAX_EXT_SERVICE
+};
+
+enum {
+ WMI_SMPS_FORCED_MODE_NONE = 0,
+ WMI_SMPS_FORCED_MODE_DISABLED,
+ WMI_SMPS_FORCED_MODE_STATIC,
+ WMI_SMPS_FORCED_MODE_DYNAMIC
+};
+
+enum wmi_tpc_chainmask {
+ WMI_TPC_CHAINMASK_CONFIG_BAND_2G = 0,
+ WMI_TPC_CHAINMASK_CONFIG_BAND_5G = 1,
+ WMI_NUM_SUPPORTED_BAND_MAX = 2,
+};
+
+enum wmi_peer_param {
+ WMI_PEER_MIMO_PS_STATE = 1,
+ WMI_PEER_AMPDU = 2,
+ WMI_PEER_AUTHORIZE = 3,
+ WMI_PEER_CHWIDTH = 4,
+ WMI_PEER_NSS = 5,
+ WMI_PEER_USE_4ADDR = 6,
+ WMI_PEER_MEMBERSHIP = 7,
+ WMI_PEER_USERPOS = 8,
+ WMI_PEER_CRIT_PROTO_HINT_ENABLED = 9,
+ WMI_PEER_TX_FAIL_CNT_THR = 10,
+ WMI_PEER_SET_HW_RETRY_CTS2S = 11,
+ WMI_PEER_IBSS_ATIM_WINDOW_LENGTH = 12,
+ WMI_PEER_PHYMODE = 13,
+ WMI_PEER_USE_FIXED_PWR = 14,
+ WMI_PEER_PARAM_FIXED_RATE = 15,
+ WMI_PEER_SET_MU_WHITELIST = 16,
+ WMI_PEER_SET_MAX_TX_RATE = 17,
+ WMI_PEER_SET_MIN_TX_RATE = 18,
+ WMI_PEER_SET_DEFAULT_ROUTING = 19,
+};
+
+enum wmi_slot_time {
+ WMI_VDEV_SLOT_TIME_LONG = 1,
+ WMI_VDEV_SLOT_TIME_SHORT = 2,
+};
+
+enum wmi_preamble {
+ WMI_VDEV_PREAMBLE_LONG = 1,
+ WMI_VDEV_PREAMBLE_SHORT = 2,
+};
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0,
+ WMI_PEER_SMPS_STATIC = 1,
+ WMI_PEER_SMPS_DYNAMIC = 2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_direct_buffer_module {
+ WMI_DIRECT_BUF_SPECTRAL = 0,
+ WMI_DIRECT_BUF_CFR = 1,
+
+ /* keep it last */
+ WMI_DIRECT_BUF_MAX
+};
+
+struct ath12k_wmi_pdev_band_arg {
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+};
+
+struct ath12k_wmi_ppe_threshold_arg {
+ u32 numss_m1;
+ u32 ru_bit_mask;
+ u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+};
+
+#define PSOC_HOST_MAX_PHY_SIZE (3)
+#define ATH12K_11B_SUPPORT BIT(0)
+#define ATH12K_11G_SUPPORT BIT(1)
+#define ATH12K_11A_SUPPORT BIT(2)
+#define ATH12K_11N_SUPPORT BIT(3)
+#define ATH12K_11AC_SUPPORT BIT(4)
+#define ATH12K_11AX_SUPPORT BIT(5)
+
+struct ath12k_wmi_hal_reg_capabilities_ext_arg {
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+#define WMI_HOST_MAX_PDEV 3
+
+struct ath12k_wmi_host_mem_chunk_params {
+ __le32 tlv_header;
+ __le32 req_id;
+ __le32 ptr;
+ __le32 size;
+} __packed;
+
+struct ath12k_wmi_host_mem_chunk_arg {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
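+
+/* Illustrative sketch (not part of the ath12k API): converting the
+ * host-order mem chunk arg above into the little-endian TLV payload;
+ * tlv_header is left to the driver's TLV helpers. This hypothetical
+ * helper assumes the firmware takes the low 32 bits of the DMA address.
+ */
+static void wmi_fill_host_mem_chunk(struct ath12k_wmi_host_mem_chunk_params *chunk,
+ const struct ath12k_wmi_host_mem_chunk_arg *arg)
+{
+ chunk->req_id = cpu_to_le32(arg->req_id);
+ chunk->ptr = cpu_to_le32(lower_32_bits(arg->paddr));
+ chunk->size = cpu_to_le32(arg->len);
+}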
+
+struct ath12k_wmi_resource_config_arg {
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_active_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 max_peer_ext_stats;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 rx_batchmode;
+ u32 tt_support;
+ u32 atf_config;
+ u32 iphdr_pad_config;
+ u32 qwrap_config:16,
+ alloc_frag_desc_for_data_pkt:16;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 peer_map_unmap_version;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+};
+
+struct ath12k_wmi_init_cmd_arg {
+ struct ath12k_wmi_resource_config_arg res_cfg;
+ u8 num_mem_chunks;
+ struct ath12k_wmi_host_mem_chunk_arg *mem_chunks;
+ u32 hw_mode_id;
+ u32 num_band_to_mac;
+ struct ath12k_wmi_pdev_band_arg band_to_mac[WMI_HOST_MAX_PDEV];
+};
+
+struct ath12k_wmi_pdev_band_to_mac_params {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 start_freq;
+ __le32 end_freq;
+} __packed;
+
+/* This TLV is used both as the standalone command WMI_PDEV_SET_HW_MODE_CMDID
+ * and as part of WMI_TAG_INIT_CMD.
+ */
+struct ath12k_wmi_pdev_set_hw_mode_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 hw_mode_index;
+ __le32 num_band_to_mac;
+} __packed;
+
+struct ath12k_wmi_ppe_threshold_params {
+ __le32 numss_m1; /* NSS - 1 */
+ __le32 ru_info;
+ __le32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
+#define HW_BD_INFO_SIZE 5
+
+struct ath12k_wmi_abi_version_params {
+ __le32 abi_version_0;
+ __le32 abi_version_1;
+ __le32 abi_version_ns_0;
+ __le32 abi_version_ns_1;
+ __le32 abi_version_ns_2;
+ __le32 abi_version_ns_3;
+} __packed;
+
+struct wmi_init_cmd {
+ __le32 tlv_header;
+ struct ath12k_wmi_abi_version_params host_abi_vers;
+ __le32 num_host_mem_chunks;
+} __packed;
+
+#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+
+struct ath12k_wmi_resource_config_params {
+ __le32 tlv_header;
+ __le32 num_vdevs;
+ __le32 num_peers;
+ __le32 num_offload_peers;
+ __le32 num_offload_reorder_buffs;
+ __le32 num_peer_keys;
+ __le32 num_tids;
+ __le32 ast_skid_limit;
+ __le32 tx_chain_mask;
+ __le32 rx_chain_mask;
+ __le32 rx_timeout_pri[4];
+ __le32 rx_decap_mode;
+ __le32 scan_max_pending_req;
+ __le32 bmiss_offload_max_vdev;
+ __le32 roam_offload_max_vdev;
+ __le32 roam_offload_max_ap_profiles;
+ __le32 num_mcast_groups;
+ __le32 num_mcast_table_elems;
+ __le32 mcast2ucast_mode;
+ __le32 tx_dbg_log_size;
+ __le32 num_wds_entries;
+ __le32 dma_burst_size;
+ __le32 mac_aggr_delim;
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+ __le32 vow_config;
+ __le32 gtk_offload_max_vdev;
+ __le32 num_msdu_desc;
+ __le32 max_frag_entries;
+ __le32 num_tdls_vdevs;
+ __le32 num_tdls_conn_table_entries;
+ __le32 beacon_tx_offload_max_vdev;
+ __le32 num_multicast_filter_entries;
+ __le32 num_wow_filters;
+ __le32 num_keep_alive_pattern;
+ __le32 keep_alive_pattern_size;
+ __le32 max_tdls_concurrent_sleep_sta;
+ __le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 flag1;
+ __le32 smart_ant_cap;
+ __le32 bk_minfree;
+ __le32 be_minfree;
+ __le32 vi_minfree;
+ __le32 vo_minfree;
+ __le32 alloc_frag_desc_for_data_pkt;
+ __le32 num_ns_ext_tuples_cfg;
+ __le32 bpf_instruction_size;
+ __le32 max_bssid_rx_filters;
+ __le32 use_pdev_id;
+ __le32 max_num_dbs_scan_duty_cycle;
+ __le32 max_num_group_keys;
+ __le32 peer_map_unmap_version;
+ __le32 sched_params;
+ __le32 twt_ap_pdev_count;
+ __le32 twt_ap_sta_count;
+ __le32 max_nlo_ssids;
+ __le32 num_pkt_filters;
+ __le32 num_max_sta_vdevs;
+ __le32 max_bssid_indicator;
+ __le32 ul_resp_config;
+ __le32 msdu_flow_override_config0;
+ __le32 msdu_flow_override_config1;
+ __le32 flags2;
+ __le32 host_service_flags;
+ __le32 max_rnr_neighbours;
+ __le32 ema_max_vap_cnt;
+ __le32 ema_max_profile_period;
+} __packed;
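+
+/* Illustrative sketch: the host-order ath12k_wmi_resource_config_arg maps
+ * field-for-field onto the little-endian struct above; only a few
+ * representative members are shown, a real conversion covers all of them.
+ * wmi_copy_resource_config() is a hypothetical helper name.
+ */
+static void wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *cfg,
+ const struct ath12k_wmi_resource_config_arg *arg)
+{
+ cfg->num_vdevs = cpu_to_le32(arg->num_vdevs);
+ cfg->num_peers = cpu_to_le32(arg->num_peers);
+ cfg->num_tids = cpu_to_le32(arg->num_tids);
+ cfg->num_wds_entries = cpu_to_le32(arg->num_wds_entries);
+ cfg->twt_ap_pdev_count = cpu_to_le32(arg->twt_ap_pdev_count);
+ cfg->twt_ap_sta_count = cpu_to_le32(arg->twt_ap_sta_count);
+}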
+
+struct wmi_service_ready_event {
+ __le32 fw_build_vers;
+ struct ath12k_wmi_abi_version_params fw_abi_vers;
+ __le32 phy_capability;
+ __le32 max_frag_entry;
+ __le32 num_rf_chains;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 vht_supp_mcs;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable;
+ __le32 max_bcn_ie_size;
+ __le32 num_mem_reqs;
+ __le32 max_num_scan_channels;
+ __le32 hw_bd_id;
+ __le32 hw_bd_info[HW_BD_INFO_SIZE];
+ __le32 max_supported_macs;
+ __le32 wmi_fw_sub_feat_caps;
+ __le32 num_dbs_hw_modes;
+ /* txrx_chainmask
+ * [7:0] - 2G band tx chain mask
+ * [15:8] - 2G band rx chain mask
+ * [23:16] - 5G band tx chain mask
+ * [31:24] - 5G band rx chain mask
+ */
+ __le32 txrx_chainmask;
+ __le32 default_dbs_hw_mode_index;
+ __le32 num_msdu_desc;
+} __packed;
+
+#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(u32) - 1) / sizeof(u32))
+
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x u32 = 128 bits */
+#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32))
+#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32
+#define WMI_SERVICE_BITS_IN_SIZE32 4
+
+struct wmi_service_ready_ext_event {
+ __le32 default_conc_scan_config_bits;
+ __le32 default_fw_config_bits;
+ struct ath12k_wmi_ppe_threshold_params ppet;
+ __le32 he_cap_info;
+ __le32 mpdu_density;
+ __le32 max_bssid_rx_filters;
+ __le32 fw_build_vers_ext;
+ __le32 max_nlo_ssids;
+ __le32 max_bssid_indicator;
+ __le32 he_cap_info_ext;
+} __packed;
+
+struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params {
+ __le32 num_hw_modes;
+ __le32 num_chainmask_tables;
+} __packed;
+
+struct ath12k_wmi_hw_mode_cap_params {
+ __le32 tlv_header;
+ __le32 hw_mode_id;
+ __le32 phy_id_map;
+ __le32 hw_mode_config_type;
+} __packed;
+
+#define WMI_MAX_HECAP_PHY_SIZE (3)
+
+struct ath12k_wmi_mac_phy_caps_params {
+ __le32 hw_mode_id;
+ __le32 pdev_id;
+ __le32 phy_id;
+ __le32 supported_flags;
+ __le32 supported_bands;
+ __le32 ampdu_density;
+ __le32 max_bw_supported_2g;
+ __le32 ht_cap_info_2g;
+ __le32 vht_cap_info_2g;
+ __le32 vht_supp_mcs_2g;
+ __le32 he_cap_info_2g;
+ __le32 he_supp_mcs_2g;
+ __le32 tx_chain_mask_2g;
+ __le32 rx_chain_mask_2g;
+ __le32 max_bw_supported_5g;
+ __le32 ht_cap_info_5g;
+ __le32 vht_cap_info_5g;
+ __le32 vht_supp_mcs_5g;
+ __le32 he_cap_info_5g;
+ __le32 he_supp_mcs_5g;
+ __le32 tx_chain_mask_5g;
+ __le32 rx_chain_mask_5g;
+ __le32 he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE];
+ __le32 he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE];
+ struct ath12k_wmi_ppe_threshold_params he_ppet2g;
+ struct ath12k_wmi_ppe_threshold_params he_ppet5g;
+ __le32 chainmask_table_id;
+ __le32 lmac_id;
+ __le32 he_cap_info_2g_ext;
+ __le32 he_cap_info_5g_ext;
+ __le32 he_cap_info_internal;
+} __packed;
+
+struct ath12k_wmi_hal_reg_caps_ext_params {
+ __le32 tlv_header;
+ __le32 phy_id;
+ __le32 eeprom_reg_domain;
+ __le32 eeprom_reg_domain_ext;
+ __le32 regcap1;
+ __le32 regcap2;
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+} __packed;
+
+struct ath12k_wmi_soc_hal_reg_caps_params {
+ __le32 num_phy;
+} __packed;
+
+/* 2 word representation of MAC addr */
+struct ath12k_wmi_mac_addr_params {
+ u8 addr[ETH_ALEN];
+ u8 padding[2];
+} __packed;
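+
+/* Illustrative sketch: a MAC address occupies six of the eight bytes above,
+ * so zeroing the struct first keeps the two padding bytes defined on the
+ * wire. wmi_set_mac_addr() is a hypothetical helper.
+ */
+static void wmi_set_mac_addr(struct ath12k_wmi_mac_addr_params *param,
+ const u8 *addr)
+{
+ memset(param, 0, sizeof(*param));
+ ether_addr_copy(param->addr, addr);
+}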
+
+struct ath12k_wmi_dma_ring_caps_params {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 module_id;
+ __le32 min_elem;
+ __le32 min_buf_sz;
+ __le32 min_buf_align;
+} __packed;
+
+struct ath12k_wmi_ready_event_min_params {
+ struct ath12k_wmi_abi_version_params fw_abi_vers;
+ struct ath12k_wmi_mac_addr_params mac_addr;
+ __le32 status;
+ __le32 num_dscp_table;
+ __le32 num_extra_mac_addr;
+ __le32 num_total_peers;
+ __le32 num_extra_peers;
+} __packed;
+
+struct wmi_ready_event {
+ struct ath12k_wmi_ready_event_min_params ready_event_min;
+ __le32 max_ast_index;
+ __le32 pktlog_defs_checksum;
+} __packed;
+
+struct wmi_service_available_event {
+ __le32 wmi_service_segment_offset;
+ __le32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} __packed;
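+
+/* Illustrative sketch: extended service ids (>= WMI_MAX_SERVICE) arrive in
+ * 128-bit segments, with the event carrying the segment's first service id.
+ * wmi_ext_service_is_set() is a hypothetical helper showing the bit math.
+ */
+static bool wmi_ext_service_is_set(const struct wmi_service_available_event *ev,
+ u32 service_id)
+{
+ u32 base = le32_to_cpu(ev->wmi_service_segment_offset);
+ u32 idx, word, bit;
+
+ if (service_id < base ||
+ service_id - base >= WMI_SERVICE_SEGMENT_BM_SIZE32 *
+ WMI_AVAIL_SERVICE_BITS_IN_SIZE32)
+ return false;
+
+ idx = service_id - base;
+ word = idx / WMI_AVAIL_SERVICE_BITS_IN_SIZE32;
+ bit = idx % WMI_AVAIL_SERVICE_BITS_IN_SIZE32;
+
+ return le32_to_cpu(ev->wmi_service_segment_bitmap[word]) & BIT(bit);
+}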
+
+struct ath12k_wmi_vdev_create_arg {
+ u8 if_id;
+ u32 type;
+ u32 subtype;
+ struct {
+ u8 tx;
+ u8 rx;
+ } chains[NUM_NL80211_BANDS];
+ u32 pdev_id;
+ u8 if_stats_id;
+};
+
+#define ATH12K_MAX_VDEV_STATS_ID 0x30
+#define ATH12K_INVAL_VDEV_STATS_ID 0xFF
+
+struct wmi_vdev_create_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ struct ath12k_wmi_mac_addr_params vdev_macaddr;
+ __le32 num_cfg_txrx_streams;
+ __le32 pdev_id;
+ __le32 vdev_stats_id;
+} __packed;
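+
+/* Illustrative sketch: filling the fixed part of the vdev create command
+ * from the host-order arg; the per-band TX/RX stream TLVs that trail the
+ * fixed struct are omitted. wmi_fill_vdev_create() is a hypothetical
+ * helper, and the stream count is passed in rather than assumed.
+ */
+static void wmi_fill_vdev_create(struct wmi_vdev_create_cmd *cmd,
+ const struct ath12k_wmi_vdev_create_arg *arg,
+ const u8 *macaddr, u32 num_txrx_streams)
+{
+ cmd->vdev_id = cpu_to_le32(arg->if_id);
+ cmd->vdev_type = cpu_to_le32(arg->type);
+ cmd->vdev_subtype = cpu_to_le32(arg->subtype);
+ cmd->num_cfg_txrx_streams = cpu_to_le32(num_txrx_streams);
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
+ cmd->vdev_stats_id = cpu_to_le32(arg->if_stats_id);
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+}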
+
+struct ath12k_wmi_vdev_txrx_streams_params {
+ __le32 tlv_header;
+ u32 band;
+ u32 supported_tx_streams;
+ u32 supported_rx_streams;
+} __packed;
+
+struct wmi_vdev_delete_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 vdev_assoc_id;
+ struct ath12k_wmi_mac_addr_params vdev_bssid;
+ struct ath12k_wmi_mac_addr_params trans_bssid;
+ __le32 profile_idx;
+ __le32 profile_num;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+} __packed;
+
+#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
+#define WMI_VDEV_START_PMF_ENABLED BIT(1)
+#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+
+#define ATH12K_WMI_SSID_LEN 32
+
+struct ath12k_wmi_ssid_params {
+ __le32 ssid_len;
+ u8 ssid[ATH12K_WMI_SSID_LEN];
+} __packed;
+
+#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+
+struct wmi_vdev_start_request_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 requestor_id;
+ __le32 beacon_interval;
+ __le32 dtim_period;
+ __le32 flags;
+ struct ath12k_wmi_ssid_params ssid;
+ __le32 bcn_tx_rate;
+ __le32 bcn_txpower;
+ __le32 num_noa_descriptors;
+ __le32 disable_hw_ack;
+ __le32 preferred_tx_streams;
+ __le32 preferred_rx_streams;
+ __le32 he_ops;
+ __le32 cac_duration_ms;
+ __le32 regdomain;
+} __packed;
+
+#define MGMT_TX_DL_FRM_LEN 64
+
+struct ath12k_wmi_channel_arg {
+ u8 chan_id;
+ u8 pwr;
+ u32 mhz;
+ u32 half_rate:1,
+ quarter_rate:1,
+ dfs_set:1,
+ dfs_set_cfreq2:1,
+ is_chan_passive:1,
+ allow_ht:1,
+ allow_vht:1,
+ allow_he:1,
+ set_agile:1,
+ psc_channel:1;
+ u32 phy_mode;
+ u32 cfreq1;
+ u32 cfreq2;
+ char maxpower;
+ char minpower;
+ char maxregpower;
+ u8 antennamax;
+ u8 reg_class_id;
+};
+
+enum wmi_phy_mode {
+ MODE_11A = 0,
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4,
+ MODE_11NG_HT20 = 5,
+ MODE_11NA_HT40 = 6,
+ MODE_11NG_HT40 = 7,
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_11AX_HE20 = 16,
+ MODE_11AX_HE40 = 17,
+ MODE_11AX_HE80 = 18,
+ MODE_11AX_HE80_80 = 19,
+ MODE_11AX_HE160 = 20,
+ MODE_11AX_HE20_2G = 21,
+ MODE_11AX_HE40_2G = 22,
+ MODE_11AX_HE80_2G = 23,
+ MODE_UNKNOWN = 24,
+ MODE_MAX = 24
+};
+
+struct wmi_vdev_start_req_arg {
+ u32 vdev_id;
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ bool freq2_radar;
+ bool allow_he;
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ enum wmi_phy_mode mode;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 pref_rx_streams;
+ u32 pref_tx_streams;
+ u32 num_noa_descriptors;
+};
+
+struct ath12k_wmi_peer_create_arg {
+ const u8 *peer_addr;
+ u32 peer_type;
+ u32 vdev_id;
+};
+
+struct ath12k_wmi_pdev_set_regdomain_arg {
+ u16 current_rd_in_use;
+ u16 current_rd_2g;
+ u16 current_rd_5g;
+ u32 ctl_2g;
+ u32 ctl_5g;
+ u8 dfs_domain;
+ u32 pdev_id;
+};
+
+struct ath12k_wmi_rx_reorder_queue_remove_arg {
+ u8 *peer_macaddr;
+ u16 vdev_id;
+ u32 peer_tid_bitmap;
+};
+
+#define WMI_HOST_PDEV_ID_SOC 0xFF
+#define WMI_HOST_PDEV_ID_0 0
+#define WMI_HOST_PDEV_ID_1 1
+#define WMI_HOST_PDEV_ID_2 2
+
+#define WMI_PDEV_ID_SOC 0
+#define WMI_PDEV_ID_1ST 1
+#define WMI_PDEV_ID_2ND 2
+#define WMI_PDEV_ID_3RD 3
+
+/* Freq units in MHz */
+#define REG_RULE_START_FREQ 0x0000ffff
+#define REG_RULE_END_FREQ 0xffff0000
+#define REG_RULE_FLAGS 0x0000ffff
+#define REG_RULE_MAX_BW 0x0000ffff
+#define REG_RULE_REG_PWR 0x00ff0000
+#define REG_RULE_ANT_GAIN 0xff000000
+#define REG_RULE_PSD_INFO BIT(2)
+#define REG_RULE_PSD_EIRP 0xffff0000
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define HECAP_PHYDWORD_0 0
+#define HECAP_PHYDWORD_1 1
+#define HECAP_PHYDWORD_2 2
+
+#define HECAP_PHY_SU_BFER BIT(31)
+#define HECAP_PHY_SU_BFEE BIT(0)
+#define HECAP_PHY_MU_BFER BIT(1)
+#define HECAP_PHY_UL_MUMIMO BIT(22)
+#define HECAP_PHY_UL_MUOFDMA BIT(23)
+
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_SU_BFER)
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_SU_BFEE)
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_MU_BFER)
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUMIMO)
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUOFDMA)
+
+#define HE_MODE_SU_TX_BFEE BIT(0)
+#define HE_MODE_SU_TX_BFER BIT(1)
+#define HE_MODE_MU_TX_BFEE BIT(2)
+#define HE_MODE_MU_TX_BFER BIT(3)
+#define HE_MODE_DL_OFDMA BIT(4)
+#define HE_MODE_UL_OFDMA BIT(5)
+#define HE_MODE_UL_MUMIMO BIT(6)
+
+#define HE_DL_MUOFDMA_ENABLE 1
+#define HE_UL_MUOFDMA_ENABLE 1
+#define HE_DL_MUMIMO_ENABLE 1
+#define HE_MU_BFEE_ENABLE 1
+#define HE_SU_BFEE_ENABLE 1
+
+#define HE_VHT_SOUNDING_MODE_ENABLE 1
+#define HE_SU_MU_SOUNDING_MODE_ENABLE 1
+#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE 1
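+
+/* Illustrative sketch: folding the raw HE PHY capability dwords into the
+ * HE_MODE_* bitmap above using the HECAP_PHY_*_GET() accessors; a
+ * hypothetical helper, shown only to tie the two sets of defines together.
+ */
+static u32 wmi_he_mode_from_phy_caps(const u32 *hecap_phy)
+{
+ u32 he_mode = 0;
+
+ if (HECAP_PHY_SUBFMR_GET(hecap_phy))
+ he_mode |= HE_MODE_SU_TX_BFER;
+ if (HECAP_PHY_SUBFME_GET(hecap_phy))
+ he_mode |= HE_MODE_SU_TX_BFEE;
+ if (HECAP_PHY_MUBFMR_GET(hecap_phy))
+ he_mode |= HE_MODE_MU_TX_BFER;
+ if (HECAP_PHY_ULMUMIMO_GET(hecap_phy))
+ he_mode |= HE_MODE_UL_MUMIMO;
+ if (HECAP_PHY_ULOFDMA_GET(hecap_phy))
+ he_mode |= HE_MODE_UL_OFDMA;
+
+ return he_mode;
+}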
+
+/* HE or VHT Sounding */
+#define HE_VHT_SOUNDING_MODE BIT(0)
+/* SU or MU Sounding */
+#define HE_SU_MU_SOUNDING_MODE BIT(2)
+/* Trig or Non-Trig Sounding */
+#define HE_TRIG_NONTRIG_SOUNDING_MODE BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0x700
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_create_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 peer_type;
+} __packed;
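+
+/* Illustrative sketch: the peer create payload is just the three fields
+ * converted to little endian plus the MAC address; tlv_header is left to
+ * the driver's TLV helpers. wmi_fill_peer_create() is hypothetical.
+ */
+static void wmi_fill_peer_create(struct wmi_peer_create_cmd *cmd,
+ const struct ath12k_wmi_peer_create_arg *arg)
+{
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
+ cmd->peer_type = cpu_to_le32(arg->peer_type);
+}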
+
+struct wmi_peer_delete_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_peer_reorder_queue_setup_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid;
+ __le32 queue_ptr_lo;
+ __le32 queue_ptr_hi;
+ __le32 queue_no;
+ __le32 ba_window_size_valid;
+ __le32 ba_window_size;
+} __packed;
+
+struct wmi_peer_reorder_queue_remove_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid_mask;
+} __packed;
+
+enum wmi_bss_chan_info_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_pdev_set_param_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_pdev_set_ps_mode_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 sta_ps_mode;
+} __packed;
+
+struct wmi_pdev_suspend_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 suspend_opt;
+} __packed;
+
+struct wmi_pdev_resume_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_req_cmd {
+ __le32 tlv_header;
+ /* see enum wmi_bss_chan_info_req_type */
+ __le32 req_type;
+} __packed;
+
+struct wmi_ap_ps_peer_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct wmi_sta_powersave_param_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 reg_domain;
+ __le32 reg_domain_2g;
+ __le32 reg_domain_5g;
+ __le32 conformance_test_limit_2g;
+ __le32 conformance_test_limit_5g;
+ __le32 dfs_domain;
+} __packed;
+
+struct wmi_peer_set_param_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
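+
+/* Illustrative sketch: a typical use of this command is authorizing a peer
+ * once key exchange completes, with param_id taken from enum wmi_peer_param
+ * above. wmi_fill_peer_authorize() is a hypothetical helper.
+ */
+static void wmi_fill_peer_authorize(struct wmi_peer_set_param_cmd *cmd,
+ u32 vdev_id, const u8 *peer_addr)
+{
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->param_id = cpu_to_le32(WMI_PEER_AUTHORIZE);
+ cmd->param_value = cpu_to_le32(1);
+}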
+
+struct wmi_peer_flush_tids_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_dfs_phyerr_offload_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_bcn_offload_ctrl_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 bcn_ctrl_op;
+} __packed;
+
+enum scan_dwelltime_adaptive_mode {
+ SCAN_DWELL_MODE_DEFAULT = 0,
+ SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ SCAN_DWELL_MODE_MODERATE = 2,
+ SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ SCAN_DWELL_MODE_STATIC = 4
+};
+
+#define WLAN_SCAN_MAX_NUM_SSID 10
+#define WLAN_SCAN_MAX_NUM_BSSID 10
+#define WLAN_SCAN_MAX_NUM_CHANNELS 40
+
+struct ath12k_wmi_element_info_arg {
+ u32 len;
+ u8 *ptr;
+};
+
+#define WMI_IE_BITMAP_SIZE 8
+
+#define WMI_SCAN_MAX_NUM_SSID 0x0A
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host;
+ * the host cycles through the lower 12 bits to generate ids
+ */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Values lower than this may be refused by some firmware revisions, which
+ * then report scan completion with a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHAN = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT = BIT(8),
+ WMI_SCAN_EVENT_SUSPENDED = BIT(9),
+ WMI_SCAN_EVENT_RESUMED = BIT(10),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_start_scan_cmd {
+ __le32 tlv_header;
+ __le32 scan_id;
+ __le32 scan_req_id;
+ __le32 vdev_id;
+ __le32 scan_priority;
+ __le32 notify_scan_events;
+ __le32 dwell_time_active;
+ __le32 dwell_time_passive;
+ __le32 min_rest_time;
+ __le32 max_rest_time;
+ __le32 repeat_probe_time;
+ __le32 probe_spacing_time;
+ __le32 idle_time;
+ __le32 max_scan_time;
+ __le32 probe_delay;
+ __le32 scan_ctrl_flags;
+ __le32 burst_duration;
+ __le32 num_chan;
+ __le32 num_bssid;
+ __le32 num_ssids;
+ __le32 ie_len;
+ __le32 n_probes;
+ struct ath12k_wmi_mac_addr_params mac_addr;
+ struct ath12k_wmi_mac_addr_params mac_mask;
+ u32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ __le32 num_vendor_oui;
+ __le32 scan_ctrl_flags_ext;
+ __le32 dwell_time_active_2g;
+ __le32 dwell_time_active_6g;
+ __le32 dwell_time_passive_6g;
+ __le32 scan_start_offset;
+} __packed;
+
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+#define WMI_SCAN_FILTER_PROMISCUOS 0x100
+#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200
+#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ 0x400
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800
+#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ 0x1000
+#define WMI_SCAN_OFFCHAN_MGMT_TX 0x2000
+#define WMI_SCAN_OFFCHAN_DATA_TX 0x4000
+#define WMI_SCAN_CAPTURE_PHY_ERROR 0x8000
+#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000
+
+#define WMI_SCAN_DWELL_MODE_MASK GENMASK(23, 21)
+
+enum {
+ WMI_SCAN_DWELL_MODE_DEFAULT = 0,
+ WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ WMI_SCAN_DWELL_MODE_MODERATE = 2,
+ WMI_SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ WMI_SCAN_DWELL_MODE_STATIC = 4,
+};
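+
+/* Illustrative sketch: scan_ctrl_flags carries both the WMI_SCAN_* flag
+ * bits and, in bits 23:21, the adaptive dwell mode, so a WMI_SCAN_DWELL_MODE_*
+ * value slots straight into WMI_SCAN_DWELL_MODE_MASK. A hypothetical helper:
+ */
+static u32 wmi_build_scan_ctrl_flags(bool passive, u32 dwell_mode)
+{
+ u32 flags = WMI_SCAN_ADD_CCK_RATES | WMI_SCAN_ADD_OFDM_RATES |
+ WMI_SCAN_CHAN_STAT_EVENT;
+
+ if (passive)
+ flags |= WMI_SCAN_FLAG_PASSIVE;
+
+ return flags | u32_encode_bits(dwell_mode, WMI_SCAN_DWELL_MODE_MASK);
+}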
+
+struct ath12k_wmi_hint_short_ssid_arg {
+ u32 freq_flags;
+ u32 short_ssid;
+};
+
+struct ath12k_wmi_hint_bssid_arg {
+ u32 freq_flags;
+ struct ath12k_wmi_mac_addr_params bssid;
+};
+
+struct ath12k_wmi_scan_req_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 pdev_id;
+ enum wmi_scan_priority scan_priority;
+ union {
+ struct {
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
+ };
+ u32 scan_events;
+ };
+ u32 dwell_time_active;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_passive;
+ u32 dwell_time_active_6g;
+ u32 dwell_time_passive_6g;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ union {
+ struct {
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
+ };
+ u32 scan_flags;
+ };
+ enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 n_probes;
+ u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS];
+ u32 notify_scan_events;
+ struct cfg80211_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
+ struct ath12k_wmi_mac_addr_params bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
+ struct ath12k_wmi_element_info_arg extraie;
+ u32 num_hint_s_ssid;
+ u32 num_hint_bssid;
+ struct ath12k_wmi_hint_short_ssid_arg hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
+ struct ath12k_wmi_hint_bssid_arg hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
+};
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u32 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+#define WMI_SCAN_STOP_ONE 0x00000000
+#define WMI_SCAN_STOP_VAP_ALL 0x01000000
+#define WMI_SCAN_STOP_ALL 0x04000000
+
+/* Prefix 0xA000 indicates that the scan request
+ * is triggered by the host
+ */
+#define ATH12K_SCAN_ID 0xA000
+
+enum scan_cancel_req_type {
+ WLAN_SCAN_CANCEL_SINGLE = 1,
+ WLAN_SCAN_CANCEL_VDEV_ALL,
+ WLAN_SCAN_CANCEL_PDEV_ALL,
+};
+
+struct ath12k_wmi_scan_cancel_arg {
+ u32 requester;
+ u32 scan_id;
+ enum scan_cancel_req_type req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct wmi_bcn_send_from_host_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 data_len;
+ union {
+ __le32 frag_ptr;
+ __le32 frag_ptr_lo;
+ };
+ __le32 frame_ctrl;
+ __le32 dtim_flag;
+ __le32 bcn_antenna;
+ __le32 frag_ptr_hi;
+};
+
+#define WMI_CHAN_INFO_MODE GENMASK(5, 0)
+#define WMI_CHAN_INFO_HT40_PLUS BIT(6)
+#define WMI_CHAN_INFO_PASSIVE BIT(7)
+#define WMI_CHAN_INFO_ADHOC_ALLOWED BIT(8)
+#define WMI_CHAN_INFO_AP_DISABLED BIT(9)
+#define WMI_CHAN_INFO_DFS BIT(10)
+#define WMI_CHAN_INFO_ALLOW_HT BIT(11)
+#define WMI_CHAN_INFO_ALLOW_VHT BIT(12)
+#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA BIT(13)
+#define WMI_CHAN_INFO_HALF_RATE BIT(14)
+#define WMI_CHAN_INFO_QUARTER_RATE BIT(15)
+#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16)
+#define WMI_CHAN_INFO_ALLOW_HE BIT(17)
+#define WMI_CHAN_INFO_PSC BIT(18)
+
+#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8)
+#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16)
+#define WMI_CHAN_REG_INFO1_REG_CLS GENMASK(31, 24)
+
+#define WMI_CHAN_REG_INFO2_ANT_MAX GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO2_MAX_TX_PWR GENMASK(15, 8)
+
+struct ath12k_wmi_channel_params {
+ __le32 tlv_header;
+ __le32 mhz;
+ __le32 band_center_freq1;
+ __le32 band_center_freq2;
+ __le32 info;
+ __le32 reg_info_1;
+ __le32 reg_info_2;
+} __packed;
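+
+/* Illustrative sketch: packing one channel TLV with the masks above;
+ * le32_encode_bits() builds each little-endian word directly. Only a
+ * subset of the info flags is shown; wmi_fill_channel() is hypothetical.
+ */
+static void wmi_fill_channel(struct ath12k_wmi_channel_params *chan,
+ const struct ath12k_wmi_channel_arg *arg)
+{
+ chan->mhz = cpu_to_le32(arg->mhz);
+ chan->band_center_freq1 = cpu_to_le32(arg->cfreq1);
+ chan->band_center_freq2 = cpu_to_le32(arg->cfreq2);
+
+ chan->info = le32_encode_bits(arg->phy_mode, WMI_CHAN_INFO_MODE);
+ if (arg->is_chan_passive)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
+ if (arg->dfs_set)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
+
+ chan->reg_info_1 = le32_encode_bits(arg->minpower, WMI_CHAN_REG_INFO1_MIN_PWR) |
+ le32_encode_bits(arg->maxpower, WMI_CHAN_REG_INFO1_MAX_PWR) |
+ le32_encode_bits(arg->maxregpower, WMI_CHAN_REG_INFO1_MAX_REG_PWR);
+ chan->reg_info_2 = le32_encode_bits(arg->antennamax, WMI_CHAN_REG_INFO2_ANT_MAX);
+}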
+
+enum wmi_sta_ps_mode {
+ WMI_STA_PS_MODE_DISABLED = 0,
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+#define WMI_SMPS_MASK_LOWER_16BITS 0xFF
+#define WMI_SMPS_MASK_UPPER_3BITS 0x7
+#define WMI_SMPS_PARAM_VALUE_SHIFT 29
+
+#define ATH12K_WMI_FW_HANG_ASSERT_TYPE 1
+#define ATH12K_WMI_FW_HANG_DELAY 0
+
+/* type: 0 = unused, 1 = force a firmware assert, 2 = firmware stops
+ * responding to commands, to exercise command-timeout detection.
+ * delay_time_ms: delay before the simulated hang takes effect.
+ */
+
+struct wmi_force_fw_hang_cmd {
+ __le32 tlv_header;
+ __le32 type;
+ __le32 delay_time_ms;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_get_pdev_temperature_cmd {
+ __le32 tlv_header;
+ __le32 param;
+ __le32 pdev_id;
+} __packed;
+
+#define WMI_BEACON_TX_BUFFER_SIZE 512
+
+struct wmi_bcn_tmpl_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 tim_ie_offset;
+ __le32 buf_len;
+ __le32 csa_switch_count_offset;
+ __le32 ext_csa_switch_count_offset;
+ __le32 csa_event_bitmap;
+ __le32 mbssid_ie_offset;
+ __le32 esp_ie_offset;
+} __packed;
+
+struct wmi_vdev_install_key_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 key_cipher;
+ __le64 key_rsc_counter;
+ __le64 key_global_rsc_counter;
+ __le64 key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ __le32 key_len;
+ __le32 key_txmic_len;
+ __le32 key_rxmic_len;
+ __le32 is_group_key_id_valid;
+ __le32 group_key_id;
+
+ /* Followed by key_data: the key itself, then the
+ * TX MIC, then the RX MIC
+ */
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u64 key_rsc_counter;
+ const void *key_data;
+};
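+
+/* Illustrative sketch: the install-key command is the fixed struct above
+ * followed by a variable-length key_data TLV (key, then TX MIC, then RX
+ * MIC), so the send buffer is sized from the arg first. This assumes the
+ * usual single-dword TLV header; wmi_install_key_buf_len() is hypothetical.
+ */
+static size_t wmi_install_key_buf_len(const struct wmi_vdev_install_key_arg *arg)
+{
+ return sizeof(struct wmi_vdev_install_key_cmd) +
+ sizeof(__le32) + roundup(arg->key_len, sizeof(u32));
+}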
+
+#define WMI_MAX_SUPPORTED_RATES 128
+#define WMI_HOST_MAX_HECAP_PHY_SIZE 3
+#define WMI_HOST_MAX_HE_RATE_SET 3
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+
+struct wmi_rate_set_arg {
+ u32 num_rates;
+ u8 rates[WMI_MAX_SUPPORTED_RATES];
+};
+
+struct ath12k_wmi_peer_assoc_arg {
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u8 vht_capable;
+ u8 min_data_rate;
+ u32 tx_max_mcs_nss;
+ u32 peer_bw_rxnss_override;
+ bool is_pmf_enabled;
+ bool is_wme_set;
+ bool qos_flag;
+ bool apsd_flag;
+ bool ht_flag;
+ bool bw_40;
+ bool bw_80;
+ bool bw_160;
+ bool stbc_flag;
+ bool ldpc_flag;
+ bool static_mimops_flag;
+ bool dynamic_mimops_flag;
+ bool spatial_mux_flag;
+ bool vht_flag;
+ bool vht_ng_flag;
+ bool need_ptk_4_way;
+ bool need_gtk_2_way;
+ bool auth_flag;
+ bool safe_mode_enabled;
+ bool amsdu_disable;
+ /* Use common structure */
+ u8 peer_mac[ETH_ALEN];
+
+ bool he_flag;
+ u32 peer_he_cap_macinfo[2];
+ u32 peer_he_cap_macinfo_internal;
+ u32 peer_he_caps_6ghz;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs_count;
+ u32 peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ bool twt_responder;
+ bool twt_requester;
+ struct ath12k_wmi_ppe_threshold_arg peer_ppet;
+};
+
+struct wmi_peer_assoc_complete_cmd {
+ __le32 tlv_header;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 vdev_id;
+ __le32 peer_new_assoc;
+ __le32 peer_associd;
+ __le32 peer_flags;
+ __le32 peer_caps;
+ __le32 peer_listen_intval;
+ __le32 peer_ht_caps;
+ __le32 peer_max_mpdu;
+ __le32 peer_mpdu_density;
+ __le32 peer_rate_caps;
+ __le32 peer_nss;
+ __le32 peer_vht_caps;
+ __le32 peer_phymode;
+ __le32 peer_ht_info[2];
+ __le32 num_peer_legacy_rates;
+ __le32 num_peer_ht_rates;
+ __le32 peer_bw_rxnss_override;
+ struct ath12k_wmi_ppe_threshold_params peer_ppet;
+ __le32 peer_he_cap_info;
+ __le32 peer_he_ops;
+ __le32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+ __le32 peer_he_mcs;
+ __le32 peer_he_cap_info_ext;
+ __le32 peer_he_cap_info_internal;
+ __le32 min_data_rate;
+ __le32 peer_he_caps_6ghz;
+} __packed;
+
+struct wmi_stop_scan_cmd {
+ __le32 tlv_header;
+ __le32 requestor;
+ __le32 scan_id;
+ __le32 req_type;
+ __le32 vdev_id;
+ __le32 pdev_id;
+} __packed;
+
+struct ath12k_wmi_scan_chan_list_arg {
+ u32 pdev_id;
+ u16 nallchans;
+ struct ath12k_wmi_channel_arg channel[];
+};
+
+struct wmi_scan_chan_list_cmd {
+ __le32 tlv_header;
+ __le32 num_scan_chans;
+ __le32 flags;
+ __le32 pdev_id;
+} __packed;
+
+#define WMI_MGMT_SEND_DOWNLD_LEN 64
+
+#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
+#define WMI_TX_PARAMS_DWORD0_NSS_MASK GENMASK(27, 20)
+#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT GENMASK(31, 28)
+
+#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8)
+#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15)
+#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
+#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
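+
+/* For illustration, these dwords would typically be assembled with the
+ * <linux/bitfield.h> helpers; a sketch, not the driver's literal code:
+ *
+ *   __le32 dword0 = le32_encode_bits(pwr, WMI_TX_PARAMS_DWORD0_POWER) |
+ *                   le32_encode_bits(mcs, WMI_TX_PARAMS_DWORD0_MCS_MASK);
+ */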
+
+struct wmi_mgmt_send_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 desc_id;
+ __le32 chanfreq;
+ __le32 paddr_lo;
+ __le32 paddr_hi;
+ __le32 frame_len;
+ __le32 buf_len;
+ __le32 tx_params_valid;
+
+ /* This TLV is followed by struct wmi_mgmt_frame */
+
+ /* Followed by struct wmi_mgmt_send_params */
+} __packed;
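+
+/* Overall TLV sequence for a management TX, as a sketch: the fixed
+ * command TLV above, then a byte-array TLV carrying up to
+ * WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame (the full frame is DMA'd
+ * via paddr_lo/paddr_hi), then the optional wmi_mgmt_send_params TLV
+ * when tx_params_valid is set.
+ */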
+
+struct wmi_sta_powersave_mode_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 sta_ps_mode;
+} __packed;
+
+struct wmi_sta_smps_force_mode_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 forced_mode;
+} __packed;
+
+struct wmi_sta_smps_param_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct ath12k_wmi_bcn_prb_info_params {
+ __le32 tlv_header;
+ __le32 caps;
+ __le32 erp;
+} __packed;
+
+enum {
+ WMI_PDEV_SUSPEND,
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_green_ap_ps_enable_cmd_param {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 enable;
+} __packed;
+
+struct ath12k_wmi_ap_ps_arg {
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+enum set_init_cc_type {
+ WMI_COUNTRY_INFO_TYPE_ALPHA,
+ WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
+ WMI_COUNTRY_INFO_TYPE_REGDOMAIN,
+};
+
+enum set_init_cc_flags {
+ INVALID_CC,
+ CC_IS_SET,
+ REGDMN_IS_SET,
+ ALPHA_IS_SET,
+};
+
+struct ath12k_wmi_init_country_arg {
+ union {
+ u16 country_code;
+ u16 regdom_id;
+ u8 alpha2[3];
+ } cc_info;
+ enum set_init_cc_flags flags;
+};
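+
+/* Example usage (illustrative): requesting the "US" regulatory defaults
+ * could look roughly like
+ *
+ *   struct ath12k_wmi_init_country_arg arg = {
+ *       .flags = ALPHA_IS_SET,
+ *       .cc_info.alpha2 = "US",
+ *   };
+ *   ath12k_wmi_send_init_country_cmd(ar, &arg);
+ */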
+
+struct wmi_init_country_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 init_cc_type;
+ union {
+ __le32 country_code;
+ __le32 regdom_id;
+ __le32 alpha2;
+ } cc_info;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid;
+ __le32 initiator;
+ __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid;
+ __le32 statuscode;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid;
+ __le32 buffersize;
+} __packed;
+
+struct wmi_addba_clear_resp_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+#define DFS_PHYERR_UNIT_TEST_CMD 0
+#define DFS_UNIT_TEST_MODULE 0x2b
+#define DFS_UNIT_TEST_TOKEN 0xAA
+
+enum dfs_test_args_idx {
+ DFS_TEST_CMDID = 0,
+ DFS_TEST_PDEV_ID,
+ DFS_TEST_RADAR_PARAM,
+ DFS_MAX_TEST_ARGS,
+};
+
+struct wmi_dfs_unit_test_arg {
+ u32 cmd_id;
+ u32 pdev_id;
+ u32 radar_param;
+};
+
+struct wmi_unit_test_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 module_id;
+ __le32 num_args;
+ __le32 diag_token;
+ /* Followed by test args*/
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_HE 0x00000400
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_TWT_REQ 0x00400000
+#define WMI_PEER_TWT_RESP 0x00800000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_PMF 0x08000000
+/* TODO: Placeholder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
+ * Needs to be cleaned up.
+ */
+#define WMI_PEER_IS_P2P_CAPABLE 0x20000000
+#define WMI_PEER_160MHZ 0x40000000
+#define WMI_PEER_SAFEMODE_EN 0x80000000
+
+struct ath12k_wmi_vht_rate_set_params {
+ __le32 tlv_header;
+ __le32 rx_max_rate;
+ __le32 rx_mcs_set;
+ __le32 tx_max_rate;
+ __le32 tx_mcs_set;
+ __le32 tx_max_mcs_nss;
+} __packed;
+
+struct ath12k_wmi_he_rate_set_params {
+ __le32 tlv_header;
+ __le32 rx_mcs_set;
+ __le32 tx_mcs_set;
+} __packed;
+
+#define MAX_REG_RULES 10
+#define REG_ALPHA2_LEN 2
+#define MAX_6G_REG_RULES 5
+#define REG_US_5G_NUM_REG_RULES 4
+
+enum wmi_start_event_param {
+ WMI_VDEV_START_RESP_EVENT = 0,
+ WMI_VDEV_RESTART_RESP_EVENT,
+};
+
+struct wmi_vdev_start_resp_event {
+ __le32 vdev_id;
+ __le32 requestor_id;
+ /* enum wmi_start_event_param */
+ __le32 resp_type;
+ __le32 status;
+ __le32 chain_mask;
+ __le32 smps_mode;
+ union {
+ __le32 mac_id;
+ __le32 pdev_id;
+ };
+ __le32 cfgd_tx_streams;
+ __le32 cfgd_rx_streams;
+} __packed;
+
+/* VDEV start response status codes */
+enum wmi_vdev_start_resp_status_code {
+ WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0,
+ WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1,
+ WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
+ WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
+ WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+};
+
+enum wmi_reg_6g_ap_type {
+ WMI_REG_INDOOR_AP = 0,
+ WMI_REG_STD_POWER_AP = 1,
+ WMI_REG_VLP_AP = 2,
+ WMI_REG_CURRENT_MAX_AP_TYPE,
+ WMI_REG_MAX_SUPP_AP_TYPE = WMI_REG_VLP_AP,
+ WMI_REG_MAX_AP_TYPE = 7,
+};
+
+enum wmi_reg_6g_client_type {
+ WMI_REG_DEFAULT_CLIENT = 0,
+ WMI_REG_SUBORDINATE_CLIENT = 1,
+ WMI_REG_MAX_CLIENT_TYPE = 2,
+};
+
+/* Regulatory Rule Flags Passed by FW */
+#define REGULATORY_CHAN_DISABLED BIT(0)
+#define REGULATORY_CHAN_NO_IR BIT(1)
+#define REGULATORY_CHAN_RADAR BIT(3)
+#define REGULATORY_CHAN_NO_OFDM BIT(6)
+#define REGULATORY_CHAN_INDOOR_ONLY BIT(9)
+
+#define REGULATORY_CHAN_NO_HT40 BIT(4)
+#define REGULATORY_CHAN_NO_80MHZ BIT(7)
+#define REGULATORY_CHAN_NO_160MHZ BIT(8)
+#define REGULATORY_CHAN_NO_20MHZ BIT(11)
+#define REGULATORY_CHAN_NO_10MHZ BIT(12)
+
+enum {
+ WMI_REG_SET_CC_STATUS_PASS = 0,
+ WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ WMI_REG_INIT_ALPHA2_NOT_FOUND = 2,
+ WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ WMI_REG_SET_CC_STATUS_NO_MEMORY = 4,
+ WMI_REG_SET_CC_STATUS_FAIL = 5,
+};
+
+#define WMI_REG_CLIENT_MAX 4
+
+struct wmi_reg_chan_list_cc_ext_event {
+ __le32 status_code;
+ __le32 phy_id;
+ __le32 alpha2;
+ __le32 num_phy;
+ __le32 country_id;
+ __le32 domain_code;
+ __le32 dfs_region;
+ __le32 phybitmap;
+ __le32 min_bw_2g;
+ __le32 max_bw_2g;
+ __le32 min_bw_5g;
+ __le32 max_bw_5g;
+ __le32 num_2g_reg_rules;
+ __le32 num_5g_reg_rules;
+ __le32 client_type;
+ __le32 rnr_tpe_usable;
+ __le32 unspecified_ap_usable;
+ __le32 domain_code_6g_ap_lpi;
+ __le32 domain_code_6g_ap_sp;
+ __le32 domain_code_6g_ap_vlp;
+ __le32 domain_code_6g_client_lpi[WMI_REG_CLIENT_MAX];
+ __le32 domain_code_6g_client_sp[WMI_REG_CLIENT_MAX];
+ __le32 domain_code_6g_client_vlp[WMI_REG_CLIENT_MAX];
+ __le32 domain_code_6g_super_id;
+ __le32 min_bw_6g_ap_sp;
+ __le32 max_bw_6g_ap_sp;
+ __le32 min_bw_6g_ap_lpi;
+ __le32 max_bw_6g_ap_lpi;
+ __le32 min_bw_6g_ap_vlp;
+ __le32 max_bw_6g_ap_vlp;
+ __le32 min_bw_6g_client_sp[WMI_REG_CLIENT_MAX];
+ __le32 max_bw_6g_client_sp[WMI_REG_CLIENT_MAX];
+ __le32 min_bw_6g_client_lpi[WMI_REG_CLIENT_MAX];
+ __le32 max_bw_6g_client_lpi[WMI_REG_CLIENT_MAX];
+ __le32 min_bw_6g_client_vlp[WMI_REG_CLIENT_MAX];
+ __le32 max_bw_6g_client_vlp[WMI_REG_CLIENT_MAX];
+ __le32 num_6g_reg_rules_ap_sp;
+ __le32 num_6g_reg_rules_ap_lpi;
+ __le32 num_6g_reg_rules_ap_vlp;
+ __le32 num_6g_reg_rules_cl_sp[WMI_REG_CLIENT_MAX];
+ __le32 num_6g_reg_rules_cl_lpi[WMI_REG_CLIENT_MAX];
+ __le32 num_6g_reg_rules_cl_vlp[WMI_REG_CLIENT_MAX];
+} __packed;
+
+struct ath12k_wmi_reg_rule_ext_params {
+ __le32 tlv_header;
+ __le32 freq_info;
+ __le32 bw_pwr_info;
+ __le32 flag_info;
+ __le32 psd_power_info;
+} __packed;
+
+struct wmi_vdev_delete_resp_event {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_peer_delete_resp_event {
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_bcn_tx_status_event {
+ __le32 vdev_id;
+ __le32 tx_status;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ __le32 pdev_id;
+ __le32 freq; /* Units in MHz */
+ __le32 noise_floor; /* units are dBm */
+ /* rx clear - how often the channel was unused */
+ __le32 rx_clear_count_low;
+ __le32 rx_clear_count_high;
+ /* cycle count - elapsed time during measured period, in clock ticks */
+ __le32 cycle_count_low;
+ __le32 cycle_count_high;
+ /* tx cycle count - elapsed time spent in tx, in clock ticks */
+ __le32 tx_cycle_count_low;
+ __le32 tx_cycle_count_high;
+ /* rx cycle count - elapsed time spent in rx, in clock ticks */
+ __le32 rx_cycle_count_low;
+ __le32 rx_cycle_count_high;
+ /* rx cycle count for my BSS, in 64-bit format */
+ __le32 rx_bss_cycle_count_low;
+ __le32 rx_bss_cycle_count_high;
+} __packed;
+
+#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+
+struct wmi_vdev_install_key_compl_event {
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 status;
+} __packed;
+
+struct wmi_vdev_install_key_complete_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+};
+
+struct wmi_peer_assoc_conf_event {
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_peer_assoc_conf_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+};
+
+struct wmi_fils_discovery_event {
+ __le32 vdev_id;
+ __le32 fils_tt;
+ __le32 tbtt;
+} __packed;
+
+struct wmi_probe_resp_tx_status_event {
+ __le32 vdev_id;
+ __le32 tx_status;
+} __packed;
+
+struct wmi_pdev_ctl_failsafe_chk_event {
+ __le32 pdev_id;
+ __le32 ctl_failsafe_status;
+} __packed;
+
+struct ath12k_wmi_pdev_csa_event {
+ __le32 pdev_id;
+ __le32 current_switch_count;
+ __le32 num_vdevs;
+} __packed;
+
+struct ath12k_wmi_pdev_radar_event {
+ __le32 pdev_id;
+ __le32 detection_mode;
+ __le32 chan_freq;
+ __le32 chan_width;
+ __le32 detector_id;
+ __le32 segment_id;
+ __le32 timestamp;
+ __le32 is_chirp;
+ a_sle32 freq_offset;
+ a_sle32 sidx;
+} __packed;
+
+struct wmi_pdev_temperature_event {
+ /* temperature value in degrees Celsius */
+ a_sle32 temp;
+ __le32 pdev_id;
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
+
+struct ath12k_wmi_mgmt_rx_arg {
+ u32 chan_freq;
+ u32 channel;
+ u32 snr;
+ u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
+ u32 rate;
+ enum wmi_phy_mode phy_mode;
+ u32 buf_len;
+ int status;
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u8 pdev_id;
+};
+
+#define ATH_MAX_ANTENNA 4
+
+struct ath12k_wmi_mgmt_rx_params {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+ __le32 rssi_ctl[ATH_MAX_ANTENNA];
+ __le32 flags;
+ a_sle32 rssi;
+ __le32 tsf_delta;
+ __le32 rx_tsf_l32;
+ __le32 rx_tsf_u32;
+ __le32 pdev_id;
+ __le32 chan_freq;
+} __packed;
+
+#define MAX_ANTENNA_EIGHT 8
+
+struct wmi_mgmt_tx_compl_event {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_scan_event {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+ /* TSF timestamp at which the scan event (%WMI_SCAN_EVENT_) completed.
+ * For an AP it is the TSF of the AP vdev.
+ * For a STA in the connected state it is the TSF of the AP.
+ * For a STA that is not connected it is the free-running HW timer.
+ */
+ __le32 tsf_timestamp;
+} __packed;
+
+struct wmi_peer_sta_kickout_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_peer_sta_kickout_event {
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_event {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+} __packed;
+
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
+struct wmi_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+ __le32 my_bss_rx_cycle_count;
+ __le32 rx_11b_mode_data_duration;
+ __le32 tx_frame_cnt;
+ __le32 mac_clk_mhz;
+ __le32 vdev_id;
+} __packed;
+
+struct ath12k_wmi_target_cap_arg {
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 max_num_scan_channels;
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+};
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+};
+
+enum wmi_sta_powersave_param {
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
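+
+/* For example, enabling both delivery and trigger on AC0 alone is
+ * WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN
+ * == 0x3; ORing all eight flags enables U-APSD on every AC (0xff).
+ */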
+
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /* Values greater than one specify how many TX attempts are made per
+ * beacon interval before the STA wakes up.
+ */
+};
+
+/* The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Polls as are necessary to retrieve the buffered
+ * BUs. This parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /* Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+enum wmi_ap_ps_peer_param {
+ /** Set uapsd configuration for a given peer.
+ *
+ * This includes the delivery and trigger enabled state for each AC.
+ * The host MLME needs to set this based on the AP capability and the
+ * station's request as set in the association request received from
+ * the station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /**
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period is taken from the WME IE in the (re)assoc
+ * request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /** Time in seconds for aging out buffered frames
+ * for STA in power save
+ */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+
+ /** Specify frame types that are considered SIFS
+ * RESP trigger frames
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3,
+
+ /** Specifies the trigger state of TID.
+ * Valid only for UAPSD frame type
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4,
+
+ /* Specifies the WNM sleep state of a STA */
+ WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5,
+};
+
+#define DISABLE_SIFS_RESPONSE_TRIGGER 0
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+enum wmi_key_type {
+ WMI_KEY_PAIRWISE = 0,
+ WMI_KEY_GROUP = 1,
+};
+
+enum wmi_cipher_type {
+ WMI_CIPHER_NONE = 0, /* clear key */
+ WMI_CIPHER_WEP = 1,
+ WMI_CIPHER_TKIP = 2,
+ WMI_CIPHER_AES_OCB = 3,
+ WMI_CIPHER_AES_CCM = 4,
+ WMI_CIPHER_WAPI = 5,
+ WMI_CIPHER_CKIP = 6,
+ WMI_CIPHER_AES_CMAC = 7,
+ WMI_CIPHER_ANY = 8,
+ WMI_CIPHER_AES_GCM = 9,
+ WMI_CIPHER_AES_GMAC = 10,
+};
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xffff)
+
+#define ATH12K_RC_VERSION_OFFSET 28
+#define ATH12K_RC_PREAMBLE_OFFSET 8
+#define ATH12K_RC_NSS_OFFSET 5
+
+#define ATH12K_HW_RATE_CODE(rate, nss, preamble) \
+ ((1 << ATH12K_RC_VERSION_OFFSET) | \
+ ((nss) << ATH12K_RC_NSS_OFFSET) | \
+ ((preamble) << ATH12K_RC_PREAMBLE_OFFSET) | \
+ (rate))
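+
+/* Worked example (illustrative): with WMI_RATE_PREAMBLE_HT == 2 below,
+ * ATH12K_HW_RATE_CODE(3, 0, WMI_RATE_PREAMBLE_HT)
+ *   == (1 << 28) | (0 << 5) | (2 << 8) | 3 == 0x10000203,
+ * i.e. HT MCS 3 with the NSS field set to 0.
+ */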
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+ WMI_RATE_PREAMBLE_HE,
+};
+
+/**
+ * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS Enabled.
+ * @WMI_USE_CTS2SELF: CTS to self protection Enabled.
+ */
+enum wmi_rtscts_prot_mode {
+ WMI_RTS_CTS_DISABLED = 0,
+ WMI_USE_RTS_CTS = 1,
+ WMI_USE_CTS2SELF = 2,
+};
+
+/**
+ * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
+ * protection mode.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither of rate-series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only second rate-series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate-series will use RTS-CTS,
+ * but if there's a sw retry, both the rate
+ * series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
+ */
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES = 1,
+ WMI_RTSCTS_ACROSS_SW_RETRIES = 2,
+ WMI_RTSCTS_ERP = 3,
+ WMI_RTSCTS_FOR_ALL_RATESERIES = 4,
+};
+
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+enum wmi_sta_ps_param_rx_wake_policy {
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/* Do not change existing values! Used by the ath12k_frame_mode
+ * module parameter.
+ */
+enum ath12k_hw_txrx_mode {
+ ATH12K_HW_TXRX_RAW = 0,
+ ATH12K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH12K_HW_TXRX_ETHERNET = 2,
+};
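+
+/* Assuming the module parameter keeps the ath11k-style "frame_mode"
+ * name (an assumption, not confirmed here), ethernet decap could be
+ * selected at load time with e.g. "modprobe ath12k frame_mode=2".
+ */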
+
+struct wmi_wmm_params {
+ __le32 tlv_header;
+ __le32 cwmin;
+ __le32 cwmax;
+ __le32 aifs;
+ __le32 txoplimit;
+ __le32 acm;
+ __le32 no_ack;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u8 acm;
+ u8 aifs;
+ u16 cwmin;
+ u16 cwmax;
+ u16 txop;
+ u8 no_ack;
+};
+
+struct wmi_vdev_set_wmm_params_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct wmi_wmm_params wmm_params[4];
+ __le32 wmm_param_type;
+} __packed;
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+#define ATH12K_TWT_DEF_STA_CONG_TIMER_MS 5000
+#define ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE 10
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP 50
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN 20
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL 100
+#define ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN 80
+#define ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP 50
+#define ATH12K_TWT_DEF_MIN_NO_STA_SETUP 10
+#define ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN 2
+#define ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS 2
+#define ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS 2
+#define ATH12K_TWT_DEF_MAX_NO_STA_TWT 500
+#define ATH12K_TWT_DEF_MODE_CHECK_INTERVAL 10000
+#define ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
+#define ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+
+struct wmi_twt_enable_params_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 sta_cong_timer_ms;
+ __le32 mbss_support;
+ __le32 default_slot_size;
+ __le32 congestion_thresh_setup;
+ __le32 congestion_thresh_teardown;
+ __le32 congestion_thresh_critical;
+ __le32 interference_thresh_teardown;
+ __le32 interference_thresh_setup;
+ __le32 min_no_sta_setup;
+ __le32 min_no_sta_teardown;
+ __le32 no_of_bcast_mcast_slots;
+ __le32 min_no_twt_slots;
+ __le32 max_no_sta_twt;
+ __le32 mode_check_interval;
+ __le32 add_sta_slot_interval;
+ __le32 remove_sta_slot_interval;
+} __packed;
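+
+/* The ATH12K_TWT_DEF_* values above presumably seed this command; a
+ * sketch of the expected mapping (not the driver's literal code):
+ *
+ *   cmd->sta_cong_timer_ms =
+ *       cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
+ *   cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
+ */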
+
+struct wmi_twt_disable_params_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_obss_spatial_reuse_params_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 enable;
+ a_sle32 obss_min;
+ a_sle32 obss_max;
+ __le32 vdev_id;
+} __packed;
+
+#define ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
+#define ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
+#define ATH12K_OBSS_COLOR_COLLISION_DETECTION 1
+
+#define ATH12K_BSS_COLOR_STA_PERIODS 10000
+#define ATH12K_BSS_COLOR_AP_PERIODS 5000
+
+struct wmi_obss_color_collision_cfg_params_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 flags;
+ __le32 evt_type;
+ __le32 current_bss_color;
+ __le32 detection_period_ms;
+ __le32 scan_period_ms;
+ __le32 free_slot_expiry_time_ms;
+} __packed;
+
+struct wmi_bss_color_change_enable_params_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enable;
+} __packed;
+
+#define ATH12K_IPV4_TH_SEED_SIZE 5
+#define ATH12K_IPV6_TH_SEED_SIZE 11
+
+struct ath12k_wmi_pdev_lro_config_cmd {
+ __le32 tlv_header;
+ __le32 lro_enable;
+ __le32 res;
+ u32 th_4[ATH12K_IPV4_TH_SEED_SIZE];
+ u32 th_6[ATH12K_IPV6_TH_SEED_SIZE];
+ __le32 pdev_id;
+} __packed;
+
+#define ATH12K_WMI_SPECTRAL_COUNT_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_PERIOD_DEFAULT 224
+#define ATH12K_WMI_SPECTRAL_PRIORITY_DEFAULT 1
+#define ATH12K_WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
+#define ATH12K_WMI_SPECTRAL_GC_ENA_DEFAULT 1
+#define ATH12K_WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
+#define ATH12K_WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
+#define ATH12K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
+#define ATH12K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
+#define ATH12K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
+#define ATH12K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_RPT_MODE_DEFAULT 2
+#define ATH12K_WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
+#define ATH12K_WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
+#define ATH12K_WMI_SPECTRAL_CHN_MASK_DEFAULT 1
+
+struct ath12k_wmi_vdev_spectral_conf_arg {
+ u32 vdev_id;
+ u32 scan_count;
+ u32 scan_period;
+ u32 scan_priority;
+ u32 scan_fft_size;
+ u32 scan_gc_ena;
+ u32 scan_restart_ena;
+ u32 scan_noise_floor_ref;
+ u32 scan_init_delay;
+ u32 scan_nb_tone_thr;
+ u32 scan_str_bin_thr;
+ u32 scan_wb_rpt_mode;
+ u32 scan_rssi_rpt_mode;
+ u32 scan_rssi_thr;
+ u32 scan_pwr_format;
+ u32 scan_rpt_mode;
+ u32 scan_bin_scale;
+ u32 scan_dbm_adj;
+ u32 scan_chn_mask;
+};
+
+struct ath12k_wmi_vdev_spectral_conf_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 scan_count;
+ __le32 scan_period;
+ __le32 scan_priority;
+ __le32 scan_fft_size;
+ __le32 scan_gc_ena;
+ __le32 scan_restart_ena;
+ __le32 scan_noise_floor_ref;
+ __le32 scan_init_delay;
+ __le32 scan_nb_tone_thr;
+ __le32 scan_str_bin_thr;
+ __le32 scan_wb_rpt_mode;
+ __le32 scan_rssi_rpt_mode;
+ __le32 scan_rssi_thr;
+ __le32 scan_pwr_format;
+ __le32 scan_rpt_mode;
+ __le32 scan_bin_scale;
+ __le32 scan_dbm_adj;
+ __le32 scan_chn_mask;
+} __packed;
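+
+/* The host-order arg struct above maps field-for-field onto this
+ * little-endian command; roughly (a sketch):
+ *
+ *   cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ *   cmd->scan_count = cpu_to_le32(arg->scan_count);
+ *   ...and likewise for the remaining scan_* fields.
+ */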
+
+#define ATH12K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
+#define ATH12K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
+#define ATH12K_WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
+#define ATH12K_WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
+
+struct ath12k_wmi_vdev_spectral_enable_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 trigger_cmd;
+ __le32 enable_cmd;
+} __packed;
+
+struct ath12k_wmi_pdev_dma_ring_cfg_arg {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 module_id;
+ u32 base_paddr_lo;
+ u32 base_paddr_hi;
+ u32 head_idx_paddr_lo;
+ u32 head_idx_paddr_hi;
+ u32 tail_idx_paddr_lo;
+ u32 tail_idx_paddr_hi;
+ u32 num_elems;
+ u32 buf_size;
+ u32 num_resp_per_event;
+ u32 event_timeout_ms;
+};
+
+struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 module_id; /* see enum wmi_direct_buffer_module */
+ __le32 base_paddr_lo;
+ __le32 base_paddr_hi;
+ __le32 head_idx_paddr_lo;
+ __le32 head_idx_paddr_hi;
+ __le32 tail_idx_paddr_lo;
+ __le32 tail_idx_paddr_hi;
+ __le32 num_elems; /* Number of elems in the ring */
+ __le32 buf_size; /* size of allocated buffer in bytes */
+
+ /* Number of wmi_dma_buf_release_entry structs packed together */
+ __le32 num_resp_per_event;
+
+ /* The target should time out and send whatever response it has
+ * once this timer expires; units are milliseconds.
+ */
+ __le32 event_timeout_ms;
+} __packed;
+
+struct ath12k_wmi_dma_buf_release_fixed_params {
+ __le32 pdev_id;
+ __le32 module_id;
+ __le32 num_buf_release_entry;
+ __le32 num_meta_data_entry;
+} __packed;
+
+struct ath12k_wmi_dma_buf_release_entry_params {
+ __le32 tlv_header;
+ __le32 paddr_lo;
+
+ /* Bits 11:0: address of data
+ * Bits 31:12: host context data
+ */
+ __le32 paddr_hi;
+} __packed;
+
+#define WMI_SPECTRAL_META_INFO1_FREQ1 GENMASK(15, 0)
+#define WMI_SPECTRAL_META_INFO1_FREQ2 GENMASK(31, 16)
+
+#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH GENMASK(7, 0)
+
+struct ath12k_wmi_dma_buf_release_meta_data_params {
+ __le32 tlv_header;
+ a_sle32 noise_floor[WMI_MAX_CHAINS];
+ __le32 reset_delay;
+ __le32 freq1;
+ __le32 freq2;
+ __le32 ch_width;
+} __packed;
+
+enum wmi_fils_discovery_cmd_type {
+ WMI_FILS_DISCOVERY_CMD,
+ WMI_UNSOL_BCAST_PROBE_RESP,
+};
+
+struct wmi_fils_discovery_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 interval;
+ __le32 config; /* enum wmi_fils_discovery_cmd_type */
+} __packed;
+
+struct wmi_fils_discovery_tmpl_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_probe_tmpl_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 buf_len;
+} __packed;
+
+#define WMI_MAX_MEM_REQS 32
+
+#define MAX_RADIOS 3
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+
+struct ath12k_wmi_pdev {
+ struct ath12k_wmi_base *wmi_ab;
+ enum ath12k_htc_ep_id eid;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 rx_decap_mode;
+};
+
+struct ath12k_wmi_base {
+ struct ath12k_base *ab;
+ struct ath12k_wmi_pdev wmi[MAX_RADIOS];
+ enum ath12k_htc_ep_id wmi_endpoint_id[MAX_RADIOS];
+ u32 max_msg_len[MAX_RADIOS];
+
+ struct completion service_ready;
+ struct completion unified_ready;
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE);
+ wait_queue_head_t tx_credits_wq;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct ath12k_wmi_host_mem_chunk_arg mem_chunks[WMI_MAX_MEM_REQS];
+
+ enum wmi_host_hw_mode_config_type preferred_hw_mode;
+
+ struct ath12k_wmi_target_cap_arg *targ_cap;
+};
+
+#define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config);
+void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config);
+int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
+ u32 cmd_id);
+struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
+int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame);
+int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn);
+int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id);
+int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id);
+int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart);
+int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val);
+int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id);
+int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable);
+int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab);
+int ath12k_wmi_cmd_init(struct ath12k_base *ab);
+int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab);
+int ath12k_wmi_connect(struct ath12k_base *ab);
+int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
+ u8 pdev_id);
+int ath12k_wmi_attach(struct ath12k_base *ab);
+void ath12k_wmi_detach(struct ath12k_base *ab);
+int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
+ struct ath12k_wmi_vdev_create_arg *arg);
+int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
+ struct ath12k_wmi_peer_create_arg *arg);
+int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+
+int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
+ u32 param, u32 param_value);
+int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms);
+int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
+ const u8 *peer_addr, u8 vdev_id);
+int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id);
+void ath12k_wmi_start_scan_init(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg);
+int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg);
+int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_cancel_arg *arg);
+int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param);
+int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
+ u32 pdev_id);
+int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id);
+
+int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
+ struct ath12k_wmi_peer_assoc_arg *arg);
+int ath12k_wmi_vdev_install_key(struct ath12k *ar,
+ struct wmi_vdev_install_key_arg *arg);
+int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
+ enum wmi_bss_chan_info_req_type type);
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+ u32 vdev_id, u32 pdev_id);
+int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar);
+int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
+ u8 peer_addr[ETH_ALEN],
+ u32 peer_tid_bitmap,
+ u8 vdev_id);
+int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
+ struct ath12k_wmi_ap_ps_arg *arg);
+int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_chan_list_arg *arg);
+int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
+ u32 pdev_id);
+int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac);
+int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size);
+int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status);
+int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason);
+int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op);
+int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
+ struct ath12k_wmi_init_country_arg *arg);
+int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size);
+int
+ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
+ struct ath12k_wmi_rx_reorder_queue_remove_arg *arg);
+int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
+ struct ath12k_wmi_pdev_set_regdomain_arg *arg);
+int ath12k_wmi_simulate_radar(struct ath12k *ar);
+int ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id);
+int ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id);
+int ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd);
+int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable);
+int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
+ bool enable);
+int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar, int pdev_id);
+int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
+ struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg);
+int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
+ u32 trigger, u32 enable);
+int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
+ struct ath12k_wmi_vdev_spectral_conf_arg *arg);
+int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct sk_buff *tmpl);
+int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
+ bool unsol_bcast_probe_resp_enabled);
+int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct sk_buff *tmpl);
+int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
+ enum wmi_host_hw_mode_config_type mode);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index a20e0aeae284..0c2b8b1a10d5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0);
mutex_unlock(&vif->wdev.mtx);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 6610d76131fa..7a45f5f62826 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1277,13 +1277,13 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
static void ar5008_hw_init_txpower_cck(struct ath_hw *ah, int16_t *rate_array)
{
-#define CCK_DELTA(x) ((OLC_FOR_AR9280_20_LATER) ? max((x) - 2, 0) : (x))
- ah->tx_power[0] = CCK_DELTA(rate_array[rate1l]);
- ah->tx_power[1] = CCK_DELTA(min(rate_array[rate2l],
+#define CCK_DELTA(_ah, x) ((OLC_FOR_AR9280_20_LATER(_ah)) ? max((x) - 2, 0) : (x))
+ ah->tx_power[0] = CCK_DELTA(ah, rate_array[rate1l]);
+ ah->tx_power[1] = CCK_DELTA(ah, min(rate_array[rate2l],
rate_array[rate2s]));
- ah->tx_power[2] = CCK_DELTA(min(rate_array[rate5_5l],
+ ah->tx_power[2] = CCK_DELTA(ah, min(rate_array[rate5_5l],
rate_array[rate5_5s]));
- ah->tx_power[3] = CCK_DELTA(min(rate_array[rate11l],
+ ah->tx_power[3] = CCK_DELTA(ah, min(rate_array[rate11l],
rate_array[rate11s]));
#undef CCK_DELTA
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index fd53b5f9e9b5..c8b3f3aaa45b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -659,9 +659,9 @@ static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
{
- if (OLC_FOR_AR9287_10_LATER)
+ if (OLC_FOR_AR9287_10_LATER(ah))
ar9287_hw_olc_temp_compensation(ah);
- else if (OLC_FOR_AR9280_20_LATER)
+ else if (OLC_FOR_AR9280_20_LATER(ah))
ar9280_hw_olc_temp_compensation(ah);
}
@@ -672,7 +672,7 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
bool nfcal, nfcal_pending = false, percal_pending;
int ret;
- nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
+ nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata) {
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (longcal) /* Remember to not miss */
@@ -752,11 +752,11 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (IS_CHAN_HT20(chan)) {
REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_FLTR_CAL);
REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in %d ms; noisy environment?\n",
@@ -768,10 +768,10 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
}
REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL);
REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in %d ms; noisy environment?\n",
@@ -781,7 +781,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL);
return true;
}
@@ -857,17 +857,17 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (!AR_SREV_9287_11_OR_LATER(ah))
REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_FLTR_CAL);
}
/* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) |
AR_PHY_AGC_CONTROL_CAL);
/* Poll for offset calibration complete */
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
ath_dbg(common, CALIBRATE,
@@ -880,7 +880,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (!AR_SREV_9287_11_OR_LATER(ah))
REG_SET_BIT(ah, AR_PHY_ADC_CTL,
AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_FLTR_CAL);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index ae68f674829b..d08ea0b28530 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -249,9 +249,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (power_off) {
/* clear bit 19 to disable L1 */
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA);
- val = REG_READ(ah, AR_WA);
+ val = REG_READ(ah, AR_WA(ah));
/*
* Set PCIe workaround bits
@@ -286,7 +286,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
- REG_WRITE(ah, AR_WA, val);
+ REG_WRITE(ah, AR_WA(ah), val);
} else {
if (ah->config.pcie_waen) {
val = ah->config.pcie_waen;
@@ -314,10 +314,10 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
- REG_WRITE(ah, AR_WA, val);
+ REG_WRITE(ah, AR_WA(ah), val);
/* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index a8c0e8e2d78c..b70cd4af1ae0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -21,7 +21,7 @@
static void ar9002_hw_rx_enable(struct ath_hw *ah)
{
- REG_WRITE(ah, AR_CR, AR_CR_RXE);
+ REG_WRITE(ah, AR_CR, AR_CR_RXE(ah));
}
static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
@@ -40,14 +40,14 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
struct ath_common *common = ath9k_hw_common(ah);
if (!AR_SREV_9100(ah)) {
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
- if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah)) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah))
== AR_RTC_STATUS_ON) {
isr = REG_READ(ah, AR_ISR);
}
}
- sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah)) &
AR_INTR_SYNC_DEFAULT;
*masked = 0;
@@ -138,7 +138,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
u32 s5_s;
if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
- s5_s = REG_READ(ah, AR_ISR_S5_S);
+ s5_s = REG_READ(ah, AR_ISR_S5_S(ah));
} else {
s5_s = REG_READ(ah, AR_ISR_S5);
}
@@ -201,8 +201,8 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
}
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
- (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR(ah), sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR(ah));
}
return true;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index ebdb97999335..23ac6b7c2cbd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -281,10 +281,10 @@ static void ar9002_olc_init(struct ath_hw *ah)
{
u32 i;
- if (!OLC_FOR_AR9280_20_LATER)
+ if (!OLC_FOR_AR9280_20_LATER(ah))
return;
- if (OLC_FOR_AR9287_10_LATER) {
+ if (OLC_FOR_AR9287_10_LATER(ah)) {
REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 6ca089f15629..2224cb74b1d4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -346,14 +346,14 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
/*
* Clear offset and IQ calibration, run AGC cal.
*/
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_OFFSET_CAL);
- REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL);
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
if (!status) {
@@ -367,13 +367,13 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
* (Carrier Leak calibration, TX Filter calibration and
* Peak Detector offset calibration).
*/
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_OFFSET_CAL);
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
AR_PHY_CL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_PKDET_CAL);
ch0_done = 0;
@@ -387,10 +387,10 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL);
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
if (!status) {
@@ -531,7 +531,7 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
}
}
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_OFFSET_CAL);
REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
@@ -539,7 +539,7 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
* We don't need to check txiqcal_done here since it is always
* set for AR9550.
*/
- REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
return true;
@@ -897,7 +897,7 @@ static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
- AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
+ AR_PHY_TX_IQCAL_CORR_COEFF_B0(ah, i);
if (!AR_SREV_9485(ah)) {
tx_corr_coeff[i * 2][1] =
tx_corr_coeff[(i * 2) + 1][1] =
@@ -914,7 +914,7 @@ static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
if (!(ah->txchainmask & (1 << i)))
continue;
nmeasurement = REG_READ_FIELD(ah,
- AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B0(ah),
AR_PHY_CALIBRATED_GAINS_0);
if (nmeasurement > MAX_MEASUREMENT)
@@ -988,10 +988,10 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
AR_PHY_TXGAIN_FORCE, 0);
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START(ah),
AR_PHY_TX_IQCAL_START_DO_CAL, 1);
- if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+ if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START(ah),
AR_PHY_TX_IQCAL_START_DO_CAL, 0,
AH_WAIT_TIMEOUT)) {
ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n");
@@ -1056,7 +1056,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
- AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B0(ah),
AR_PHY_TX_IQCAL_STATUS_B1,
AR_PHY_TX_IQCAL_STATUS_B2,
};
@@ -1076,7 +1076,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
continue;
nmeasurement = REG_READ_FIELD(ah,
- AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B0(ah),
AR_PHY_CALIBRATED_GAINS_0);
if (nmeasurement > MAX_MEASUREMENT)
nmeasurement = MAX_MEASUREMENT;
@@ -1096,7 +1096,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
u32 idx = 2 * j, offset = 4 * (3 * im + j);
REG_RMW_FIELD(ah,
- AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY(ah),
AR_PHY_CHAN_INFO_TAB_S2_READ,
0);
@@ -1106,7 +1106,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
offset);
REG_RMW_FIELD(ah,
- AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY(ah),
AR_PHY_CHAN_INFO_TAB_S2_READ,
1);
@@ -1161,7 +1161,7 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
- AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
+ AR_PHY_TX_IQCAL_CORR_COEFF_B0(ah, i);
if (!AR_SREV_9485(ah)) {
tx_corr_coeff[i * 2][1] =
tx_corr_coeff[(i * 2) + 1][1] =
@@ -1346,7 +1346,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
if (!caldata || !(ah->enabled_cals & TX_CL_CAL))
return;
- txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) &
AR_PHY_AGC_CONTROL_CLC_SUCCESS);
if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
@@ -1424,12 +1424,12 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
if (rtt) {
if (!run_rtt_cal) {
- agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL(ah));
agc_supp_cals &= agc_ctrl;
agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), agc_ctrl);
} else {
if (ah->ah_flags & AH_FASTCC)
run_agc_cal = true;
@@ -1452,7 +1452,7 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
goto skip_tx_iqcal;
/* Do Tx IQ Calibration */
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1(ah),
AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
DELPT);
@@ -1462,10 +1462,10 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
- REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
else
- REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
}
@@ -1485,12 +1485,12 @@ skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) |
AR_PHY_AGC_CONTROL_CAL);
/* Poll for offset calibration complete */
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
@@ -1507,7 +1507,7 @@ skip_tx_iqcal:
if (rtt && !run_rtt_cal) {
agc_ctrl |= agc_supp_cals;
- REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), agc_ctrl);
}
if (!status) {
@@ -1558,11 +1558,11 @@ static bool do_ar9003_agc_cal(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
bool status;
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) |
AR_PHY_AGC_CONTROL_CAL);
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
if (!status) {
@@ -1596,7 +1596,7 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
goto skip_tx_iqcal;
/* Do Tx IQ Calibration */
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1(ah),
AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
DELPT);
@@ -1605,7 +1605,7 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
* AGC calibration. Specifically, AR9550 in SoC chips.
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
txiqcal_done = true;
} else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 16bfcd0a1f6e..944f46cdf34c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3084,13 +3084,13 @@ error:
static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data)
{
- REG_READ(ah, AR9300_OTP_BASE + (4 * addr));
+ REG_READ(ah, AR9300_OTP_BASE(ah) + (4 * addr));
- if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE,
+ if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS(ah), AR9300_OTP_STATUS_TYPE,
AR9300_OTP_STATUS_VALID, 1000))
return false;
- *data = REG_READ(ah, AR9300_OTP_READ_DATA);
+ *data = REG_READ(ah, AR9300_OTP_READ_DATA(ah));
return true;
}
@@ -3607,15 +3607,15 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
AR_SREV_9531(ah) || AR_SREV_9561(ah))
- REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
+ REG_RMW_FIELD(ah, AR_CH0_TOP2(ah), AR_CH0_TOP2_XPABIASLVL, bias);
else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
- REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
+ REG_RMW_FIELD(ah, AR_CH0_TOP(ah), AR_CH0_TOP_XPABIASLVL, bias);
else {
- REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
- REG_RMW_FIELD(ah, AR_CH0_THERM,
+ REG_RMW_FIELD(ah, AR_CH0_TOP(ah), AR_CH0_TOP_XPABIASLVL, bias);
+ REG_RMW_FIELD(ah, AR_CH0_THERM(ah),
AR_CH0_THERM_XPABIASLVL_MSB,
bias >> 2);
- REG_RMW_FIELD(ah, AR_CH0_THERM,
+ REG_RMW_FIELD(ah, AR_CH0_THERM(ah),
AR_CH0_THERM_XPASHORT2GND, 1);
}
}
@@ -3960,9 +3960,9 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
int reg_pmu_set;
- reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM;
- REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
- if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ reg_pmu_set = REG_READ(ah, AR_PHY_PMU2(ah)) & ~AR_PHY_PMU2_PGM;
+ REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set))
return;
if (AR_SREV_9330(ah)) {
@@ -3984,28 +3984,28 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
(3 << 24) | (1 << 28);
}
- REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
- if (!is_pmu_set(ah, AR_PHY_PMU1, reg_pmu_set))
+ REG_WRITE(ah, AR_PHY_PMU1(ah), reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU1(ah), reg_pmu_set))
return;
- reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0xFFC00000)
+ reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2(ah)) & ~0xFFC00000)
| (4 << 26);
- REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
- if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set))
return;
- reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0x00200000)
+ reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2(ah)) & ~0x00200000)
| (1 << 21);
- REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
- if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set))
return;
} else if (AR_SREV_9462(ah) || AR_SREV_9565(ah) ||
AR_SREV_9561(ah)) {
reg_val = le32_to_cpu(pBase->swreg);
- REG_WRITE(ah, AR_PHY_PMU1, reg_val);
+ REG_WRITE(ah, AR_PHY_PMU1(ah), reg_val);
if (AR_SREV_9561(ah))
- REG_WRITE(ah, AR_PHY_PMU2, 0x10200000);
+ REG_WRITE(ah, AR_PHY_PMU2(ah), 0x10200000);
} else {
/* Internal regulator is ON. Write swreg register. */
reg_val = le32_to_cpu(pBase->swreg);
@@ -4021,25 +4021,25 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
}
} else {
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
- REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0);
- while (REG_READ_FIELD(ah, AR_PHY_PMU2,
+ REG_RMW_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM, 0);
+ while (REG_READ_FIELD(ah, AR_PHY_PMU2(ah),
AR_PHY_PMU2_PGM))
udelay(10);
- REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
- while (!REG_READ_FIELD(ah, AR_PHY_PMU1,
+ REG_RMW_FIELD(ah, AR_PHY_PMU1(ah), AR_PHY_PMU1_PWD, 0x1);
+ while (!REG_READ_FIELD(ah, AR_PHY_PMU1(ah),
AR_PHY_PMU1_PWD))
udelay(10);
- REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1);
- while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
+ REG_RMW_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM, 0x1);
+ while (!REG_READ_FIELD(ah, AR_PHY_PMU2(ah),
AR_PHY_PMU2_PGM))
udelay(10);
} else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
- REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_PMU1(ah), AR_PHY_PMU1_PWD, 0x1);
else {
- reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
+ reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK(ah)) |
AR_RTC_FORCE_SWREG_PRD;
- REG_WRITE(ah, AR_RTC_SLEEP_CLK, reg_val);
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK(ah), reg_val);
}
}
@@ -4055,9 +4055,9 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
if (eep->baseEepHeader.featureEnable & 0x40) {
tuning_caps_param &= 0x7f;
- REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC,
+ REG_RMW_FIELD(ah, AR_CH0_XTAL(ah), AR_CH0_XTAL_CAPINDAC,
tuning_caps_param);
- REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPOUTDAC,
+ REG_RMW_FIELD(ah, AR_CH0_XTAL(ah), AR_CH0_XTAL_CAPOUTDAC,
tuning_caps_param);
}
}
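
The OTP read converted above follows a recurring ath9k idiom: kick off the access with a throwaway REG_READ(), poll a status register until its type field reads back as valid, then fetch the data word. A minimal sketch of such a poll loop, assuming only the REG_READ() accessor and udelay() used throughout the driver (this is an illustration of the idiom, not the driver's ath9k_hw_wait() implementation):

static bool otp_poll_valid(struct ath_hw *ah, u32 status_reg,
			   u32 type_mask, u32 valid_val, int timeout_us)
{
	int elapsed;

	for (elapsed = 0; elapsed < timeout_us; elapsed += 10) {
		/* Compare the masked status field against the VALID value. */
		if ((REG_READ(ah, status_reg) & type_mask) == valid_val)
			return true;
		udelay(10);
	}
	return false;	/* timed out; the caller treats the read as failed */
}
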
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index f8ae20318302..b91ef1250ba8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -82,16 +82,16 @@
/* AR5416_EEPMISC_BIG_ENDIAN not set indicates little endian */
#define AR9300_EEPMISC_LITTLE_ENDIAN 0
-#define AR9300_OTP_BASE \
- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
-#define AR9300_OTP_STATUS \
- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18)
+#define AR9300_OTP_BASE(_ah) \
+ ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 0x30000 : 0x14000)
+#define AR9300_OTP_STATUS(_ah) \
+ ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 0x31018 : 0x15f18)
#define AR9300_OTP_STATUS_TYPE 0x7
#define AR9300_OTP_STATUS_VALID 0x4
#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
#define AR9300_OTP_STATUS_SM_BUSY 0x1
-#define AR9300_OTP_READ_DATA \
- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c)
+#define AR9300_OTP_READ_DATA(_ah) \
+ ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 0x3101c : 0x15f1c)
enum targetPowerHTRates {
HT_TARGET_RATE_0_8_16,
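
The header hunk above is the pattern this whole series applies: register-address macros that used to pick up a variable literally named ah from the surrounding scope now take the hardware handle as an explicit _ah parameter. A contrived before/after sketch (REG_EXAMPLE_OLD, REG_EXAMPLE and chip_is_foo() are made-up names, not driver symbols):

/* Before: only compiles where a variable named 'ah' happens to exist. */
#define REG_EXAMPLE_OLD		(chip_is_foo(ah) ? 0x30000 : 0x14000)

/* After: the dependency on the handle is visible at every call site. */
#define REG_EXAMPLE(_ah)	(chip_is_foo(_ah) ? 0x30000 : 0x14000)

static u32 read_example(struct ath_hw *ah)
{
	return REG_READ(ah, REG_EXAMPLE(ah));
}
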
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 42f00a2a8c80..4f27a9fb1482 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1032,8 +1032,8 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
/* Nothing to do on restore for 11N */
if (!power_off /* !restore */) {
/* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index ff8ab58e67d9..a8bc003077dc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -193,16 +193,16 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
if (ath9k_hw_mci_is_enabled(ah))
async_mask |= AR_INTR_ASYNC_MASK_MCI;
- async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah));
if (async_cause & async_mask) {
- if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ if ((REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah))
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
}
- sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah)) & AR_INTR_SYNC_DEFAULT;
*masked = 0;
@@ -280,7 +280,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
u32 s5;
if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
- s5 = REG_READ(ah, AR_ISR_S5_S);
+ s5 = REG_READ(ah, AR_ISR_S5_S(ah));
else
s5 = REG_READ(ah, AR_ISR_S5);
@@ -345,8 +345,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
- (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR(ah), sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR(ah));
}
return true;
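
The write/readback pair in the last hunk is the standard flush for posted writes: the discarded read cannot complete until the preceding write has actually reached the device, so the interrupt causes are guaranteed to be cleared before the handler returns. Schematically, the same two lines wrapped up as a helper (a sketch, not a driver function):

static void ack_sync_cause(struct ath_hw *ah, u32 sync_cause)
{
	/* Clear the latched sync-interrupt causes... */
	REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR(ah), sync_cause);
	/* ...and force the posted write out by reading the register back. */
	(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR(ah));
}
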
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 8d7efd80d97a..2b9c07961cd7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -458,7 +458,7 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
} else
return;
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), AR_GPIO_JTAG_DISABLE);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_DS_JTAG_DISABLE, 1);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_WLAN_UART_INTF_EN, 0);
@@ -466,12 +466,12 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
- REG_WRITE(ah, AR_OBS, 0x4b);
+ REG_WRITE(ah, AR_OBS(ah), 0x4b);
REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
- REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS(ah),
AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index b2d53b6c0ffd..83d993fff695 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -201,19 +201,19 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
ar9003_paprd_enable(ah, false);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
if (AR_SREV_9485(ah)) {
@@ -229,15 +229,15 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
}
}
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2(ah),
AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, val);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
if (AR_SREV_9485(ah) ||
@@ -246,10 +246,10 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_SREV_9550(ah) ||
AR_SREV_9330(ah) ||
AR_SREV_9340(ah))
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -3);
else
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
val = -10;
@@ -257,16 +257,16 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
if (IS_CHAN_2GHZ(ah->curchan) && !AR_SREV_9462(ah) && !AR_SREV_9565(ah))
val = -15;
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
val);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4(ah),
AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4(ah),
AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4(ah),
AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES,
100);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0,
@@ -313,7 +313,7 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
int desired_scale, desired_gain = 0;
u32 reg_olpc = 0, reg_cl_gain = 0;
- REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12,
AR_PHY_TPC_12_DESIRED_SCALE_HT40_5);
@@ -812,7 +812,7 @@ void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
ar9003_tx_force_gain(ah, gain_index);
- REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
}
EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
@@ -833,7 +833,7 @@ static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah,
capdiv2g = REG_READ_FIELD(ah, AR_PHY_65NM_CH0_TXRF3,
AR_PHY_65NM_CH0_TXRF3_CAPDIV2G);
- quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP);
if (quick_drop)
@@ -906,7 +906,7 @@ static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_TXRF3,
AR_PHY_65NM_CH0_TXRF3_CAPDIV2G, capdiv2g);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
quick_drop);
@@ -932,14 +932,14 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
data_L = &buf[0];
data_U = &buf[48];
- REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY(ah),
AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
reg = AR_PHY_CHAN_INFO_TAB_0;
for (i = 0; i < 48; i++)
data_L[i] = REG_READ(ah, reg + (i << 2));
- REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY(ah),
AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
for (i = 0; i < 48; i++)
@@ -951,7 +951,7 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
if (ar9003_paprd_retrain_pa_in(ah, caldata, chain))
status = -EINPROGRESS;
- REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
kfree(buf);
@@ -977,14 +977,14 @@ bool ar9003_paprd_is_done(struct ath_hw *ah)
{
int paprd_done, agc2_pwr;
- paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
if (AR_SREV_9485(ah))
goto exit;
if (paprd_done == 0x1) {
- agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
ath_dbg(ath9k_hw_common(ah), CALIBRATE,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 090ff0600c81..a29c11f944a5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -296,7 +296,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
cck_spur_freq = cck_spur_freq & 0xfffff;
- REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
@@ -314,7 +314,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
}
}
- REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
@@ -352,7 +352,7 @@ static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
@@ -360,7 +360,7 @@ static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
@@ -419,7 +419,7 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
@@ -427,7 +427,7 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
@@ -449,7 +449,7 @@ static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
mask_index);
/* A == B */
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A,
mask_index);
@@ -462,7 +462,7 @@ static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe);
/* A == B */
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
}
@@ -710,7 +710,7 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
- if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
ah->enabled_cals |= TX_IQ_CAL;
else
@@ -726,11 +726,11 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
AR_SREV_9561(ah)) {
if (ah->is_clk_25mhz) {
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK(ah), 0x17c << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
} else {
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK(ah), 0x261 << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
}
@@ -1795,7 +1795,7 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
{
- REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, AR_PHY_TEST(ah), PHY_AGC_CLR);
REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
REG_WRITE(ah, AR_CR, AR_CR_RXD);
REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
@@ -1808,7 +1808,7 @@ static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
static void ar9003_hw_tx99_stop(struct ath_hw *ah)
{
- REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_CLR_BIT(ah, AR_PHY_TEST(ah), PHY_AGC_CLR);
REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index ad949eb02f3d..57e2b4c89125 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -454,8 +454,8 @@
#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
#define AR_PHY_MODE (AR_SM_BASE + 0x8)
#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
-#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
-#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24))
+#define AR_PHY_SPUR_MASK_A(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x18 : 0x20))
+#define AR_PHY_SPUR_MASK_B(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x1c : 0x24))
#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
@@ -498,7 +498,7 @@
#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
-#define AR_PHY_TEST (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160))
+#define AR_PHY_TEST(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x15c : 0x160))
#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
#define AR_PHY_TEST_BBB_OBS_SEL_S 19
@@ -509,7 +509,7 @@
#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
#define AR_PHY_TEST_CHAIN_SEL_S 30
-#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x160 : 0x164))
+#define AR_PHY_TEST_CTL_STATUS(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x160 : 0x164))
#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
@@ -524,22 +524,22 @@
#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
-#define AR_PHY_TSTDAC (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168))
+#define AR_PHY_TSTDAC(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x164 : 0x168))
-#define AR_PHY_CHAN_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c))
+#define AR_PHY_CHAN_STATUS(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x168 : 0x16c))
-#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
+#define AR_PHY_CHAN_INFO_MEMORY(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x16c : 0x170))
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
-#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174))
-#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178))
-#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c))
-#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180))
-#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190))
-#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194))
+#define AR_PHY_CHNINFO_NOISEPWR(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x170 : 0x174))
+#define AR_PHY_CHNINFO_GAINDIFF(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x174 : 0x178))
+#define AR_PHY_CHNINFO_FINETIM(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x178 : 0x17c))
+#define AR_PHY_CHAN_INFO_GAIN_0(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x17c : 0x180))
+#define AR_PHY_SCRAMBLER_SEED(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x184 : 0x190))
+#define AR_PHY_CCK_TX_CTRL(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x188 : 0x194))
-#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
+#define AR_PHY_HEAVYCLIP_CTL(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x198 : 0x1a4))
#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
#define AR_PHY_HEAVYCLIP_1 (AR_SM_BASE + 0x19c)
@@ -611,16 +611,16 @@
#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
-#define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_CONTROL_0(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \
0x3c4 : 0x444))
-#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_CONTROL_1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \
0x3c8 : 0x448))
-#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_START(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \
0x3c4 : 0x440))
-#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_STATUS_B0(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \
0x3f0 : 0x48c))
-#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
- (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_ah, _i) (AR_SM_BASE + \
+ (AR_SREV_9485(_ah) ? \
0x3d0 : 0x450) + ((_i) << 2))
#define AR_PHY_RTT_CTRL (AR_SM_BASE + 0x380)
@@ -684,8 +684,8 @@
#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK 0x00000008
#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S 3
-#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
- (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
+#define AR_CH0_TOP(_ah) (AR_SREV_9300(_ah) ? 0x16288 : \
+ (((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x1628c : 0x16280)))
#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
@@ -705,8 +705,8 @@
#define AR_SWITCH_TABLE_ALL (0xfff)
#define AR_SWITCH_TABLE_ALL_S (0)
-#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
- ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
+#define AR_CH0_THERM(_ah) (AR_SREV_9300(_ah) ? 0x16290 :\
+ ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16294 : 0x1628c))
#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
#define AR_CH0_THERM_XPASHORT2GND 0x4
@@ -717,26 +717,26 @@
#define AR_CH0_THERM_SAR_ADC_OUT 0x0000ff00
#define AR_CH0_THERM_SAR_ADC_OUT_S 8
-#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
- (AR_SREV_9462(ah) ? 0x16290 : 0x16284))
+#define AR_CH0_TOP2(_ah) (AR_SREV_9300(_ah) ? 0x1628c : \
+ (AR_SREV_9462(_ah) ? 0x16290 : 0x16284))
#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12)
-#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
- ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
- (AR_SREV_9561(ah) ? 0x162c0 : 0x16290)))
+#define AR_CH0_XTAL(_ah) (AR_SREV_9300(_ah) ? 0x16294 : \
+ ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16298 : \
+ (AR_SREV_9561(_ah) ? 0x162c0 : 0x16290)))
#define AR_CH0_XTAL_CAPINDAC 0x7f000000
#define AR_CH0_XTAL_CAPINDAC_S 24
#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
#define AR_CH0_XTAL_CAPOUTDAC_S 17
-#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : \
- (AR_SREV_9561(ah) ? 0x16cc0 : 0x16c40))
+#define AR_PHY_PMU1(_ah) ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16340 : \
+ (AR_SREV_9561(_ah) ? 0x16cc0 : 0x16c40))
#define AR_PHY_PMU1_PWD 0x1
#define AR_PHY_PMU1_PWD_S 0
-#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : \
- (AR_SREV_9561(ah) ? 0x16cc4 : 0x16c44))
+#define AR_PHY_PMU2(_ah) ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16344 : \
+ (AR_SREV_9561(_ah) ? 0x16cc4 : 0x16c44))
#define AR_PHY_PMU2_PGM 0x00200000
#define AR_PHY_PMU2_PGM_S 21
@@ -974,7 +974,7 @@
#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
-#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(ah) ? \
+#define AR_PHY_PDADC_TAB_1(_ah) (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(_ah) ? \
0x280 : 0x240))
#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240)
#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff
@@ -1152,7 +1152,7 @@
#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
-#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x580 : 0x490))
+#define AR_PHY_PAPRD_TRAINER_CNTL1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x580 : 0x490))
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
@@ -1169,12 +1169,12 @@
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
-#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x584 : 0x494))
+#define AR_PHY_PAPRD_TRAINER_CNTL2(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x584 : 0x494))
#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
-#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x588 : 0x498))
+#define AR_PHY_PAPRD_TRAINER_CNTL3(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x588 : 0x498))
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
@@ -1191,7 +1191,7 @@
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
-#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x58c : 0x49c))
+#define AR_PHY_PAPRD_TRAINER_CNTL4(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x58c : 0x49c))
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
@@ -1211,7 +1211,7 @@
#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF
#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0
-#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x590 : 0x4a0))
+#define AR_PHY_PAPRD_TRAINER_STAT1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x590 : 0x4a0))
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0
@@ -1226,7 +1226,7 @@
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9
-#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x594 : 0x4a4))
+#define AR_PHY_PAPRD_TRAINER_STAT2(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x594 : 0x4a4))
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0
@@ -1235,7 +1235,7 @@
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21
-#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x598 : 0x4a8))
+#define AR_PHY_PAPRD_TRAINER_STAT3(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x598 : 0x4a8))
#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff
#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0
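
The field constants in this header come in MASK/MASK_S pairs (bit mask plus shift), which is the convention REG_RMW_FIELD() token-pastes on. Spelled out with explicit mask and shift arguments, the read-modify-write it performs looks roughly like this (a sketch, not the driver's macro):

static void rmw_field(struct ath_hw *ah, u32 reg, u32 mask, u32 shift, u32 val)
{
	u32 v = REG_READ(ah, reg);

	v &= ~mask;			/* clear the field */
	v |= (val << shift) & mask;	/* insert the new value */
	REG_WRITE(ah, reg, v);
}

/* e.g. REG_RMW_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM, 1) acts like
 * rmw_field(ah, AR_PHY_PMU2(ah), 0x00200000, 21, 1), per the mask and
 * shift defined above. */
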
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
index bea41df9fbd7..ac32afbf2c97 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_wow.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -43,7 +43,7 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
/* set rx disable bit */
REG_WRITE(ah, AR_CR, AR_CR_RXD);
- if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
+ if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE(ah), 0, AH_WAIT_TIMEOUT)) {
ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
return;
@@ -61,7 +61,7 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
if (ath9k_hw_mci_is_enabled(ah))
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
- REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_ON_INT);
}
static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
@@ -226,7 +226,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
*/
/* do we need to check the bit value 0x01000000 (7-10) ?? */
- REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
+ REG_RMW(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_WOW_PME_CLR,
AR_PMCTRL_PWR_STATE_D1D3);
/*
@@ -278,12 +278,12 @@ static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah)
* to the external PCI-E reset. We also need to tie
* the PCI-E Phy reset to the PCI-E reset.
*/
- wa_reg = REG_READ(ah, AR_WA);
+ wa_reg = REG_READ(ah, AR_WA(ah));
wa_reg &= ~AR_WA_UNTIE_RESET_EN;
wa_reg |= AR_WA_RESET_EN;
wa_reg |= AR_WA_POR_SHORT;
- REG_WRITE(ah, AR_WA, wa_reg);
+ REG_WRITE(ah, AR_WA(ah), wa_reg);
}
void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
@@ -309,11 +309,11 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
* Set and clear WOW_PME_CLEAR for the chip
* to generate next wow signal.
*/
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_HOST_PME_EN |
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_HOST_PME_EN |
AR_PMCTRL_PWR_PM_CTRL_ENA |
AR_PMCTRL_AUX_PWR_DET |
AR_PMCTRL_WOW_PME_CLR);
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR);
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_WOW_PME_CLR);
/*
* Random Backoff.
@@ -414,7 +414,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
/*
* Set the power states appropriately and enable PME.
*/
- host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL);
+ host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL(ah));
host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 |
AR_PMCTRL_HOST_PME_EN |
AR_PMCTRL_PWR_PM_CTRL_ENA;
@@ -430,7 +430,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL;
}
- REG_WRITE(ah, AR_PCIE_PM_CTRL, host_pm_ctrl);
+ REG_WRITE(ah, AR_PCIE_PM_CTRL(ah), host_pm_ctrl);
/*
* Enable sequence number generation when asleep.
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 618c9df35fc1..9b393a8f7c3a 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -173,16 +173,16 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
/* connect bt_active to baseband */
- REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah),
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah),
AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
/* Set input mux for bt_active to gpio pin */
if (!AR_SREV_SOC(ah))
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah),
AR_GPIO_INPUT_MUX1_BT_ACTIVE,
btcoex_hw->btactive_gpio);
@@ -197,17 +197,17 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
/* btcoex 3-wire */
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah),
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
/* Set input mux for bt_prority_async and
* bt_active_async to GPIO pins */
if (!AR_SREV_SOC(ah)) {
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah),
AR_GPIO_INPUT_MUX1_BT_ACTIVE,
btcoex_hw->btactive_gpio);
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah),
AR_GPIO_INPUT_MUX1_BT_PRIORITY,
btcoex_hw->btpriority_gpio);
}
@@ -404,7 +404,7 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI &&
!AR_SREV_SOC(ah)) {
- REG_RMW(ah, AR_GPIO_PDPU,
+ REG_RMW(ah, AR_GPIO_PDPU(ah),
(0x2 << (btcoex_hw->btactive_gpio * 2)),
(0x3 << (btcoex_hw->btactive_gpio * 2)));
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 0422a33395b7..fb270df75eb2 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -231,17 +231,17 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
if (ah->caldata)
set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_ENABLE_NF);
if (update)
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
else
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
}
int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -251,7 +251,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
s16 default_nf = ath9k_hw_get_nf_limits(ah, chan)->nominal;
- u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL(ah));
if (ah->caldata)
h = ah->caldata->nfCalHist;
@@ -286,7 +286,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* (or after end rx/tx frame if ongoing)
*/
if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) {
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
REG_RMW_BUFFER_FLUSH(ah);
ENABLE_REG_RMW_BUFFER(ah);
}
@@ -295,11 +295,11 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* Load software filtered NF value into baseband internal minCCApwr
* variable.
*/
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_ENABLE_NF);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
REG_RMW_BUFFER_FLUSH(ah);
/*
@@ -309,7 +309,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* (11n max length 22.1 msec)
*/
for (j = 0; j < 22200; j++) {
- if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) &
AR_PHY_AGC_CONTROL_NF) == 0)
break;
udelay(10);
@@ -321,12 +321,12 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) {
ENABLE_REG_RMW_BUFFER(ah);
if (bb_agc_ctl & AR_PHY_AGC_CONTROL_ENABLE_NF)
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_ENABLE_NF);
if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NO_UPDATE_NF)
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
REG_RMW_BUFFER_FLUSH(ah);
}
@@ -342,7 +342,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (j == 22200) {
ath_dbg(common, ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
- REG_READ(ah, AR_PHY_AGC_CONTROL));
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)));
return -ETIMEDOUT;
}
@@ -410,7 +410,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
struct ieee80211_channel *c = chan->chan;
struct ath9k_hw_cal_data *caldata = ah->caldata;
- if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
+ if (REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF) {
ath_dbg(common, CALIBRATE,
"NF did not complete in calibration window\n");
return false;
@@ -478,7 +478,7 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
*/
if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
ath9k_hw_start_nfcal(ah, true);
- else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
+ else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF))
ath9k_hw_getnf(ah, ah->curchan);
set_bit(NFCAL_INTF, &caldata->cal_flags);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 31390af6c33e..f1cde43fcb55 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -68,8 +68,8 @@
#define AR5416_EEPROM_OFFSET 0x2000
#define AR5416_EEPROM_MAX 0xae0
-#define AR5416_EEPROM_START_ADDR \
- (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
+#define AR5416_EEPROM_START_ADDR(_ah) \
+ (AR_SREV_9100(_ah)) ? 0x1fff1000 : 0x503f1200
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
@@ -110,10 +110,10 @@
#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
-#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
- ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
-#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \
- ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+#define OLC_FOR_AR9280_20_LATER(_ah) (AR_SREV_9280_20_OR_LATER(_ah) && \
+ _ah->eep_ops->get_eeprom(_ah, EEP_OL_PWRCTRL))
+#define OLC_FOR_AR9287_10_LATER(_ah) (AR_SREV_9287_11_OR_LATER(_ah) && \
+ _ah->eep_ops->get_eeprom(_ah, EEP_OL_PWRCTRL))
#define EEP_RFSILENT_ENABLED 0x0001
#define EEP_RFSILENT_ENABLED_S 0
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 9729a69d3e2e..7685f8ab371e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -800,7 +800,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
numPiers = AR5416_NUM_5G_CAL_PIERS;
}
- if (OLC_FOR_AR9280_20_LATER && IS_CHAN_2GHZ(chan)) {
+ if (OLC_FOR_AR9280_20_LATER(ah) && IS_CHAN_2GHZ(chan)) {
pRawDataset = pEepData->calPierData2G[0];
ah->initPDADC = ((struct calDataPerFreqOpLoop *)
pRawDataset)->vpdPdg[0][0];
@@ -841,7 +841,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
pRawDataset = pEepData->calPierData5G[i];
- if (OLC_FOR_AR9280_20_LATER) {
+ if (OLC_FOR_AR9280_20_LATER(ah)) {
u8 pcdacIdx;
u8 txPower;
@@ -869,7 +869,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- if (OLC_FOR_AR9280_20_LATER) {
+ if (OLC_FOR_AR9280_20_LATER(ah)) {
REG_WRITE(ah,
AR_PHY_TPCRG5 + regChainOffset,
SM(0x6,
@@ -1203,7 +1203,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rate24mb], 0));
if (IS_CHAN_2GHZ(chan)) {
- if (OLC_FOR_AR9280_20_LATER) {
+ if (OLC_FOR_AR9280_20_LATER(ah)) {
cck_ofdm_delta = 2;
REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
ATH9K_POW_SM(RT_AR_DELTA(rate2s), 24)
@@ -1259,7 +1259,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
ht40PowerIncForPdadc, 8)
| ATH9K_POW_SM(ratesArray[rateHt40_4] +
ht40PowerIncForPdadc, 0));
- if (OLC_FOR_AR9280_20_LATER) {
+ if (OLC_FOR_AR9280_20_LATER(ah)) {
REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
| ATH9K_POW_SM(RT_AR_DELTA(rateExtCck), 16)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 1a2e0c7eeb02..f521dfa2f194 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -561,11 +561,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
memcpy(ptr, skb->data, rx_remain_len);
rx_pkt_len += rx_remain_len;
- hif_dev->rx_remain_len = 0;
skb_put(remain_skb, rx_pkt_len);
skb_pool[pool_index++] = remain_skb;
-
+ hif_dev->remain_skb = NULL;
+ hif_dev->rx_remain_len = 0;
} else {
index = rx_remain_len;
}
@@ -584,16 +584,21 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
pkt_len = get_unaligned_le16(ptr + index);
pkt_tag = get_unaligned_le16(ptr + index + 2);
+ /* If we see an invalid pkt_tag or pkt_len, the whole input SKB
+ * is considered invalid and dropped, along with any packets
+ * already collected in skb_pool.
+ */
if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
RX_STAT_INC(hif_dev, skb_dropped);
- return;
+ goto invalid_pkt;
}
if (pkt_len > 2 * MAX_RX_BUF_SIZE) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: invalid pkt_len (%x)\n", pkt_len);
RX_STAT_INC(hif_dev, skb_dropped);
- return;
+ goto invalid_pkt;
}
pad_len = 4 - (pkt_len & 0x3);
@@ -605,11 +610,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
if (index > MAX_RX_BUF_SIZE) {
spin_lock(&hif_dev->rx_lock);
- hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
- hif_dev->rx_transfer_len =
- MAX_RX_BUF_SIZE - chk_idx - 4;
- hif_dev->rx_pad_len = pad_len;
-
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
@@ -617,6 +617,12 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
spin_unlock(&hif_dev->rx_lock);
goto err;
}
+
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
skb_reserve(nskb, 32);
RX_STAT_INC(hif_dev, skb_allocated);
@@ -654,6 +660,13 @@ err:
skb_pool[i]->len, USB_WLAN_RX_PIPE);
RX_STAT_INC(hif_dev, skb_completed);
}
+ return;
+invalid_pkt:
+ for (i = 0; i < pool_index; i++) {
+ dev_kfree_skb_any(skb_pool[i]);
+ RX_STAT_INC(hif_dev, skb_dropped);
+ }
+ return;
}
static void ath9k_hif_usb_rx_cb(struct urb *urb)
@@ -1411,8 +1424,6 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
- ath9k_hif_usb_dev_deinit(hif_dev);
- ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv);
ath9k_htc_hw_free(hif_dev->htc_handle);
}
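
The hif_usb.c reordering above applies a general rule: never publish partial state before a fallible step. Previously rx_remain_len, rx_transfer_len and rx_pad_len were recorded before the __dev_alloc_skb() call, so an allocation failure left stale bookkeeping behind for the next URB; now they are committed only after the allocation succeeds, and the new invalid_pkt path drops everything already collected in skb_pool. Distilled into a toy helper (the struct and names are illustrative, not driver symbols):

struct rx_state {
	void *partial_buf;
	size_t remain_len;
};

static int begin_partial_packet(struct rx_state *st, size_t len)
{
	void *buf = kmalloc(len, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;		/* failure leaves *st untouched */

	/* Commit the bookkeeping only once every fallible step passed. */
	st->partial_buf = buf;
	st->remain_len = len;
	return 0;
}
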
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 07ac88fb1c57..dae3d9c7b640 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -523,13 +523,13 @@ static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
(void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
if (!ath9k_hw_wait(ah,
- AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA(ah),
AR_EEPROM_STATUS_DATA_BUSY |
AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
AH_WAIT_TIMEOUT))
return false;
- *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA(ah)),
AR_EEPROM_STATUS_DATA_VAL);
return true;
@@ -988,6 +988,8 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
ath9k_deinit_device(htc_handle->drv_priv);
ath9k_stop_wmi(htc_handle->drv_priv);
+ ath9k_hif_usb_dealloc_urbs((struct hif_device_usb *)htc_handle->hif_dev);
+ ath9k_destroy_wmi(htc_handle->drv_priv);
ieee80211_free_hw(htc_handle->drv_priv->hw);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index ca05b07a45e6..fe62ff668f75 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -391,7 +391,7 @@ static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
* HTC Messages are handled directly here and the obtained SKB
* is freed.
*
- * Service messages (Data, WMI) passed to the corresponding
+ * Service messages (Data, WMI) are passed to the corresponding
* endpoint RX handlers, which have to free the SKB.
*/
void ath9k_htc_rx_msg(struct htc_target *htc_handle,
@@ -478,6 +478,8 @@ invalid:
if (endpoint->ep_callbacks.rx)
endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
skb, epid);
+ else
+ goto invalid;
}
}
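
The two added lines close an skb leak: the comment above says endpoint RX handlers own and free the SKB, but when no handler was registered the buffer was simply abandoned. Routing that case to the invalid: label (which frees the skb) restores the invariant that every path either hands the buffer off or frees it. As a standalone sketch of that dispatch-or-free rule (struct ep_callbacks is a stand-in mirroring the call shape above, not the driver's htc type):

struct ep_callbacks {
	void *priv;
	void (*rx)(void *priv, struct sk_buff *skb, int epid);
};

static void deliver_or_free(struct ep_callbacks *cb, struct sk_buff *skb,
			    int epid)
{
	if (cb->rx)
		cb->rx(cb->priv, skb, epid);	/* ownership transfers; handler frees */
	else
		kfree_skb(skb);			/* no consumer registered: free here */
}
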
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 172081ffe477..5982e0db45f9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -266,7 +266,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
case AR9300_DEVID_AR9330:
ah->hw_version.macVersion = AR_SREV_VERSION_9330;
if (!ah->get_mac_revision) {
- val = REG_READ(ah, AR_SREV);
+ val = REG_READ(ah, AR_SREV(ah));
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
}
return true;
@@ -284,7 +284,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
return true;
}
- srev = REG_READ(ah, AR_SREV);
+ srev = REG_READ(ah, AR_SREV(ah));
if (srev == -1) {
ath_err(ath9k_hw_common(ah),
@@ -292,7 +292,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
return false;
}
- val = srev & AR_SREV_ID;
+ val = srev & AR_SREV_ID(ah);
if (val == 0xFF) {
val = srev;
@@ -601,12 +601,12 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
/*
- * Read back AR_WA into a permanent copy and set bits 14 and 17.
+ * Read back AR_WA(ah) into a permanent copy and set bits 14 and 17.
* We need to do this to avoid RMW of this register. We cannot
* read the reg when chip is asleep.
*/
if (AR_SREV_9300_20_OR_LATER(ah)) {
- ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal = REG_READ(ah, AR_WA(ah));
ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
AR_WA_ASPM_TIMER_BASED_DISABLE);
}
@@ -618,7 +618,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (AR_SREV_9565(ah)) {
ah->WARegVal |= AR_WA_BIT22;
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
}
ath9k_hw_init_defaults(ah);
@@ -814,7 +814,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
AR_CH0_DPLL3_PHASE_SHIFT, 0x1);
- REG_WRITE(ah, AR_RTC_PLL_CONTROL,
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah),
pll | AR_RTC_9300_PLL_BYPASS);
udelay(1000);
@@ -832,7 +832,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
AR_SREV_9561(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
- REG_WRITE(ah, AR_RTC_PLL_CONTROL,
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah),
pll | AR_RTC_9300_SOC_PLL_BYPASS);
udelay(1000);
@@ -911,7 +911,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
if (AR_SREV_9565(ah))
pll |= 0x40000;
- REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah), pll);
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
AR_SREV_9550(ah))
@@ -925,7 +925,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(RTC_PLL_SETTLE_DELAY);
- REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK(ah), AR_RTC_FORCE_DERIVED_CLK);
}
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
@@ -977,7 +977,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
if (ah->msi_enabled) {
- ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ ah->msi_reg = REG_READ(ah, AR_PCIE_MSI(ah));
ah->msi_reg |= AR_PCIE_MSI_HW_DBI_WR_EN;
ah->msi_reg &= AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
REG_WRITE(ah, AR_INTCFG, msi_cfg);
@@ -987,18 +987,18 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
}
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
- REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE(ah), 0xFFFFFFFF);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), sync_default);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK(ah), 0);
}
REGWRITE_BUFFER_FLUSH(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
- REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
- REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK(ah), 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE(ah), 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK(ah), 0);
}
}
@@ -1341,7 +1341,7 @@ static bool ath9k_hw_ar9330_reset_war(struct ath_hw *ah, int type)
return false;
}
- REG_WRITE(ah, AR_RTC_RESET, 1);
+ REG_WRITE(ah, AR_RTC_RESET(ah), 1);
}
return true;
@@ -1353,26 +1353,26 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
u32 tmpReg;
if (AR_SREV_9100(ah)) {
- REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
+ REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK(ah),
AR_RTC_DERIVED_CLK_PERIOD, 1);
- (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
+ (void)REG_READ(ah, AR_RTC_DERIVED_CLK(ah));
}
ENABLE_REGWRITE_BUFFER(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
udelay(10);
}
- REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
if (AR_SREV_9100(ah)) {
rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
} else {
- tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah));
if (AR_SREV_9340(ah))
tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT;
else
@@ -1381,7 +1381,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (tmpReg) {
u32 val;
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), 0);
val = AR_RC_HOSTIF;
if (!AR_SREV_9300_20_OR_LATER(ah))
@@ -1414,7 +1414,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REG_CLR_BIT(ah, AR_CFG, AR_CFG_HALT_REQ);
}
- REG_WRITE(ah, AR_RTC_RC, rst_flags);
+ REG_WRITE(ah, AR_RTC_RC(ah), rst_flags);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1425,8 +1425,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
else
udelay(100);
- REG_WRITE(ah, AR_RTC_RC, 0);
- if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
+ REG_WRITE(ah, AR_RTC_RC(ah), 0);
+ if (!ath9k_hw_wait(ah, AR_RTC_RC(ah), AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
return false;
}
@@ -1445,17 +1445,17 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
ENABLE_REGWRITE_BUFFER(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
udelay(10);
}
- REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
- REG_WRITE(ah, AR_RTC_RESET, 0);
+ REG_WRITE(ah, AR_RTC_RESET(ah), 0);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1464,11 +1464,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
- REG_WRITE(ah, AR_RTC_RESET, 1);
+ REG_WRITE(ah, AR_RTC_RESET(ah), 1);
if (!ath9k_hw_wait(ah,
- AR_RTC_STATUS,
- AR_RTC_STATUS_M,
+ AR_RTC_STATUS(ah),
+ AR_RTC_STATUS_M(ah),
AR_RTC_STATUS_ON,
AH_WAIT_TIMEOUT)) {
ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
@@ -1483,11 +1483,11 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
bool ret = false;
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
udelay(10);
}
- REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah),
AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
if (!ah->reset_power_on)
@@ -1521,7 +1521,7 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
else
reset_type = ATH9K_RESET_COLD;
} else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
- (REG_READ(ah, AR_CR) & AR_CR_RXE))
+ (REG_READ(ah, AR_CR) & AR_CR_RXE(ah)))
reset_type = ATH9K_RESET_COLD;
if (!ath9k_hw_set_reset_reg(ah, reset_type))
@@ -1955,7 +1955,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_settsf64(ah, tsf + tsf_offset);
if (AR_SREV_9280_20_OR_LATER(ah))
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), AR_GPIO_JTAG_DISABLE);
if (!AR_SREV_9300_20_OR_LATER(ah))
ar9002_hw_enable_async_fifo(ah);
@@ -2017,7 +2017,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_set_dma(ah);
if (!ath9k_hw_mci_is_enabled(ah))
- REG_WRITE(ah, AR_OBS, 8);
+ REG_WRITE(ah, AR_OBS(ah), 8);
ENABLE_REG_RMW_BUFFER(ah);
if (ah->config.rx_intr_mitigation) {
@@ -2111,7 +2111,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
* Clear the RTC force wake bit to allow the
* mac to go to sleep.
*/
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN);
if (ath9k_hw_mci_is_enabled(ah))
udelay(100);
@@ -2121,13 +2121,13 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
/* Shutdown chip. Active low */
if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
- REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
+ REG_CLR_BIT(ah, AR_RTC_RESET(ah), AR_RTC_RESET_EN);
udelay(2);
}
- /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
+ /* Clear Bit 14 of AR_WA(ah) after putting chip into Full Sleep mode. */
if (AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
@@ -2143,7 +2143,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah)
if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
/* Set WakeOnInterrupt bit; clear ForceWake bit */
- REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah),
AR_RTC_FORCE_WAKE_ON_INT);
} else {
@@ -2163,15 +2163,15 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah)
* Clear the RTC force wake bit to allow the
* mac to go to sleep.
*/
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN);
if (ath9k_hw_mci_is_enabled(ah))
udelay(30);
}
- /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
+ /* Clear Bit 14 of AR_WA(ah) after putting chip into Net Sleep mode. */
if (AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
@@ -2179,14 +2179,14 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
u32 val;
int i;
- /* Set Bits 14 and 17 of AR_WA before powering on the chip. */
+ /* Set Bits 14 and 17 of AR_WA(ah) before powering on the chip. */
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
udelay(10);
}
- if ((REG_READ(ah, AR_RTC_STATUS) &
- AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
+ if ((REG_READ(ah, AR_RTC_STATUS(ah)) &
+ AR_RTC_STATUS_M(ah)) == AR_RTC_STATUS_SHUTDOWN) {
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
return false;
}
@@ -2194,10 +2194,10 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
ath9k_hw_init_pll(ah, NULL);
}
if (AR_SREV_9100(ah))
- REG_SET_BIT(ah, AR_RTC_RESET,
+ REG_SET_BIT(ah, AR_RTC_RESET(ah),
AR_RTC_RESET_EN);
- REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE(ah),
AR_RTC_FORCE_WAKE_EN);
if (AR_SREV_9100(ah))
mdelay(10);
@@ -2205,11 +2205,11 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
udelay(50);
for (i = POWER_UP_TIME / 50; i > 0; i--) {
- val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
+ val = REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah);
if (val == AR_RTC_STATUS_ON)
break;
udelay(50);
- REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE(ah),
AR_RTC_FORCE_WAKE_EN);
}
if (i == 0) {
@@ -2701,16 +2701,16 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
u32 gpio_shift, tmp;
if (gpio > 11)
- addr = AR_GPIO_OUTPUT_MUX3;
+ addr = AR_GPIO_OUTPUT_MUX3(ah);
else if (gpio > 5)
- addr = AR_GPIO_OUTPUT_MUX2;
+ addr = AR_GPIO_OUTPUT_MUX2(ah);
else
- addr = AR_GPIO_OUTPUT_MUX1;
+ addr = AR_GPIO_OUTPUT_MUX1(ah);
gpio_shift = (gpio % 6) * 5;
if (AR_SREV_9280_20_OR_LATER(ah) ||
- (addr != AR_GPIO_OUTPUT_MUX1)) {
+ (addr != AR_GPIO_OUTPUT_MUX1(ah))) {
REG_RMW(ah, addr, (type << gpio_shift),
(0x1f << gpio_shift));
} else {
@@ -2754,13 +2754,13 @@ static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out,
AR7010_GPIO_OE_MASK << gpio_shift);
} else if (AR_SREV_SOC(ah)) {
gpio_set = out ? 1 : 0;
- REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+ REG_RMW(ah, AR_GPIO_OE_OUT(ah), gpio_set << gpio_shift,
gpio_set << gpio_shift);
} else {
gpio_shift = gpio << 1;
gpio_set = out ?
AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO;
- REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+ REG_RMW(ah, AR_GPIO_OE_OUT(ah), gpio_set << gpio_shift,
AR_GPIO_OE_OUT_DRV << gpio_shift);
if (out)
@@ -2813,7 +2813,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
u32 val = 0xffffffff;
#define MS_REG_READ(x, y) \
- (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y))
+ (MS(REG_READ(ah, AR_GPIO_IN_OUT(ah)), x##_GPIO_IN_VAL) & BIT(y))
WARN_ON(gpio >= ah->caps.num_gpio_pins);
@@ -2829,7 +2829,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
else if (AR_DEVID_7010(ah))
val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio);
else if (AR_SREV_9300_20_OR_LATER(ah))
- val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio);
+ val = REG_READ(ah, AR_GPIO_IN(ah)) & BIT(gpio);
else
val = MS_REG_READ(AR, gpio);
} else if (BIT(gpio) & ah->caps.gpio_requested) {
@@ -2853,7 +2853,7 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
if (BIT(gpio) & ah->caps.gpio_mask) {
u32 out_addr = AR_DEVID_7010(ah) ?
- AR7010_GPIO_OUT : AR_GPIO_IN_OUT;
+ AR7010_GPIO_OUT : AR_GPIO_IN_OUT(ah);
REG_RMW(ah, out_addr, val << gpio, BIT(gpio));
} else if (BIT(gpio) & ah->caps.gpio_requested) {
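
Every ath9k hunk above makes the same mechanical change: register-offset macros such as AR_WA, which expanded to an expression referencing a variable literally named `ah`, become function-like macros taking an explicit `_ah` argument. A minimal standalone sketch of the hazard being removed (names here are illustrative, not ath9k's):

    #include <stdio.h>

    struct hw { int is_9340; };

    /* old style: only compiles where a variable named `ah` happens
     * to be in scope, and silently binds to whatever that is */
    #define REG_FOO_OLD (ah->is_9340 ? 0x40c4 : 0x4004)

    /* new style: the dependency is visible at every call site */
    #define REG_FOO(_ah) ((_ah)->is_9340 ? 0x40c4 : 0x4004)

    static unsigned int reg_addr(struct hw *dev)
    {
            /* REG_FOO_OLD would fail to compile here: no `ah` */
            return REG_FOO(dev);
    }

    int main(void)
    {
            struct hw hw = { .is_9340 = 1 };

            printf("0x%x\n", reg_addr(&hw));
            return 0;
    }
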
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 58d02c19b6d0..b070403e083f 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -707,7 +707,7 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
/* Wait for rx enable bit to go low */
for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
- if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
+ if ((REG_READ(ah, AR_CR) & AR_CR_RXE(ah)) == 0)
break;
if (!AR_SREV_9300_20_OR_LATER(ah)) {
@@ -762,14 +762,14 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
if (AR_SREV_9100(ah))
return true;
- host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah));
if (((host_isr & AR_INTR_MAC_IRQ) ||
(host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
(host_isr != AR_INTR_SPURIOUS))
return true;
- host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah));
if ((host_isr & AR_INTR_SYNC_DEFAULT)
&& (host_isr != AR_INTR_SPURIOUS))
return true;
@@ -786,11 +786,11 @@ void ath9k_hw_kill_interrupts(struct ath_hw *ah)
REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
(void) REG_READ(ah, AR_IER);
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE(ah), 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE(ah));
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE(ah));
}
}
EXPORT_SYMBOL(ath9k_hw_kill_interrupts);
@@ -824,11 +824,11 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
ath_dbg(common, INTERRUPT, "enable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE(ah), async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK(ah), async_mask);
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
- REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), sync_default);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK(ah), sync_default);
}
ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
@@ -841,26 +841,26 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"Enabling MSI, msi_mask=0x%X\n", ah->msi_mask);
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask);
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), ah->msi_mask);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK(ah), ah->msi_mask);
ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n",
- REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE),
- REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK));
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah)),
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK(ah)));
if (ah->msi_reg == 0)
- ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ ah->msi_reg = REG_READ(ah, AR_PCIE_MSI(ah));
ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n",
- AR_PCIE_MSI, ah->msi_reg);
+ AR_PCIE_MSI(ah), ah->msi_reg);
i = 0;
do {
- REG_WRITE(ah, AR_PCIE_MSI,
+ REG_WRITE(ah, AR_PCIE_MSI(ah),
(ah->msi_reg | AR_PCIE_MSI_ENABLE)
& msi_pend_addr_mask);
- _msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ _msi_reg = REG_READ(ah, AR_PCIE_MSI(ah));
i++;
} while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200);
@@ -918,8 +918,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
if (ah->msi_enabled) {
ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n");
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
- REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), 0);
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah));
}
ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index a074e23013c5..a09f9d223f3d 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -804,14 +804,14 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
if (!ath9k_hw_wait(ah,
- AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA(ah),
AR_EEPROM_STATUS_DATA_BUSY |
AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
AH_WAIT_TIMEOUT)) {
return false;
}
- *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
+ *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA(ah)),
AR_EEPROM_STATUS_DATA_VAL);
return true;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8983ea6fc727..9f5b8a538071 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -20,7 +20,7 @@
#include "../reg.h"
#define AR_CR 0x0008
-#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004)
+#define AR_CR_RXE(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x0000000c : 0x00000004)
#define AR_CR_RXD 0x00000020
#define AR_CR_SWI 0x00000040
@@ -352,10 +352,10 @@
#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
#define AR_ISR_S1_QCU_TXEOL_S 16
-#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc)
-#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0)
-#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4)
-#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8)
+#define AR_ISR_S2_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d0 : 0x00cc)
+#define AR_ISR_S3_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d4 : 0x00d0)
+#define AR_ISR_S4_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d8 : 0x00d4)
+#define AR_ISR_S5_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00dc : 0x00d8)
#define AR_DMADBG_0 0x00e0
#define AR_DMADBG_1 0x00e4
#define AR_DMADBG_2 0x00e8
@@ -699,7 +699,7 @@
#define AR_RC_APB 0x00000002
#define AR_RC_HOSTIF 0x00000100
-#define AR_WA (AR_SREV_9340(ah) ? 0x40c4 : 0x4004)
+#define AR_WA(_ah) (AR_SREV_9340(_ah) ? 0x40c4 : 0x4004)
#define AR_WA_BIT6 (1 << 6)
#define AR_WA_BIT7 (1 << 7)
#define AR_WA_BIT23 (1 << 23)
@@ -721,7 +721,7 @@
#define AR_PM_STATE 0x4008
#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
-#define AR_HOST_TIMEOUT (AR_SREV_9340(ah) ? 0x4008 : 0x4018)
+#define AR_HOST_TIMEOUT(_ah) (AR_SREV_9340(_ah) ? 0x4008 : 0x4018)
#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF
#define AR_HOST_TIMEOUT_APB_CNTR_S 0
#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000
@@ -750,12 +750,12 @@
#define EEPROM_PROTECT_RP_1024_2047 0x4000
#define EEPROM_PROTECT_WP_1024_2047 0x8000
-#define AR_SREV \
- ((AR_SREV_9100(ah)) ? 0x0600 : (AR_SREV_9340(ah) \
+#define AR_SREV(_ah) \
+ ((AR_SREV_9100(_ah)) ? 0x0600 : (AR_SREV_9340(_ah) \
? 0x400c : 0x4020))
-#define AR_SREV_ID \
- ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF)
+#define AR_SREV_ID(_ah) \
+ ((AR_SREV_9100(_ah)) ? 0x00000FFF : 0x000000FF)
#define AR_SREV_VERSION 0x000000F0
#define AR_SREV_VERSION_S 4
#define AR_SREV_REVISION 0x00000007
@@ -1038,11 +1038,11 @@ enum ath_usb_dev {
#define AR_INTR_SPURIOUS 0xFFFFFFFF
-#define AR_INTR_SYNC_CAUSE (AR_SREV_9340(ah) ? 0x4010 : 0x4028)
-#define AR_INTR_SYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4010 : 0x4028)
+#define AR_INTR_SYNC_CAUSE(_ah) (AR_SREV_9340(_ah) ? 0x4010 : 0x4028)
+#define AR_INTR_SYNC_CAUSE_CLR(_ah) (AR_SREV_9340(_ah) ? 0x4010 : 0x4028)
-#define AR_INTR_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4014 : 0x402c)
+#define AR_INTR_SYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4014 : 0x402c)
#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000
#define AR_INTR_SYNC_ENABLE_GPIO_S 18
@@ -1084,18 +1084,18 @@ enum {
};
-#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
+#define AR_INTR_ASYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x4018 : 0x4030)
#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_MASK_GPIO_S 18
#define AR_INTR_ASYNC_MASK_MCI 0x00000080
#define AR_INTR_ASYNC_MASK_MCI_S 7
-#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
+#define AR_INTR_SYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x401c : 0x4034)
#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_SYNC_MASK_GPIO_S 18
-#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
-#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_CLR(_ah) (AR_SREV_9340(_ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE(_ah) (AR_SREV_9340(_ah) ? 0x4020 : 0x4038)
#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
AR_INTR_ASYNC_CAUSE_MCI)
@@ -1105,13 +1105,13 @@ enum {
#define AR_INTR_ASYNC_ENABLE_MCI_S 7
-#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
+#define AR_INTR_ASYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4024 : 0x403c)
#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_ENABLE_GPIO_S 18
#define AR_PCIE_SERDES 0x4040
#define AR_PCIE_SERDES2 0x4044
-#define AR_PCIE_PM_CTRL (AR_SREV_9340(ah) ? 0x4004 : 0x4014)
+#define AR_PCIE_PM_CTRL(_ah) (AR_SREV_9340(_ah) ? 0x4004 : 0x4014)
#define AR_PCIE_PM_CTRL_ENA 0x00080000
#define AR_PCIE_PHY_REG3 0x18c08
@@ -1156,7 +1156,7 @@ enum {
#define AR9580_GPIO_MASK 0x0000F4FF
#define AR7010_GPIO_MASK 0x0000FFFF
-#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
+#define AR_GPIO_IN_OUT(_ah) (AR_SREV_9340(_ah) ? 0x4028 : 0x4048)
#define AR_GPIO_IN_VAL 0x0FFFC000
#define AR_GPIO_IN_VAL_S 14
#define AR928X_GPIO_IN_VAL 0x000FFC00
@@ -1170,12 +1170,12 @@ enum {
#define AR7010_GPIO_IN_VAL 0x0000FFFF
#define AR7010_GPIO_IN_VAL_S 0
-#define AR_GPIO_IN (AR_SREV_9340(ah) ? 0x402c : 0x404c)
+#define AR_GPIO_IN(_ah) (AR_SREV_9340(_ah) ? 0x402c : 0x404c)
#define AR9300_GPIO_IN_VAL 0x0001FFFF
#define AR9300_GPIO_IN_VAL_S 0
-#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
+#define AR_GPIO_OE_OUT(_ah) (AR_SREV_9340(_ah) ? 0x4030 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4050 : 0x404c))
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
#define AR_GPIO_OE_OUT_DRV_LOW 0x1
@@ -1197,13 +1197,13 @@ enum {
#define AR7010_GPIO_INT_MASK 0x52024
#define AR7010_GPIO_FUNCTION 0x52028
-#define AR_GPIO_INTR_POL (AR_SREV_9340(ah) ? 0x4038 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050))
+#define AR_GPIO_INTR_POL(_ah) (AR_SREV_9340(_ah) ? 0x4038 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4058 : 0x4050))
#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
#define AR_GPIO_INTR_POL_VAL_S 0
-#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9340(ah) ? 0x403c : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054))
+#define AR_GPIO_INPUT_EN_VAL(_ah) (AR_SREV_9340(_ah) ? 0x403c : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x405c : 0x4054))
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
@@ -1221,15 +1221,15 @@ enum {
#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
#define AR_GPIO_JTAG_DISABLE 0x00020000
-#define AR_GPIO_INPUT_MUX1 (AR_SREV_9340(ah) ? 0x4040 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058))
+#define AR_GPIO_INPUT_MUX1(_ah) (AR_SREV_9340(_ah) ? 0x4040 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4060 : 0x4058))
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
-#define AR_GPIO_INPUT_MUX2 (AR_SREV_9340(ah) ? 0x4044 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c))
+#define AR_GPIO_INPUT_MUX2(_ah) (AR_SREV_9340(_ah) ? 0x4044 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4064 : 0x405c))
#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
#define AR_GPIO_INPUT_MUX2_CLK25_S 0
#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
@@ -1237,18 +1237,18 @@ enum {
#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
-#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9340(ah) ? 0x4048 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060))
-#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9340(ah) ? 0x404c : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064))
-#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9340(ah) ? 0x4050 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068))
+#define AR_GPIO_OUTPUT_MUX1(_ah) (AR_SREV_9340(_ah) ? 0x4048 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4068 : 0x4060))
+#define AR_GPIO_OUTPUT_MUX2(_ah) (AR_SREV_9340(_ah) ? 0x404c : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x406c : 0x4064))
+#define AR_GPIO_OUTPUT_MUX3(_ah) (AR_SREV_9340(_ah) ? 0x4050 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4070 : 0x4068))
-#define AR_INPUT_STATE (AR_SREV_9340(ah) ? 0x4054 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c))
+#define AR_INPUT_STATE(_ah) (AR_SREV_9340(_ah) ? 0x4054 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4074 : 0x406c))
-#define AR_EEPROM_STATUS_DATA (AR_SREV_9340(ah) ? 0x40c8 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c))
+#define AR_EEPROM_STATUS_DATA(_ah) (AR_SREV_9340(_ah) ? 0x40c8 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4084 : 0x407c))
#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
#define AR_EEPROM_STATUS_DATA_VAL_S 0
#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
@@ -1256,13 +1256,13 @@ enum {
#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
-#define AR_OBS (AR_SREV_9340(ah) ? 0x405c : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080))
+#define AR_OBS(_ah) (AR_SREV_9340(_ah) ? 0x405c : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4088 : 0x4080))
-#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
+#define AR_GPIO_PDPU(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4090 : 0x4088)
-#define AR_PCIE_MSI (AR_SREV_9340(ah) ? 0x40d8 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094))
+#define AR_PCIE_MSI(_ah) (AR_SREV_9340(_ah) ? 0x40d8 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x40a4 : 0x4094))
#define AR_PCIE_MSI_ENABLE 0x00000001
#define AR_PCIE_MSI_HW_DBI_WR_EN 0x02000000
#define AR_PCIE_MSI_HW_INT_PENDING_ADDR 0xFFA0C1FF /* bits 8..11: value must be 0x5060 */
@@ -1272,10 +1272,10 @@ enum {
#define AR_INTR_PRIO_RXLP 0x00000002
#define AR_INTR_PRIO_RXHP 0x00000004
-#define AR_INTR_PRIO_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4088 : 0x40c4)
-#define AR_INTR_PRIO_ASYNC_MASK (AR_SREV_9340(ah) ? 0x408c : 0x40c8)
-#define AR_INTR_PRIO_SYNC_MASK (AR_SREV_9340(ah) ? 0x4090 : 0x40cc)
-#define AR_INTR_PRIO_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4094 : 0x40d4)
+#define AR_INTR_PRIO_SYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4088 : 0x40c4)
+#define AR_INTR_PRIO_ASYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x408c : 0x40c8)
+#define AR_INTR_PRIO_SYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x4090 : 0x40cc)
+#define AR_INTR_PRIO_ASYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4094 : 0x40d4)
#define AR_ENT_OTP 0x40d8
#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
#define AR_ENT_OTP_49GHZ_DISABLE 0x00100000
@@ -1339,8 +1339,8 @@ enum {
#define AR_RTC_9160_PLL_CLKSEL_S 14
#define AR_RTC_BASE 0x00020000
-#define AR_RTC_RC \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000)
+#define AR_RTC_RC(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000)
#define AR_RTC_RC_M 0x00000003
#define AR_RTC_RC_MAC_WARM 0x00000001
#define AR_RTC_RC_MAC_COLD 0x00000002
@@ -1357,8 +1357,8 @@ enum {
#define AR_RTC_REG_CONTROL1 0x700c
#define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001
-#define AR_RTC_PLL_CONTROL \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
+#define AR_RTC_PLL_CONTROL(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
#define AR_RTC_PLL_CONTROL2 0x703c
@@ -1378,15 +1378,15 @@ enum {
#define PLL4_MEAS_DONE 0x8
#define SQSUM_DVC_MASK 0x007ffff8
-#define AR_RTC_RESET \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
+#define AR_RTC_RESET(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
#define AR_RTC_RESET_EN (0x00000001)
-#define AR_RTC_STATUS \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
+#define AR_RTC_STATUS(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
-#define AR_RTC_STATUS_M \
- ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f)
+#define AR_RTC_STATUS_M(_ah) \
+ ((AR_SREV_9100(_ah)) ? 0x0000003f : 0x0000000f)
#define AR_RTC_PM_STATUS_M 0x0000000f
@@ -1395,32 +1395,32 @@ enum {
#define AR_RTC_STATUS_SLEEP 0x00000004
#define AR_RTC_STATUS_WAKEUP 0x00000008
-#define AR_RTC_SLEEP_CLK \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
+#define AR_RTC_SLEEP_CLK(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
#define AR_RTC_FORCE_DERIVED_CLK 0x2
#define AR_RTC_FORCE_SWREG_PRD 0x00000004
-#define AR_RTC_FORCE_WAKE \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
+#define AR_RTC_FORCE_WAKE(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
#define AR_RTC_FORCE_WAKE_EN 0x00000001
#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002
-#define AR_RTC_INTR_CAUSE \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
+#define AR_RTC_INTR_CAUSE(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
-#define AR_RTC_INTR_ENABLE \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
+#define AR_RTC_INTR_ENABLE(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
-#define AR_RTC_INTR_MASK \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
+#define AR_RTC_INTR_MASK(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
#define AR_RTC_KEEP_AWAKE 0x7034
/* RTC_DERIVED_* - only for AR9100 */
-#define AR_RTC_DERIVED_CLK \
- (AR_SREV_9100(ah) ? (AR_RTC_BASE + 0x0038) : 0x7038)
+#define AR_RTC_DERIVED_CLK(_ah) \
+ (AR_SREV_9100(_ah) ? (AR_RTC_BASE + 0x0038) : 0x7038)
#define AR_RTC_DERIVED_CLK_PERIOD 0x0000fffe
#define AR_RTC_DERIVED_CLK_PERIOD_S 1
@@ -2114,7 +2114,7 @@ enum {
#define AR9300_SM_BASE 0xa200
#define AR9002_PHY_AGC_CONTROL 0x9860
#define AR9003_PHY_AGC_CONTROL AR9300_SM_BASE + 0xc4
-#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
+#define AR_PHY_AGC_CONTROL(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
#define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */
#define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */
#define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index 58c0ab01771b..e1def77591c6 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -29,9 +29,9 @@ static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
ath9k_ps_wakeup(sc);
- REG_RMW_FIELD(ah, AR_PHY_TEST, AR_PHY_TEST_BBB_OBS_SEL, 1);
- REG_CLR_BIT(ah, AR_PHY_TEST, AR_PHY_TEST_RX_OBS_SEL_BIT5);
- REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, AR_PHY_TEST_CTL_RX_OBS_SEL, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TEST(ah), AR_PHY_TEST_BBB_OBS_SEL, 1);
+ REG_CLR_BIT(ah, AR_PHY_TEST(ah), AR_PHY_TEST_RX_OBS_SEL_BIT5);
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS(ah), AR_PHY_TEST_CTL_RX_OBS_SEL, 0);
for (i = 0, j = 0; i < buf_size; i++) {
v1 = REG_READ(ah, AR_PHY_TST_ADC) & 0xffff;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index f315c54bd3ac..19345b8f7bfd 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -341,6 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
if (!time_left) {
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
+ wmi->last_seq_id = 0;
mutex_unlock(&wmi->op_mutex);
return -ETIMEDOUT;
}
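
The one-line wmi.c change resets the recorded sequence id when a command times out; otherwise a reply arriving late could still match `last_seq_id` and be delivered against a request whose buffers are already gone. A minimal sketch of the pattern (hypothetical names, not the ath9k_htc structures):

    struct cmd_ctx {
            unsigned int last_seq_id;       /* 0 means nothing pending */
    };

    static void cmd_timed_out(struct cmd_ctx *ctx)
    {
            /* forget the pending id so a late reply is ignored */
            ctx->last_seq_id = 0;
    }

    static int cmd_rx_reply(struct cmd_ctx *ctx, unsigned int seq_id)
    {
            if (!ctx->last_seq_id || seq_id != ctx->last_seq_id)
                    return -1;              /* stale or unexpected */
            ctx->last_seq_id = 0;
            return 0;                       /* wake the waiter */
    }
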
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 39abb59d8771..ef9a8e0b75e6 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1216,7 +1216,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
txpower -= 2 * power_offset;
}
- if (OLC_FOR_AR9280_20_LATER && is_cck)
+ if (OLC_FOR_AR9280_20_LATER(ah) && is_cck)
txpower -= 2;
txpower = max(txpower, 0);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b115902eb475..a9690ec4c850 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -7928,13 +7928,10 @@ exit:
}
static s32
-cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+brcmf_set_channel(struct brcmf_cfg80211_info *cfg, struct ieee80211_channel *chan)
{
u16 chspec = 0;
int err = 0;
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
if (chan->flags & IEEE80211_CHAN_DISABLED)
@@ -7994,7 +7991,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
/* Setting current channel to the requested channel */
info->filled = 0;
- if (cfg80211_set_channel(wiphy, ndev, info->channel, NL80211_CHAN_HT20))
+ if (brcmf_set_channel(cfg, info->channel))
return 0;
/* Disable mpc */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index e90a30808c22..0e1fa3f0dea2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -383,7 +383,7 @@ struct brcmf_cfg80211_info {
struct brcmf_tlv {
u8 id;
u8 len;
- u8 data[1];
+ u8 data[];
};
static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
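
The brcmf_tlv change converts a one-element array into a C99 flexible array member, the kernel-preferred form for trailing variable-length data: with `data[]` the allocation size is computed with struct_size() and fortified memcpy() sees the real bounds. A sketch of the idiom (illustrative helper, not a brcmfmac API):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct tlv {
            u8 id;
            u8 len;
            u8 data[];              /* flexible array member */
    };

    static struct tlv *tlv_alloc(u8 id, const u8 *payload, u8 len)
    {
            /* struct_size() = sizeof(struct tlv) + len, overflow-checked */
            struct tlv *t = kmalloc(struct_size(t, data, len), GFP_KERNEL);

            if (!t)
                    return NULL;
            t->id = id;
            t->len = len;
            memcpy(t->data, payload, len);
            return t;
    }
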
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 121893bbaa1d..8073f31be27d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -726,17 +726,17 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43664_CHIP_ID:
case BRCM_CC_43666_CHIP_ID:
return 0x200000;
+ case BRCM_CC_4355_CHIP_ID:
case BRCM_CC_4359_CHIP_ID:
return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
case CY_CC_43752_CHIP_ID:
+ case BRCM_CC_4377_CHIP_ID:
return 0x170000;
case BRCM_CC_4378_CHIP_ID:
return 0x352000;
- case CY_CC_89459_CHIP_ID:
- return ((ci->pub.chiprev < 9) ? 0x180000 : 0x160000);
default:
brcmf_err("unknown chip: %s\n", ci->pub.name);
break;
@@ -1426,8 +1426,8 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
addr = CORE_CC_REG(base, sr_control1);
reg = chip->ops->read32(chip->ctx, addr);
return reg != 0;
+ case BRCM_CC_4355_CHIP_ID:
case CY_CC_4373_CHIP_ID:
- case CY_CC_89459_CHIP_ID:
/* explicitly check SR engine enable bit */
addr = CORE_CC_REG(base, sr_control0);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 4a309e5a5707..f235beaddddb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -299,6 +299,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err);
goto done;
}
+ buf[sizeof(buf) - 1] = '\0';
ptr = (char *)buf;
strsep(&ptr, "\n");
@@ -319,15 +320,17 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
if (err) {
brcmf_dbg(TRACE, "retrieving clmver failed, %d\n", err);
} else {
+ buf[sizeof(buf) - 1] = '\0';
clmver = (char *)buf;
- /* store CLM version for adding it to revinfo debugfs file */
- memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
/* Replace all newline/linefeed characters with space
* character
*/
strreplace(clmver, '\n', ' ');
+ /* store CLM version for adding it to revinfo debugfs file */
+ memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
+
brcmf_dbg(INFO, "CLM version = %s\n", clmver);
}
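
Both common.c hunks follow the same rule: a buffer filled by firmware ("ver", "clmver") is not guaranteed to be NUL-terminated, so it must be terminated before any str*() helper touches it, and the newline stripping now happens before the string is cached for debugfs rather than after. Sketch of the sanitizing step (hypothetical helper):

    #include <linux/string.h>
    #include <linux/types.h>

    static void sanitize_fw_string(char *buf, size_t bufsz)
    {
            buf[bufsz - 1] = '\0';          /* firmware may not terminate */
            strreplace(buf, '\n', ' ');     /* single line for logging */
    }
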
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 83ea251cfcec..f599d5f896e8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -336,6 +336,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
bphy_err(drvr, "%s: failed to expand headroom\n",
brcmf_ifname(ifp));
atomic_inc(&drvr->bus_if->stats.pktcow_failed);
+ dev_kfree_skb(skb);
goto done;
}
}
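
The core.c hunk fixes an skb leak: once an .ndo_start_xmit handler is going to return NETDEV_TX_OK, it owns the packet and must free it on every local failure path. Sketch of the shape (HEADROOM is a made-up constant):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    #define HEADROOM 32     /* hypothetical bus header size */

    static netdev_tx_t my_start_xmit(struct sk_buff *skb,
                                     struct net_device *ndev)
    {
            if (skb_headroom(skb) < HEADROOM &&
                pskb_expand_head(skb, HEADROOM, 0, GFP_ATOMIC)) {
                    dev_kfree_skb(skb);     /* was leaked before the fix */
                    ndev->stats.tx_dropped++;
                    return NETDEV_TX_OK;    /* drop, don't requeue */
            }
            /* ... hand the skb to the bus layer ... */
            return NETDEV_TX_OK;
    }
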
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index cec53f934940..45fbcbdc7d9e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -347,8 +347,11 @@ brcmf_msgbuf_alloc_pktid(struct device *dev,
count++;
} while (count < pktids->array_size);
- if (count == pktids->array_size)
+ if (count == pktids->array_size) {
+ dma_unmap_single(dev, *physaddr, skb->len - data_offset,
+ pktids->direction);
return -ENOMEM;
+ }
array[*idx].data_offset = data_offset;
array[*idx].physaddr = *physaddr;
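
The msgbuf.c fix pairs an early dma_map_single() with an unmap on the table-full exit; returning -ENOMEM with the mapping still live leaks a DMA/IOMMU mapping. Sketch of the pairing (have_slot stands in for the pktid-table search):

    #include <linux/dma-mapping.h>

    static int map_and_claim(struct device *dev, void *data, size_t len,
                             bool have_slot, dma_addr_t *pa)
    {
            *pa = dma_map_single(dev, data, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *pa))
                    return -EIO;
            if (!have_slot) {
                    /* undo the mapping before bailing out */
                    dma_unmap_single(dev, *pa, len, DMA_TO_DEVICE);
                    return -ENOMEM;
            }
            return 0;
    }
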
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index e975d10e6009..d4492d02e4ea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1466,8 +1466,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
ETH_ALEN);
memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
- memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
- mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
+ memcpy(mgmt_frame->u.body, frame, mgmt_frame_len);
+ mgmt_frame_len += offsetof(struct ieee80211_mgmt, u.body);
freq = ieee80211_channel_to_frequency(ch.control_ch_num,
ch.band == BRCMU_CHAN_BAND_2G ?
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index b67f6d0810b6..a9b9b2dc62d4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -51,18 +51,21 @@ enum brcmf_pcie_state {
BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
+BRCMF_FW_CLM_DEF(4355, "brcmfmac4355-pcie");
+BRCMF_FW_CLM_DEF(4355C1, "brcmfmac4355c1-pcie");
BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-pcie");
BRCMF_FW_CLM_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
-BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
+BRCMF_FW_CLM_DEF(4364B2, "brcmfmac4364b2-pcie");
+BRCMF_FW_CLM_DEF(4364B3, "brcmfmac4364b3-pcie");
BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+BRCMF_FW_CLM_DEF(4377B3, "brcmfmac4377b3-pcie");
BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
-BRCMF_FW_DEF(4355, "brcmfmac89459-pcie");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
@@ -78,13 +81,16 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
+ BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0x000007FF, 4355),
+ BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0xFFFFF800, 4355C1), /* rev ID 12/C2 seen */
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
- BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0x0000000F, 4364B2), /* 3 */
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFF0, 4364B3), /* 4 */
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
@@ -92,8 +98,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
+ BRCMF_FW_ENTRY(BRCM_CC_4377_CHIP_ID, 0xFFFFFFFF, 4377B3), /* revision ID 4 */
BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFFF, 4378B1), /* revision ID 3 */
- BRCMF_FW_ENTRY(CY_CC_89459_CHIP_ID, 0xFFFFFFFF, 4355),
};
#define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
@@ -1994,6 +2000,17 @@ static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
int ret;
switch (devinfo->ci->chip) {
+ case BRCM_CC_4355_CHIP_ID:
+ coreid = BCMA_CORE_CHIPCOMMON;
+ base = 0x8c0;
+ words = 0xb2;
+ break;
+ case BRCM_CC_4364_CHIP_ID:
+ coreid = BCMA_CORE_CHIPCOMMON;
+ base = 0x8c0;
+ words = 0x1a0;
+ break;
+ case BRCM_CC_4377_CHIP_ID:
case BRCM_CC_4378_CHIP_ID:
coreid = BCMA_CORE_GCI;
base = 0x1120;
@@ -2590,6 +2607,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC),
@@ -2600,7 +2618,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, BCA),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA),
@@ -2609,9 +2627,10 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43596_DEVICE_ID, CYW),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4377_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(CY_PCIE_89459_DEVICE_ID, CYW),
- BRCMF_PCIE_DEVICE(CY_PCIE_89459_RAW_DEVICE_ID, CYW),
+
{ /* end: all zeroes */ }
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index f4939cf62767..896615f57952 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -37,6 +37,7 @@
#define BRCM_CC_4350_CHIP_ID 0x4350
#define BRCM_CC_43525_CHIP_ID 43525
#define BRCM_CC_4354_CHIP_ID 0x4354
+#define BRCM_CC_4355_CHIP_ID 0x4355
#define BRCM_CC_4356_CHIP_ID 0x4356
#define BRCM_CC_43566_CHIP_ID 43566
#define BRCM_CC_43567_CHIP_ID 43567
@@ -51,12 +52,12 @@
#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_43666_CHIP_ID 43666
#define BRCM_CC_4371_CHIP_ID 0x4371
+#define BRCM_CC_4377_CHIP_ID 0x4377
#define BRCM_CC_4378_CHIP_ID 0x4378
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
#define CY_CC_43439_CHIP_ID 43439
#define CY_CC_43752_CHIP_ID 43752
-#define CY_CC_89459_CHIP_ID 0x4355
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
@@ -72,6 +73,7 @@
#define BRCM_PCIE_4350_DEVICE_ID 0x43a3
#define BRCM_PCIE_4354_DEVICE_ID 0x43df
#define BRCM_PCIE_4354_RAW_DEVICE_ID 0x4354
+#define BRCM_PCIE_4355_DEVICE_ID 0x43dc
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
@@ -90,9 +92,9 @@
#define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4
#define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5
#define BRCM_PCIE_4371_DEVICE_ID 0x440d
+#define BRCM_PCIE_43596_DEVICE_ID 0x4415
+#define BRCM_PCIE_4377_DEVICE_ID 0x4488
#define BRCM_PCIE_4378_DEVICE_ID 0x4425
-#define CY_PCIE_89459_DEVICE_ID 0x4415
-#define CY_PCIE_89459_RAW_DEVICE_ID 0x4355
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ca802af8cddc..d382f2017325 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3427,7 +3427,7 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
dma_unmap_single(&priv->pci_dev->dev,
rxq->pool[i].dma_addr,
IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb(rxq->pool[i].skb);
+ dev_kfree_skb_irq(rxq->pool[i].skb);
rxq->pool[i].skb = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -11383,9 +11383,14 @@ static int ipw_wdev_init(struct net_device *dev)
set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
/* With that information in place, we can now register the wiphy... */
- if (wiphy_register(wdev->wiphy))
- rc = -EIO;
+ rc = wiphy_register(wdev->wiphy);
+ if (rc)
+ goto out;
+
+ return 0;
out:
+ kfree(priv->ieee->a_band.channels);
+ kfree(priv->ieee->bg_band.channels);
return rc;
}
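
The ipw2200 change does two things: it propagates wiphy_register()'s actual error code instead of flattening it to -EIO, and it frees the band channel arrays on the failure path. Minimal sketch of the resulting shape (my_priv and its fields are illustrative):

    #include <linux/slab.h>
    #include <net/cfg80211.h>

    struct my_priv {
            struct wireless_dev wdev;
            struct ieee80211_channel *a_channels, *bg_channels;
    };

    static int wdev_init(struct my_priv *priv)
    {
            int rc;

            rc = wiphy_register(priv->wdev.wiphy);
            if (rc)
                    goto out;
            return 0;
    out:
            kfree(priv->a_channels);        /* undo earlier allocations */
            kfree(priv->bg_channels);
            return rc;
    }
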
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index d7e99d50b287..9eaf5ec133f9 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3372,10 +3372,12 @@ static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log);
*
*****************************************************************************/
-static void
+static int
il3945_setup_deferred_work(struct il_priv *il)
{
il->workqueue = create_singlethread_workqueue(DRV_NAME);
+ if (!il->workqueue)
+ return -ENOMEM;
init_waitqueue_head(&il->wait_command_queue);
@@ -3392,6 +3394,8 @@ il3945_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet);
+
+ return 0;
}
static void
@@ -3712,7 +3716,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
- il3945_setup_deferred_work(il);
+ err = il3945_setup_deferred_work(il);
+ if (err)
+ goto out_remove_sysfs;
+
il3945_setup_handlers(il);
il_power_initialize(il);
@@ -3724,7 +3731,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = il3945_setup_mac(il);
if (err)
- goto out_remove_sysfs;
+ goto out_destroy_workqueue;
il_dbgfs_register(il, DRV_NAME);
@@ -3733,9 +3740,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-out_remove_sysfs:
+out_destroy_workqueue:
destroy_workqueue(il->workqueue);
il->workqueue = NULL;
+out_remove_sysfs:
sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
out_release_irq:
free_irq(il->pci_dev->irq, il);
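
Both iwlegacy hunks make setup_deferred_work() able to fail (create_singlethread_workqueue() can return NULL) and then re-order the probe error labels so that unwinding happens in exact reverse order of initialization. Sketch of the discipline (the setup_*() helpers are stand-ins):

    static int my_probe(struct il_priv *il)
    {
            int err;

            err = setup_sysfs(il);
            if (err)
                    return err;
            err = setup_workqueue(il);      /* may now fail: -ENOMEM */
            if (err)
                    goto out_remove_sysfs;
            err = setup_mac(il);
            if (err)
                    goto out_destroy_workqueue;
            return 0;

    out_destroy_workqueue:
            destroy_workqueue(il->workqueue);
            il->workqueue = NULL;
    out_remove_sysfs:
            remove_sysfs(il);
            return err;
    }
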
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 721b4042b4bf..0a4aa3c678c1 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4020,7 +4020,7 @@ il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
D_INFO("Initialization Alive received.\n");
- memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ memcpy(&il->card_alive_init, &pkt->u.raw,
sizeof(struct il_init_alive_resp));
pwork = &il->init_alive_start;
} else {
@@ -6211,10 +6211,12 @@ out:
mutex_unlock(&il->mutex);
}
-static void
+static int
il4965_setup_deferred_work(struct il_priv *il)
{
il->workqueue = create_singlethread_workqueue(DRV_NAME);
+ if (!il->workqueue)
+ return -ENOMEM;
init_waitqueue_head(&il->wait_command_queue);
@@ -6233,6 +6235,8 @@ il4965_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet);
+
+ return 0;
}
static void
@@ -6618,7 +6622,10 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_msi;
}
- il4965_setup_deferred_work(il);
+ err = il4965_setup_deferred_work(il);
+ if (err)
+ goto out_free_irq;
+
il4965_setup_handlers(il);
/*********************************************
@@ -6656,6 +6663,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_destroy_workqueue:
destroy_workqueue(il->workqueue);
il->workqueue = NULL;
+out_free_irq:
free_irq(il->pci_dev->irq, il);
out_disable_msi:
pci_disable_msi(il->pci_dev);
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 341c17fe2af4..96002121bb8b 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -5174,7 +5174,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
/* new association get rid of ibss beacon skb */
- dev_kfree_skb(il->beacon_skb);
+ dev_consume_skb_irq(il->beacon_skb);
il->beacon_skb = NULL;
il->timestamp = 0;
@@ -5293,7 +5293,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_irqsave(&il->lock, flags);
- dev_kfree_skb(il->beacon_skb);
+ dev_consume_skb_irq(il->beacon_skb);
il->beacon_skb = skb;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
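
The common.c hunks swap dev_kfree_skb() for dev_consume_skb_irq() at two call sites that run under spin_lock_irqsave(): the plain variant must not be called with hard IRQs disabled, and the "consume" spelling tells drop-monitor tracing this is a normal free, not a drop. Sketch of the locked path (field names follow the surrounding code):

    static void swap_beacon_skb(struct il_priv *il, struct sk_buff *skb)
    {
            unsigned long flags;

            spin_lock_irqsave(&il->lock, flags);
            /* plain dev_kfree_skb() is not IRQ-safe here */
            dev_consume_skb_irq(il->beacon_skb);
            il->beacon_skb = skb;
            spin_unlock_irqrestore(&il->lock, flags);
    }
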
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index ec6198f1b38c..3bdd6774716d 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 72
+#define IWL_22000_UCODE_API_MAX 74
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 0b052c2e563a..28c87a480246 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -153,6 +153,7 @@ enum iwl_legacy_cmds {
/**
* @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
+ * response in &struct iwl_tx_path_flush_cmd_rsp
*/
TXPATH_FLUSH = 0x1e,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 9263413ee06f..8b38a0073077 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -83,7 +83,7 @@ enum iwl_data_path_subcmd_ids {
MONITOR_NOTIF = 0xF4,
/**
- * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
+ * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data or &struct iwl_rx_no_data_ver_3
*/
RX_NO_DATA_NOTIF = 0xF5,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 74a01888715b..1c4e84932058 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -273,7 +273,7 @@ enum iwl_rx_mpdu_mac_info {
};
/* TSF overload low dword */
-enum iwl_rx_phy_data0 {
+enum iwl_rx_phy_he_data0 {
/* info type: HE any */
IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001,
IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002,
@@ -289,6 +289,25 @@ enum iwl_rx_phy_data0 {
IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000,
};
+/* TSF overload low dword */
+enum iwl_rx_phy_eht_data0 {
+ /* info type: EHT any */
+ /* 1 bits reserved */
+ IWL_RX_PHY_DATA0_EHT_UPLINK = BIT(1),
+ IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK = 0x000000fc,
+ IWL_RX_PHY_DATA0_EHT_SPATIAL_REUSE_MASK = 0x00000f00,
+ IWL_RX_PHY_DATA0_EHT_PS160 = BIT(12),
+ IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK = 0x000fe000,
+ IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM = BIT(20),
+ IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK = 0x00600000,
+ IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG = BIT(23),
+ IWL_RX_PHY_DATA0_EHT_BW320_SLOT = BIT(24),
+ IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK = BIT(25),
+ IWL_RX_PHY_DATA0_EHT_PHY_VER = 0x1c000000,
+ /* 2 bits reserved */
+ IWL_RX_PHY_DATA0_EHT_DELIM_EOF = BIT(31),
+};
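+
+The new EHT masks follow the existing convention of this header: single-mask definitions consumed with FIELD_GET()/le32_get_bits(), which derive the shift from the mask, so no companion _S shift constants are needed. A sketch of how such a mask is read (the accessor name is hypothetical):
+
+    #include <linux/bitfield.h>
+    #include <linux/types.h>
+
+    static u32 eht_txop_duration(__le32 phy_data0)
+    {
+            /* mask mirrors IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK */
+            return le32_get_bits(phy_data0, 0x000fe000);
+    }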
+
enum iwl_rx_phy_info_type {
IWL_RX_PHY_INFO_TYPE_NONE = 0,
IWL_RX_PHY_INFO_TYPE_CCK = 1,
@@ -301,19 +320,26 @@ enum iwl_rx_phy_info_type {
IWL_RX_PHY_INFO_TYPE_HE_TB = 8,
IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9,
IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10,
+ IWL_RX_PHY_INFO_TYPE_EHT_MU = 11,
+ IWL_RX_PHY_INFO_TYPE_EHT_TB = 12,
+ IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT = 13,
+ IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT = 14,
};
/* TSF overload high dword */
-enum iwl_rx_phy_data1 {
+enum iwl_rx_phy_common_data1 {
/*
* check this first - if TSF overload is set,
* see &enum iwl_rx_phy_info_type
*/
IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000,
- /* info type: HT/VHT/HE any */
+ /* info type: HT/VHT/HE/EHT any */
IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000,
+};
+/* TSF overload high dword, for HE rates */
+enum iwl_rx_phy_he_data1 {
/* info type: HE MU/MU-EXT */
IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001,
IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e,
@@ -329,8 +355,23 @@ enum iwl_rx_phy_data1 {
IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e,
};
+/* TSF overload high dword, for EHT-MU/TB rates */
+enum iwl_rx_phy_eht_data1 {
+ /* info type: EHT-MU */
+ IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2 = 0x0000001f,
+ /* info type: EHT-TB */
+ IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE = BIT(0),
+ IWL_RX_PHY_DATA1_EHT_TB_LOW_SS = 0x0000001e,
+
+ /* info type: EHT any */
+ /* number of EHT-LTF symbols: 0 = 1 EHT-LTF, 1 = 2 EHT-LTFs,
+ * 2 = 4 EHT-LTFs, 3 = 6 EHT-LTFs, 4 = 8 EHT-LTFs */
+ IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM = 0x000000e0,
+ IWL_RX_PHY_DATA1_EHT_RU_ALLOC = 0x0000ff00,
+};
+
/* goes into Metadata DW 7 */
-enum iwl_rx_phy_data2 {
+enum iwl_rx_phy_he_data2 {
/* info type: HE MU-EXT */
/* the a1/a2/... is what the PHY/firmware calls the values */
IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */
@@ -346,7 +387,7 @@ enum iwl_rx_phy_data2 {
};
/* goes into Metadata DW 8 */
-enum iwl_rx_phy_data3 {
+enum iwl_rx_phy_he_data3 {
/* info type: HE MU-EXT */
IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */
IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */
@@ -355,7 +396,7 @@ enum iwl_rx_phy_data3 {
};
/* goes into Metadata DW 4 high 16 bits */
-enum iwl_rx_phy_data4 {
+enum iwl_rx_phy_he_data4 {
/* info type: HE MU-EXT */
IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001,
IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002,
@@ -366,6 +407,51 @@ enum iwl_rx_phy_data4 {
IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600,
};
+/* goes into Metadata DW 7 */
+enum iwl_rx_phy_eht_data2 {
+ /* info type: EHT-MU-EXT */
+ /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_0_OUT */
+ IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1 = 0x000001ff,
+ IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2 = 0x0003fe00,
+ IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A3 = 0x01fc0000,
+
+ /* info type: EHT-TB-EXT */
+ IWL_RX_PHY_DATA2_EHT_TB_EXT_TRIG_SIGA1 = 0xffffffff,
+};
+
+/* goes into Metadata DW 8 */
+enum iwl_rx_phy_eht_data3 {
+ /* info type: EHT-MU-EXT */
+ /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_1_OUT */
+ IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B1 = 0x000001ff,
+ IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B2 = 0x0003fe00,
+ IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B3 = 0x01fc0000,
+};
+
+/* goes into Metadata DW 4 */
+enum iwl_rx_phy_eht_data4 {
+ /* info type: EHT-MU-EXT */
+ /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_2_OUT */
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_C1 = 0x000001ff,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_C2 = 0x0003fe00,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_C3 = 0x01fc0000,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS = 0x18000000,
+};
+
+/* goes into Metadata DW 16 */
+enum iwl_rx_phy_data5 {
+ /* info type: EHT any */
+ IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP = 0x00000003,
+ /* info type: EHT-TB */
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1 = 0x0000003c,
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2 = 0x000003c0,
+ /* info type: EHT-MU */
+ IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE = 0x0000007c,
+ IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR = 0x0003ff80,
+ IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA = 0x001c0000,
+ IWL_RX_PHY_DATA5_EHT_MU_SPATIAL_CONF_USR_FIELD = 0x0fe00000,
+};
+
/**
* struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
*/
@@ -440,7 +526,9 @@ struct iwl_rx_mpdu_desc_v1 {
/**
* @phy_data1: valid only if
* %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
- * see &enum iwl_rx_phy_data1.
+ * see &enum iwl_rx_phy_common_data1 or
+ * &enum iwl_rx_phy_he_data1 or
+ * &enum iwl_rx_phy_eht_data1.
*/
__le32 phy_data1;
};
@@ -540,11 +628,18 @@ struct iwl_rx_mpdu_desc_v3 {
__le32 phy_data1;
};
};
- /* DW16 & DW17 */
+ /* DW16 */
+ /**
+ * @phy_data5: valid only if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
+ * see &enum iwl_rx_phy_data5.
+ */
+ __le32 phy_data5;
+ /* DW17 */
/**
* @reserved: reserved
*/
- __le32 reserved[2];
+ __le32 reserved[1];
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
RX_MPDU_RES_START_API_S_VER_5 */
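
The descriptor edit carves DW16 out of the old reserved[2] tail for @phy_data5 and shrinks the reserve to one dword, so the v3 descriptor's wire size is unchanged. A sketch of making that invariant explicit (a stand-alone miniature, not the driver's struct):

    #include <linux/build_bug.h>
    #include <linux/types.h>

    struct desc_tail_old { __le32 reserved[2]; } __packed;

    struct desc_tail_new {
            __le32 phy_data5;       /* DW16, formerly reserved */
            __le32 reserved[1];     /* DW17 */
    } __packed;

    /* carving a field out of the reserve must not change the size */
    static_assert(sizeof(struct desc_tail_new) ==
                  sizeof(struct desc_tail_old));
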
@@ -639,12 +734,14 @@ struct iwl_rx_mpdu_desc {
#define RX_NO_DATA_INFO_ERR_UNSUPPORTED_RATE 2
#define RX_NO_DATA_INFO_ERR_NO_DELIM 3
#define RX_NO_DATA_INFO_ERR_BAD_MAC_HDR 4
+#define RX_NO_DATA_INFO_LOW_ENERGY 5
#define RX_NO_DATA_FRAME_TIME_POS 0
#define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS)
#define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000
#define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000
+#define RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK 0x00f00000
/**
* struct iwl_rx_no_data - RX no data descriptor
@@ -654,7 +751,8 @@ struct iwl_rx_mpdu_desc {
* @on_air_rise_time: GP2 during on air rise
* @fr_time: frame time
* @rate: rate/mcs of frame
- * @phy_info: &enum iwl_rx_phy_data0 and &enum iwl_rx_phy_info_type
+ * @phy_info: &enum iwl_rx_phy_he_data0 or &enum iwl_rx_phy_eht_data0
+ * based on &enum iwl_rx_phy_info_type
* @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type.
* for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT
* for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT
@@ -670,6 +768,33 @@ struct iwl_rx_no_data {
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
RX_NO_DATA_NTFY_API_S_VER_2 */
+/**
+ * struct iwl_rx_no_data_ver_3 - RX no data descriptor
+ * @info: 7:0 frame type, 15:8 RX error type
+ * @rssi: 7:0 energy chain-A,
+ * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel
+ * @on_air_rise_time: GP2 during on air rise
+ * @fr_time: frame time
+ * @rate: rate/mcs of frame
+ * @phy_info: &enum iwl_rx_phy_eht_data0 and &enum iwl_rx_phy_info_type
+ * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type.
+ * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT
+ * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT
+ * for EHT: OFDM_RX_VECTOR_USIG_A1_OUT, OFDM_RX_VECTOR_USIG_A2_OUT,
+ * OFDM_RX_VECTOR_EHT_OUT, OFDM_RX_VECTOR_EHT_USER_FIELD_OUT
+ */
+struct iwl_rx_no_data_ver_3 {
+ __le32 info;
+ __le32 rssi;
+ __le32 on_air_rise_time;
+ __le32 fr_time;
+ __le32 rate;
+ __le32 phy_info[2];
+ __le32 rx_vec[4];
+} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
+ RX_NO_DATA_NTFY_API_S_VER_2,
+ RX_NO_DATA_NTFY_API_S_VER_3 */
+
struct iwl_frame_release {
u8 baid;
u8 reserved;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 6d408cd0f517..0b6f694cf30d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021-2022 Intel Corporation
*/
#include "iwl-drv.h"
@@ -246,6 +246,63 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
return data;
}
+static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_data,
+ struct iwl_trans *trans)
+{
+ if (common_step_data->revision != 1)
+ return -EINVAL;
+
+ trans->mbx_addr_0_step = (u32)common_step_data->revision |
+ (u32)common_step_data->cnvi_eq_channel << 8 |
+ (u32)common_step_data->cnvr_eq_channel << 16 |
+ (u32)common_step_data->radio1 << 24;
+ trans->mbx_addr_1_step = (u32)common_step_data->radio2;
+ return 0;
+}
+
+void iwl_uefi_get_step_table(struct iwl_trans *trans)
+{
+ struct uefi_cnv_common_step_data *data;
+ unsigned long package_size;
+ efi_status_t status;
+ int ret;
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return;
+
+ /* TODO: we hardcode a maximum length here, because reading
+ * from the UEFI is not working. To implement this properly,
+ * we have to call efivar_entry_size().
+ */
+ package_size = IWL_HARDCODED_STEP_SIZE;
+
+ data = kmalloc(package_size, GFP_KERNEL);
+ if (!data)
+ return;
+
+ status = efi.get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, data);
+ if (status != EFI_SUCCESS) {
+ IWL_DEBUG_FW(trans,
+ "STEP UEFI variable not found 0x%lx\n", status);
+ goto out_free;
+ }
+
+ IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n",
+ package_size);
+
+ ret = iwl_uefi_step_parse(data, trans);
+ if (ret < 0)
+ IWL_DEBUG_FW(trans, "Cannot read STEP tables. rev is invalid\n");
+
+out_free:
+ kfree(data);
+}
+IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table);
+
#ifdef CONFIG_ACPI
static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
struct iwl_fw_runtime *fwrt)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 09d2a971b3a0..17089bc74cf9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021-2022 Intel Corporation
*/
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
@@ -8,6 +8,7 @@
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
+#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP"
/*
* TODO: we have these hardcoded values that the caller must pass,
@@ -18,6 +19,7 @@
#define IWL_HARDCODED_PNVM_SIZE 4096
#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768
#define IWL_HARDCODED_SGOM_SIZE 339
+#define IWL_HARDCODED_STEP_SIZE 6
struct pnvm_sku_package {
u8 rev;
@@ -32,6 +34,15 @@ struct uefi_cnv_wlan_sgom_data {
u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1];
} __packed;
+struct uefi_cnv_common_step_data {
+ u8 revision;
+ u8 step_mode;
+ u8 cnvi_eq_channel;
+ u8 cnvr_eq_channel;
+ u8 radio1;
+ u8 radio2;
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -40,6 +51,7 @@ struct uefi_cnv_wlan_sgom_data {
#ifdef CONFIG_EFI
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
+void iwl_uefi_get_step_table(struct iwl_trans *trans);
#else /* CONFIG_EFI */
static inline
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
@@ -52,6 +64,11 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline
+void iwl_uefi_get_step_table(struct iwl_trans *trans)
+{
+}
#endif /* CONFIG_EFI */
#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index b84884034c74..3f7278014009 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -141,12 +141,27 @@ struct iwl_prph_scratch_uefi_cfg {
} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
/*
+ * struct iwl_prph_scratch_step_cfg - prph scratch step configuration
+ * @mbx_addr_0: [0:7] revision,
+ * [8:15] cnvi_to_cnvr length,
+ * [16:23] cnvr_to_cnvi channel length,
+ * [24:31] radio1 reserved
+ * @mbx_addr_1: [0:7] radio2 reserved
+ */
+
+struct iwl_prph_scratch_step_cfg {
+ __le32 mbx_addr_0;
+ __le32 mbx_addr_1;
+} __packed;
+
+/*
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
* @control: control flags of FH configurations
* @pnvm_cfg: ror configuration
* @hwm_cfg: hwm configuration
* @rbd_cfg: default RX queue configuration
+ * @step_cfg: step configuration
*/
struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_version version;
@@ -155,6 +170,7 @@ struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_hwm_cfg hwm_cfg;
struct iwl_prph_scratch_rbd_cfg rbd_cfg;
struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
+ struct iwl_prph_scratch_step_cfg step_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
/*
@@ -165,7 +181,7 @@ struct iwl_prph_scratch_ctrl_cfg {
*/
struct iwl_prph_scratch {
struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
- __le32 reserved[12];
+ __le32 reserved[10];
struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */
@@ -265,5 +281,6 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
const void *data, u32 len);
int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
const void *data, u32 len);
-
+int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans,
+ u32 mbx_addr_0_step, u32 mbx_addr_1_step);
#endif /* __iwl_context_info_file_gen3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ab7065c93826..4c977ba9cd85 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -163,7 +163,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const struct iwl_cfg *cfg = drv->trans->cfg;
- char tag[8];
if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
(drv->trans->hw_rev_step != SILICON_B_STEP &&
@@ -174,13 +173,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -EINVAL;
}
- if (first) {
+ if (first)
drv->fw_index = cfg->ucode_api_max;
- sprintf(tag, "%d", drv->fw_index);
- } else {
+ else
drv->fw_index--;
- sprintf(tag, "%d", drv->fw_index);
- }
if (drv->fw_index < cfg->ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
@@ -200,8 +196,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
- cfg->fw_name_pre, tag);
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%d.ucode",
+ cfg->fw_name_pre, drv->fw_index);
IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 479a518c89a1..9aced3e44bc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1001,6 +1001,8 @@ struct iwl_trans_txqs {
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
* @iwl_trans_txqs: transport tx queues data.
+ * @mbx_addr_0_step: step address data 0
+ * @mbx_addr_1_step: step address data 1
*/
struct iwl_trans {
bool csme_own;
@@ -1057,6 +1059,8 @@ struct iwl_trans {
const char *name;
struct iwl_trans_txqs txqs;
+ u32 mbx_addr_0_step;
+ u32 mbx_addr_1_step;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index f9d11935ed97..67dfb77fedf7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -788,7 +788,7 @@ static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
if (mei->amt_enabled)
iwl_mei_set_init_conf(mei);
else if (iwl_mei_cache.ops)
- iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
schedule_work(&mei->netdev_work);
@@ -829,7 +829,7 @@ static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
*/
mei->csme_taking_ownership = true;
- iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
} else {
iwl_mei_send_sap_msg(cldev,
SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
@@ -1774,7 +1774,7 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
if (mei->amt_enabled)
iwl_mei_send_sap_msg(mei->cldev,
SAP_MSG_NOTIF_WIFIDR_UP);
- ops->rfkill(priv, mei->link_prot_state, false);
+ ops->rfkill(priv, mei->link_prot_state);
}
}
ret = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 78d8b37eb71a..3779ac040ba0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -438,13 +438,6 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static inline char *iwl_dbgfs_is_match(char *name, char *buf)
-{
- int len = strlen(name);
-
- return !strncmp(name, buf, len) ? buf + len : NULL;
-}
-
static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1ce9450e5add..85b99316d029 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -324,12 +324,12 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos,
"Use geographic profile %d\n", tbl_idx);
pos += scnprintf(buf + pos, bufsz - pos,
- "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n",
+ "2.4GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n",
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0],
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1],
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max);
pos += scnprintf(buf + pos, bufsz - pos,
- "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n",
+ "5.2GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n",
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0],
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1],
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max);
@@ -1069,7 +1069,7 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "A");
if (mvm->scan_rx_ant & ANT_B)
pos += scnprintf(buf + pos, bufsz - pos, "B");
- pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+ pos += scnprintf(buf + pos, bufsz - pos, " (%x)\n", mvm->scan_rx_ant);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
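The %hhu/%hhx replacements here and in ftm-initiator.c below are behavior-preserving: default argument promotion widens a u8 vararg to int before any printf-style function sees it, so %u and %x print the same value, and kernel documentation discourages the %hh length modifiers for exactly that reason. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned char ant = 0x3;

            /* ant is promoted to int when passed through "...", so both
             * lines print 3; the hh modifier adds nothing here
             */
            printf("%hhu\n", ant);
            printf("%u\n", ant);
            return 0;
    }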
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 6eee3d0b2157..05f3136b1c43 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1206,7 +1206,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
}
IWL_DEBUG_INFO(mvm, "Range response received\n");
- IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n",
+ IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
mvm->ftm_initiator.req->cookie, num_of_aps);
for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
@@ -1298,7 +1298,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
- IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n",
+ IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
fw_ap->rttConfidence);
iwl_mvm_debug_range_resp(mvm, i, &result);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 5273ade71117..565522466eba 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -5116,6 +5116,9 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
case RATE_MCS_CHAN_WIDTH_160:
rinfo->bw = RATE_INFO_BW_160;
break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rinfo->bw = RATE_INFO_BW_320;
+ break;
}
if (format == RATE_MCS_CCK_MSK ||
@@ -5176,6 +5179,10 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
switch (format) {
+ case RATE_MCS_EHT_MSK:
+ /* TODO: GI/LTF/RU. How does the firmware encode them? */
+ rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
+ break;
case RATE_MCS_HE_MSK:
gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ebe6d9c4ccaf..f4e9446d9dc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1128,6 +1128,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_get_acpi_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
+ iwl_uefi_get_step_table(trans);
mvm->init_status = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 97b67270f384..549dbe0be223 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -10,37 +10,11 @@
#include "mvm.h"
#include "fw-api.h"
-static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb)
-{
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- u8 *data = skb->data;
-
- /* Alignment concerns */
- BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
- BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);
- BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4);
- BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4);
-
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
- data += sizeof(struct ieee80211_radiotap_he);
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
- data += sizeof(struct ieee80211_radiotap_he_mu);
- if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG)
- data += sizeof(struct ieee80211_radiotap_lsig);
- if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
- struct ieee80211_vendor_radiotap *radiotap = (void *)data;
-
- data += sizeof(*radiotap) + radiotap->len + radiotap->pad;
- }
-
- return data;
-}
-
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
int queue, struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta;
- struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
struct iwl_mvm_key_pn *ptk_pn;
int res;
@@ -179,6 +153,10 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (unlikely(headlen < hdrlen))
return -EINVAL;
+ /* skb_put_data() appends without moving existing data, and that is the
+ * only way this skb is filled, so data + len is where the header lands
+ */
+ skb_set_mac_header(skb, skb->len);
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
@@ -936,7 +914,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
@@ -1346,6 +1324,10 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
case IWL_RX_PHY_INFO_TYPE_HT:
case IWL_RX_PHY_INFO_TYPE_VHT_SU:
case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
return;
case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
@@ -1690,6 +1672,9 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
case RATE_MCS_CHAN_WIDTH_160:
rx_status->bw = RATE_INFO_BW_160;
break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rx_status->bw = RATE_INFO_BW_320;
+ break;
}
/* must be before L-SIG data */
@@ -1726,6 +1711,9 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
rx_status->he_dcm =
!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
break;
+ case RATE_MCS_EHT_MSK:
+ rx_status->encoding = RX_ENC_EHT;
+ break;
}
switch (format) {
@@ -1736,6 +1724,7 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
break;
case RATE_MCS_VHT_MSK:
case RATE_MCS_HE_MSK:
+ case RATE_MCS_EHT_MSK:
rx_status->nss =
u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
@@ -1747,10 +1736,11 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
rx_status->rate_idx = rate;
- if ((rate < 0 || rate > 0xFF) && net_ratelimit()) {
- IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band);
+ if (rate < 0 || rate > 0xFF) {
rx_status->rate_idx = 0;
+ if (net_ratelimit())
+ IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band);
}
break;
@@ -2065,7 +2055,7 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
{
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_no_data *desc = (void *)pkt->data;
+ struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data;
u32 rssi;
u32 info_type;
struct ieee80211_sta *sta = NULL;
@@ -2101,6 +2091,18 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) >= 3) {
+ if (unlikely(iwl_rx_packet_payload_len(pkt) <
+ sizeof(struct iwl_rx_no_data_ver_3)))
+ /* invalid len for ver 3 */
+ return;
+ } else {
+ if (format == RATE_MCS_EHT_MSK)
+ /* no support for EHT before version 3 API */
+ return;
+ }
+
/* Dont use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/
@@ -2136,6 +2138,16 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);
+ /* No more radiotap info may be added after this point.
+ *
+ * Mark the current end of data as the mac header so upper layers
+ * know where all the radiotap headers end.
+ *
+ * skb_put_data() appends without moving existing data, and that is the
+ * only way this skb is filled, so data + len is where a header would land
+ */
+ skb_set_mac_header(skb, skb->len);
+
/*
* Override the nss from the rx_vec since the rate_n_flags has
* only 2 bits for the nss which gives a max of 4 ss but there
@@ -2152,6 +2164,10 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
break;
+ case RATE_MCS_EHT_MSK:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[2],
+ RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
}
rcu_read_lock();
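Both rxmq.c hunks trade the removed iwl_mvm_skb_get_hdr() walk over radiotap structure sizes for a recorded offset: skb_set_mac_header(skb, skb->len) is called at the moment the 802.11 header (or the end of the radiotap data) is about to be appended, and skb_mac_header() recovers it later in O(1). A kernel-context sketch of the pattern (the demo_* names are hypothetical):

    #include <linux/ieee80211.h>
    #include <linux/skbuff.h>

    static struct ieee80211_hdr *demo_append_hdr(struct sk_buff *skb,
                                                 const void *radiotap,
                                                 unsigned int rt_len,
                                                 const void *hdr,
                                                 unsigned int hdr_len)
    {
            skb_put_data(skb, radiotap, rt_len);    /* radiotap info first */

            /* skb_put_data() appends at data + len, so len is exactly the
             * offset at which the header lands next
             */
            skb_set_mac_header(skb, skb->len);
            skb_put_data(skb, hdr, hdr_len);

            /* later consumers recover the header without re-deriving
             * any radiotap sizes
             */
            return (struct ieee80211_hdr *)skb_mac_header(skb);
    }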
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index fadaa683a416..9813d7fa1800 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1107,8 +1107,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_lock(&mvmsta->lock);
/* nullfunc frames should go to the MGMT queue regardless of QOS,
- * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
- * assignment of MGMT TID
+ * the conditions of !ieee80211_is_qos_nullfunc(fc) and
+ * !ieee80211_is_data_qos(fc) keep the default assignment of MGMT TID
*/
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
tid = ieee80211_get_tid(hdr);
@@ -1133,7 +1133,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
/* update the tx_cmd hdr as it was already copied */
tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
}
- } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
+ } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) &&
+ !ieee80211_is_nullfunc(fc)) {
tid = IWL_TID_NON_QOS;
}
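A kernel-context sketch of the TID selection that the updated comment describes (mgmt_tid stands in for the function's default assignment):

    #include <linux/ieee80211.h>

    static u8 demo_select_tid(struct ieee80211_hdr *hdr, u8 mgmt_tid)
    {
            __le16 fc = hdr->frame_control;

            /* QoS data (but not QoS-null): TID comes from the header */
            if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc))
                    return ieee80211_get_tid(hdr);

            /* plain non-QoS data, now excluding nullfunc */
            if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) &&
                !ieee80211_is_nullfunc(fc))
                    return IWL_TID_NON_QOS;

            /* mgmt and every nullfunc flavor keep the MGMT TID */
            return mgmt_tid;
    }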
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 75fd386b048e..cb60ba40fe97 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -136,6 +136,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
&control_flags);
prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ /* initialize the Step equalizer data */
+ prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
+ prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);
+
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
if (ret)
@@ -343,3 +347,4 @@ int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
return 0;
}
+
diff --git a/drivers/net/wireless/intersil/orinoco/hermes.c b/drivers/net/wireless/intersil/orinoco/hermes.c
index 256946552742..4888286727ff 100644
--- a/drivers/net/wireless/intersil/orinoco/hermes.c
+++ b/drivers/net/wireless/intersil/orinoco/hermes.c
@@ -38,6 +38,7 @@
* under either the MPL or the GPL.
*/
+#include <linux/net.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
index 0aea35c9c11c..4fcca08e50de 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ b/drivers/net/wireless/intersil/orinoco/hw.c
@@ -931,6 +931,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
err = hermes_write_wordrec(hw, USER_BAP,
HERMES_RID_CNFAUTHENTICATION_AGERE,
auth_flag);
+ if (err)
+ return err;
}
err = hermes_write_wordrec(hw, USER_BAP,
HERMES_RID_CNFWEPENABLED_AGERE,
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c57c8903b7c0..4cc4eaf80b14 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2034,7 +2034,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv;
- u32 _pid = READ_ONCE(data->wmediumd);
+ u32 _portid = READ_ONCE(data->wmediumd);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
@@ -2045,8 +2045,8 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, chan);
- if (_pid || hwsim_virtio_enabled)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _pid, chan);
+ if (_portid || hwsim_virtio_enabled)
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, chan);
data->tx_pkts++;
data->tx_bytes += skb->len;
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 3e065cbb0af9..b700c213d10c 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -416,10 +416,20 @@ static int lbs_add_cf_param_tlv(u8 *tlv)
static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
{
- size_t tlv_len;
+ struct mrvl_ie_data *wpatlv = (struct mrvl_ie_data *)tlv;
+ const struct element *wpaie;
+
+ /* Find the first RSN or WPA IE to use */
+ wpaie = cfg80211_find_elem(WLAN_EID_RSN, ie, ie_len);
+ if (!wpaie)
+ wpaie = cfg80211_find_vendor_elem(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ie, ie_len);
+ if (!wpaie || wpaie->datalen > 128)
+ return 0;
/*
- * We need just convert an IE to an TLV. IEs use u8 for the header,
+ * Convert the found IE to a TLV. IEs use u8 for the header,
* u8 type
* u8 len
* u8[] data
@@ -428,14 +438,47 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
* __le16 len
* u8[] data
*/
- *tlv++ = *ie++;
- *tlv++ = 0;
- tlv_len = *tlv++ = *ie++;
- *tlv++ = 0;
- while (tlv_len--)
- *tlv++ = *ie++;
- /* the TLV is two bytes larger than the IE */
- return ie_len + 2;
+ wpatlv->header.type = cpu_to_le16(wpaie->id);
+ wpatlv->header.len = cpu_to_le16(wpaie->datalen);
+ memcpy(wpatlv->data, wpaie->data, wpaie->datalen);
+
+ /* Return the total number of bytes added to the TLV buffer */
+ return sizeof(struct mrvl_ie_header) + wpaie->datalen;
+}
+
+/* Add WPS enrollee TLV */
+#define LBS_MAX_WPS_ENROLLEE_TLV_SIZE \
+ (sizeof(struct mrvl_ie_header) \
+ + 256)
+
+static int lbs_add_wps_enrollee_tlv(u8 *tlv, const u8 *ie, size_t ie_len)
+{
+ struct mrvl_ie_data *wpstlv = (struct mrvl_ie_data *)tlv;
+ const struct element *wpsie;
+
+ /* Look for a WPS IE and add it to the probe request */
+ wpsie = cfg80211_find_vendor_elem(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPS,
+ ie, ie_len);
+ if (!wpsie)
+ return 0;
+
+ /* Convert the WPS IE to a TLV. The IE looks like this:
+ * u8 type (WLAN_EID_VENDOR_SPECIFIC)
+ * u8 len
+ * u8[] data
+ * but the TLV will look like this instead:
+ * __le16 type (TLV_TYPE_WPS_ENROLLEE)
+ * __le16 len
+ * u8[] data
+ */
+ wpstlv->header.type = cpu_to_le16(TLV_TYPE_WPS_ENROLLEE);
+ wpstlv->header.len = cpu_to_le16(wpsie->datalen);
+ memcpy(wpstlv->data, wpsie->data, wpsie->datalen);
+
+ /* Return the total number of bytes added to the TLV buffer */
+ return sizeof(struct mrvl_ie_header) + wpsie->datalen;
}
/*
@@ -664,14 +707,15 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
/*
- * Our scan command contains a TLV, consting of a SSID TLV, a channel list
- * TLV and a rates TLV. Determine the maximum size of them:
+ * Our scan command contains a TLV, consisting of a SSID TLV, a channel list
+ * TLV, a rates TLV, and an optional WPS IE. Determine the maximum size of them:
*/
#define LBS_SCAN_MAX_CMD_SIZE \
(sizeof(struct cmd_ds_802_11_scan) \
+ LBS_MAX_SSID_TLV_SIZE \
+ LBS_MAX_CHANNEL_LIST_TLV_SIZE \
- + LBS_MAX_RATES_TLV_SIZE)
+ + LBS_MAX_RATES_TLV_SIZE \
+ + LBS_MAX_WPS_ENROLLEE_TLV_SIZE)
/*
* Assumes priv->scan_req is initialized and valid
@@ -720,6 +764,11 @@ static void lbs_scan_worker(struct work_struct *work)
/* add rates TLV */
tlv += lbs_add_supported_rates_tlv(tlv);
+ /* add optional WPS enrollee TLV */
+ if (priv->scan_req->ie && priv->scan_req->ie_len)
+ tlv += lbs_add_wps_enrollee_tlv(tlv, priv->scan_req->ie,
+ priv->scan_req->ie_len);
+
if (priv->scan_channel < priv->scan_req->n_channels) {
cancel_delayed_work(&priv->scan_work);
if (netif_running(priv->dev))
@@ -2106,6 +2155,7 @@ int lbs_cfg_register(struct lbs_private *priv)
int ret;
wdev->wiphy->max_scan_ssids = 1;
+ wdev->wiphy->max_scan_ie_len = 256;
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wdev->wiphy->interface_modes =
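Both lbs_add_wpa_tlv() and the new lbs_add_wps_enrollee_tlv() perform the same mechanical conversion from an 802.11 IE header (two u8 fields) to a Marvell firmware TLV header (two __le16 fields). A standalone sketch of that conversion (endianness handling is simplified to a little-endian host; the driver uses cpu_to_le16()):

    #include <stdint.h>
    #include <string.h>

    struct ie_hdr {             /* 802.11 information element */
            uint8_t id;
            uint8_t len;
            uint8_t data[];
    };

    struct tlv_hdr {            /* Marvell firmware TLV */
            uint16_t type;      /* __le16 in the driver */
            uint16_t len;       /* __le16 in the driver */
            uint8_t data[];
    };

    size_t ie_to_tlv(uint8_t *out, const struct ie_hdr *ie, uint16_t type)
    {
            struct tlv_hdr *tlv = (struct tlv_hdr *)out;

            tlv->type = type;   /* e.g. TLV_TYPE_WPS_ENROLLEE */
            tlv->len = ie->len;
            memcpy(tlv->data, ie->data, ie->len);

            /* the TLV is two bytes larger than the IE it wraps */
            return sizeof(*tlv) + ie->len;
    }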
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index cb515c5584c1..74cb7551f427 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -48,7 +48,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv,
/* Free Tx and Rx packets */
spin_lock_irqsave(&priv->driver_lock, flags);
- kfree_skb(priv->currenttxskb);
+ dev_kfree_skb_irq(priv->currenttxskb);
priv->currenttxskb = NULL;
priv->tx_pending_len = 0;
spin_unlock_irqrestore(&priv->driver_lock, flags);
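The kfree_skb() to dev_kfree_skb_irq() conversions in this and the following libertas hunks share one rationale: kfree_skb() must not run with hard IRQs disabled, and these call sites sit inside spin_lock_irqsave() sections. dev_kfree_skb_irq() instead queues the skb and lets softirq context do the real free. A kernel-context sketch (struct demo_priv is hypothetical, standing in for the driver private data):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    struct demo_priv {
            spinlock_t driver_lock;
            struct sk_buff *currenttxskb;
    };

    static void demo_drop_pending_tx(struct demo_priv *priv)
    {
            unsigned long flags;

            spin_lock_irqsave(&priv->driver_lock, flags);
            /* IRQs are off here: defer the free to softirq context */
            dev_kfree_skb_irq(priv->currenttxskb);
            priv->currenttxskb = NULL;
            spin_unlock_irqrestore(&priv->driver_lock, flags);
    }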
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 32fdc4150b60..2240b4db8c03 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -637,7 +637,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN);
memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN,
priv->resp_len[i]);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
lbs_notify_command_response(priv, i);
spin_unlock_irqrestore(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 8f5220cee112..78e8b5aecec0 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -216,7 +216,7 @@ int lbs_stop_iface(struct lbs_private *priv)
spin_lock_irqsave(&priv->driver_lock, flags);
priv->iface_running = false;
- kfree_skb(priv->currenttxskb);
+ dev_kfree_skb_irq(priv->currenttxskb);
priv->currenttxskb = NULL;
priv->tx_pending_len = 0;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -869,6 +869,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
if (ret) {
pr_err("Out of memory allocating event FIFO buffer\n");
+ lbs_free_cmd_buffer(priv);
goto out;
}
diff --git a/drivers/net/wireless/marvell/libertas/types.h b/drivers/net/wireless/marvell/libertas/types.h
index cd4ceb6f885d..bad38d312d0d 100644
--- a/drivers/net/wireless/marvell/libertas/types.h
+++ b/drivers/net/wireless/marvell/libertas/types.h
@@ -93,6 +93,7 @@ union ieee_phy_param_set {
#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_SNR_HIGH (PROPRIETARY_TLV_BASE_ID + 23)
+#define TLV_TYPE_WPS_ENROLLEE (PROPRIETARY_TLV_BASE_ID + 27)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
#define TLV_TYPE_MESH_ID (PROPRIETARY_TLV_BASE_ID + 37)
#define TLV_TYPE_OLD_MESH_ID (PROPRIETARY_TLV_BASE_ID + 291)
@@ -105,23 +106,23 @@ struct mrvl_ie_header {
struct mrvl_ie_data {
struct mrvl_ie_header header;
- u8 Data[1];
+ u8 data[];
} __packed;
struct mrvl_ie_rates_param_set {
struct mrvl_ie_header header;
- u8 rates[1];
+ u8 rates[];
} __packed;
struct mrvl_ie_ssid_param_set {
struct mrvl_ie_header header;
- u8 ssid[1];
+ u8 ssid[];
} __packed;
struct mrvl_ie_wildcard_ssid_param_set {
struct mrvl_ie_header header;
- u8 MaxSsidlength;
- u8 ssid[1];
+ u8 maxssidlength;
+ u8 ssid[];
} __packed;
struct chanscanmode {
@@ -146,7 +147,7 @@ struct chanscanparamset {
struct mrvl_ie_chanlist_param_set {
struct mrvl_ie_header header;
- struct chanscanparamset chanscanparam[1];
+ struct chanscanparamset chanscanparam[];
} __packed;
struct mrvl_ie_cf_param_set {
@@ -164,12 +165,12 @@ struct mrvl_ie_ds_param_set {
struct mrvl_ie_rsn_param_set {
struct mrvl_ie_header header;
- u8 rsnie[1];
+ u8 rsnie[];
} __packed;
struct mrvl_ie_tsf_timestamp {
struct mrvl_ie_header header;
- __le64 tsftable[1];
+ __le64 tsftable[];
} __packed;
/* v9 and later firmware only */
@@ -220,7 +221,7 @@ struct led_pin {
struct mrvl_ie_ledgpio {
struct mrvl_ie_header header;
- struct led_pin ledpin[1];
+ struct led_pin ledpin[];
} __packed;
struct led_bhv {
@@ -233,7 +234,7 @@ struct led_bhv {
struct mrvl_ie_ledbhv {
struct mrvl_ie_header header;
- struct led_bhv ledbhv[1];
+ struct led_bhv ledbhv[];
} __packed;
/*
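The [1]-to-[] conversions in types.h (and in mwifiex/fw.h below) replace one-element trailing arrays with C99 flexible array members: sizeof() then covers only the fixed header, size computations stop subtracting a phantom byte, and compiler array-bounds checking can work. A standalone sketch of the allocation pattern (the kernel itself would use struct_size()):

    #include <stdint.h>
    #include <stdlib.h>

    struct mrvl_hdr {
            uint16_t type;
            uint16_t len;
    };

    struct mrvl_data {
            struct mrvl_hdr header;
            uint8_t data[];     /* flexible array member, no phantom byte */
    };

    int main(void)
    {
            size_t payload = 32;

            /* with u8 data[1], sizeof(*p) would have included one payload
             * byte that every size computation then had to account for
             */
            struct mrvl_data *p = malloc(sizeof(*p) + payload);

            free(p);
            return 0;
    }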
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 75b5319d033f..1750f5e93de2 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -613,7 +613,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
spin_lock_irqsave(&priv->driver_lock, flags);
memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
recvlength - MESSAGE_HEADER_LEN);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
lbtf_cmd_response_rx(priv);
spin_unlock_irqrestore(&priv->driver_lock, flags);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 6a9d7bc1f41e..b0c40a776a2e 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -292,6 +292,6 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
mutex_lock(&priv->wdev.mtx);
- cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0);
mutex_unlock(&priv->wdev.mtx);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 4af57e6d4393..90e401100898 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -878,7 +878,7 @@ mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid)
*/
void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
{
- u8 i;
+ u8 i, j;
u32 tx_win_size;
struct mwifiex_private *priv;
@@ -909,8 +909,8 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
if (tx_win_size != priv->add_ba_param.tx_win_size) {
if (!priv->media_connected)
continue;
- for (i = 0; i < MAX_NUM_TID; i++)
- mwifiex_send_delba_txbastream_tbl(priv, i);
+ for (j = 0; j < MAX_NUM_TID; j++)
+ mwifiex_send_delba_txbastream_tbl(priv, j);
}
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index a04b66284af4..391793a16adc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -33,7 +33,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev.iftype, 0, NULL, NULL);
+ priv->wdev.iftype, 0, NULL, NULL, false);
while (!skb_queue_empty(&list)) {
struct rx_packet_hdr *rx_hdr;
diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig
index 2b4ff2b78a7e..b182f7155d66 100644
--- a/drivers/net/wireless/marvell/mwifiex/Kconfig
+++ b/drivers/net/wireless/marvell/mwifiex/Kconfig
@@ -10,13 +10,14 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8978/SD8987/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
help
This adds support for wireless adapters based on Marvell
- 8786/8787/8797/8887/8897/8977/8987/8997 chipsets with SDIO interface.
+ 8786/8787/8797/8887/8897/8977/8978/8987/8997 chipsets with
+ SDIO interface. SD8978 is also known as NXP IW416.
If you choose to build it as a module, it will be called
mwifiex_sdio.
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index d3339d67e7a0..3756aa247e77 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1607,6 +1607,11 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
api_rev->major_ver,
api_rev->minor_ver);
break;
+ case FW_HOTFIX_VER_ID:
+ mwifiex_dbg(adapter, INFO,
+ "Firmware hotfix version %d\n",
+ api_rev->major_ver);
+ break;
default:
mwifiex_dbg(adapter, FATAL,
"Unknown api_id: %d\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index b4f945a549f7..f2168fac95ed 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -41,7 +41,7 @@ struct mwifiex_fw_header {
struct mwifiex_fw_data {
struct mwifiex_fw_header header;
__le32 seq_num;
- u8 data[1];
+ u8 data[];
} __packed;
struct mwifiex_fw_dump_header {
@@ -641,7 +641,7 @@ struct mwifiex_ie_types_header {
struct mwifiex_ie_types_data {
struct mwifiex_ie_types_header header;
- u8 data[1];
+ u8 data[];
} __packed;
#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
@@ -794,12 +794,12 @@ struct mwifiex_ie_types_chan_band_list_param_set {
struct mwifiex_ie_types_rates_param_set {
struct mwifiex_ie_types_header header;
- u8 rates[1];
+ u8 rates[];
} __packed;
struct mwifiex_ie_types_ssid_param_set {
struct mwifiex_ie_types_header header;
- u8 ssid[1];
+ u8 ssid[];
} __packed;
struct mwifiex_ie_types_num_probes {
@@ -907,7 +907,7 @@ struct mwifiex_ie_types_tdls_idle_timeout {
struct mwifiex_ie_types_rsn_param_set {
struct mwifiex_ie_types_header header;
- u8 rsn_ie[1];
+ u8 rsn_ie[];
} __packed;
#define KEYPARAMSET_FIXED_LEN 6
@@ -1048,6 +1048,7 @@ enum API_VER_ID {
FW_API_VER_ID = 2,
UAP_FW_API_VER_ID = 3,
CHANRPT_API_VER_ID = 4,
+ FW_HOTFIX_VER_ID = 5,
};
struct hw_spec_api_rev {
@@ -1433,7 +1434,7 @@ struct mwifiex_tdls_stop_cs_params {
struct host_cmd_ds_tdls_config {
__le16 tdls_action;
- u8 tdls_data[1];
+ u8 tdls_data[];
} __packed;
struct mwifiex_chan_desc {
@@ -1574,13 +1575,13 @@ struct ie_body {
struct host_cmd_ds_802_11_scan {
u8 bss_mode;
u8 bssid[ETH_ALEN];
- u8 tlv_buffer[1];
+ u8 tlv_buffer[];
} __packed;
struct host_cmd_ds_802_11_scan_rsp {
__le16 bss_descript_size;
u8 number_of_sets;
- u8 bss_desc_and_tlv_buffer[1];
+ u8 bss_desc_and_tlv_buffer[];
} __packed;
struct host_cmd_ds_802_11_scan_ext {
@@ -1596,7 +1597,7 @@ struct mwifiex_ie_types_bss_mode {
struct mwifiex_ie_types_bss_scan_rsp {
struct mwifiex_ie_types_header header;
u8 bssid[ETH_ALEN];
- u8 frame_body[1];
+ u8 frame_body[];
} __packed;
struct mwifiex_ie_types_bss_scan_info {
@@ -1733,7 +1734,7 @@ struct mwifiex_ie_types_local_pwr_constraint {
struct mwifiex_ie_types_wmm_param_set {
struct mwifiex_ie_types_header header;
- u8 wmm_ie[1];
+ u8 wmm_ie[];
} __packed;
struct mwifiex_ie_types_mgmt_frame {
@@ -1959,7 +1960,7 @@ struct host_cmd_tlv_wep_key {
struct mwifiex_ie_types_header header;
u8 key_index;
u8 is_default;
- u8 key[1];
+ u8 key[];
};
struct host_cmd_tlv_auth_type {
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index b8dc3b5c9ad9..c64e24c10ea6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -263,7 +263,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
0x68, 0x69, 0x6a},
};
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd89xx = {
.start_rd_port = 0,
.start_wr_port = 0,
.base_0_reg = 0xF8,
@@ -394,6 +394,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
.can_ext_scan = true,
};
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
+ .firmware_sdiouart = SD8978_SDIOUART_FW_NAME,
+ .reg = &mwifiex_reg_sd89xx,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.firmware = SD8997_DEFAULT_FW_NAME,
.firmware_sdiouart = SD8997_SDIOUART_FW_NAME,
@@ -428,7 +444,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
.firmware = SD8987_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8987,
+ .reg = &mwifiex_reg_sd89xx,
.max_ports = 32,
.mp_agg_pkt_limit = 16,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
@@ -480,8 +496,11 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
};
static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+ { .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" },
+ { .compatible = "marvell,sd8978" },
{ .compatible = "marvell,sd8997" },
+ { .compatible = "nxp,iw416" },
{ }
};
@@ -919,6 +938,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8977},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8978_WLAN),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8978},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8987},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_WLAN),
@@ -3163,6 +3184,7 @@ MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8978_SDIOUART_FW_NAME);
MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_SDIOUART_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 3a24bb48b299..ae94c172310f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -25,6 +25,7 @@
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
+#define SD8978_SDIOUART_FW_NAME "mrvl/sdiouartiw416_combo_v0.bin"
#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
#define SD8997_SDIOUART_FW_NAME "mrvl/sdiouart8997_combo_v4.bin"
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index d7f90a0eb21e..18152c16c36f 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config MT76_CORE
tristate
+ select PAGE_POOL
config MT76_LEDS
bool
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index 11b0b3d62f29..57fbcc83e074 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -112,7 +112,7 @@ mt76_register_debugfs_fops(struct mt76_phy *phy,
if (!dir)
return NULL;
- debugfs_create_u8("led_pin", 0600, dir, &dev->led_pin);
+ debugfs_create_u8("led_pin", 0600, dir, &phy->leds.pin);
debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, dev, fops);
debugfs_create_file_unsafe("napi_threaded", 0600, dir, dev,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 06161815c180..da281cd1d36f 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -165,7 +165,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
local_bh_enable();
}
-static void
+void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
@@ -173,11 +173,12 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
local_bh_disable();
while ((t = __mt76_get_rxwi(dev)) != NULL) {
if (t->ptr)
- skb_free_frag(t->ptr);
+ mt76_put_page_pool_buf(t->ptr, false);
kfree(t);
}
local_bh_enable();
}
+EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
@@ -218,8 +219,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
- if ((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+ if (mt76_queue_is_wed_rx(q)) {
txwi = mt76_get_rxwi(dev);
if (!txwi)
return -ENOMEM;
@@ -401,8 +401,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (info)
*info = le32_to_cpu(desc->info);
- if ((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+ if (mt76_queue_is_wed_rx(q)) {
u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
le32_to_cpu(desc->buf1));
struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
@@ -410,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (!t)
return NULL;
- dma_unmap_single(dev->dma_dev, t->dma_addr,
- SKB_WITH_OVERHEAD(q->buf_size),
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
buf = t->ptr;
t->dma_addr = 0;
@@ -429,9 +428,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
} else {
buf = e->buf;
e->buf = NULL;
- dma_unmap_single(dev->dma_dev, e->dma_addr[0],
- SKB_WITH_OVERHEAD(q->buf_size),
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
}
return buf;
@@ -582,26 +581,12 @@ free_skb:
return ret;
}
-static struct page_frag_cache *
-mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
-{
- struct page_frag_cache *rx_page = &q->rx_page;
-
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- if ((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
- rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
-#endif
- return rx_page;
-}
-
static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct)
{
- struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
int len = SKB_WITH_OVERHEAD(q->buf_size);
- int frames = 0, offset = q->buf_offset;
- dma_addr_t addr;
+ int frames = 0;
if (!q->ndesc)
return 0;
@@ -609,26 +594,25 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
+ enum dma_data_direction dir;
struct mt76_queue_buf qbuf;
- void *buf = NULL;
+ dma_addr_t addr;
+ int offset;
+ void *buf;
- buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
if (!buf)
break;
- addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
- skb_free_frag(buf);
- break;
- }
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
- qbuf.addr = addr + offset;
- qbuf.len = len - offset;
+ qbuf.addr = addr + q->buf_offset;
+ qbuf.len = len - q->buf_offset;
qbuf.skip_unmap = false;
if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
- dma_unmap_single(dev->dma_dev, addr, len,
- DMA_FROM_DEVICE);
- skb_free_frag(buf);
+ mt76_put_page_pool_buf(buf, allow_direct);
break;
}
frames++;
@@ -642,14 +626,17 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
return frames;
}
-static int
-mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
+int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mmio.wed;
int ret, type, ring;
- u8 flags = q->flags;
+ u8 flags;
+ if (!q || !q->ndesc)
+ return -EINVAL;
+
+ flags = q->flags;
if (!mtk_wed_device_active(wed))
q->flags &= ~MT_QFLAG_WED;
@@ -661,7 +648,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
switch (type) {
case MT76_WED_Q_TX:
- ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, false);
+ ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
if (!ret)
q->wed_regs = wed->tx_ring[ring].reg_base;
break;
@@ -669,7 +656,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
q->flags = flags;
ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
@@ -677,7 +664,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
q->wed_regs = wed->txfree_ring.reg_base;
break;
case MT76_WED_Q_RX:
- ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, false);
+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
if (!ret)
q->wed_regs = wed->rx_ring[ring].reg_base;
break;
@@ -690,6 +677,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
return 0;
#endif
}
+EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
@@ -716,7 +704,11 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (!q->entry)
return -ENOMEM;
- ret = mt76_dma_wed_setup(dev, q);
+ ret = mt76_create_page_pool(dev, q);
+ if (ret)
+ return ret;
+
+ ret = mt76_dma_wed_setup(dev, q, false);
if (ret)
return ret;
@@ -729,7 +721,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct page *page;
void *buf;
bool more;
@@ -737,21 +728,21 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
return;
spin_lock_bh(&q->lock);
+
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
if (!buf)
break;
- skb_free_frag(buf);
+ mt76_put_page_pool_buf(buf, false);
} while (1);
- spin_unlock_bh(&q->lock);
- if (!q->rx_page.va)
- return;
+ if (q->rx_head) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+ }
- page = virt_to_page(q->rx_page.va);
- __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
- memset(&q->rx_page, 0, sizeof(q->rx_page));
+ spin_unlock_bh(&q->lock);
}
static void
@@ -767,14 +758,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q);
- mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q);
-
- if (!q->rx_head)
- return;
- dev_kfree_skb(q->rx_head);
- q->rx_head = NULL;
+ /* reset WED rx queues */
+ mt76_dma_wed_setup(dev, q, true);
+ if (q->flags != MT_WED_Q_TXFREE) {
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+ }
}
static void
@@ -791,7 +781,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
} else {
- skb_free_frag(data);
+ mt76_put_page_pool_buf(data, true);
}
if (more)
@@ -864,6 +854,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
goto free_frag;
skb_reserve(skb, q->buf_offset);
+ skb_mark_for_recycle(skb);
*(u32 *)skb->cb = info;
@@ -879,10 +870,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
continue;
free_frag:
- skb_free_frag(data);
+ mt76_put_page_pool_buf(data, true);
}
- mt76_dma_rx_fill(dev, q);
+ mt76_dma_rx_fill(dev, q, true);
return done;
}
@@ -922,10 +913,12 @@ mt76_dma_init(struct mt76_dev *dev,
snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
wiphy_name(dev->hw->wiphy));
dev->napi_dev.threaded = 1;
+ init_completion(&dev->mmio.wed_reset);
+ init_completion(&dev->mmio.wed_reset_complete);
mt76_for_each_q_rx(dev, i) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
- mt76_dma_rx_fill(dev, &dev->q_rx[i]);
+ mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
napi_enable(&dev->napi[i]);
}
@@ -975,8 +968,9 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
struct mt76_queue *q = &dev->q_rx[i];
netif_napi_del(&dev->napi[i]);
- if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
- mt76_dma_rx_cleanup(dev, q);
+ mt76_dma_rx_cleanup(dev, q);
+
+ page_pool_destroy(q->page_pool);
}
mt76_free_pending_txwi(dev);
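The dma.c rework swaps the per-queue page_frag_cache for a page pool: on MMIO devices the pool pre-maps buffers (so the hot path only needs dma_sync_single_for_cpu/for_device), skb_mark_for_recycle() lets freed skbs return their pages to the pool, and mt76_put_page_pool_buf() replaces skb_free_frag(). A kernel-context sketch of the allocate/release cycle using the page pool API (the demo_* names are hypothetical):

    #include <net/page_pool.h>

    static void *demo_rx_buf_get(struct page_pool *pool, u32 size, u32 *offset)
    {
            struct page *page;

            /* carve a fragment out of a (possibly shared) pool page */
            page = page_pool_dev_alloc_frag(pool, offset, size);
            if (!page)
                    return NULL;

            return page_address(page) + *offset;
    }

    static void demo_rx_buf_put(void *buf)
    {
            struct page *page = virt_to_head_page(buf);

            /* hand the fragment's reference back to its owning pool */
            page_pool_put_full_page(page->pp, page, false);
    }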
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 53c6ce2528b2..4b9bc7f462b8 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -56,5 +56,6 @@ enum mt76_mcu_evt_type {
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
+int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 9bc8758573fc..dce851d42e08 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -138,6 +138,7 @@ mt76_find_power_limits_node(struct mt76_dev *dev)
{
struct device_node *np = dev->dev->of_node;
const char *const region_names[] = {
+ [NL80211_DFS_UNSET] = "ww",
[NL80211_DFS_ETSI] = "etsi",
[NL80211_DFS_FCC] = "fcc",
[NL80211_DFS_JP] = "jp",
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index fc608b369b3c..b117e4467c87 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
#include <linux/of.h>
+#include <net/page_pool.h>
#include "mt76.h"
#define CHAN2G(_idx, _freq) { \
@@ -192,42 +193,48 @@ static const struct cfg80211_sar_capa mt76_sar_capa = {
.freq_ranges = &mt76_sar_freq_ranges[0],
};
-static int mt76_led_init(struct mt76_dev *dev)
+static int mt76_led_init(struct mt76_phy *phy)
{
- struct device_node *np = dev->dev->of_node;
- struct ieee80211_hw *hw = dev->hw;
- int led_pin;
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_hw *hw = phy->hw;
- if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
return 0;
- snprintf(dev->led_name, sizeof(dev->led_name),
- "mt76-%s", wiphy_name(hw->wiphy));
+ snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
+ wiphy_name(hw->wiphy));
- dev->led_cdev.name = dev->led_name;
- dev->led_cdev.default_trigger =
+ phy->leds.cdev.name = phy->leds.name;
+ phy->leds.cdev.default_trigger =
ieee80211_create_tpt_led_trigger(hw,
IEEE80211_TPT_LEDTRIG_FL_RADIO,
mt76_tpt_blink,
ARRAY_SIZE(mt76_tpt_blink));
- np = of_get_child_by_name(np, "led");
- if (np) {
- if (!of_property_read_u32(np, "led-sources", &led_pin))
- dev->led_pin = led_pin;
- dev->led_al = of_property_read_bool(np, "led-active-low");
- of_node_put(np);
+ if (phy == &dev->phy) {
+ struct device_node *np = dev->dev->of_node;
+
+ np = of_get_child_by_name(np, "led");
+ if (np) {
+ int led_pin;
+
+ if (!of_property_read_u32(np, "led-sources", &led_pin))
+ phy->leds.pin = led_pin;
+ phy->leds.al = of_property_read_bool(np,
+ "led-active-low");
+ of_node_put(np);
+ }
}
- return led_classdev_register(dev->dev, &dev->led_cdev);
+ return led_classdev_register(dev->dev, &phy->leds.cdev);
}
-static void mt76_led_cleanup(struct mt76_dev *dev)
+static void mt76_led_cleanup(struct mt76_phy *phy)
{
- if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
return;
- led_classdev_unregister(&dev->led_cdev);
+ led_classdev_unregister(&phy->leds.cdev);
}
static void mt76_init_stream_cap(struct mt76_phy *phy,
@@ -517,6 +524,12 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
return ret;
}
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ ret = mt76_led_init(phy);
+ if (ret)
+ return ret;
+ }
+
wiphy_read_of_freq_limits(phy->hw->wiphy);
mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
@@ -536,12 +549,55 @@ void mt76_unregister_phy(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
+ if (IS_ENABLED(CONFIG_MT76_LEDS))
+ mt76_led_cleanup(phy);
mt76_tx_status_check(dev, true);
ieee80211_unregister_hw(phy->hw);
dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
+int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_PAGE_FRAG,
+ .nid = NUMA_NO_NODE,
+ .dev = dev->dma_dev,
+ };
+ int idx = q - dev->q_rx;
+
+ switch (idx) {
+ case MT_RXQ_MAIN:
+ case MT_RXQ_BAND1:
+ case MT_RXQ_BAND2:
+ pp_params.pool_size = 256;
+ break;
+ default:
+ pp_params.pool_size = 16;
+ break;
+ }
+
+ if (mt76_is_mmio(dev)) {
+ /* rely on page_pool for DMA mapping */
+ pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ pp_params.offset = 0;
+ }
+
+ q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(q->page_pool)) {
+ int err = PTR_ERR(q->page_pool);
+
+ q->page_pool = NULL;
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_create_page_pool);
+
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
@@ -653,7 +709,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- ret = mt76_led_init(dev);
+ ret = mt76_led_init(phy);
if (ret)
return ret;
}
@@ -674,7 +730,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
struct ieee80211_hw *hw = dev->hw;
if (IS_ENABLED(CONFIG_MT76_LEDS))
- mt76_led_cleanup(dev);
+ mt76_led_cleanup(&dev->phy);
mt76_tx_status_check(dev, true);
ieee80211_unregister_hw(hw);
}
@@ -1644,7 +1700,7 @@ u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
- struct mt76_sta_stats *stats)
+ struct mt76_sta_stats *stats, bool eht)
{
int i, ei = wi->initial_stat_idx;
u64 *data = wi->data;
@@ -1660,17 +1716,37 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
+ if (eht) {
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
+ }
- for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
+ for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
data[ei++] += stats->tx_bw[i];
- for (i = 0; i < 12; i++)
+ for (i = 0; i < (eht ? 14 : 12); i++)
data[ei++] += stats->tx_mcs[i];
wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
+void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
+{
+#ifdef CONFIG_PAGE_POOL_STATS
+ struct page_pool_stats stats = {};
+ int i;
+
+ mt76_for_each_q_rx(dev, i)
+ page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
+
+ page_pool_ethtool_stats_get(data, &stats);
+ *index += page_pool_ethtool_stats_get_count();
+#endif
+}
+EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
+
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
struct ieee80211_hw *hw = phy->hw;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 32a77a0ae9da..ccca0162c8f8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -202,7 +202,7 @@ struct mt76_queue {
dma_addr_t desc_dma;
struct sk_buff *rx_head;
- struct page_frag_cache rx_page;
+ struct page_pool *page_pool;
};
struct mt76_mcu_ops {
@@ -264,12 +264,15 @@ enum mt76_phy_type {
MT_PHY_TYPE_HE_EXT_SU,
MT_PHY_TYPE_HE_TB,
MT_PHY_TYPE_HE_MU,
- __MT_PHY_TYPE_HE_MAX,
+ MT_PHY_TYPE_EHT_SU = 13,
+ MT_PHY_TYPE_EHT_TRIG,
+ MT_PHY_TYPE_EHT_MU,
+ __MT_PHY_TYPE_MAX,
};
struct mt76_sta_stats {
- u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
- u64 tx_bw[4]; /* 20, 40, 80, 160 */
+ u64 tx_mode[__MT_PHY_TYPE_MAX];
+ u64 tx_bw[5]; /* 20, 40, 80, 160, 320 */
u64 tx_nss[4]; /* 1, 2, 3, 4 */
u64 tx_mcs[16]; /* mcs idx */
u64 tx_bytes;
@@ -291,7 +294,7 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_HDR_TRANS,
};
-#define MT76_N_WCIDS 544
+#define MT76_N_WCIDS 1088
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY GENMASK(3, 2)
@@ -413,6 +416,7 @@ enum {
MT76_STATE_SUSPEND,
MT76_STATE_ROC,
MT76_STATE_PM,
+ MT76_STATE_WED_RESET,
};
struct mt76_hw_cap {
@@ -591,6 +595,8 @@ struct mt76_mmio {
u32 irqmask;
struct mtk_wed_device wed;
+ struct completion wed_reset;
+ struct completion wed_reset_complete;
};
struct mt76_rx_status {
@@ -731,6 +737,13 @@ struct mt76_phy {
} rx_amsdu[__MT_RXQ_MAX];
struct mt76_freq_range_power *frp;
+
+ struct {
+ struct led_classdev cdev;
+ char name[32];
+ bool al;
+ u8 pin;
+ } leds;
};
struct mt76_dev {
@@ -812,11 +825,6 @@ struct mt76_dev {
u32 debugfs_reg;
- struct led_classdev led_cdev;
- char led_name[32];
- bool led_al;
- u8 led_pin;
-
u8 csa_complete;
u32 rxfilter;
@@ -886,7 +894,6 @@ extern struct ieee80211_rate mt76_rates[12];
#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
-#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
@@ -907,10 +914,11 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
- int timeout);
-
-#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout, int kick);
+#define __mt76_poll_msec(...) ____mt76_poll_msec(__VA_ARGS__, 10)
+#define mt76_poll_msec(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
+#define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
@@ -1267,6 +1275,7 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_free_pending_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
@@ -1309,8 +1318,9 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
+void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
- struct mt76_sta_stats *stats);
+ struct mt76_sta_stats *stats, bool eht);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
u16 val, u16 offset, void *buf, size_t len);
@@ -1407,6 +1417,12 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct mt76_power_limits *dest,
s8 target_power);
+static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+{
+ return (q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+}
+
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
@@ -1414,6 +1430,25 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
struct mt76_txwi_cache *r, dma_addr_t phys);
+int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
+static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
+{
+ struct page *page = virt_to_head_page(buf);
+
+ page_pool_put_full_page(page->pp, page, allow_direct);
+}
+
+static inline void *
+mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
+{
+ struct page *page;
+
+ page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
+ if (!page)
+ return NULL;
+
+ return page_address(page) + *offset;
+}
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
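The poll rework threads a tick parameter through ____mt76_poll_msec() while keeping every existing mt76_poll_msec() caller source-compatible: the wrapper macros append a trailing default of 10 (ms) via __VA_ARGS__. A standalone illustration of that default-argument trick:

    #include <stdio.h>

    static int poll_rounds(int timeout_ms, int tick_ms)
    {
            return timeout_ms / tick_ms;
    }

    /* trailing argument supplies the default tick, as in mt76_poll_msec() */
    #define poll_default(...) poll_rounds(__VA_ARGS__, 10)
    /* caller passes the tick explicitly, as in mt76_poll_msec_tick() */
    #define poll_tick(...) poll_rounds(__VA_ARGS__)

    int main(void)
    {
            printf("%d rounds\n", poll_default(100));   /* 100 / 10 = 10 */
            printf("%d rounds\n", poll_tick(100, 1));   /* 100 / 1 = 100 */
            return 0;
    }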
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 031d39a48a55..9a2e632d577a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -330,10 +330,10 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
-static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+static void mt7603_led_set_config(struct mt76_phy *mphy, u8 delay_on,
u8 delay_off)
{
- struct mt7603_dev *dev = container_of(mt76, struct mt7603_dev,
+ struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev,
mt76);
u32 val, addr;
@@ -341,15 +341,15 @@ static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
FIELD_PREP(MT_LED_STATUS_ON, delay_on);
- addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mphy->leds.pin));
mt76_wr(dev, addr, val);
- addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mphy->leds.pin));
mt76_wr(dev, addr, val);
- val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
- MT_LED_CTRL_KICK(mt76->led_pin);
- if (mt76->led_al)
- val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ val = MT_LED_CTRL_REPLAY(mphy->leds.pin) |
+ MT_LED_CTRL_KICK(mphy->leds.pin);
+ if (mphy->leds.al)
+ val |= MT_LED_CTRL_POLARITY(mphy->leds.pin);
addr = mt7603_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
}
@@ -358,27 +358,27 @@ static int mt7603_led_set_blink(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
- led_cdev);
+ struct mt76_phy *mphy = container_of(led_cdev, struct mt76_phy,
+ leds.cdev);
u8 delta_on, delta_off;
delta_off = max_t(u8, *delay_off / 10, 1);
delta_on = max_t(u8, *delay_on / 10, 1);
- mt7603_led_set_config(mt76, delta_on, delta_off);
+ mt7603_led_set_config(mphy, delta_on, delta_off);
return 0;
}
static void mt7603_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
- led_cdev);
+ struct mt76_phy *mphy = container_of(led_cdev, struct mt76_phy,
+ leds.cdev);
if (!brightness)
- mt7603_led_set_config(mt76, 0, 0xff);
+ mt7603_led_set_config(mphy, 0, 0xff);
else
- mt7603_led_set_config(mt76, 0xff, 0);
+ mt7603_led_set_config(mphy, 0xff, 0);
}
static u32 __mt7603_reg_addr(struct mt7603_dev *dev, u32 addr)
@@ -535,8 +535,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7603_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
+ dev->mphy.leds.cdev.brightness_set = mt7603_led_set_brightness;
+ dev->mphy.leds.cdev.blink_set = mt7603_led_set_blink;
}
wiphy->reg_notifier = mt7603_regd_notifier;
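[Editor's note: every LED callback in this series follows the same shape: the led_classdev now lives inside struct mt76_phy (leds.cdev), so container_of() walks back from the classdev to the phy, and mphy->dev leads to the chip-specific device. A self-contained userspace model of that pointer arithmetic, with hypothetical struct names:]

#include <stddef.h>
#include <stdio.h>

struct cdev { int brightness; };
struct phy  { int band; struct cdev leds_cdev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct phy p = { .band = 1 };
        struct cdev *c = &p.leds_cdev;
        struct phy *back = container_of(c, struct phy, leds_cdev);

        printf("band=%d\n", back->band); /* prints band=1 */
        return 0;
}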
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 7884b952b720..301668c3cc92 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -221,7 +221,6 @@ int mt7603_mcu_init(struct mt7603_dev *dev)
.headroom = sizeof(struct mt7603_mcu_txd),
.mcu_skb_send_msg = mt7603_mcu_skb_send_msg,
.mcu_parse_response = mt7603_mcu_parse_response,
- .mcu_restart = mt7603_mcu_restart,
};
dev->mt76.mcu_ops = &mt7603_mcu_ops;
@@ -230,7 +229,7 @@ int mt7603_mcu_init(struct mt7603_dev *dev)
void mt7603_mcu_exit(struct mt7603_dev *dev)
{
- __mt76_mcu_restart(&dev->mt76);
+ mt7603_mcu_restart(&dev->mt76);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 07a1fea94f66..5fa6f097ec30 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -443,6 +443,85 @@ mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
mt76_set_stream_caps(&dev->mphy, true);
}
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+ u32 base, offset;
+
+ if (is_mt7663(&dev->mt76)) {
+ base = addr & MT7663_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT7663_MCU_PCIE_REMAP_2_OFFSET;
+ } else {
+ base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+ }
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
+EXPORT_SYMBOL_GPL(mt7615_reg_map);
+
+static void
+mt7615_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7615_dev *dev;
+ struct mt76_phy *mphy;
+ u32 val, addr;
+ u8 index;
+
+ mphy = container_of(led_cdev, struct mt76_phy, leds.cdev);
+ dev = container_of(mphy->dev, struct mt7615_dev, mt76);
+
+ if (!mt76_connac_pm_ref(mphy, &dev->pm))
+ return;
+
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ index = dev->dbdc_support ? mphy->band_idx : mphy->leds.pin;
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_0(index));
+ mt76_wr(dev, addr, val);
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_1(index));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(index) | MT_LED_CTRL_KICK(index);
+ if (dev->mphy.leds.al)
+ val |= MT_LED_CTRL_POLARITY(index);
+ if (mphy->band_idx)
+ val |= MT_LED_CTRL_BAND(index);
+
+ addr = mt7615_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+
+ mt76_connac_pm_unref(mphy, &dev->pm);
+}
+
+int mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7615_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_led_set_blink);
+
+void mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7615_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7615_led_set_config(led_cdev, 0xff, 0);
+}
+EXPORT_SYMBOL_GPL(mt7615_led_set_brightness);
+
int mt7615_register_ext_phy(struct mt7615_dev *dev)
{
struct mt7615_phy *phy = mt7615_ext_phy(dev);
@@ -497,6 +576,12 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
for (i = 0; i <= MT_TXQ_PSD ; i++)
mphy->q_tx[i] = dev->mphy.q_tx[i];
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ mphy->leds.cdev.brightness_set = mt7615_led_set_brightness;
+ mphy->leds.cdev.blink_set = mt7615_led_set_blink;
+ }
+
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 83f30305414d..eea398c79a98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -1692,7 +1692,6 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
.headroom = sizeof(struct mt7615_mcu_txd),
.mcu_skb_send_msg = mt7615_mcu_send_message,
.mcu_parse_response = mt7615_mcu_parse_response,
- .mcu_restart = mt7615_mcu_restart,
};
int ret;
@@ -1732,7 +1731,7 @@ EXPORT_SYMBOL_GPL(mt7615_mcu_init);
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
- __mt76_mcu_restart(&dev->mt76);
+ mt7615_mcu_restart(&dev->mt76);
mt7615_mcu_set_fw_ctrl(dev);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index a784f9d9e935..83173efb56dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -63,22 +63,6 @@ const u32 mt7663e_reg_map[] = {
[MT_EFUSE_ADDR_BASE] = 0x78011000,
};
-u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
-{
- u32 base, offset;
-
- if (is_mt7663(&dev->mt76)) {
- base = addr & MT7663_MCU_PCIE_REMAP_2_BASE;
- offset = addr & MT7663_MCU_PCIE_REMAP_2_OFFSET;
- } else {
- base = addr & MT_MCU_PCIE_REMAP_2_BASE;
- offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
- }
- mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
-
- return MT_PCIE_REMAP_BASE_2 + offset;
-}
-
static void
mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 087d4886162e..43591b4c1d9a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -376,6 +376,12 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+int mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+void mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
void mt7615_init_device(struct mt7615_dev *dev);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index 87b4aa52ee0f..0680e002b981 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -66,64 +66,6 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
return 0;
}
-static void
-mt7615_led_set_config(struct led_classdev *led_cdev,
- u8 delay_on, u8 delay_off)
-{
- struct mt7615_dev *dev;
- struct mt76_dev *mt76;
- u32 val, addr;
-
- mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
- dev = container_of(mt76, struct mt7615_dev, mt76);
-
- if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm))
- return;
-
- val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
- FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
- FIELD_PREP(MT_LED_STATUS_ON, delay_on);
-
- addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
- mt76_wr(dev, addr, val);
- addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
- mt76_wr(dev, addr, val);
-
- val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
- MT_LED_CTRL_KICK(mt76->led_pin);
- if (mt76->led_al)
- val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
- addr = mt7615_reg_map(dev, MT_LED_CTRL);
- mt76_wr(dev, addr, val);
-
- mt76_connac_pm_unref(&dev->mphy, &dev->pm);
-}
-
-static int
-mt7615_led_set_blink(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- u8 delta_on, delta_off;
-
- delta_off = max_t(u8, *delay_off / 10, 1);
- delta_on = max_t(u8, *delay_on / 10, 1);
-
- mt7615_led_set_config(led_cdev, delta_on, delta_off);
-
- return 0;
-}
-
-static void
-mt7615_led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- if (!brightness)
- mt7615_led_set_config(led_cdev, 0, 0xff);
- else
- mt7615_led_set_config(led_cdev, 0xff, 0);
-}
-
int mt7615_register_device(struct mt7615_dev *dev)
{
int ret;
@@ -133,8 +75,8 @@ int mt7615_register_device(struct mt7615_dev *dev)
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
+ dev->mphy.leds.cdev.brightness_set = mt7615_led_set_brightness;
+ dev->mphy.leds.cdev.blink_set = mt7615_led_set_blink;
}
ret = mt7622_wmac_init(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index fa1b9b26b399..7cecb22c569e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -544,6 +544,7 @@ enum mt7615_reg_base {
#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_BAND(_n) BIT(4 + (8 * (_n)))
#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index dc9a2f0b45a5..b0094205ba95 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -137,7 +137,6 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7663s_mcu_send_message,
.mcu_parse_response = mt7615_mcu_parse_response,
- .mcu_restart = mt7615_mcu_restart,
.mcu_rr = mt76_connac_mcu_reg_rr,
.mcu_wr = mt76_connac_mcu_reg_wr,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index 98bf2f6ae936..a8b1a0f8b2d7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -69,7 +69,6 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7663u_mcu_send_message,
.mcu_parse_response = mt7615_mcu_parse_response,
- .mcu_restart = mt7615_mcu_restart,
};
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 8ba883b03e50..b339c50bff20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -42,6 +42,7 @@ enum {
CMD_CBW_10MHZ,
CMD_CBW_5MHZ,
CMD_CBW_8080MHZ,
+ CMD_CBW_320MHZ,
CMD_HE_MCS_BW80 = 0,
CMD_HE_MCS_BW160,
@@ -239,6 +240,7 @@ static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
[NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
[NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
[NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_320] = CMD_CBW_320MHZ,
};
if (chandef->width >= ARRAY_SIZE(width_to_bw))
@@ -370,6 +372,9 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
+u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
+ struct ieee80211_vif *vif,
+ bool beacon, bool mcast);
bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
__le32 *txs_data);
bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index fd60123fb284..aed4ee95fb2e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -267,9 +267,9 @@ int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
}
EXPORT_SYMBOL_GPL(mt76_connac_init_tx_queues);
-static u16
-mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- bool beacon, bool mcast)
+u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
+ struct ieee80211_vif *vif,
+ bool beacon, bool mcast)
{
u8 mode = 0, band = mphy->chandef.chan->band;
int rateidx = 0, mcast_rate;
@@ -319,6 +319,7 @@ out:
return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
FIELD_PREP(MT_TX_RATE_MODE, mode);
}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_tx_rate_val);
static void
mt76_connac2_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
@@ -930,7 +931,7 @@ int mt76_connac2_reverse_frag0_hdr_trans(struct ieee80211_vif *vif,
ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
- break;
+ return -EINVAL;
}
skb_pull(skb, hdr_offset + sizeof(struct ethhdr) - 2);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 5a047e630860..efb9bfaa187f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -1329,6 +1329,40 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
+u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band)
+{
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_supported_band *sband;
+ u8 mode = 0;
+
+ if (band == NL80211_BAND_6GHZ)
+ mode |= PHY_MODE_AX_6G;
+
+ sband = phy->hw->wiphy->bands[band];
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
+
+ if (!eht_cap || !eht_cap->has_eht)
+ return mode;
+
+ switch (band) {
+ case NL80211_BAND_6GHZ:
+ mode |= PHY_MODE_BE_6G;
+ break;
+ case NL80211_BAND_5GHZ:
+ mode |= PHY_MODE_BE_5G;
+ break;
+ case NL80211_BAND_2GHZ:
+ mode |= PHY_MODE_BE_24G;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
+
const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
@@ -1341,6 +1375,18 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
}
EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
+const struct ieee80211_sta_eht_cap *
+mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
+{
+ enum nl80211_band band = phy->chandef.chan->band;
+ struct ieee80211_supported_band *sband;
+
+ sband = phy->hw->wiphy->bands[band];
+
+ return ieee80211_get_eht_iftype_cap(sband, vif->type);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_get_eht_phy_cap);
+
#define DEFAULT_HE_PE_DURATION 4
#define DEFAULT_HE_DURATION_RTS_THRES 1023
static void
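[Editor's note: a userspace model of the phymode_ext mapping added in mt76_connac_get_phy_mode_ext() above: 6 GHz always sets the HE-6G bit, and the BE (EHT) bit depends on the band, applied only when EHT capability is present. Names mirror the diff's defines but this is a sketch, not driver code.]

#include <stdint.h>
#include <stdio.h>

#define PHY_MODE_AX_6G  (1u << 0)
#define PHY_MODE_BE_24G (1u << 1)
#define PHY_MODE_BE_5G  (1u << 2)
#define PHY_MODE_BE_6G  (1u << 3)

enum band { BAND_2G, BAND_5G, BAND_6G };

static uint8_t phy_mode_ext(enum band band, int has_eht)
{
        uint8_t mode = (band == BAND_6G) ? PHY_MODE_AX_6G : 0;

        if (!has_eht)
                return mode;

        switch (band) {
        case BAND_6G: mode |= PHY_MODE_BE_6G;  break;
        case BAND_5G: mode |= PHY_MODE_BE_5G;  break;
        case BAND_2G: mode |= PHY_MODE_BE_24G; break;
        }
        return mode;
}

int main(void)
{
        printf("0x%x\n", phy_mode_ext(BAND_6G, 1)); /* 0x9: AX_6G | BE_6G */
        return 0;
}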
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index f1e942b9a887..a5e6ee4daf92 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -793,6 +793,7 @@ enum {
STA_REC_PHY = 0x15,
STA_REC_HE_6G = 0x17,
STA_REC_HE_V2 = 0x19,
+ STA_REC_EHT = 0x22,
STA_REC_HDRT = 0x28,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
@@ -882,12 +883,16 @@ enum {
#define PHY_MODE_AX_5G BIT(7)
#define PHY_MODE_AX_6G BIT(0) /* phymode_ext */
+#define PHY_MODE_BE_24G BIT(1)
+#define PHY_MODE_BE_5G BIT(2)
+#define PHY_MODE_BE_6G BIT(3)
#define MODE_CCK BIT(0)
#define MODE_OFDM BIT(1)
#define MODE_HT BIT(2)
#define MODE_VHT BIT(3)
#define MODE_HE BIT(4)
+#define MODE_EHT BIT(5)
#define STA_CAP_WMM BIT(0)
#define STA_CAP_SGI_20 BIT(4)
@@ -1171,6 +1176,7 @@ enum {
MCU_EXT_CMD_GET_MIB_INFO = 0x5a,
MCU_EXT_CMD_TXDPD_CAL = 0x60,
MCU_EXT_CMD_CAL_CACHE = 0x67,
+ MCU_EXT_CMD_RED_ENABLE = 0x68,
MCU_EXT_CMD_SET_RADAR_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
@@ -1198,7 +1204,8 @@ enum {
MCU_UNI_CMD_REPT_MUAR = 0x09,
MCU_UNI_CMD_WSYS_CONFIG = 0x0b,
MCU_UNI_CMD_REG_ACCESS = 0x0d,
- MCU_UNI_CMD_POWER_CREL = 0x0f,
+ MCU_UNI_CMD_CHIP_CONFIG = 0x0e,
+ MCU_UNI_CMD_POWER_CTRL = 0x0f,
MCU_UNI_CMD_RX_HDR_TRANS = 0x12,
MCU_UNI_CMD_SER = 0x13,
MCU_UNI_CMD_TWT = 0x14,
@@ -1238,6 +1245,7 @@ enum {
MCU_CE_CMD_TEST_CTRL = 0x01,
MCU_CE_CMD_START_HW_SCAN = 0x03,
MCU_CE_CMD_SET_PS_PROFILE = 0x05,
+ MCU_CE_CMD_SET_RX_FILTER = 0x0a,
MCU_CE_CMD_SET_CHAN_DOMAIN = 0x0f,
MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
MCU_CE_CMD_SET_BSS_ABORT = 0x17,
@@ -1730,7 +1738,7 @@ mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa)
}
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
-#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+#define to_wcid_hi(id) FIELD_GET(GENMASK(10, 8), (u16)id)
static inline void
mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
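[Editor's note: widening to_wcid_hi from GENMASK(9, 8) to GENMASK(10, 8) grows the addressable WCID range from 1024 to 2048 entries: the low byte carries bits 7..0, the high part bits 10..8. A quick userspace check of the split:]

#include <stdio.h>
#include <stdint.h>

#define TO_WCID_LO(id) ((uint16_t)(id) & 0xff)        /* bits 7..0  */
#define TO_WCID_HI(id) (((uint16_t)(id) >> 8) & 0x7)  /* bits 10..8 */

int main(void)
{
        uint16_t wcid = 0x2ab; /* 683 */

        printf("lo=0x%x hi=0x%x\n", TO_WCID_LO(wcid), TO_WCID_HI(wcid));
        return 0;
}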
@@ -1866,8 +1874,12 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
+const struct ieee80211_sta_eht_cap *
+mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band, struct ieee80211_sta *sta);
+u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band);
int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_connac_sta_key_conf *sta_key_conf,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 6c6c8ada7943..d543ef3de65b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -642,7 +642,12 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
if (tx_rate > 9)
return -EINVAL;
- *target_power = cur_power + dev->rate_power.vht[tx_rate];
+ *target_power = cur_power;
+ if (tx_rate > 7)
+ *target_power += dev->rate_power.vht[tx_rate - 8];
+ else
+ *target_power += dev->rate_power.ht[tx_rate];
+
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
break;
default:
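[Editor's note: the fix above stops indexing the VHT delta table with HT rate numbers: tx_rate 0-7 are HT MCS and take their per-rate delta from the HT table, while 8 and 9 are VHT MCS 8/9 and index the VHT table after subtracting 8. A compact model of the corrected lookup, with illustrative table sizes:]

#include <stdio.h>
#include <stdint.h>

static int8_t rate_power_delta(const int8_t ht[8], const int8_t vht[2],
                               int tx_rate)
{
        if (tx_rate < 0 || tx_rate > 9)
                return 0; /* the driver returns -EINVAL here */
        return tx_rate > 7 ? vht[tx_rate - 8] : ht[tx_rate];
}

int main(void)
{
        const int8_t ht[8] = { 2, 2, 2, 2, 1, 1, 0, 0 }, vht[2] = { -1, -2 };

        printf("%d %d\n", rate_power_delta(ht, vht, 6),
               rate_power_delta(ht, vht, 9)); /* 0 and -2 */
        return 0;
}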
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index 45502fd4693f..6dc1f51f5658 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -148,6 +148,7 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
mt76_wr(dev, MT_USB_DMA_CFG, val);
ret = mt76x0u_upload_firmware(dev, hdr);
+ mt76x02_set_ethtool_fwver(dev, hdr);
release_firmware(fw);
mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 604ddcc21123..7451a63206a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -87,10 +87,9 @@ static const struct ieee80211_iface_combination mt76x02u_if_comb[] = {
};
static void
-mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
- u8 delay_off)
+mt76x02_led_set_config(struct mt76_phy *mphy, u8 delay_on, u8 delay_off)
{
- struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev,
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev,
mt76);
u32 val;
@@ -98,13 +97,13 @@ mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
FIELD_PREP(MT_LED_STATUS_ON, delay_on);
- mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
- mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
+ mt76_wr(dev, MT_LED_S0(mphy->leds.pin), val);
+ mt76_wr(dev, MT_LED_S1(mphy->leds.pin), val);
- val = MT_LED_CTRL_REPLAY(mdev->led_pin) |
- MT_LED_CTRL_KICK(mdev->led_pin);
- if (mdev->led_al)
- val |= MT_LED_CTRL_POLARITY(mdev->led_pin);
+ val = MT_LED_CTRL_REPLAY(mphy->leds.pin) |
+ MT_LED_CTRL_KICK(mphy->leds.pin);
+ if (mphy->leds.al)
+ val |= MT_LED_CTRL_POLARITY(mphy->leds.pin);
mt76_wr(dev, MT_LED_CTRL, val);
}
@@ -113,14 +112,14 @@ mt76x02_led_set_blink(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
- led_cdev);
+ struct mt76_phy *mphy = container_of(led_cdev, struct mt76_phy,
+ leds.cdev);
u8 delta_on, delta_off;
delta_off = max_t(u8, *delay_off / 10, 1);
delta_on = max_t(u8, *delay_on / 10, 1);
- mt76x02_led_set_config(mdev, delta_on, delta_off);
+ mt76x02_led_set_config(mphy, delta_on, delta_off);
return 0;
}
@@ -129,13 +128,13 @@ static void
mt76x02_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
- led_cdev);
+ struct mt76_phy *mphy = container_of(led_cdev, struct mt76_phy,
+ leds.cdev);
if (!brightness)
- mt76x02_led_set_config(mdev, 0, 0xff);
+ mt76x02_led_set_config(mphy, 0, 0xff);
else
- mt76x02_led_set_config(mdev, 0xff, 0);
+ mt76x02_led_set_config(mphy, 0xff, 0);
}
int mt76x02_init_device(struct mt76x02_dev *dev)
@@ -167,9 +166,9 @@ int mt76x02_init_device(struct mt76x02_dev *dev)
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set =
+ dev->mphy.leds.cdev.brightness_set =
mt76x02_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt76x02_led_set_blink;
+ dev->mphy.leds.cdev.blink_set = mt76x02_led_set_blink;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index fb46c2c1784f..5a46813a59ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -811,7 +811,7 @@ mt7915_hw_queue_read(struct seq_file *s, u32 size,
if (val & BIT(map[i].index))
continue;
- ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
+ ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
head = mt76_get_field(dev, MT_FL_Q2_CTRL,
@@ -996,7 +996,7 @@ mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
ret = mt7915_mcu_get_txpower_sku(phy, txpwr, sizeof(txpwr));
if (ret)
- return ret;
+ goto out;
/* Txpower propagation path: TMAC -> TXV -> BBP */
len += scnprintf(buf + len, sz - len,
@@ -1047,6 +1047,8 @@ mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER));
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+out:
kfree(buf);
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index e3fa064918bf..abe17dac9996 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -559,9 +559,32 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return 0;
}
+static void mt7915_dma_wed_reset(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+
+ if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
+ return;
+
+ complete(&mdev->mmio.wed_reset);
+
+ if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
+ 3 * HZ))
+ dev_err(dev->mt76.dev, "wed reset complete timeout\n");
+}
+
+static void
+mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
+{
+ mt76_queue_reset(dev, q);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt76_dma_wed_setup(&dev->mt76, q, true);
+}
+
int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
int i;
/* clean up hw queues */
@@ -581,28 +604,40 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
if (force)
mt7915_wfsys_reset(dev);
+ if (mtk_wed_device_active(wed))
+ mtk_wed_device_dma_reset(wed);
+
mt7915_dma_disable(dev, force);
+ mt7915_dma_wed_reset(dev);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
if (mphy_ext)
- mt76_queue_reset(dev, mphy_ext->q_tx[i]);
+ mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]);
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
+ continue;
+
mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ }
mt76_tx_status_check(&dev->mt76, true);
- mt7915_dma_enable(dev);
-
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
+ if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
+ MT_WFDMA0_EXT0_RXWB_KEEP);
+
+ mt7915_dma_enable(dev);
+
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 59069fb86414..a79628933948 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -33,11 +33,14 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev)
u8 *eeprom = dev->mt76.eeprom.data;
u16 val = get_unaligned_le16(eeprom);
+#define CHECK_EEPROM_ERR(match) (match ? 0 : -EINVAL)
switch (val) {
case 0x7915:
+ return CHECK_EEPROM_ERR(is_mt7915(&dev->mt76));
case 0x7916:
+ return CHECK_EEPROM_ERR(is_mt7916(&dev->mt76));
case 0x7986:
- return 0;
+ return CHECK_EEPROM_ERR(is_mt7986(&dev->mt76));
default:
return -EINVAL;
}
@@ -110,18 +113,23 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
} else {
u8 free_block_num;
u32 block_num, i;
+ u32 eeprom_blk_size = MT7915_EEPROM_BLOCK_SIZE;
+
+ ret = mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
+ if (ret < 0)
+ return ret;
- mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
- /* efuse info not enough */
+ /* efuse info isn't enough */
if (free_block_num >= 29)
return -EINVAL;
/* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(eeprom_size,
- MT7915_EEPROM_BLOCK_SIZE);
- for (i = 0; i < block_num; i++)
- mt7915_mcu_get_eeprom(dev,
- i * MT7915_EEPROM_BLOCK_SIZE);
+ block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size);
+ for (i = 0; i < block_num; i++) {
+ ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size);
+ if (ret < 0)
+ return ret;
+ }
}
return mt7915_check_eeprom(dev);
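[Editor's note: the eeprom change above propagates errors from each 16-byte efuse block read instead of silently ignoring them. A userspace sketch of the fixed loop shape, with a hypothetical read_blk callback standing in for mt7915_mcu_get_eeprom():]

#include <stdint.h>

#define BLK 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int read_all(int (*read_blk)(uint32_t off), uint32_t total)
{
        uint32_t i, n = DIV_ROUND_UP(total, BLK);

        for (i = 0; i < n; i++) {
                int ret = read_blk(i * BLK);

                if (ret < 0)
                        return ret; /* was dropped on the floor before */
        }
        return 0;
}

static int fake_read(uint32_t off) { (void)off; return 0; }

int main(void)
{
        return read_all(fake_read, 100);
}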
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index c810c31fbd6e..1ab768feccaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -38,8 +38,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160) |
- BIT(NL80211_CHAN_WIDTH_80P80),
+ BIT(NL80211_CHAN_WIDTH_160),
}
};
@@ -83,9 +82,23 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
mutex_lock(&phy->dev->mt76.mutex);
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
+
+ if ((i - 1 == MT7915_CRIT_TEMP_IDX &&
+ val > phy->throttle_temp[MT7915_MAX_TEMP_IDX]) ||
+ (i - 1 == MT7915_MAX_TEMP_IDX &&
+ val < phy->throttle_temp[MT7915_CRIT_TEMP_IDX])) {
+ dev_err(phy->dev->mt76.dev,
+ "temp1_max shall be greater than temp1_crit.");
+ return -EINVAL;
+ }
+
phy->throttle_temp[i - 1] = val;
mutex_unlock(&phy->dev->mt76.mutex);
+ ret = mt7915_mcu_set_thermal_protect(phy);
+ if (ret)
+ return ret;
+
return count;
}
@@ -131,11 +144,11 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
u8 throttling = MT7915_THERMAL_THROTTLE_MAX - state;
int ret;
- if (state > MT7915_CDEV_THROTTLE_MAX)
+ if (state > MT7915_CDEV_THROTTLE_MAX) {
+ dev_err(phy->dev->mt76.dev,
+ "please specify a valid throttling state\n");
return -EINVAL;
-
- if (phy->throttle_temp[0] > phy->throttle_temp[1])
- return 0;
+ }
if (state == phy->cdev_state)
return 0;
@@ -164,7 +177,7 @@ static void mt7915_unregister_thermal(struct mt7915_phy *phy)
struct wiphy *wiphy = phy->mt76->hw->wiphy;
if (!phy->cdev)
- return;
+ return;
sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
thermal_cooling_device_unregister(phy->cdev);
@@ -198,41 +211,41 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
return PTR_ERR(hwmon);
/* initialize critical/maximum high temperature */
- phy->throttle_temp[0] = 110;
- phy->throttle_temp[1] = 120;
+ phy->throttle_temp[MT7915_CRIT_TEMP_IDX] = MT7915_CRIT_TEMP;
+ phy->throttle_temp[MT7915_MAX_TEMP_IDX] = MT7915_MAX_TEMP;
- return mt7915_mcu_set_thermal_throttling(phy,
- MT7915_THERMAL_THROTTLE_MAX);
+ return 0;
}
static void mt7915_led_set_config(struct led_classdev *led_cdev,
u8 delay_on, u8 delay_off)
{
struct mt7915_dev *dev;
- struct mt76_dev *mt76;
+ struct mt76_phy *mphy;
u32 val;
- mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
- dev = container_of(mt76, struct mt7915_dev, mt76);
+ mphy = container_of(led_cdev, struct mt76_phy, leds.cdev);
+ dev = container_of(mphy->dev, struct mt7915_dev, mt76);
- /* select TX blink mode, 2: only data frames */
- mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2);
+ /* set PWM mode */
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+ mt76_wr(dev, MT_LED_STATUS_0(mphy->band_idx), val);
+ mt76_wr(dev, MT_LED_STATUS_1(mphy->band_idx), val);
/* enable LED */
- mt76_wr(dev, MT_LED_EN(0), 1);
-
- /* set LED Tx blink on/off time */
- val = FIELD_PREP(MT_LED_TX_BLINK_ON_MASK, delay_on) |
- FIELD_PREP(MT_LED_TX_BLINK_OFF_MASK, delay_off);
- mt76_wr(dev, MT_LED_TX_BLINK(0), val);
+ mt76_wr(dev, MT_LED_EN(mphy->band_idx), 1);
/* control LED */
- val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK;
- if (dev->mt76.led_al)
+ val = MT_LED_CTRL_KICK;
+ if (dev->mphy.leds.al)
val |= MT_LED_CTRL_POLARITY;
+ if (mphy->band_idx)
+ val |= MT_LED_CTRL_BAND;
- mt76_wr(dev, MT_LED_CTRL(0), val);
- mt76_clear(dev, MT_LED_CTRL(0), MT_LED_CTRL_KICK);
+ mt76_wr(dev, MT_LED_CTRL(mphy->band_idx), val);
+ mt76_clear(dev, MT_LED_CTRL(mphy->band_idx), MT_LED_CTRL_KICK);
}
static int mt7915_led_set_blink(struct led_classdev *led_cdev,
@@ -319,9 +332,10 @@ mt7915_regd_notifier(struct wiphy *wiphy,
}
static void
-mt7915_init_wiphy(struct ieee80211_hw *hw)
+mt7915_init_wiphy(struct mt7915_phy *phy)
{
- struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
struct mt7915_dev *dev = phy->dev;
@@ -392,11 +406,6 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
-
- if (!dev->dbdc_support)
- phy->mt76->sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
} else {
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
@@ -415,6 +424,12 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy->available_antennas_rx = phy->mt76->antenna_mask;
wiphy->available_antennas_tx = phy->mt76->antenna_mask;
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ mphy->leds.cdev.brightness_set = mt7915_led_set_brightness;
+ mphy->leds.cdev.blink_set = mt7915_led_set_blink;
+ }
}
static void
@@ -473,6 +488,72 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
}
+static void
+mt7915_init_led_mux(struct mt7915_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MT76_LEDS))
+ return;
+
+ if (dev->dbdc_support) {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX2,
+ GENMASK(11, 8), 4);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX3,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(7, 4), 1);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(11, 8), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(27, 24), 3);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(31, 28), 3);
+ break;
+ default:
+ break;
+ }
+ } else if (dev->mphy.leds.pin) {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX3,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(11, 8), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(31, 28), 3);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX2,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(7, 4), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(27, 24), 3);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
@@ -497,10 +578,7 @@ void mt7915_mac_init(struct mt7915_dev *dev)
for (i = 0; i < 2; i++)
mt7915_mac_init_band(dev, i);
- if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- i = dev->mt76.led_pin ? MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2;
- mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
- }
+ mt7915_init_led_mux(dev);
}
int mt7915_txbf_init(struct mt7915_dev *dev)
@@ -569,7 +647,7 @@ mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
mt76_eeprom_override(mphy);
/* init wiphy according to mphy and phy */
- mt7915_init_wiphy(mphy->hw);
+ mt7915_init_wiphy(phy);
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
@@ -763,13 +841,9 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
int sts = hweight8(phy->mt76->chainmask);
u8 c, sts_160 = sts;
- /* Can do 1/2 of STS in 160Mhz mode for mt7915 */
- if (is_mt7915(&dev->mt76)) {
- if (!dev->dbdc_support)
- sts_160 /= 2;
- else
- sts_160 = 0;
- }
+ /* mt7915 doesn't support bw160 */
+ if (is_mt7915(&dev->mt76))
+ sts_160 = 0;
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -823,9 +897,6 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
- /* num_snd_dim
- * for mt7915, max supported sts is 2 for bw > 80MHz and 0 if dbdc
- */
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1);
if (sts_160)
@@ -873,15 +944,10 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
- u8 nss_160;
+ u8 nss_160 = nss;
- if (!is_mt7915(&dev->mt76))
- nss_160 = nss;
- else if (!dev->dbdc_support)
- /* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
- nss_160 = nss / 2;
- else
- /* Can't do 160MHz with mt7915 dbdc */
+ /* Can't do 160MHz with mt7915 */
+ if (is_mt7915(&dev->mt76))
nss_160 = 0;
for (i = 0; i < 8; i++) {
@@ -931,8 +997,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
else if (nss_160)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
else
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
@@ -1004,12 +1069,11 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
break;
}
+ memset(he_mcs, 0, sizeof(*he_mcs));
he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map_160);
he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map_160);
- he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
- he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
mt7915_set_stream_he_txbf_caps(phy, he_cap, i);
@@ -1101,10 +1165,8 @@ static void mt7915_stop_hardware(struct mt7915_dev *dev)
mt7986_wmac_disable(dev);
}
-
int mt7915_register_device(struct mt7915_dev *dev)
{
- struct ieee80211_hw *hw = mt76_hw(dev);
struct mt7915_phy *phy2;
int ret;
@@ -1133,18 +1195,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
if (ret)
goto free_phy2;
- mt7915_init_wiphy(hw);
+ mt7915_init_wiphy(&dev->phy);
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif
- /* init led callbacks */
- if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7915_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7915_led_set_blink;
- }
-
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index f0d5a3603902..97ca55d283fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -256,8 +256,7 @@ mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
if (!msta || !msta->vif)
return;
- if (!(q->flags & MT_QFLAG_WED) ||
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX)
+ if (!mt76_queue_is_wed_rx(q))
return;
if (!(info & MT_DMA_INFO_PPE_VLD))
@@ -1061,9 +1060,6 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
u16 wcidx;
u8 pid;
- if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
- return;
-
wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
@@ -1582,6 +1578,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
return;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ mtk_wed_device_stop(&dev->mt76.mmio.wed);
+ if (!is_mt7986(&dev->mt76))
+ mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
+ }
+
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
ieee80211_stop_queues(ext_phy->hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 0511d6a505b0..3bbccbdfc5eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -57,6 +57,17 @@ int mt7915_run(struct ieee80211_hw *hw)
mt7915_mac_enable_nf(dev, phy->mt76->band_idx);
}
+ ret = mt7915_mcu_set_thermal_throttling(phy,
+ MT7915_THERMAL_THROTTLE_MAX);
+
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_thermal_protect(phy);
+
+ if (ret)
+ goto out;
+
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
phy->mt76->band_idx);
if (ret)
@@ -1280,19 +1291,22 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u32 sset, u8 *data)
{
- if (sset == ETH_SS_STATS)
- memcpy(data, *mt7915_gstrings_stats,
- sizeof(mt7915_gstrings_stats));
+ if (sset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
+ data += sizeof(mt7915_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
}
static
int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int sset)
{
- if (sset == ETH_SS_STATS)
- return MT7915_SSTATS_LEN;
+ if (sset != ETH_SS_STATS)
+ return 0;
- return 0;
+ return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
}
static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1303,7 +1317,7 @@ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->wcid.stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats, false);
}
static
@@ -1320,7 +1334,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
};
struct mib_stats *mib = &phy->mib;
/* See mt7915_ampdu_stat_read_phy, etc */
- int i, ei = 0;
+ int i, ei = 0, stats_size;
mutex_lock(&dev->mt76.mutex);
@@ -1401,9 +1415,12 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
return;
ei += wi.worker_stat_count;
- if (ei != MT7915_SSTATS_LEN)
- dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
- ei, (int)MT7915_SSTATS_LEN);
+
+ mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
+
+ stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+ if (ei != stats_size)
+ dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
}
static void
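[Editor's note: the three ethtool hooks above must stay in sync: get_et_sset_count() now reports driver stats plus page-pool counters, get_et_strings() emits labels in the same order, and get_et_stats() fills exactly that many slots (the final dev_err is a consistency check, not an error path). A toy model of the invariant, with made-up counts:]

#include <stdio.h>

#define DRV_STATS 3
#define PP_STATS  2

static void fill_pp_stats(unsigned long long *data, int *index)
{
        int i;

        for (i = 0; i < PP_STATS; i++) /* page-pool counters appended */
                data[(*index)++] = 0;
}

int main(void)
{
        unsigned long long data[DRV_STATS + PP_STATS];
        int ei = DRV_STATS; /* driver stats already written */

        fill_pp_stats(data, &ei);
        printf("%s\n", ei == DRV_STATS + PP_STATS ? "consistent" : "mismatch");
        return 0;
}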
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index b2652de082ba..5545a8bdf1d0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -232,8 +232,11 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
c = (struct mt7915_mcu_csa_notify *)skb->data;
+ if (c->band_idx > MT_BAND1)
+ return;
+
if ((c->band_idx && !dev->phy.mt76->band_idx) &&
- dev->mt76.phys[MT_BAND1])
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -252,8 +255,11 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
return;
+ if (t->ctrl.band_idx > MT_BAND1)
+ return;
+
if ((t->ctrl.band_idx && !dev->phy.mt76->band_idx) &&
- dev->mt76.phys[MT_BAND1])
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
phy = (struct mt7915_phy *)mphy->priv;
@@ -268,8 +274,11 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
+ if (r->band_idx > MT_BAND1)
+ return;
+
if ((r->band_idx && !dev->phy.mt76->band_idx) &&
- dev->mt76.phys[MT_BAND1])
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
if (r->band_idx == MT_RX_SEL2)
@@ -326,7 +335,11 @@ mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
b = (struct mt7915_mcu_bcc_notify *)skb->data;
- if ((b->band_idx && !dev->phy.mt76->band_idx) && dev->mt76.phys[MT_BAND1])
+ if (b->band_idx > MT_BAND1)
+ return;
+
+ if ((b->band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -2104,7 +2117,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
/* make sure fw is download state */
if (mt7915_firmware_state(dev, false)) {
/* restart firmware once */
- __mt76_mcu_restart(&dev->mt76);
+ mt76_connac_mcu_restart(&dev->mt76);
ret = mt7915_firmware_state(dev, false);
if (ret) {
dev_err(dev->mt76.dev,
@@ -2278,6 +2291,53 @@ mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
sizeof(req), true);
}
+static int mt7915_red_set_watermark(struct mt7915_dev *dev)
+{
+#define RED_GLOBAL_TOKEN_WATERMARK 2
+ struct {
+ __le32 args[3];
+ u8 cmd;
+ u8 version;
+ u8 __rsv1[4];
+ __le16 len;
+ __le16 high_mark;
+ __le16 low_mark;
+ u8 __rsv2[12];
+ } __packed req = {
+ .args[0] = cpu_to_le32(MCU_WA_PARAM_RED_SETTING),
+ .cmd = RED_GLOBAL_TOKEN_WATERMARK,
+ .len = cpu_to_le16(sizeof(req) - sizeof(req.args)),
+ .high_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256),
+ .low_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256 - 1536),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_PARAM_CMD(SET), &req,
+ sizeof(req), false);
+}
+
+static int mt7915_mcu_set_red(struct mt7915_dev *dev, bool enabled)
+{
+#define RED_DISABLE 0
+#define RED_BY_WA_ENABLE 2
+ int ret;
+ u32 red_type = enabled ? RED_BY_WA_ENABLE : RED_DISABLE;
+ __le32 req = cpu_to_le32(red_type);
+
+ if (enabled) {
+ ret = mt7915_red_set_watermark(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RED_ENABLE), &req,
+ sizeof(req), false);
+ if (ret < 0)
+ return ret;
+
+ return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_RED, enabled, 0);
+}
+
int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
{
int ret;
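[Editor's note: mt7915_red_set_watermark() above is an instance of the usual packed, little-endian MCU request pattern. A userspace model with uint16_t + htole16() standing in for __le16 + cpu_to_le16(), and the watermark arithmetic taken from the diff (MT7915_HW_TOKEN_SIZE = 4096):]

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct red_req {
        uint32_t args[3];
        uint8_t  cmd;
        uint8_t  version;
        uint8_t  rsv1[4];
        uint16_t len;
        uint16_t high_mark;
        uint16_t low_mark;
        uint8_t  rsv2[12];
} __attribute__((packed));

int main(void)
{
        struct red_req req = {
                .cmd       = 2, /* RED_GLOBAL_TOKEN_WATERMARK */
                /* payload length excludes the leading args[] words */
                .len       = htole16(sizeof(req) - sizeof(req.args)),
                .high_mark = htole16(4096 - 256),
                .low_mark  = htole16(4096 - 256 - 1536),
        };

        printf("payload len=%u bytes\n", (unsigned)le16toh(req.len));
        return 0;
}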
@@ -2326,8 +2386,7 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
- return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
- MCU_WA_PARAM_RED, 0, 0);
+ return mt7915_mcu_set_red(dev, mtk_wed_device_active(&dev->mt76.mmio.wed));
}
int mt7915_mcu_init(struct mt7915_dev *dev)
@@ -2336,7 +2395,6 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
.headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
- .mcu_restart = mt76_connac_mcu_restart,
};
dev->mt76.mcu_ops = &mt7915_mcu_ops;
@@ -2346,16 +2404,17 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
- __mt76_mcu_restart(&dev->mt76);
+ mt76_connac_mcu_restart(&dev->mt76);
if (mt7915_firmware_state(dev, false)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
- return;
+ goto out;
}
mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
if (dev->hif2)
mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
MT_TOP_LPCR_HOST_FW_OWN);
+out:
skb_queue_purge(&dev->mt76.mcu.res_q);
}
@@ -2792,8 +2851,9 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
int ret;
u8 *buf;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS), &req,
- sizeof(req), true, &skb);
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_QUERY(EFUSE_ACCESS),
+ &req, sizeof(req), true, &skb);
if (ret)
return ret;
@@ -2818,8 +2878,9 @@ int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num)
struct sk_buff *skb;
int ret;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_FREE_BLOCK), &req,
- sizeof(req), true, &skb);
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_QUERY(EFUSE_FREE_BLOCK),
+ &req, sizeof(req), true, &skb);
if (ret)
return ret;
@@ -2974,38 +3035,42 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
- /* strict order */
- static const u32 offs[] = {
- MIB_NON_WIFI_TIME,
- MIB_TX_TIME,
- MIB_RX_TIME,
- MIB_OBSS_AIRTIME,
- MIB_TXOP_INIT_COUNT,
- /* v2 */
- MIB_NON_WIFI_TIME_V2,
- MIB_TX_TIME_V2,
- MIB_RX_TIME_V2,
- MIB_OBSS_AIRTIME_V2
- };
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
struct mt7915_dev *dev = phy->dev;
struct mt7915_mcu_mib *res, req[5];
struct sk_buff *skb;
- int i, ret, start = 0, ofs = 20;
+ static const u32 *offs;
+ int i, ret, len, offs_cc;
u64 cc_tx;
- if (!is_mt7915(&dev->mt76)) {
- start = 5;
- ofs = 0;
+ /* strict order */
+ if (is_mt7915(&dev->mt76)) {
+ static const u32 chip_offs[] = {
+ MIB_NON_WIFI_TIME,
+ MIB_TX_TIME,
+ MIB_RX_TIME,
+ MIB_OBSS_AIRTIME,
+ MIB_TXOP_INIT_COUNT,
+ };
+ len = ARRAY_SIZE(chip_offs);
+ offs = chip_offs;
+ offs_cc = 20;
+ } else {
+ static const u32 chip_offs[] = {
+ MIB_NON_WIFI_TIME_V2,
+ MIB_TX_TIME_V2,
+ MIB_RX_TIME_V2,
+ MIB_OBSS_AIRTIME_V2
+ };
+ len = ARRAY_SIZE(chip_offs);
+ offs = chip_offs;
+ offs_cc = 0;
}
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < len; i++) {
req[i].band = cpu_to_le32(phy->mt76->band_idx);
- req[i].offs = cpu_to_le32(offs[i + start]);
-
- if (!is_mt7915(&dev->mt76) && i == 3)
- break;
+ req[i].offs = cpu_to_le32(offs[i]);
}
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
@@ -3013,7 +3078,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
if (ret)
return ret;
- res = (struct mt7915_mcu_mib *)(skb->data + ofs);
+ res = (struct mt7915_mcu_mib *)(skb->data + offs_cc);
#define __res_u64(s) le64_to_cpu(res[s].data)
/* subtract Tx backoff time from Tx duration */
@@ -3060,6 +3125,29 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
{
struct mt7915_dev *dev = phy->dev;
+ struct mt7915_mcu_thermal_ctrl req = {
+ .band_idx = phy->mt76->band_idx,
+ .ctrl_id = THERMAL_PROTECT_DUTY_CONFIG,
+ };
+ int level, ret;
+
+ /* set duty cycle and level */
+ for (level = 0; level < 4; level++) {
+ req.duty.duty_level = level;
+ req.duty.duty_cycle = state;
+ state /= 2;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req), false);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
struct {
struct mt7915_mcu_thermal_ctrl ctrl;
@@ -3070,29 +3158,18 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
} __packed req = {
.ctrl = {
.band_idx = phy->mt76->band_idx,
+ .type.protect_type = 1,
+ .type.trigger_type = 1,
},
};
- int level;
-
- if (!state) {
- req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
- goto out;
- }
-
- /* set duty cycle and level */
- for (level = 0; level < 4; level++) {
- int ret;
+ int ret;
- req.ctrl.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG;
- req.ctrl.duty.duty_level = level;
- req.ctrl.duty.duty_cycle = state;
- state /= 2;
+ req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req.ctrl), false);
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
- &req, sizeof(req.ctrl), false);
- if (ret)
- return ret;
- }
+ if (ret)
+ return ret;
/* set high-temperature trigger threshold */
req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
@@ -3101,10 +3178,6 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
req.trigger_temp = cpu_to_le32(phy->throttle_temp[1]);
req.sustain_time = cpu_to_le16(10);
-out:
- req.ctrl.type.protect_type = 1;
- req.ctrl.type.trigger_type = 1;
-
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
&req, sizeof(req), false);
}
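[Editor's note: the reworked mt7915_mcu_set_thermal_throttling() above programs a four-level duty-cycle ladder, halving the cycle at each level. A worked model of the values sent, e.g. state 100 yields 100/50/25/12:]

#include <stdio.h>

int main(void)
{
        int level, state = 100;

        for (level = 0; level < 4; level++) {
                printf("level %d: duty %d\n", level, state);
                state /= 2; /* next level gets half the duty cycle */
        }
        return 0;
}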
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 29b5434bfdb8..b9ea297f382c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -278,6 +278,7 @@ enum {
MCU_WA_PARAM_PDMA_RX = 0x04,
MCU_WA_PARAM_CPU_UTIL = 0x0b,
MCU_WA_PARAM_RED = 0x0e,
+ MCU_WA_PARAM_RED_SETTING = 0x40,
};
enum mcu_mmps_mode {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 8388e2a65853..225a19604d3e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -4,10 +4,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include "mt7915.h"
#include "mac.h"
+#include "mcu.h"
#include "../trace.h"
#include "../dma.h"
@@ -495,7 +497,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
if (dev_is_pci(dev->mt76.dev) &&
((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
- (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
+ addr >= MT_CBTOP2_PHY_START))
return mt7915_reg_map_l1(dev, addr);
/* CONN_INFRA: convert to physical addr and use layer 1 remap */
@@ -594,7 +596,6 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
- struct page *page;
int i;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
@@ -605,58 +606,50 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
if (!t || !t->ptr)
continue;
- dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
- wed->wlan.rx_size, DMA_FROM_DEVICE);
- skb_free_frag(t->ptr);
+ mt76_put_page_pool_buf(t->ptr, false);
t->ptr = NULL;
mt76_put_rxwi(&dev->mt76, t);
}
- if (!wed->rx_buf_ring.rx_page.va)
- return;
-
- page = virt_to_page(wed->rx_buf_ring.rx_page.va);
- __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
- memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
+ mt76_free_pending_rxwi(&dev->mt76);
}
static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_txwi_cache *t = NULL;
struct mt7915_dev *dev;
- u32 length;
- int i;
+ struct mt76_queue *q;
+ int i, len;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
- length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
- sizeof(struct skb_shared_info));
+ q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+ len = SKB_WITH_OVERHEAD(q->buf_size);
for (i = 0; i < size; i++) {
- struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
- dma_addr_t phy_addr;
+ enum dma_data_direction dir;
+ dma_addr_t addr;
+ u32 offset;
int token;
- void *ptr;
+ void *buf;
- ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length,
- GFP_KERNEL);
- if (!ptr)
+ t = mt76_get_rxwi(&dev->mt76);
+ if (!t)
goto unmap;
- phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
- wed->wlan.rx_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
- skb_free_frag(ptr);
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ if (!buf)
goto unmap;
- }
- desc->buf0 = cpu_to_le32(phy_addr);
- token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+
+ desc->buf0 = cpu_to_le32(addr);
+ token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
if (token < 0) {
- dma_unmap_single(dev->mt76.dma_dev, phy_addr,
- wed->wlan.rx_size, DMA_TO_DEVICE);
- skb_free_frag(ptr);
+ mt76_put_page_pool_buf(buf, false);
goto unmap;
}
@@ -668,6 +661,8 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
return 0;
unmap:
+ if (t)
+ mt76_put_rxwi(&dev->mt76, t);
mt7915_mmio_wed_release_rx_buf(wed);
return -ENOMEM;
}
@@ -696,6 +691,42 @@ static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed,
rcu_read_unlock();
}
+
+static int mt7915_mmio_wed_reset(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *mdev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt76_phy *mphy = &dev->mphy;
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
+ return -EBUSY;
+
+ ret = mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L1,
+ mphy->band_idx);
+ if (ret)
+ goto out;
+
+ rtnl_unlock();
+ if (!wait_for_completion_timeout(&mdev->mmio.wed_reset, 20 * HZ)) {
+ dev_err(mdev->dev, "wed reset timeout\n");
+ ret = -ETIMEDOUT;
+ }
+ rtnl_lock();
+out:
+ clear_bit(MT76_STATE_WED_RESET, &mphy->state);
+
+ return ret;
+}
+
+static void mt7915_mmio_wed_reset_complete(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ complete(&dev->mmio.wed_reset_complete);
+}
#endif
int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
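[Editor's note: taken together with mt7915_dma_wed_reset() earlier in this series, the reset path is a two-completion handshake: the rtnl-held WED callback triggers L1 SER and blocks on mmio.wed_reset; the DMA reset path completes it and then waits for mmio.wed_reset_complete, which the WED driver signals via the reset_complete callback. A condensed sketch of the ordering, error handling dropped:]

/* WED side (mt7915_mmio_wed_reset, drops rtnl while waiting) */
mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L1, mphy->band_idx);
wait_for_completion_timeout(&mdev->mmio.wed_reset, 20 * HZ);

/* DMA reset side (mt7915_dma_wed_reset, reached from the SER work) */
complete(&mdev->mmio.wed_reset);
wait_for_completion_timeout(&mdev->mmio.wed_reset_complete, 3 * HZ);

/* WED driver, once its own reset is done */
complete(&mdev->mmio.wed_reset_complete);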
@@ -751,7 +782,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
}
- wed->wlan.nbuf = 4096;
+ wed->wlan.nbuf = MT7915_HW_TOKEN_SIZE;
wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
wed->wlan.tx_tbit[1] = is_mt7915(&dev->mt76) ? 5 : 31;
wed->wlan.txfree_tbit = is_mt7986(&dev->mt76) ? 2 : 1;
@@ -778,6 +809,8 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.init_rx_buf = mt7915_mmio_wed_init_rx_buf;
wed->wlan.release_rx_buf = mt7915_mmio_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
+ wed->wlan.reset = mt7915_mmio_wed_reset;
+ wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
dev->mt76.rx_token_size = wed->wlan.rx_npkt;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 6351feba6bdf..3cbfb9b6a305 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -53,6 +53,7 @@
#define MT7916_EEPROM_SIZE 4096
#define MT7915_EEPROM_BLOCK_SIZE 16
+#define MT7915_HW_TOKEN_SIZE 4096
#define MT7915_TOKEN_SIZE 8192
#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
@@ -70,6 +71,11 @@
#define MT7915_WED_RX_TOKEN_SIZE 12288
+#define MT7915_CRIT_TEMP_IDX 0
+#define MT7915_MAX_TEMP_IDX 1
+#define MT7915_CRIT_TEMP 110
+#define MT7915_MAX_TEMP 120
+
struct mt7915_vif;
struct mt7915_sta;
struct mt7915_dfs_pulse;
@@ -543,6 +549,7 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch);
int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
+int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index aca1b2f1e9e3..c8e478a55081 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -803,7 +803,6 @@ enum offs_rev {
#define MT_CBTOP1_PHY_START 0x70000000
#define MT_CBTOP1_PHY_END __REG(CBTOP1_PHY_END)
#define MT_CBTOP2_PHY_START 0xf0000000
-#define MT_CBTOP2_PHY_END 0xffffffff
#define MT_INFRA_MCU_START 0x7c000000
#define MT_INFRA_MCU_END __REG(INFRA_MCU_ADDR_END)
#define MT_CONN_INFRA_OFFSET(p) ((p) - MT_INFRA_BASE)
@@ -1055,6 +1054,7 @@ enum offs_rev {
#define MT_LED_CTRL(_n) MT_LED_PHYS(0x00 + ((_n) * 4))
#define MT_LED_CTRL_KICK BIT(7)
+#define MT_LED_CTRL_BAND BIT(4)
#define MT_LED_CTRL_BLINK_MODE BIT(2)
#define MT_LED_CTRL_POLARITY BIT(1)
@@ -1062,11 +1062,18 @@ enum offs_rev {
#define MT_LED_TX_BLINK_ON_MASK GENMASK(7, 0)
#define MT_LED_TX_BLINK_OFF_MASK GENMASK(15, 8)
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x20 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x24 + ((_n) * 8))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+
#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+#define MT_LED_GPIO_MUX0 0x70005050 /* GPIO 1 and GPIO 2 */
+#define MT_LED_GPIO_MUX1 0x70005054 /* GPIO 14 and GPIO 15 */
#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
-#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
-#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+#define MT_LED_GPIO_MUX3 0x7000505c /* GPIO 26 */
/* MT TOP */
#define MT_TOP_BASE 0x18060000
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index c06c56a0270d..2ac0a0f2859c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -278,6 +278,7 @@ static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev)
return -EINVAL;
rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
if (!rmem)
return -EINVAL;
@@ -882,6 +883,8 @@ static int mt7986_wmac_wm_enable(struct mt7915_dev *dev, bool enable)
{
u32 cur;
+ mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, 0);
+
mt76_rmw_field(dev, MT7986_TOP_WM_RESET,
MT7986_TOP_WM_RESET_MASK, enable);
if (!enable)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
index 47e034a9b003..48dd0decac5d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
@@ -33,14 +33,17 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
sar_root->package.elements[0].type != ACPI_TYPE_INTEGER) {
dev_err(mdev->dev, "sar cnt = %d\n",
sar_root->package.count);
+ ret = -EINVAL;
goto free;
}
if (!*tbl) {
*tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
GFP_KERNEL);
- if (!*tbl)
+ if (!*tbl) {
+ ret = -ENOMEM;
goto free;
+ }
}
if (len)
*len = sar_root->package.count;
@@ -52,9 +55,9 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
break;
*(*tbl + i) = (u8)sar_unit->integer.value;
}
-free:
ret = (i == sar_root->package.count) ? 0 : -EINVAL;
+free:
kfree(sar_root);
return ret;
@@ -135,6 +138,22 @@ mt7921_asar_acpi_read_mtgs(struct mt7921_dev *dev, u8 **table, u8 version)
return ret;
}
+/* MTFG : Flag Table */
+static int
+mt7921_asar_acpi_read_mtfg(struct mt7921_dev *dev, u8 **table)
+{
+ int len, ret;
+
+ ret = mt7921_acpi_read(dev, MT7921_ACPI_MTFG, table, &len);
+ if (ret)
+ return ret;
+
+ if (len < MT7921_ASAR_MIN_FG)
+ ret = -EINVAL;
+
+ return ret;
+}
+
int mt7921_init_acpi_sar(struct mt7921_dev *dev)
{
struct mt7921_acpi_sar *asar;
@@ -162,6 +181,12 @@ int mt7921_init_acpi_sar(struct mt7921_dev *dev)
asar->geo = NULL;
}
+ /* MTFG is optional */
+ ret = mt7921_asar_acpi_read_mtfg(dev, (u8 **)&asar->fg);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->fg);
+ asar->fg = NULL;
+ }
dev->phy.acpisar = asar;
return 0;
@@ -280,3 +305,36 @@ int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
return 0;
}
+
+u8 mt7921_acpi_get_flags(struct mt7921_phy *phy)
+{
+ struct mt7921_asar_fg *fg;
+ struct {
+ u8 acpi_idx;
+ u8 chip_idx;
+ } map[] = {
+ {1, 1},
+ {4, 2},
+ };
+ u8 flags = BIT(0);
+ int i, j;
+
+ if (!phy->acpisar)
+ return 0;
+
+ fg = phy->acpisar->fg;
+ if (!fg)
+ return flags;
+
+ /* pick up the necessary settings per device and
+ * translate the bitmap index for the chip command.
+ */
+ for (i = 0; i < fg->nr_flag; i++)
+ for (j = 0; j < ARRAY_SIZE(map); j++)
+ if (fg->flag[i] == map[j].acpi_idx) {
+ flags |= BIT(map[j].chip_idx);
+ break;
+ }
+
+ return flags;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
index 23f86bfae0c0..35268b0890ad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
@@ -8,10 +8,12 @@
#define MT7921_ASAR_MAX_DYN 8
#define MT7921_ASAR_MIN_GEO 3
#define MT7921_ASAR_MAX_GEO 8
+#define MT7921_ASAR_MIN_FG 8
#define MT7921_ACPI_MTCL "MTCL"
#define MT7921_ACPI_MTDS "MTDS"
#define MT7921_ACPI_MTGS "MTGS"
+#define MT7921_ACPI_MTFG "MTFG"
struct mt7921_asar_dyn_limit {
u8 idx;
@@ -77,6 +79,15 @@ struct mt7921_asar_cl {
u8 cl6g[6];
} __packed;
+struct mt7921_asar_fg {
+ u8 names[4];
+ u8 version;
+ u8 rsvd;
+ u8 nr_flag;
+ u8 rsvd1;
+ u8 flag[0];
+} __packed;
+
struct mt7921_acpi_sar {
u8 ver;
union {
@@ -88,6 +99,7 @@ struct mt7921_acpi_sar {
struct mt7921_asar_geo_v2 *geo_v2;
};
struct mt7921_asar_cl *countrylist;
+ struct mt7921_asar_fg *fg;
};
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 542dfd425129..80c71acfe159 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -120,6 +120,7 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
@@ -142,6 +143,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
static void
mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
{
+ u32 mask, set;
+
mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
mt76_set(dev, MT_TMAC_CTCR0(band),
@@ -158,6 +161,12 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
/* disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
+
+ /* filter out non-resp frames and get instantaneous signal reporting */
+ mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM;
+ set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) |
+ FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3);
+ mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
}
u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
@@ -175,7 +184,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
dev_err(dev, "Invalid firmware\n");
- return -EINVAL;
+ goto out;
}
data = fw->data;
@@ -206,6 +215,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
data += le16_to_cpu(rel_info->len) + rel_info->pad_len;
}
+out:
release_firmware(fw);
return features ? features->data : 0;
@@ -228,8 +238,6 @@ int mt7921_mac_init(struct mt7921_dev *dev)
for (i = 0; i < 2; i++)
mt7921_mac_init_band(dev, i);
- dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
-
return mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
}
EXPORT_SYMBOL_GPL(mt7921_mac_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 82db3762be33..557c20190c2b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -59,6 +59,7 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
struct rate_info *rate;
+ s8 rssi[4];
int i;
spin_lock_bh(&dev->sta_poll_lock);
@@ -160,6 +161,20 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
else
rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
}
+
+ /* get signal strength of resp frames (CTS/BA/ACK) */
+ addr = mt7921_mac_wtbl_lmac_addr(idx, 30);
+ val = mt76_rr(dev, addr);
+
+ rssi[0] = to_rssi(GENMASK(7, 0), val);
+ rssi[1] = to_rssi(GENMASK(15, 8), val);
+ rssi[2] = to_rssi(GENMASK(23, 16), val);
+ rssi[3] = to_rssi(GENMASK(31, 24), val);
+
+ msta->ack_signal =
+ mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
+
+ ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
}
}
EXPORT_SYMBOL_GPL(mt7921_mac_sta_poll);
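The WTBL word read above packs one RCPI byte per receive antenna; to_rssi() extracts a byte and converts it to dBm. A standalone sketch of the unpacking, assuming mt76's usual (rcpi - 220) / 2 conversion (the exact formula is an assumption here):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

/* assumed mt76-style RCPI -> dBm conversion */
static int to_rssi(uint32_t field, uint32_t val)
{
        int shift = __builtin_ctz(field);
        int rcpi = (int)((val & field) >> shift);

        return (rcpi - 220) / 2;
}

int main(void)
{
        uint32_t val = 0x8a8c8e90;      /* one packed RCPI byte per antenna */
        int i;

        for (i = 0; i < 4; i++)
                printf("ant%d: %d dBm\n", i,
                       to_rssi(GENMASK(8 * i + 7, 8 * i), val));
        return 0;
}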
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 76ac5069638f..75eaf86c6a78 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -422,15 +422,15 @@ void mt7921_roc_timer(struct timer_list *timer)
static int mt7921_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif)
{
- int err;
-
- if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
- return 0;
+ int err = 0;
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
- err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
- clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+
+ mt7921_mutex_acquire(phy->dev);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
+ mt7921_mutex_release(phy->dev);
return err;
}
@@ -487,13 +487,8 @@ static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_phy *phy = mt7921_hw_phy(hw);
- int err;
- mt7921_mutex_acquire(phy->dev);
- err = mt7921_abort_roc(phy, mvif);
- mt7921_mutex_release(phy->dev);
-
- return err;
+ return mt7921_abort_roc(phy, mvif);
}
static int mt7921_set_channel(struct mt7921_phy *phy)
@@ -681,7 +676,6 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_sniffer_interface_iter, dev);
- dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
}
out:
@@ -710,53 +704,12 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
- MT_WF_RFCR1_DROP_BF_POLL |
- MT_WF_RFCR1_DROP_BA |
- MT_WF_RFCR1_DROP_CFEND |
- MT_WF_RFCR1_DROP_CFACK;
- u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- dev->mt76.rxfilter &= ~(_hw); \
- dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
- } while (0)
mt7921_mutex_acquire(dev);
-
- dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
-
- MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
- MT_WF_RFCR_DROP_A3_MAC |
- MT_WF_RFCR_DROP_A3_BSSID);
-
- MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
-
- MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
- MT_WF_RFCR_DROP_RTS |
- MT_WF_RFCR_DROP_CTL_RSV |
- MT_WF_RFCR_DROP_NDPA);
-
- *total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter);
-
- if (*total_flags & FIF_CONTROL)
- mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags);
- else
- mt76_set(dev, MT_WF_RFCR1(0), ctl_flags);
-
+ mt7921_mcu_set_rxfilter(dev, *total_flags, 0, 0);
mt7921_mutex_release(dev);
+
+ *total_flags &= (FIF_OTHER_BSS | FIF_FCSFAIL | FIF_CONTROL);
}
static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
@@ -860,6 +813,8 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
true, mvif->ctx);
+ ewma_avg_signal_init(&msta->avg_ack_signal);
+
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
@@ -1135,17 +1090,34 @@ static void
mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 sset, u8 *data)
{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
if (sset != ETH_SS_STATS)
return;
memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
+
+ if (mt76_is_sdio(&dev->mt76))
+ return;
+
+ data += sizeof(mt7921_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
}
static int
mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int sset)
{
- return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ if (mt76_is_sdio(&dev->mt76))
+ return ARRAY_SIZE(mt7921_gstrings_stats);
+
+ return ARRAY_SIZE(mt7921_gstrings_stats) +
+ page_pool_ethtool_stats_get_count();
}
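The count returned above is the driver's own string table plus the generic page-pool strings on buses that use the page pool; SDIO has no page pool, so it reports only the driver table. A trivial model of the arithmetic, with made-up table sizes:

#include <stdio.h>

#define DRIVER_STATS_LEN 64     /* ARRAY_SIZE(mt7921_gstrings_stats), assumed */
#define PAGE_POOL_STATS_LEN 13  /* page_pool_ethtool_stats_get_count(), assumed */

static int sset_count(int is_sdio)
{
        if (is_sdio)
                return DRIVER_STATS_LEN;
        return DRIVER_STATS_LEN + PAGE_POOL_STATS_LEN;
}

int main(void)
{
        printf("sdio: %d, pcie/usb: %d\n", sset_count(1), sset_count(0));
        return 0;
}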
static void
@@ -1157,7 +1129,7 @@ mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->wcid.stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats, false);
}
static
@@ -1165,6 +1137,7 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ethtool_stats *stats, u64 *data)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
struct mt7921_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
@@ -1220,9 +1193,14 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
ei += wi.worker_stat_count;
- if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
- dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
- ei, ARRAY_SIZE(mt7921_gstrings_stats));
+
+ if (!mt76_is_sdio(&dev->mt76)) {
+ mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
+ stats_size += page_pool_ethtool_stats_get_count();
+ }
+
+ if (ei != stats_size)
+ dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
}
static u64
@@ -1430,6 +1408,12 @@ static void mt7921_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
+
+ sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
#ifdef CONFIG_PM
@@ -1711,7 +1695,10 @@ static void mt7921_ctx_iter(void *priv, u8 *mac,
if (ctx != mvif->ctx)
return;
- mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
+ if (vif->type & NL80211_IFTYPE_MONITOR)
+ mt7921_mcu_config_sniffer(mvif, ctx);
+ else
+ mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
}
static void
@@ -1778,11 +1765,8 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
struct ieee80211_prep_tx_info *info)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
- struct mt7921_dev *dev = mt7921_hw_dev(hw);
- mt7921_mutex_acquire(dev);
mt7921_abort_roc(mvif->phy, mvif);
- mt7921_mutex_release(dev);
}
const struct ieee80211_ops mt7921_ops = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index fb9c0f66cb27..c5e7ad06f877 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -174,7 +174,7 @@ mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
wake_up(&dev->phy.roc_wait);
duration = le32_to_cpu(grant->max_interval);
mod_timer(&dev->phy.roc_timer,
- round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
+ jiffies + msecs_to_jiffies(duration));
}
static void
@@ -1019,6 +1019,8 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
struct ieee80211_vif *vif,
bool enable)
{
+#define MT7921_FIF_BIT_CLR BIT(1)
+#define MT7921_FIF_BIT_SET BIT(0)
int err;
if (enable) {
@@ -1026,7 +1028,11 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
if (err)
return err;
- mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ err = mt7921_mcu_set_rxfilter(dev, 0,
+ MT7921_FIF_BIT_SET,
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ if (err)
+ return err;
return 0;
}
@@ -1035,7 +1041,11 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
if (err)
return err;
- mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ err = mt7921_mcu_set_rxfilter(dev, 0,
+ MT7921_FIF_BIT_CLR,
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ if (err)
+ return err;
return 0;
}
@@ -1093,6 +1103,74 @@ int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
true);
}
+int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct cfg80211_chan_def *chandef = &ctx->def;
+ int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
+ const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 1,
+ [NL80211_BAND_5GHZ] = 2,
+ [NL80211_BAND_6GHZ] = 3,
+ };
+ const u8 ch_width[] = {
+ [NL80211_CHAN_WIDTH_20_NOHT] = 0,
+ [NL80211_CHAN_WIDTH_20] = 0,
+ [NL80211_CHAN_WIDTH_40] = 0,
+ [NL80211_CHAN_WIDTH_80] = 1,
+ [NL80211_CHAN_WIDTH_160] = 2,
+ [NL80211_CHAN_WIDTH_80P80] = 3,
+ [NL80211_CHAN_WIDTH_5] = 4,
+ [NL80211_CHAN_WIDTH_10] = 5,
+ [NL80211_CHAN_WIDTH_320] = 6,
+ };
+ struct {
+ struct {
+ u8 band_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct config_tlv {
+ __le16 tag;
+ __le16 len;
+ u16 aid;
+ u8 ch_band;
+ u8 bw;
+ u8 control_ch;
+ u8 sco;
+ u8 center_ch;
+ u8 center_ch2;
+ u8 drop_err;
+ u8 pad[3];
+ } __packed tlv;
+ } __packed req = {
+ .hdr = {
+ .band_idx = vif->mt76.band_idx,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(1),
+ .len = cpu_to_le16(sizeof(req.tlv)),
+ .control_ch = chandef->chan->hw_value,
+ .center_ch = ieee80211_frequency_to_channel(freq1),
+ .drop_err = 1,
+ },
+ };
+ if (chandef->chan->band < ARRAY_SIZE(ch_band))
+ req.tlv.ch_band = ch_band[chandef->chan->band];
+ if (chandef->width < ARRAY_SIZE(ch_width))
+ req.tlv.bw = ch_width[chandef->width];
+
+ if (freq2)
+ req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2);
+
+ if (req.tlv.control_ch < req.tlv.center_ch)
+ req.tlv.sco = 1; /* SCA */
+ else if (req.tlv.control_ch > req.tlv.center_ch)
+ req.tlv.sco = 3; /* SCB */
+
+ return mt76_mcu_send_msg(vif->phy->mt76->dev, MCU_UNI_CMD(SNIFFER),
+ &req, sizeof(req), true);
+}
+
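The sco field above encodes the secondary channel offset for the sniffer: SCA (1) when the control channel sits below the center channel, SCB (3) when it sits above. A compilable sketch of that rule, using the MCU encoding from the patch:

#include <stdio.h>

static int sco(int control_ch, int center_ch)
{
        if (control_ch < center_ch)
                return 1;       /* SCA: secondary channel above */
        if (control_ch > center_ch)
                return 3;       /* SCB: secondary channel below */
        return 0;               /* SCN: no secondary channel */
}

int main(void)
{
        printf("ch36/38 -> sco %d\n", sco(36, 38));     /* 1 (SCA) */
        printf("ch40/38 -> sco %d\n", sco(40, 38));     /* 3 (SCB) */
        printf("ch36/36 -> sco %d\n", sco(36, 36));     /* 0 (SCN) */
        return 0;
}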
int
mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
struct ieee80211_hw *hw,
@@ -1184,13 +1262,15 @@ int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
__le16 len;
u8 idx;
u8 env;
- u8 pad1[2];
+ u8 acpi_conf;
+ u8 pad1;
u8 alpha2[2];
u8 type[2];
u8 rsvd[64];
} __packed req = {
.idx = idx,
.env = env_cap,
+ .acpi_conf = mt7921_acpi_get_flags(&dev->phy),
};
int ret, valid_cnt = 0;
u8 i, *pos;
@@ -1253,3 +1333,25 @@ int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
}
return 0;
}
+
+int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif,
+ u8 bit_op, u32 bit_map)
+{
+ struct {
+ u8 rsv[4];
+ u8 mode;
+ u8 rsv2[3];
+ __le32 fif;
+ __le32 bit_map; /* bit_* for bitmap update */
+ u8 bit_op;
+ u8 pad[51];
+ } __packed data = {
+ .mode = fif ? 1 : 2,
+ .fif = cpu_to_le32(fif),
+ .bit_map = cpu_to_le32(bit_map),
+ .bit_op = bit_op,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
+ &data, sizeof(data), false);
+}
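The message above has two shapes: a non-zero fif selects mode 1 and replaces the whole RX filter word, while fif == 0 selects mode 2 and applies bit_op to the bits in bit_map (the beacon-filter path uses the latter). A minimal model of the mode selection, loosely mirroring the struct layout:

#include <stdio.h>
#include <stdint.h>

struct rxfilter_msg {
        uint8_t mode;           /* 1: replace filter word, 2: bit update */
        uint32_t fif;
        uint32_t bit_map;
        uint8_t bit_op;         /* BIT(0): set, BIT(1): clear */
};

static struct rxfilter_msg make_msg(uint32_t fif, uint8_t op, uint32_t map)
{
        struct rxfilter_msg m = {
                .mode = fif ? 1 : 2,
                .fif = fif,
                .bit_map = map,
                .bit_op = op,
        };
        return m;
}

int main(void)
{
        /* configure_filter path: full word, mode 1 */
        struct rxfilter_msg m1 = make_msg(0x7, 0, 0);
        /* beacon filter path: set one drop bit, mode 2 */
        struct rxfilter_msg m2 = make_msg(0, 1u << 0, 1u << 10);

        printf("m1.mode=%u m2.mode=%u\n", m1.mode, m2.mode);
        return 0;
}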
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 15d6b7fe1c6c..1af70dac723b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -144,6 +144,8 @@ enum mt7921_rxq_id {
MT7921_RXQ_MCU_WM = 0,
};
+DECLARE_EWMA(avg_signal, 10, 8)
+
struct mt7921_sta {
struct mt76_wcid wcid; /* must be first */
@@ -152,6 +154,9 @@ struct mt7921_sta {
struct list_head poll_list;
u32 airtime_ac[8];
+ int ack_signal;
+ struct ewma_avg_signal avg_ack_signal;
+
unsigned long last_txs;
unsigned long ampdu_state;
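DECLARE_EWMA(avg_signal, 10, 8) above generates an exponentially weighted moving average kept in fixed point with 10 fractional bits and a 1/8 sample weight; the driver feeds it -ack_signal, so the stored values are positive magnitudes. A userspace sketch of the same arithmetic (not the kernel macro):

#include <stdio.h>

#define EWMA_PRECISION 10       /* fractional bits */
#define EWMA_WEIGHT_RCP 8       /* reciprocal weight: new sample counts 1/8 */

static unsigned long avg_internal;

static void ewma_add(long sample)
{
        unsigned long s = (unsigned long)sample << EWMA_PRECISION;

        if (!avg_internal)
                avg_internal = s;       /* first sample seeds the average */
        else
                avg_internal = (avg_internal * (EWMA_WEIGHT_RCP - 1) + s) /
                               EWMA_WEIGHT_RCP;
}

static long ewma_read(void)
{
        return avg_internal >> EWMA_PRECISION;
}

int main(void)
{
        /* driver feeds -ack_signal, so samples are positive dBm magnitudes */
        long samples[] = { 40, 42, 38, 45, 41 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                ewma_add(samples[i]);

        printf("avg ack signal: -%ld dBm\n", ewma_read());
        return 0;
}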
@@ -383,6 +388,8 @@ int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb);
+int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif,
+ u8 bit_op, u32 bit_map);
static inline void mt7921_irq_enable(struct mt7921_dev *dev, u32 mask)
{
@@ -529,6 +536,8 @@ void mt7921_set_ipv6_ns_work(struct work_struct *work);
int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
+int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -554,6 +563,7 @@ int mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
#ifdef CONFIG_ACPI
int mt7921_init_acpi_sar(struct mt7921_dev *dev);
int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default);
+u8 mt7921_acpi_get_flags(struct mt7921_phy *phy);
#else
static inline int
mt7921_init_acpi_sar(struct mt7921_dev *dev)
@@ -566,6 +576,12 @@ mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
{
return 0;
}
+
+static inline u8
+mt7921_acpi_get_flags(struct mt7921_phy *phy)
+{
+ return 0;
+}
#endif
int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index 86340d3205c5..1aefbb6cf0ab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -44,7 +44,6 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
.headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt76_connac_mcu_restart,
};
int err;
@@ -69,8 +68,8 @@ int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
- if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
- PCIE_LPCR_HOST_OWN_SYNC, 0, 50))
+ if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL,
+ PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1))
break;
}
@@ -110,8 +109,8 @@ int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev)
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
- if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
- PCIE_LPCR_HOST_OWN_SYNC, 4, 50))
+ if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL,
+ PCIE_LPCR_HOST_OWN_SYNC, 4, 50, 1))
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index c65582acfa55..e52977ff3349 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -80,6 +80,14 @@
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
+/* WTBLOFF TOP: band 0(0x820e9000), band 1(0x820f9000) */
+#define MT_WTBLOFF_TOP_BASE(_band) ((_band) ? 0x820f9000 : 0x820e9000)
+#define MT_WTBLOFF_TOP(_band, ofs) (MT_WTBLOFF_TOP_BASE(_band) + (ofs))
+
+#define MT_WTBLOFF_TOP_RSCR(_band) MT_WTBLOFF_TOP(_band, 0x008)
+#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30)
+#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24)
+
/* LPON: band 0(0x24200), band 1(0xa4200) */
#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
index bdec8684ce94..7f408212e716 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
@@ -59,7 +59,6 @@ mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req)
cancel_work_sync(&pm->wake_work);
__mt7921_mcu_drv_pmctrl(dev);
- mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter);
phy->test.state = MT76_TM_STATE_ON;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 5321d20dcdcb..8fef09ed29c9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -15,6 +15,9 @@
static const struct usb_device_id mt7921u_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ /* Comfast CF-952AX */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3574, 0x6211, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ },
};
@@ -133,7 +136,6 @@ static int mt7921u_mcu_init(struct mt7921_dev *dev)
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7921u_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt76_connac_mcu_restart,
};
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 2e4a8909b9e8..9c5e9ac1c335 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -457,7 +457,7 @@ mt7996_hw_queue_read(struct seq_file *s, u32 size,
if (val & BIT(map[i].index))
continue;
- ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
+ ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
head = mt76_get_field(dev, MT_FL_Q2_CTRL,
@@ -653,8 +653,9 @@ static int
mt7996_rf_regval_set(void *data, u64 val)
{
struct mt7996_dev *dev = data;
+ u32 val32 = val;
- return mt7996_mcu_rf_regval(dev, dev->mt76.debugfs_reg, (u32 *)&val, true);
+ return mt7996_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &val32, true);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7996_rf_regval_get,
@@ -790,10 +791,10 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
else
buf[count] = '\0';
- /* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9
- * bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3
- * nss - vht: 1~4, he: 1~4, others: ignore
- * mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2
+ /* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9, eht: 15
+ * bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3, bw320: 4
+ * nss - vht: 1~4, he: 1~4, eht: 1~4, others: ignore
+ * mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2, eht: 0~13
* gi - (ht/vht) lgi: 0, sgi: 1; (he) 0.8us: 0, 1.6us: 1, 3.2us: 2
* preamble - short: 1, long: 0
* ldpc - off: 0, on: 1
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index b9f62bedbc48..2e48c5a40f81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -65,22 +65,50 @@ static int mt7996_eeprom_load(struct mt7996_dev *dev)
} else {
u8 free_block_num;
u32 block_num, i;
+ u32 eeprom_blk_size = MT7996_EEPROM_BLOCK_SIZE;
- /* TODO: check free block event */
- mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
- /* efuse info not enough */
+ ret = mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
+ if (ret < 0)
+ return ret;
+
+ /* efuse info isn't enough */
if (free_block_num >= 59)
return -EINVAL;
/* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, MT7996_EEPROM_BLOCK_SIZE);
- for (i = 0; i < block_num; i++)
- mt7996_mcu_get_eeprom(dev, i * MT7996_EEPROM_BLOCK_SIZE);
+ block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size);
+ for (i = 0; i < block_num; i++) {
+ ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size);
+ if (ret < 0)
+ return ret;
+ }
}
return mt7996_check_eeprom(dev);
}
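The loop above fetches the EEPROM from efuse one 16-byte block at a time, with DIV_ROUND_UP covering a final partial block and each MCU call now checked for failure. A trivial standalone model of the block walk (sizes assumed for illustration):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int eeprom_size = 7680;        /* MT7996_EEPROM_SIZE, assumed */
        unsigned int blk_size = 16;             /* MT7996_EEPROM_BLOCK_SIZE */
        unsigned int nblocks = DIV_ROUND_UP(eeprom_size, blk_size);
        unsigned int i;

        for (i = 0; i < nblocks; i++) {
                /* mt7996_mcu_get_eeprom(dev, i * blk_size) goes here;
                 * any failure now aborts the whole load. */
                if (i == 0 || i == nblocks - 1)
                        printf("block %u at offset %u\n", i, i * blk_size);
        }
        return 0;
}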
+static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_dev *dev)
+{
+#define MODE_HE_ONLY BIT(0)
+#define WTBL_SIZE_GROUP GENMASK(31, 28)
+ u32 cap = 0;
+ int ret;
+
+ ret = mt7996_mcu_get_chip_config(dev, &cap);
+ if (ret)
+ return ret;
+
+ if (cap) {
+ dev->has_eht = !(cap & MODE_HE_ONLY);
+ dev->wtbl_size_group = u32_get_bits(cap, WTBL_SIZE_GROUP);
+ }
+
+ if (dev->wtbl_size_group < 2 || dev->wtbl_size_group > 4)
+ dev->wtbl_size_group = 2; /* set default */
+
+ return 0;
+}
+
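The capability word decoded above carries an HE-only flag in bit 0 and the WTBL size group in bits 31:28, clamped to 2..4. A standalone sketch of the decode, with the field layout taken from the #defines in the patch and plain shifts standing in for u32_get_bits():

#include <stdio.h>
#include <stdint.h>

#define MODE_HE_ONLY            (1u << 0)
#define WTBL_SIZE_GROUP_SHIFT   28

static void parse_cap(uint32_t cap, int *has_eht, int *wtbl_group)
{
        *has_eht = !(cap & MODE_HE_ONLY);
        *wtbl_group = (cap >> WTBL_SIZE_GROUP_SHIFT) & 0xf;

        if (*wtbl_group < 2 || *wtbl_group > 4)
                *wtbl_group = 2;        /* default */
}

int main(void)
{
        int has_eht, group;

        parse_cap(0x30000000, &has_eht, &group);        /* group 3, EHT */
        printf("eht=%d wtbl_group=%d\n", has_eht, group);

        parse_cap(0x00000001, &has_eht, &group);        /* HE only */
        printf("eht=%d wtbl_group=%d\n", has_eht, group);
        return 0;
}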
static int mt7996_eeprom_parse_band_config(struct mt7996_phy *phy)
{
u8 *eeprom = phy->dev->mt76.eeprom.data;
@@ -127,6 +155,7 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
u8 path, nss, band_idx = phy->mt76->band_idx;
u8 *eeprom = dev->mt76.eeprom.data;
struct mt76_phy *mphy = phy->mt76;
+ int ret;
switch (band_idx) {
case MT_BAND1:
@@ -161,6 +190,10 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
dev->chainshift[band_idx + 1] = dev->chainshift[band_idx] +
hweight16(mphy->chainmask);
+ ret = mt7996_eeprom_parse_efuse_hw_cap(dev);
+ if (ret)
+ return ret;
+
return mt7996_eeprom_parse_band_config(phy);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 46b290526092..946da93eed32 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -37,8 +37,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160) |
- BIT(NL80211_CHAN_WIDTH_80P80),
+ BIT(NL80211_CHAN_WIDTH_160),
}
};
@@ -46,11 +45,11 @@ static void mt7996_led_set_config(struct led_classdev *led_cdev,
u8 delay_on, u8 delay_off)
{
struct mt7996_dev *dev;
- struct mt76_dev *mt76;
+ struct mt76_phy *mphy;
u32 val;
- mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
- dev = container_of(mt76, struct mt7996_dev, mt76);
+ mphy = container_of(led_cdev, struct mt76_phy, leds.cdev);
+ dev = container_of(mphy->dev, struct mt7996_dev, mt76);
/* select TX blink mode, 2: only data frames */
mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2);
@@ -65,7 +64,7 @@ static void mt7996_led_set_config(struct led_classdev *led_cdev,
/* control LED */
val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK;
- if (dev->mt76.led_al)
+ if (mphy->leds.al)
val |= MT_LED_CTRL_POLARITY;
mt76_wr(dev, MT_LED_CTRL(0), val);
@@ -153,10 +152,12 @@ mt7996_init_wiphy(struct ieee80211_hw *hw)
struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
+ u16 max_subframes = phy->dev->has_eht ? IEEE80211_MAX_AMPDU_BUF_EHT :
+ IEEE80211_MAX_AMPDU_BUF_HE;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_rx_aggregation_subframes = max_subframes;
+ hw->max_tx_aggregation_subframes = max_subframes;
hw->netdev_features = NETIF_F_RXCSUM;
hw->radiotap_timestamp.units_pos =
@@ -214,7 +215,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw)
mt76_set_stream_caps(phy->mt76, true);
mt7996_set_stream_vht_txbf_caps(phy);
- mt7996_set_stream_he_caps(phy);
+ mt7996_set_stream_he_eht_caps(phy);
wiphy->available_antennas_rx = phy->mt76->antenna_mask;
wiphy->available_antennas_tx = phy->mt76->antenna_mask;
@@ -256,12 +257,12 @@ static void mt7996_mac_init(struct mt7996_dev *dev)
mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
- for (i = 0; i < MT7996_WTBL_SIZE; i++)
+ for (i = 0; i < mt7996_wtbl_size(dev); i++)
mt7996_mac_wtbl_update(dev, i,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- i = dev->mt76.led_pin ? MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2;
+ i = dev->mphy.leds.pin ? MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2;
mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
}
@@ -465,7 +466,7 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
- (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+ FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, sts - 1);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
@@ -572,11 +573,15 @@ mt7996_gen_ppe_thresh(u8 *he_ppet, int nss)
(0xff >> (8 - (ppet_bits - 1) % 8));
}
-static int
+static void
mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
- struct ieee80211_sband_iftype_data *data)
+ struct ieee80211_sband_iftype_data *data,
+ enum nl80211_iftype iftype)
{
- int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
+ struct ieee80211_sta_he_cap *he_cap = &data->he_cap;
+ struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *he_mcs = &he_cap->he_mcs_nss_supp;
+ int i, nss = hweight8(phy->mt76->antenna_mask);
u16 mcs_map = 0;
for (i = 0; i < 8; i++) {
@@ -586,179 +591,254 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
}
- for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
- struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
- struct ieee80211_he_cap_elem *he_cap_elem =
- &he_cap->he_cap_elem;
- struct ieee80211_he_mcs_nss_supp *he_mcs =
- &he_cap->he_mcs_nss_supp;
-
- switch (i) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_AP:
-#ifdef CONFIG_MAC80211_MESH
- case NL80211_IFTYPE_MESH_POINT:
-#endif
- break;
- default:
- continue;
- }
+ he_cap->has_he = true;
- data[idx].types_mask = BIT(i);
- he_cap->has_he = true;
+ he_cap_elem->mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ he_cap_elem->mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
+ he_cap_elem->mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
- he_cap_elem->mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE;
- he_cap_elem->mac_cap_info[3] =
- IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
- he_cap_elem->mac_cap_info[4] =
- IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+ he_cap_elem->phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_BSR;
+ he_cap_elem->mac_cap_info[4] |= IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[5] |=
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
if (band == NL80211_BAND_2GHZ)
- he_cap_elem->phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
else
- he_cap_elem->phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
-
- he_cap_elem->phy_cap_info[1] =
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
- he_cap_elem->phy_cap_info[2] =
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] |=
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ break;
+ default:
+ break;
+ }
- switch (i) {
- case NL80211_IFTYPE_AP:
- he_cap_elem->mac_cap_info[0] |=
- IEEE80211_HE_MAC_CAP0_TWT_RES;
- he_cap_elem->mac_cap_info[2] |=
- IEEE80211_HE_MAC_CAP2_BSR;
- he_cap_elem->mac_cap_info[4] |=
- IEEE80211_HE_MAC_CAP4_BQR;
- he_cap_elem->mac_cap_info[5] |=
- IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
- he_cap_elem->phy_cap_info[3] |=
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
- he_cap_elem->phy_cap_info[6] |=
- IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
- he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
- break;
- case NL80211_IFTYPE_STATION:
- he_cap_elem->mac_cap_info[1] |=
- IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
-
- if (band == NL80211_BAND_2GHZ)
- he_cap_elem->phy_cap_info[0] |=
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
- else
- he_cap_elem->phy_cap_info[0] |=
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
-
- he_cap_elem->phy_cap_info[1] |=
- IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
- IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
- he_cap_elem->phy_cap_info[3] |=
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
- he_cap_elem->phy_cap_info[6] |=
- IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
- IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
- he_cap_elem->phy_cap_info[7] |=
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
- he_cap_elem->phy_cap_info[8] |=
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
- he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
- IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
- IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
- break;
- }
+ he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
+
+ mt7996_set_stream_he_txbf_caps(phy, he_cap, iftype);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ mt7996_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ } else {
+ he_cap_elem->phy_cap_info[9] |=
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ }
- he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
- he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
- he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map);
-
- mt7996_set_stream_he_txbf_caps(phy, he_cap, i);
-
- memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
- if (he_cap_elem->phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt7996_gen_ppe_thresh(he_cap->ppe_thres, nss);
- } else {
- he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US;
- }
+ if (band == NL80211_BAND_6GHZ) {
+ u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
- if (band == NL80211_BAND_6GHZ) {
- u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
- IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+ cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_2,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
- cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_2,
- IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
- u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
- u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
- IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ data->he_6ghz_capa.capa = cpu_to_le16(cap);
+ }
+}
- data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
- }
+static void
+mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ struct ieee80211_sband_iftype_data *data,
+ enum nl80211_iftype iftype)
+{
+ struct ieee80211_sta_eht_cap *eht_cap = &data->eht_cap;
+ struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp *eht_nss = &eht_cap->eht_mcs_nss_supp;
+ enum nl80211_chan_width width = phy->mt76->chandef.width;
+ int nss = hweight8(phy->mt76->antenna_mask);
+ int sts = hweight16(phy->mt76->chainmask);
+ u8 val;
- idx++;
- }
+ if (!phy->dev->has_eht)
+ return;
- return idx;
+ eht_cap->has_eht = true;
+
+ eht_cap_elem->mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
+
+ eht_cap_elem->phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+
+ eht_cap_elem->phy_cap_info[0] |=
+ u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[1] =
+ u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] =
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) |
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
+
+ eht_cap_elem->phy_cap_info[4] =
+ u8_encode_bits(min_t(int, sts - 1, 2),
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+
+ eht_cap_elem->phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
+ u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK);
+
+ val = width == NL80211_CHAN_WIDTH_320 ? 0xf :
+ width == NL80211_CHAN_WIDTH_160 ? 0x7 :
+ width == NL80211_CHAN_WIDTH_80 ? 0x3 : 0x1;
+ eht_cap_elem->phy_cap_info[6] =
+ u8_encode_bits(u8_get_bits(0x11, GENMASK(4, 2)),
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
+ u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
+
+ eht_cap_elem->phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
+
+ val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
+ u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX);
+#define SET_EHT_MAX_NSS(_bw, _val) do { \
+ eht_nss->bw._##_bw.rx_tx_mcs9_max_nss = _val; \
+ eht_nss->bw._##_bw.rx_tx_mcs11_max_nss = _val; \
+ eht_nss->bw._##_bw.rx_tx_mcs13_max_nss = _val; \
+ } while (0)
+
+ SET_EHT_MAX_NSS(80, val);
+ SET_EHT_MAX_NSS(160, val);
+ SET_EHT_MAX_NSS(320, val);
+#undef SET_EHT_MAX_NSS
}
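The MCS15 support mask chosen above widens with the operating bandwidth (0x1/0x3/0x7/0xf for up-to-40/80/160/320 MHz). A small sketch of that mapping, with an illustrative enum standing in for the nl80211 width values:

#include <stdio.h>

enum chan_width { W20, W40, W80, W160, W320 };

static unsigned int mcs15_supp_mask(enum chan_width w)
{
        switch (w) {
        case W320:
                return 0xf;
        case W160:
                return 0x7;
        case W80:
                return 0x3;
        default:
                return 0x1;     /* 20/40 MHz */
        }
}

int main(void)
{
        printf("320MHz -> 0x%x\n", mcs15_supp_mask(W320));
        printf("80MHz  -> 0x%x\n", mcs15_supp_mask(W80));
        return 0;
}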
-void mt7996_set_stream_he_caps(struct mt7996_phy *phy)
+static void
+__mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy,
+ struct ieee80211_supported_band *sband,
+ enum nl80211_band band)
{
- struct ieee80211_sband_iftype_data *data;
- struct ieee80211_supported_band *band;
- int n;
+ struct ieee80211_sband_iftype_data *data = phy->iftype[band];
+ int i, n = 0;
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+#endif
+ break;
+ default:
+ continue;
+ }
- if (phy->mt76->cap.has_2ghz) {
- data = phy->iftype[NL80211_BAND_2GHZ];
- n = mt7996_init_he_caps(phy, NL80211_BAND_2GHZ, data);
+ data[n].types_mask = BIT(i);
+ mt7996_init_he_caps(phy, band, &data[n], i);
+ mt7996_init_eht_caps(phy, band, &data[n], i);
- band = &phy->mt76->sband_2g.sband;
- band->iftype_data = data;
- band->n_iftype_data = n;
+ n++;
}
- if (phy->mt76->cap.has_5ghz) {
- data = phy->iftype[NL80211_BAND_5GHZ];
- n = mt7996_init_he_caps(phy, NL80211_BAND_5GHZ, data);
+ sband->iftype_data = data;
+ sband->n_iftype_data = n;
+}
- band = &phy->mt76->sband_5g.sband;
- band->iftype_data = data;
- band->n_iftype_data = n;
- }
+void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy)
+{
+ if (phy->mt76->cap.has_2ghz)
+ __mt7996_set_stream_he_eht_caps(phy, &phy->mt76->sband_2g.sband,
+ NL80211_BAND_2GHZ);
- if (phy->mt76->cap.has_6ghz) {
- data = phy->iftype[NL80211_BAND_6GHZ];
- n = mt7996_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+ if (phy->mt76->cap.has_5ghz)
+ __mt7996_set_stream_he_eht_caps(phy, &phy->mt76->sband_5g.sband,
+ NL80211_BAND_5GHZ);
- band = &phy->mt76->sband_6g.sband;
- band->iftype_data = data;
- band->n_iftype_data = n;
- }
+ if (phy->mt76->cap.has_6ghz)
+ __mt7996_set_stream_he_eht_caps(phy, &phy->mt76->sband_6g.sband,
+ NL80211_BAND_6GHZ);
}
int mt7996_register_device(struct mt7996_dev *dev)
@@ -787,8 +867,8 @@ int mt7996_register_device(struct mt7996_dev *dev)
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7996_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7996_led_set_blink;
+ dev->mphy.leds.cdev.brightness_set = mt7996_led_set_brightness;
+ dev->mphy.leds.cdev.blink_set = mt7996_led_set_blink;
}
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 0b3e28748e76..c9a9f0e31771 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -189,6 +189,9 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
rate = &msta->wcid.rate;
switch (rate->bw) {
+ case RATE_INFO_BW_320:
+ bw = IEEE80211_STA_RX_BW_320;
+ break;
case RATE_INFO_BW_160:
bw = IEEE80211_STA_RX_BW_160;
break;
@@ -205,7 +208,11 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
val = mt76_rr(dev, addr);
- if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
+ addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 5);
+ val = mt76_rr(dev, addr);
+ rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
+ } else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
u8 offs = 24 + 2 * bw;
rate->he_gi = (val & (0x3 << offs)) >> offs;
@@ -469,7 +476,7 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
- break;
+ return -EINVAL;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
@@ -560,6 +567,15 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
status->he_dcm = dcm;
break;
+ case MT_PHY_TYPE_EHT_SU:
+ case MT_PHY_TYPE_EHT_TRIG:
+ case MT_PHY_TYPE_EHT_MU:
+ /* TODO: EHT rx rates are reported as HE rates for now */
+ status->nss = nss;
+ status->encoding = RX_ENC_HE;
+ bw = min_t(int, bw, IEEE80211_STA_RX_BW_160);
+ i = min_t(int, i & 0xf, 11);
+ break;
default:
return -EINVAL;
}
@@ -584,6 +600,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
case IEEE80211_STA_RX_BW_160:
status->bw = RATE_INFO_BW_160;
break;
+ case IEEE80211_STA_RX_BW_320:
+ status->bw = RATE_INFO_BW_320;
+ break;
default:
return -EINVAL;
}
@@ -959,51 +978,6 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
}
}
-static u16
-mt7996_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- bool beacon, bool mcast)
-{
- u8 mode = 0, band = mphy->chandef.chan->band;
- int rateidx = 0, mcast_rate;
-
- if (beacon) {
- struct cfg80211_bitrate_mask *mask;
-
- mask = &vif->bss_conf.beacon_tx_rate;
- if (hweight16(mask->control[band].he_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
- mode = MT_PHY_TYPE_HE_SU;
- goto out;
- } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
- mode = MT_PHY_TYPE_VHT;
- goto out;
- } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
- mode = MT_PHY_TYPE_HT;
- goto out;
- } else if (hweight32(mask->control[band].legacy) == 1) {
- rateidx = ffs(mask->control[band].legacy) - 1;
- goto legacy;
- }
- }
-
- mcast_rate = vif->bss_conf.mcast_rate[band];
- if (mcast && mcast_rate > 0)
- rateidx = mcast_rate - 1;
- else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
-
-legacy:
- rateidx = mt76_calculate_default_rate(mphy, rateidx);
- mode = rateidx >> 8;
- rateidx &= GENMASK(7, 0);
-
-out:
- return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
- FIELD_PREP(MT_TX_RATE_MODE, mode);
-}
-
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key, u32 changed)
@@ -1091,7 +1065,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
/* Fixed rate is available just for 802.11 txd */
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool multicast = is_multicast_ether_addr(hdr->addr1);
- u16 rate = mt7996_mac_tx_rate_val(mphy, vif, beacon, multicast);
+ u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
+ multicast);
/* fix to bw 20 */
val = MT_TXD6_FIXED_BW |
@@ -1113,8 +1088,8 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_connac_txp_common *txp;
struct mt76_txwi_cache *t;
- struct mt7996_txp *txp;
int id, i, pid, nbuf = tx_info->nbuf - 1;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
u8 *txwi = (u8 *)txwi_ptr;
@@ -1148,35 +1123,35 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid,
key, 0);
- txp = (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
+ txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
- txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
- txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
}
- txp->nbuf = nbuf;
+ txp->fw.nbuf = nbuf;
- txp->flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+ txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
- txp->flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
if (!key)
- txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
- txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
if (vif) {
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- txp->bss_idx = mvif->mt76.idx;
+ txp->fw.bss_idx = mvif->mt76.idx;
}
- txp->token = cpu_to_le16(id);
+ txp->fw.token = cpu_to_le16(id);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
- txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
+ txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
else
- txp->rept_wds_wcid = cpu_to_le16(0xfff);
+ txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
tx_info->skb = DMA_DUMMY_DATA;
/* pass partial skb header to fw */
@@ -1213,18 +1188,6 @@ mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
}
static void
-mt7996_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- struct mt7996_txp *txp;
- int i;
-
- txp = mt7996_txwi_to_txp(dev, t);
- for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
- le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
-}
-
-static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, struct list_head *free_list)
{
@@ -1233,7 +1196,7 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
__le32 *txwi;
u16 wcid_idx;
- mt7996_txp_skb_unmap(mdev, t);
+ mt76_connac_txp_skb_unmap(mdev, t);
if (!t->skb)
goto out;
@@ -1434,6 +1397,15 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
rate.flags = RATE_INFO_FLAGS_HE_MCS;
break;
+ case MT_PHY_TYPE_EHT_SU:
+ case MT_PHY_TYPE_EHT_TRIG:
+ case MT_PHY_TYPE_EHT_MU:
+ if (rate.mcs > 13)
+ goto out;
+
+ rate.eht_gi = wcid->rate.eht_gi;
+ rate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ break;
default:
goto out;
}
@@ -1441,6 +1413,10 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
stats->tx_mode[mode]++;
switch (FIELD_GET(MT_TXS0_BW, txs)) {
+ case IEEE80211_STA_RX_BW_320:
+ rate.bw = RATE_INFO_BW_320;
+ stats->tx_bw[4]++;
+ break;
case IEEE80211_STA_RX_BW_160:
rate.bw = RATE_INFO_BW_160;
stats->tx_bw[3]++;
@@ -1486,7 +1462,7 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
if (pid < MT_PACKET_ID_FIRST)
return;
- if (wcidx >= MT7996_WTBL_SIZE)
+ if (wcidx >= mt7996_wtbl_size(dev))
return;
rcu_read_lock();
@@ -1589,27 +1565,6 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
-void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7996_txp *txp;
-
- txp = mt7996_txwi_to_txp(mdev, e->txwi);
- t = mt76_token_put(mdev, le16_to_cpu(txp->token));
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
@@ -1690,7 +1645,7 @@ void mt7996_mac_set_timing(struct mt7996_phy *phy)
else
val = MT7996_CFEND_RATE_11B;
- mt76_rmw_field(dev, MT_AGG_ACR0(band_idx), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_rmw_field(dev, MT_RATE_HRCR0(band_idx), MT_RATE_HRCR0_CFEND_RATE, val);
mt76_clear(dev, MT_ARB_SCR(band_idx),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
index 9f68852012b9..27184cbac619 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
@@ -268,17 +268,6 @@ enum tx_mgnt_type {
/* VHT/HE only use bits 0-3 */
#define MT_TX_RATE_IDX GENMASK(5, 0)
-struct mt7996_txp {
- __le16 flags;
- __le16 token;
- u8 bss_idx;
- __le16 rept_wds_wcid;
- u8 nbuf;
-#define MT_TXP_MAX_BUF_NUM 6
- __le32 buf[MT_TXP_MAX_BUF_NUM];
- __le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed __aligned(4);
-
#define MT_TXFREE0_PKT_TYPE GENMASK(31, 27)
#define MT_TXFREE0_MSDU_CNT GENMASK(25, 16)
#define MT_TXFREE0_RX_BYTE GENMASK(15, 0)
@@ -382,17 +371,4 @@ struct mt7996_dfs_radar_spec {
struct mt7996_dfs_pattern radar_pattern[16];
};
-static inline struct mt7996_txp *
-mt7996_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- u8 *txwi;
-
- if (!t)
- return NULL;
-
- txwi = mt76_get_txwi_ptr(dev, t);
-
- return (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
-}
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 4421cd54311b..3e4da0350d96 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -170,7 +170,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
phy->monitor_vif = vif;
mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mvif->mt76.idx >= (MT7996_MAX_INTERFACES << dev->dbdc_support)) {
+ if (mvif->mt76.idx >= mt7996_max_interface_num(dev)) {
ret = -ENOSPC;
goto out;
}
@@ -880,14 +880,17 @@ mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
phy->mt76->antenna_mask = tx_ant;
/* restore to the origin chainmask which might have auxiliary path */
- if (hweight8(tx_ant) == max_nss)
+ if (hweight8(tx_ant) == max_nss && band_idx < MT_BAND2)
+ phy->mt76->chainmask = ((dev->chainmask >> shift) &
+ (BIT(dev->chainshift[band_idx + 1] - shift) - 1)) << shift;
+ else if (hweight8(tx_ant) == max_nss)
phy->mt76->chainmask = (dev->chainmask >> shift) << shift;
else
phy->mt76->chainmask = tx_ant << shift;
mt76_set_stream_caps(phy->mt76, true);
mt7996_set_stream_vht_txbf_caps(phy);
- mt7996_set_stream_he_caps(phy);
+ mt7996_set_stream_he_eht_caps(phy);
mutex_unlock(&dev->mt76.mutex);
@@ -1081,10 +1084,14 @@ static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_mode_he_ext_su",
"v_tx_mode_he_tb",
"v_tx_mode_he_mu",
+ "v_tx_mode_eht_su",
+ "v_tx_mode_eht_trig",
+ "v_tx_mode_eht_mu",
"v_tx_bw_20",
"v_tx_bw_40",
"v_tx_bw_80",
"v_tx_bw_160",
+ "v_tx_bw_320",
"v_tx_mcs_0",
"v_tx_mcs_1",
"v_tx_mcs_2",
@@ -1097,6 +1104,8 @@ static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_mcs_9",
"v_tx_mcs_10",
"v_tx_mcs_11",
+ "v_tx_mcs_12",
+ "v_tx_mcs_13",
};
#define MT7996_SSTATS_LEN ARRAY_SIZE(mt7996_gstrings_stats)
@@ -1130,7 +1139,7 @@ static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->stats);
+ mt76_ethtool_worker(wi, &msta->stats, true);
}
static
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 04e1d10bbd21..dbe30832fd88 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -70,6 +70,7 @@ struct mt7996_fw_region {
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
+#define EHT_PHY(p, c) u8_get_bits(c, IEEE80211_EHT_PHY_##p)
static bool sr_scene_detect = true;
module_param(sr_scene_detect, bool, 0644);
@@ -335,6 +336,9 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
r = (struct mt7996_mcu_rdd_report *)skb->data;
+ if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
+ return;
+
mphy = dev->mt76.phys[r->band_idx];
if (!mphy)
return;
@@ -412,6 +416,9 @@ mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb)
struct header *hdr = (struct header *)data;
struct tlv *tlv = (struct tlv *)(data + 4);
+ if (hdr->band >= ARRAY_SIZE(dev->mt76.phys))
+ return;
+
if (hdr->band && dev->mt76.phys[hdr->band])
mphy = dev->mt76.phys[hdr->band];
@@ -765,9 +772,8 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
bss->dtim_period = vif->bss_conf.dtim_period;
bss->phymode = mt76_connac_get_phy_mode(phy, vif,
chandef->chan->band, NULL);
-
- if (chandef->chan->band == NL80211_BAND_6GHZ)
- bss->phymode_ext |= PHY_MODE_AX_6G;
+ bss->phymode_ext = mt76_connac_get_phy_mode_ext(phy, vif,
+ chandef->chan->band);
return 0;
}
@@ -903,8 +909,8 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he = (struct sta_rec_he_v2 *)tlv;
for (i = 0; i < 11; i++) {
if (i < 6)
- he->he_mac_cap[i] = cpu_to_le16(elem->mac_cap_info[i]);
- he->he_phy_cap[i] = cpu_to_le16(elem->phy_cap_info[i]);
+ he->he_mac_cap[i] = elem->mac_cap_info[i];
+ he->he_phy_cap[i] = elem->phy_cap_info[i];
}
mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
@@ -946,6 +952,35 @@ mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
+mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_eht_mcs_nss_supp *mcs_map;
+ struct ieee80211_eht_cap_elem_fixed *elem;
+ struct sta_rec_eht *eht;
+ struct tlv *tlv;
+
+ if (!sta->deflink.eht_cap.has_eht)
+ return;
+
+ mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp;
+ elem = &sta->deflink.eht_cap.eht_cap_elem;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht));
+
+ eht = (struct sta_rec_eht *)tlv;
+ eht->tid_bitmap = 0xff;
+ eht->mac_cap = cpu_to_le16(*(u16 *)elem->mac_cap_info);
+ eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
+ eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
+ memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
+ memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
+ memcpy(eht->mcs_map_bw320, &mcs_map->bw._320, sizeof(eht->mcs_map_bw320));
+}
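
Note on the raw casts above: mt7996_mcu_sta_eht_tlv loads multi-byte fields straight out of u8 capability arrays. A portable alternative, sketched with the kernel's unaligned accessors (illustration only, not part of this patch; the 802.11 element is little-endian on the wire):

	eht->mac_cap = cpu_to_le16(get_unaligned_le16(elem->mac_cap_info));
	eht->phy_cap = cpu_to_le64(get_unaligned_le64(elem->phy_cap_info));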
+
+static void
mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
struct sta_rec_ht *ht;
@@ -1019,15 +1054,27 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool bfee)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
+ int sts = hweight16(phy->mt76->chainmask);
if (vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP)
return false;
- if (!bfee && tx_ant < 2)
+ if (!bfee && sts < 2)
return false;
+ if (sta->deflink.eht_cap.has_eht) {
+ struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
+ struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
+
+ if (bfee)
+ return mvif->cap.eht_su_ebfee &&
+ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ else
+ return mvif->cap.eht_su_ebfer &&
+ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ }
+
if (sta->deflink.he_cap.has_he) {
struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
@@ -1185,12 +1232,68 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
}
static void
+mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ struct mt7996_phy *phy, struct sta_rec_bf *bf)
+{
+ struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
+ struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp *eht_nss = &pc->eht_mcs_nss_supp;
+ const struct ieee80211_sta_eht_cap *vc =
+ mt76_connac_get_eht_phy_cap(phy->mt76, vif);
+ const struct ieee80211_eht_cap_elem_fixed *ve = &vc->eht_cap_elem;
+ u8 nss_mcs = u8_get_bits(eht_nss->bw._80.rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_RX) - 1;
+ u8 snd_dim, sts;
+
+ bf->tx_mode = MT_PHY_TYPE_EHT_MU;
+
+ mt7996_mcu_sta_sounding_rate(bf);
+
+ bf->trigger_su = EHT_PHY(CAP3_TRIG_SU_BF_FDBK, pe->phy_cap_info[3]);
+ bf->trigger_mu = EHT_PHY(CAP3_TRIG_MU_BF_PART_BW_FDBK, pe->phy_cap_info[3]);
+ snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_80MHZ_MASK, ve->phy_cap_info[2]);
+ sts = EHT_PHY(CAP0_BEAMFORMEE_SS_80MHZ_MASK, pe->phy_cap_info[0]) +
+ (EHT_PHY(CAP1_BEAMFORMEE_SS_80MHZ_MASK, pe->phy_cap_info[1]) << 1);
+ bf->nrow = min_t(u8, snd_dim, sts);
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = bf->ncol;
+
+ if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_160)
+ return;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_160MHZ_MASK, ve->phy_cap_info[2]);
+ sts = EHT_PHY(CAP1_BEAMFORMEE_SS_160MHZ_MASK, pe->phy_cap_info[1]);
+ nss_mcs = u8_get_bits(eht_nss->bw._160.rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_RX) - 1;
+
+ bf->nrow_gt_bw80 = min_t(u8, snd_dim, sts);
+ bf->ncol_gt_bw80 = nss_mcs;
+ break;
+ case IEEE80211_STA_RX_BW_320:
+ snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_320MHZ_MASK, ve->phy_cap_info[2]) +
+ (EHT_PHY(CAP3_SOUNDING_DIM_320MHZ_MASK,
+ ve->phy_cap_info[3]) << 1);
+ sts = EHT_PHY(CAP1_BEAMFORMEE_SS_320MHZ_MASK, pe->phy_cap_info[1]);
+ nss_mcs = u8_get_bits(eht_nss->bw._320.rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_RX) - 1;
+
+ bf->nrow_gt_bw80 = min_t(u8, snd_dim, sts) << 4;
+ bf->ncol_gt_bw80 = nss_mcs << 4;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_phy *phy = mvif->phy;
- int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
+ int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
const u8 matrix[4][4] = {
@@ -1211,11 +1314,13 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
- /* he: eBF only, in accordance with spec
+ /* he/eht: eBF only, in accordance with spec
* vht: support eBF and iBF
* ht: iBF only, since mac80211 lacks of eBF support
*/
- if (sta->deflink.he_cap.has_he && ebf)
+ if (sta->deflink.eht_cap.has_eht && ebf)
+ mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf);
+ else if (sta->deflink.he_cap.has_he && ebf)
mt7996_mcu_sta_bfer_he(sta, vif, phy, bf);
else if (sta->deflink.vht_cap.vht_supported)
mt7996_mcu_sta_bfer_vht(sta, phy, bf, ebf);
@@ -1430,8 +1535,9 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
ra->auto_rate = true;
ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
ra->channel = chandef->chan->hw_value;
- ra->bw = sta->deflink.bandwidth;
- ra->phy.bw = sta->deflink.bandwidth;
+ ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ?
+ CMD_CBW_320MHZ : sta->deflink.bandwidth;
+ ra->phy.bw = ra->bw;
ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode);
if (supp_rate) {
@@ -1613,6 +1719,8 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
mt7996_mcu_sta_he_tlv(skb, sta);
/* starec he 6g */
mt7996_mcu_sta_he_6g_tlv(skb, sta);
+ /* starec eht */
+ mt7996_mcu_sta_eht_tlv(skb, sta);
/* TODO: starec muru */
/* starec bfee */
mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta);
@@ -1809,6 +1917,7 @@ mt7996_mcu_beacon_check_caps(struct mt7996_phy *phy, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_vif_cap *vc = &mvif->cap;
+ const struct ieee80211_eht_cap_elem_fixed *eht;
const struct ieee80211_he_cap_elem *he;
const struct ieee80211_vht_cap *vht;
const struct ieee80211_ht_cap *ht;
@@ -1879,6 +1988,23 @@ mt7996_mcu_beacon_check_caps(struct mt7996_phy *phy, struct ieee80211_vif *vif,
HE_PHY(CAP4_MU_BEAMFORMER, he->phy_cap_info[4]) &&
HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4]);
}
+
+ ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_CAPABILITY,
+ mgmt->u.beacon.variable, len);
+ if (ie && ie[1] >= sizeof(*eht) + 1) {
+ const struct ieee80211_sta_eht_cap *pc =
+ mt76_connac_get_eht_phy_cap(phy->mt76, vif);
+ const struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
+
+ eht = (void *)(ie + 3);
+
+ vc->eht_su_ebfer =
+ EHT_PHY(CAP0_SU_BEAMFORMER, eht->phy_cap_info[0]) &&
+ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ vc->eht_su_ebfee =
+ EHT_PHY(CAP0_SU_BEAMFORMEE, eht->phy_cap_info[0]) &&
+ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ }
}
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
@@ -2241,6 +2367,26 @@ mt7996_firmware_state(struct mt7996_dev *dev, bool wa)
return 0;
}
+static int
+mt7996_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 power_mode;
+ u8 __rsv2[3];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_POWER_OFF),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .power_mode = 1,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_WM_UNI_CMD(POWER_CTRL), &req,
+ sizeof(req), false);
+}
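
For reference, the UNI request framing used here is a 4-byte reserved prefix consumed by the MCU transport, followed by a tag/len TLV whose length excludes the prefix — hence sizeof(req) - 4. A standalone sketch of the arithmetic (plain C11; hypothetical struct name):

	#include <stdint.h>

	struct uni_power_off_req {
		uint8_t rsv[4];		/* consumed by the MCU transport */
		uint16_t tag;		/* UNI_POWER_OFF */
		uint16_t len;		/* covers tag..pad, i.e. sizeof - 4 */
		uint8_t power_mode;
		uint8_t pad[3];
	};

	_Static_assert(sizeof(struct uni_power_off_req) - 4 == 8,
		       "TLV length covers everything after the prefix");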
+
static int mt7996_load_firmware(struct mt7996_dev *dev)
{
int ret;
@@ -2248,7 +2394,7 @@ static int mt7996_load_firmware(struct mt7996_dev *dev)
/* make sure fw is download state */
if (mt7996_firmware_state(dev, false)) {
/* restart firmware once */
- __mt76_mcu_restart(&dev->mt76);
+ mt7996_mcu_restart(&dev->mt76);
ret = mt7996_firmware_state(dev, false);
if (ret) {
dev_err(dev->mt76.dev,
@@ -2377,33 +2523,12 @@ mt7996_mcu_init_rx_airtime(struct mt7996_dev *dev)
MCU_WM_UNI_CMD(VOW), true);
}
-static int
-mt7996_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 __rsv1[4];
-
- __le16 tag;
- __le16 len;
- u8 power_mode;
- u8 __rsv2[3];
- } __packed req = {
- .tag = cpu_to_le16(UNI_POWER_OFF),
- .len = cpu_to_le16(sizeof(req) - 4),
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_WM_UNI_CMD(POWER_CREL), &req,
- sizeof(req), false);
-}
-
int mt7996_mcu_init(struct mt7996_dev *dev)
{
static const struct mt76_mcu_ops mt7996_mcu_ops = {
.headroom = sizeof(struct mt76_connac2_mcu_txd), /* reuse */
.mcu_skb_send_msg = mt7996_mcu_send_message,
.mcu_parse_response = mt7996_mcu_parse_response,
- .mcu_restart = mt7996_mcu_restart,
};
int ret;
@@ -2451,16 +2576,17 @@ int mt7996_mcu_init(struct mt7996_dev *dev)
void mt7996_mcu_exit(struct mt7996_dev *dev)
{
- __mt76_mcu_restart(&dev->mt76);
+ mt7996_mcu_restart(&dev->mt76);
if (mt7996_firmware_state(dev, false)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
- return;
+ goto out;
}
mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
if (dev->hif2)
mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
MT_TOP_LPCR_HOST_FW_OWN);
+out:
skb_queue_purge(&dev->mt76.mcu.res_q);
}
@@ -2921,8 +3047,9 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
bool valid;
int ret;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL), &req,
- sizeof(req), true, &skb);
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL),
+ &req, sizeof(req), true, &skb);
if (ret)
return ret;
@@ -2970,6 +3097,52 @@ int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num)
return 0;
}
+int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap)
+{
+#define NIC_CAP 3
+#define UNI_EVENT_CHIP_CONFIG_EFUSE_VERSION 0x21
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ } __packed req = {
+ .tag = cpu_to_le16(NIC_CAP),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ };
+ struct sk_buff *skb;
+ u8 *buf;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_WM_UNI_CMD_QUERY(CHIP_CONFIG), &req,
+ sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ /* fixed field */
+ skb_pull(skb, 4);
+
+ buf = skb->data;
+ while (buf - skb->data < skb->len) {
+ struct tlv *tlv = (struct tlv *)buf;
+
+ switch (le16_to_cpu(tlv->tag)) {
+ case UNI_EVENT_CHIP_CONFIG_EFUSE_VERSION:
+ *cap = le32_to_cpu(*(__le32 *)(buf + sizeof(*tlv)));
+ break;
+ default:
+ break;
+ }
+
+ buf += le16_to_cpu(tlv->len);
+ }
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
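
The TLV walk above trusts tlv->len as reported by the firmware. A defensive variant would bound each step; a minimal sketch, assuming len covers the header too and using host byte order for brevity:

	#include <stdint.h>
	#include <stddef.h>

	struct tlv_hdr { uint16_t tag, len; };

	/* Returns the next TLV, or NULL on truncated/zero-length input. */
	static const struct tlv_hdr *tlv_next(const uint8_t *buf, size_t total,
					      size_t *off)
	{
		const struct tlv_hdr *t;

		if (total - *off < sizeof(*t))	/* *off never exceeds total */
			return NULL;
		t = (const struct tlv_hdr *)(buf + *off);
		if (t->len < sizeof(*t) || t->len > total - *off)
			return NULL;		/* malformed length: stop */
		*off += t->len;
		return t;
	}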
+
int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
{
struct {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 6084b2337598..dd0c5ac52703 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -347,6 +347,21 @@ struct sta_rec_ba_uni {
u8 __rsv[3];
} __packed;
+struct sta_rec_eht {
+ __le16 tag;
+ __le16 len;
+ u8 tid_bitmap;
+ u8 _rsv;
+ __le16 mac_cap;
+ __le64 phy_cap;
+ __le64 phy_cap_ext;
+ u8 mcs_map_bw20[4];
+ u8 mcs_map_bw80[3];
+ u8 mcs_map_bw160[3];
+ u8 mcs_map_bw320[3];
+ u8 _rsv2[3];
+} __packed;
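
Adding up the fields, the record is 40 bytes. A userspace mirror with a size check (sketch, not kernel code):

	#include <stdint.h>

	struct sta_rec_eht_mirror {
		uint16_t tag, len;
		uint8_t tid_bitmap, rsv;
		uint16_t mac_cap;
		uint64_t phy_cap, phy_cap_ext;
		uint8_t mcs_map_bw20[4], mcs_map_bw80[3], mcs_map_bw160[3],
			mcs_map_bw320[3], rsv2[3];
	} __attribute__((packed));

	_Static_assert(sizeof(struct sta_rec_eht_mirror) == 40, "40-byte TLV");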
+
struct sec_key_uni {
__le16 wlan_idx;
u8 mgmt_prot;
@@ -554,6 +569,7 @@ enum {
sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
+ sizeof(struct sta_rec_eht) + \
sizeof(struct sta_rec_hdrt) + \
sizeof(struct sta_rec_hdr_trans) + \
sizeof(struct tlv))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 521769eb6b0e..902370a2a639 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -21,6 +21,7 @@ static const struct __base mt7996_reg_base[] = {
[WF_ETBF_BASE] = { { 0x820ea000, 0x820fa000, 0x830ea000 } },
[WF_LPON_BASE] = { { 0x820eb000, 0x820fb000, 0x830eb000 } },
[WF_MIB_BASE] = { { 0x820ed000, 0x820fd000, 0x830ed000 } },
+ [WF_RATE_BASE] = { { 0x820ee000, 0x820fe000, 0x830ee000 } },
};
static const struct __map mt7996_reg_map[] = {
@@ -149,7 +150,7 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
if (dev_is_pci(dev->mt76.dev) &&
((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
- (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
+ addr >= MT_CBTOP2_PHY_START))
return mt7996_reg_map_l1(dev, addr);
/* CONN_INFRA: convert to physical addr and use layer 1 remap */
@@ -317,7 +318,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7996_txp),
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE |
MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
@@ -325,7 +326,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7996_TOKEN_SIZE,
.tx_prepare_skb = mt7996_tx_prepare_skb,
- .tx_complete_skb = mt7996_tx_complete_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
.rx_skb = mt7996_queue_rx_skb,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 725344791b4c..018dfd2b36b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -11,12 +11,11 @@
#include "../mt76_connac.h"
#include "regs.h"
-#define MT7996_MAX_INTERFACES 19
+#define MT7996_MAX_INTERFACES 19 /* per-band */
#define MT7996_MAX_WMM_SETS 4
-#define MT7996_WTBL_SIZE 544
-#define MT7996_WTBL_RESERVED (MT7996_WTBL_SIZE - 1)
+#define MT7996_WTBL_RESERVED (mt7996_wtbl_size(dev) - 1)
#define MT7996_WTBL_STA (MT7996_WTBL_RESERVED - \
- MT7996_MAX_INTERFACES)
+ mt7996_max_interface_num(dev))
#define MT7996_WATCHDOG_TIME (HZ / 10)
#define MT7996_RESET_TIMEOUT (30 * HZ)
@@ -124,6 +123,8 @@ struct mt7996_vif_cap {
bool he_su_ebfer:1;
bool he_su_ebfee:1;
bool he_mu_ebfer:1;
+ bool eht_su_ebfer:1;
+ bool eht_su_ebfee:1;
};
struct mt7996_vif {
@@ -264,6 +265,7 @@ struct mt7996_dev {
bool dbdc_support:1;
bool tbtc_support:1;
bool flash_mode:1;
+ bool has_eht:1;
bool ibf;
u8 fw_debug_wm;
@@ -281,6 +283,8 @@ struct mt7996_dev {
u32 reg_l1_backup;
u32 reg_l2_backup;
+
+ u8 wtbl_size_group;
};
enum {
@@ -419,6 +423,7 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
int mt7996_mcu_set_eeprom(struct mt7996_dev *dev);
int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset);
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num);
+int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap);
int mt7996_mcu_set_ser(struct mt7996_dev *dev, u8 action, u8 set, u8 band);
int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action);
int mt7996_mcu_set_fcc5_lpn(struct mt7996_dev *dev, int val);
@@ -443,6 +448,16 @@ int mt7996_mcu_fw_dbg_ctrl(struct mt7996_dev *dev, u32 module, u8 level);
void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb);
void mt7996_mcu_exit(struct mt7996_dev *dev);
+static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev)
+{
+ return MT7996_MAX_INTERFACES * (1 + dev->dbdc_support + dev->tbtc_support);
+}
+
+static inline u16 mt7996_wtbl_size(struct mt7996_dev *dev)
+{
+ return (dev->wtbl_size_group << 8) + 64;
+}
+
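The two helpers replace the former compile-time constants with firmware-derived values; the arithmetic, stand-alone (illustration only):

	#include <stdio.h>

	/* wtbl size = (group << 8) + 64; interface count scales per band */
	int main(void)
	{
		for (unsigned int group = 1; group <= 4; group++)
			printf("wtbl_size_group=%u -> %u WTBL entries\n",
			       group, (group << 8) + 64);
		return 0;
	}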
void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
u32 clear, u32 set);
@@ -493,7 +508,6 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7996_tx_token_put(struct mt7996_dev *dev);
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
@@ -502,7 +516,7 @@ void mt7996_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7996_stats_work(struct work_struct *work);
int mt76_dfs_start_rdd(struct mt7996_dev *dev, bool force);
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy);
-void mt7996_set_stream_he_caps(struct mt7996_phy *phy);
+void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy);
void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy);
void mt7996_update_channel(struct mt76_phy *mphy);
int mt7996_init_debugfs(struct mt7996_phy *phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index 794f61b93a46..7a28cae34e34 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -33,6 +33,7 @@ enum base_rev {
WF_ETBF_BASE,
WF_LPON_BASE,
WF_MIB_BASE,
+ WF_RATE_BASE,
__MT_REG_BASE_MAX,
};
@@ -235,13 +236,6 @@ enum base_rev {
FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
-/* AGG: band 0(0x820e2000), band 1(0x820f2000), band 2(0x830e2000) */
-#define MT_WF_AGG_BASE(_band) __BASE(WF_AGG_BASE, (_band))
-#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
-
-#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x054)
-#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
-
/* ARB: band 0(0x820e3000), band 1(0x820f3000), band 2(0x830e3000) */
#define MT_WF_ARB_BASE(_band) __BASE(WF_ARB_BASE, (_band))
#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
@@ -300,6 +294,13 @@ enum base_rev {
#define MT_WF_RMAC_RSVD0(_band) MT_WF_RMAC(_band, 0x03e0)
#define MT_WF_RMAC_RSVD0_EIFS_CLR BIT(21)
+/* RATE: band 0(0x820ee000), band 1(0x820fe000), band 2(0x830ee000) */
+#define MT_WF_RATE_BASE(_band) __BASE(WF_RATE_BASE, (_band))
+#define MT_WF_RATE(_band, ofs) (MT_WF_RATE_BASE(_band) + (ofs))
+
+#define MT_RATE_HRCR0(_band) MT_WF_RATE(_band, 0x050)
+#define MT_RATE_HRCR0_CFEND_RATE GENMASK(14, 0)
+
/* WFDMA0 */
#define MT_WFDMA0_BASE 0xd4000
#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
@@ -463,7 +464,6 @@ enum base_rev {
#define MT_CBTOP1_PHY_START 0x70000000
#define MT_CBTOP1_PHY_END 0x77ffffff
#define MT_CBTOP2_PHY_START 0xf0000000
-#define MT_CBTOP2_PHY_END 0xffffffff
#define MT_INFRA_MCU_START 0x7c000000
#define MT_INFRA_MCU_END 0x7c3fffff
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 228bc7d45011..419723118ded 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -562,6 +562,10 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
q->entry[q->head].buf_sz = len;
q->entry[q->head].skb = skb;
+
+ /* ensure the entry is fully updated before bus access */
+ smp_wmb();
+
q->head = (q->head + 1) % q->ndesc;
q->queued++;
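
The barrier orders the q->entry writes against the q->head update, so a consumer that observes the new head also sees a complete entry. A userspace analogue with C11 release/acquire semantics (conceptual sketch, not the mt76 consumer):

	#include <stdatomic.h>

	struct entry { int buf_sz; void *skb; };
	static struct entry ring[64];
	static _Atomic unsigned int head;

	static void publish(unsigned int h, struct entry e)
	{
		ring[h % 64] = e;			/* fill the slot first */
		atomic_store_explicit(&head, h + 1,	/* then publish it */
				      memory_order_release);
	}

	static int consume(unsigned int tail, struct entry *out)
	{
		if (atomic_load_explicit(&head,
					 memory_order_acquire) == tail)
			return 0;			/* nothing new yet */
		*out = ring[tail % 64];
		return 1;
	}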
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index bfc4de50a4d2..ddd8c0cc744d 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -254,6 +254,10 @@ static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
__skb_put_zero(e->skb, 4);
+ err = __skb_grow(e->skb, roundup(e->skb->len,
+ sdio->func->cur_blksize));
+ if (err)
+ return err;
err = __mt76s_xmit_queue(dev, e->skb->data,
e->skb->len);
if (err)
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 3e281715fcd4..b88959ef38aa 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -319,29 +319,27 @@ mt76u_set_endpoints(struct usb_interface *intf,
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
- int nsgs, gfp_t gfp)
+ int nsgs)
{
int i;
for (i = 0; i < nsgs; i++) {
- struct page *page;
void *data;
int offset;
- data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+ data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
if (!data)
break;
- page = virt_to_head_page(data);
- offset = data - page_address(page);
- sg_set_page(&urb->sg[i], page, q->buf_size, offset);
+ sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
+ offset);
}
if (i < nsgs) {
int j;
for (j = nsgs; j < urb->num_sgs; j++)
- skb_free_frag(sg_virt(&urb->sg[j]));
+ mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
urb->num_sgs = i;
}
@@ -354,15 +352,16 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
- struct urb *urb, int nsgs, gfp_t gfp)
+ struct urb *urb, int nsgs)
{
enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ int offset;
if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
- return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs);
urb->transfer_buffer_length = q->buf_size;
- urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+ urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
return urb->transfer_buffer ? 0 : -ENOMEM;
}
@@ -400,7 +399,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
if (err)
return err;
- return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
+ return mt76u_refill_rx(dev, q, e->urb, sg_size);
}
static void mt76u_urb_free(struct urb *urb)
@@ -408,10 +407,10 @@ static void mt76u_urb_free(struct urb *urb)
int i;
for (i = 0; i < urb->num_sgs; i++)
- skb_free_frag(sg_virt(&urb->sg[i]));
+ mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
if (urb->transfer_buffer)
- skb_free_frag(urb->transfer_buffer);
+ mt76_put_page_pool_buf(urb->transfer_buffer, false);
usb_free_urb(urb);
}
@@ -547,6 +546,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
len -= data_len;
nsgs++;
}
+
+ skb_mark_for_recycle(skb);
dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
return nsgs;
@@ -612,7 +613,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
count = mt76u_process_rx_entry(dev, urb, q->buf_size);
if (count > 0) {
- err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
+ err = mt76u_refill_rx(dev, q, urb, count);
if (err < 0)
break;
}
@@ -663,6 +664,10 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
struct mt76_queue *q = &dev->q_rx[qid];
int i, err;
+ err = mt76_create_page_pool(dev, q);
+ if (err)
+ return err;
+
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -691,7 +696,6 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct page *page;
int i;
for (i = 0; i < q->ndesc; i++) {
@@ -701,13 +705,7 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
mt76u_urb_free(q->entry[i].urb);
q->entry[i].urb = NULL;
}
-
- if (!q->rx_page.va)
- return;
-
- page = virt_to_page(q->rx_page.va);
- __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
- memset(&q->rx_page, 0, sizeof(q->rx_page));
+ page_pool_destroy(q->page_pool);
}
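
page_pool_create()/page_pool_destroy() now bracket the queue lifetime instead of the ad-hoc page_frag cache. Roughly, the allocation side pairs with the destroy above (simplified sketch; the real parameters live in include/net/page_pool.h):

	static int rx_queue_pool_init(struct mt76_dev *dev, struct mt76_queue *q)
	{
		struct page_pool_params pp = {
			.pool_size = 128,	/* assumption: sized to the rx ring */
			.dev = dev->dev,
		};

		q->page_pool = page_pool_create(&pp);
		return PTR_ERR_OR_ZERO(q->page_pool);
	}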
static void mt76u_free_rx(struct mt76_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 581964425468..fc76c66ff1a5 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -24,23 +24,23 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
}
EXPORT_SYMBOL_GPL(__mt76_poll);
-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
- int timeout)
+bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout, int tick)
{
u32 cur;
- timeout /= 10;
+ timeout /= tick;
do {
cur = __mt76_rr(dev, offset) & mask;
if (cur == val)
return true;
- usleep_range(10000, 20000);
+ usleep_range(1000 * tick, 2000 * tick);
} while (timeout-- > 0);
return false;
}
-EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+EXPORT_SYMBOL_GPL(____mt76_poll_msec);
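
Existing callers keep the old 10 ms granularity through a wrapper; presumably something like the following lives in mt76.h (assumption — the header is not part of this hunk):

	/* assumed wrapper, not shown in this diff */
	#define __mt76_poll_msec(dev, offset, mask, val, timeout) \
		____mt76_poll_msec(dev, offset, mask, val, timeout, 10)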
int mt76_wcid_alloc(u32 *mask, int size)
{
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 457147394edc..773a1cc2f852 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -123,7 +123,8 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
if (data_len < min_seg_len ||
WARN_ON_ONCE(!dma_len) ||
WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
- WARN_ON_ONCE(dma_len & 0x3))
+ WARN_ON_ONCE(dma_len & 0x3) ||
+ WARN_ON_ONCE(dma_len < min_seg_len))
return 0;
return MT_DMA_HDRS + dma_len;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 9b319a455b96..e9f59de31b0b 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -730,6 +730,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb->dev != ndev) {
netdev_err(ndev, "Packet not destined to this device\n");
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -980,7 +981,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
ndev->name);
if (!wl->hif_workqueue) {
ret = -ENOMEM;
- goto error;
+ goto unregister_netdev;
}
ndev->needs_free_netdev = true;
@@ -995,6 +996,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
return vif;
+unregister_netdev:
+ if (rtnl_locked)
+ cfg80211_unregister_netdevice(ndev);
+ else
+ unregister_netdev(ndev);
error:
free_netdev(ndev);
return ERR_PTR(ret);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 4fafe370101a..31bc58e96ac0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -478,7 +478,7 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0);
mutex_unlock(&vif->wdev.mtx);
}
@@ -662,6 +662,7 @@ qtnf_event_handle_update_owe(struct qtnf_vif *vif,
memcpy(ie, owe_ev->ies, ie_len);
owe_info.ie_len = ie_len;
owe_info.ie = ie;
+ owe_info.assoc_link_id = -1;
pr_info("%s: external OWE processing: peer=%pM\n",
vif->netdev->name, owe_ev->peer);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 12b700c7b9c3..1226a883cd67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -8924,8 +8924,6 @@ static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
if (i < 2 && (bbptemp & 0x800000))
result = (bbptemp & 0xffffff) - 0x1000000;
- else if (i == 4)
- result = bbptemp;
else
result = bbptemp;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
index 631d078278be..2eed20b0988c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -5,12 +5,13 @@
config RTL8XXXU
tristate "Realtek 802.11n USB wireless chips support"
depends on MAC80211 && USB
+ depends on LEDS_CLASS
help
This is an alternative driver for various Realtek RTL8XXX
parts written to utilize the Linux mac80211 stack.
The driver is known to work with a number of RTL8723AU,
RTL8188CU, RTL8188RU, RTL8191CU, RTL8192CU, RTL8723BU, RTL8192EU,
- and RTL8188FU devices.
+ RTL8188FU, and RTL8188EU devices.
This driver is under development and has a limited feature
set. In particular it does not yet support 40MHz channels
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
index c4ad5325f5e7..0cb58fb30228 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Makefile
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -2,4 +2,5 @@
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
rtl8xxxu-y := rtl8xxxu_core.o rtl8xxxu_8192e.o rtl8xxxu_8723b.o \
- rtl8xxxu_8723a.o rtl8xxxu_8192c.o rtl8xxxu_8188f.o
+ rtl8xxxu_8723a.o rtl8xxxu_8192c.o rtl8xxxu_8188f.o \
+ rtl8xxxu_8188e.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index d26df4095da0..c8cee4a24755 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -36,6 +36,7 @@
#define TX_TOTAL_PAGE_NUM 0xf8
#define TX_TOTAL_PAGE_NUM_8188F 0xf7
+#define TX_TOTAL_PAGE_NUM_8188E 0xa9
#define TX_TOTAL_PAGE_NUM_8192E 0xf3
#define TX_TOTAL_PAGE_NUM_8723B 0xf7
/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
@@ -49,6 +50,11 @@
#define TX_PAGE_NUM_LO_PQ_8188F 0x02
#define TX_PAGE_NUM_NORM_PQ_8188F 0x02
+#define TX_PAGE_NUM_PUBQ_8188E 0x47
+#define TX_PAGE_NUM_HI_PQ_8188E 0x29
+#define TX_PAGE_NUM_LO_PQ_8188E 0x1c
+#define TX_PAGE_NUM_NORM_PQ_8188E 0x1c
+
#define TX_PAGE_NUM_PUBQ_8192E 0xe7
#define TX_PAGE_NUM_HI_PQ_8192E 0x08
#define TX_PAGE_NUM_LO_PQ_8192E 0x0c
@@ -153,7 +159,8 @@ struct rtl8xxxu_rxdesc16 {
u32 htc:1;
u32 eosp:1;
u32 bssidfit:2;
- u32 reserved1:16;
+ u32 rpt_sel:2; /* 8188e */
+ u32 reserved1:14;
u32 unicastwake:1;
u32 magicwake:1;
@@ -211,7 +218,8 @@ struct rtl8xxxu_rxdesc16 {
u32 magicwake:1;
u32 unicastwake:1;
- u32 reserved1:16;
+ u32 reserved1:14;
+ u32 rpt_sel:2; /* 8188e */
u32 bssidfit:2;
u32 eosp:1;
u32 htc:1;
@@ -502,6 +510,8 @@ struct rtl8xxxu_txdesc40 {
#define TXDESC_AMPDU_DENSITY_SHIFT 20
#define TXDESC40_BT_INT BIT(23)
#define TXDESC40_GID_SHIFT 24
+#define TXDESC_ANTENNA_SELECT_A BIT(24)
+#define TXDESC_ANTENNA_SELECT_B BIT(25)
/* Word 3 */
#define TXDESC40_USE_DRIVER_RATE BIT(8)
@@ -521,6 +531,7 @@ struct rtl8xxxu_txdesc40 {
#define TXDESC32_CTS_SELF_ENABLE BIT(11)
#define TXDESC32_RTS_CTS_ENABLE BIT(12)
#define TXDESC32_HW_RTS_ENABLE BIT(13)
+#define TXDESC32_PT_STAGE_MASK GENMASK(17, 15)
#define TXDESC_PRIME_CH_OFF_LOWER BIT(20)
#define TXDESC_PRIME_CH_OFF_UPPER BIT(21)
#define TXDESC32_SHORT_PREAMBLE BIT(24)
@@ -546,6 +557,10 @@ struct rtl8xxxu_txdesc40 {
/* Word 6 */
#define TXDESC_MAX_AGG_SHIFT 11
+#define TXDESC_USB_TX_AGG_SHIT 24
+
+/* Word 7 */
+#define TXDESC_ANTENNA_SELECT_C BIT(29)
/* Word 8 */
#define TXDESC40_HW_SEQ_ENABLE BIT(15)
@@ -562,6 +577,9 @@ struct phy_rx_agc_info {
#endif
};
+#define CCK_AGC_RPT_LNA_IDX_MASK GENMASK(7, 5)
+#define CCK_AGC_RPT_VGA_IDX_MASK GENMASK(4, 0)
+
struct rtl8723au_phy_stats {
struct phy_rx_agc_info path_agc[RTL8723A_MAX_RF_PATHS];
u8 ch_corr[RTL8723A_MAX_RF_PATHS];
@@ -909,6 +927,42 @@ struct rtl8188fu_efuse {
u8 res11[0xc3];
};
+struct rtl8188eu_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+ struct rtl8192eu_efuse_tx_power tx_power_index_A; /* 0x10 */
+ u8 res1[0x7e]; /* 0x3a */
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 res2[5];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 res3[3];
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 res4[6];
+ u8 vid; /* 0xd0 */
+ u8 res5[1];
+ u8 pid; /* 0xd2 */
+ u8 res6[1];
+ u8 usb_optional_function;
+ u8 res7[2];
+ u8 mac_addr[ETH_ALEN]; /* 0xd7 */
+ u8 res8[2];
+ u8 vendor_name[7];
+ u8 res9[2];
+ u8 device_name[0x0b]; /* 0xe8 */
+ u8 res10[2];
+ u8 serial[0x0b]; /* 0xf5 */
+ u8 res11[0x30];
+ u8 unknown[0x0d]; /* 0x130 */
+ u8 res12[0xc3];
+} __packed;
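
The offsets noted in the comments can be pinned down at build time; a hypothetical self-check, not in the patch:

	static inline void rtl8188eu_efuse_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct rtl8188eu_efuse, vid) != 0xd0);
		BUILD_BUG_ON(offsetof(struct rtl8188eu_efuse, pid) != 0xd2);
		BUILD_BUG_ON(offsetof(struct rtl8188eu_efuse, mac_addr) != 0xd7);
		BUILD_BUG_ON(offsetof(struct rtl8188eu_efuse, serial) != 0xf5);
	}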
+
struct rtl8xxxu_reg8val {
u16 reg;
u8 val;
@@ -1114,6 +1168,26 @@ struct h2c_cmd {
u8 cmd;
u8 data;
} __packed bt_grant;
+ struct {
+ u8 cmd;
+ u8 macid;
+ u8 unknown0;
+ u8 rssi;
+ /*
+ * [0] - is_rx
+ * [1] - stbc_en
+ * [2] - noisy_decision
+ * [6] - bf_en
+ */
+ u8 data;
+ /*
+ * [0:6] - ra_th_offset
+ * [7] - ra_offset_direction
+ */
+ u8 ra_th_offset;
+ u8 unknown1;
+ u8 unknown2;
+ } __packed rssi_report;
};
};
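
A hedged example of how the new rssi_report member might be filled by a subdriver (hypothetical command id; the real user is the 8188e code added by this series):

	static void example_report_rssi(struct rtl8xxxu_priv *priv,
					u8 macid, u8 rssi)
	{
		struct h2c_cmd h2c;

		memset(&h2c, 0, sizeof(h2c));
		h2c.rssi_report.cmd = 0x42;	/* assumption: RSSI report cmd id */
		h2c.rssi_report.macid = macid;
		h2c.rssi_report.rssi = rssi;
		h2c.rssi_report.data |= BIT(0);	/* is_rx, per the field comment */
		rtl8xxxu_gen1_h2c_cmd(priv, &h2c, sizeof(h2c.rssi_report));
	}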
@@ -1323,6 +1397,39 @@ struct rtl8xxxu_ra_report {
u8 desc_rate;
};
+struct rtl8xxxu_ra_info {
+ u8 rate_id;
+ u32 rate_mask;
+ u32 ra_use_rate;
+ u8 rate_sgi;
+ u8 rssi_sta_ra; /* Percentage */
+ u8 pre_rssi_sta_ra;
+ u8 sgi_enable;
+ u8 decision_rate;
+ u8 pre_rate;
+ u8 highest_rate;
+ u8 lowest_rate;
+ u32 nsc_up;
+ u32 nsc_down;
+ u32 total;
+ u16 retry[5];
+ u16 drop;
+ u16 rpt_time;
+ u16 pre_min_rpt_time;
+ u8 dynamic_tx_rpt_timing_counter;
+ u8 ra_waiting_counter;
+ u8 ra_pending_counter;
+ u8 ra_drop_after_down;
+ u8 pt_try_state; /* 0 = trying state, 1 = decision state */
+ u8 pt_stage; /* 0~6 */
+ u8 pt_stop_count; /* Stop PT counter */
+ u8 pt_pre_rate; /* if rate change do PT */
+ u8 pt_pre_rssi; /* if RSSI change 5% do PT */
+ u8 pt_mode_ss; /* decide which rate should do PT */
+ u8 ra_stage; /* StageRA, decide how many times RA will be done between PT */
+ u8 pt_smooth_factor;
+};
+
#define CFO_TH_XTAL_HIGH 20 /* kHz */
#define CFO_TH_XTAL_LOW 10 /* kHz */
#define CFO_TH_ATC 80 /* kHz */
@@ -1336,6 +1443,8 @@ struct rtl8xxxu_cfo_tracking {
u32 packet_count_pre;
};
+#define RTL8XXXU_HW_LED_CONTROL 2
+
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
struct usb_device *udev;
@@ -1432,6 +1541,7 @@ struct rtl8xxxu_priv {
struct rtl8192cu_efuse efuse8192;
struct rtl8192eu_efuse efuse8192eu;
struct rtl8188fu_efuse efuse8188fu;
+ struct rtl8188eu_efuse efuse8188eu;
} efuse_wifi;
u32 adda_backup[RTL8XXXU_ADDA_REGS];
u32 mac_backup[RTL8XXXU_MAC_REGS];
@@ -1455,6 +1565,11 @@ struct rtl8xxxu_priv {
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
struct rtl8xxxu_cfo_tracking cfo_tracking;
+ struct rtl8xxxu_ra_info ra_info;
+
+ bool led_registered;
+ char led_name[32];
+ struct led_classdev led_cdev;
};
struct rtl8xxxu_rx_urb {
@@ -1496,6 +1611,7 @@ struct rtl8xxxu_fileops {
u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
+ void (*report_rssi) (struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
@@ -1503,6 +1619,8 @@ struct rtl8xxxu_fileops {
u32 rts_rate);
void (*set_crystal_cap) (struct rtl8xxxu_priv *priv, u8 crystal_cap);
s8 (*cck_rssi) (struct rtl8xxxu_priv *priv, u8 cck_agc_rpt);
+ int (*led_classdev_brightness_set) (struct led_classdev *led_cdev,
+ enum led_brightness brightness);
int writeN_block_size;
int rx_agg_buf_size;
char tx_desc_size;
@@ -1523,6 +1641,7 @@ struct rtl8xxxu_fileops {
u8 page_num_hi;
u8 page_num_lo;
u8 page_num_norm;
+ u8 last_llt_entry;
};
extern int rtl8xxxu_debug;
@@ -1560,7 +1679,7 @@ int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
enum rtl8xxxu_rfpath path);
int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
const struct rtl8xxxu_reg32val *array);
-int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name);
+int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, const char *fw_name);
void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv);
void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv);
void rtl8xxxu_identify_vendor_1bit(struct rtl8xxxu_priv *priv, u32 vendor);
@@ -1582,6 +1701,8 @@ void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv,
int channel, bool ht40);
+void rtl8188f_set_tx_power(struct rtl8xxxu_priv *priv,
+ int channel, bool ht40);
void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
@@ -1594,6 +1715,8 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
+void rtl8xxxu_gen1_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
+void rtl8xxxu_gen2_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv);
@@ -1602,6 +1725,8 @@ void rtl8xxxu_init_burst(struct rtl8xxxu_priv *priv);
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_gen2_channel_to_group(int channel);
+bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2);
bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
int result[][8], int c1, int c2);
void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
@@ -1614,13 +1739,24 @@ void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
+void rtl8xxxu_fill_txdesc_v3(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
+ bool short_preamble, bool ampdu_enable,
+ u32 rts_rate);
void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5);
void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv);
void rtl8723a_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap);
+void rtl8188f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap);
s8 rtl8723a_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt);
+void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt,
+ u8 rate, u8 sgi, u8 bw);
+void rtl8188e_ra_info_init_all(struct rtl8xxxu_ra_info *ra);
+void rtl8188e_handle_ra_tx_report2(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
extern struct rtl8xxxu_fileops rtl8188fu_fops;
+extern struct rtl8xxxu_fileops rtl8188eu_fops;
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
extern struct rtl8xxxu_fileops rtl8723au_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
new file mode 100644
index 000000000000..a99ddb41cd24
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
@@ -0,0 +1,1899 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RTL8XXXU mac80211 USB driver - 8188e specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@gmail.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+static const struct rtl8xxxu_reg8val rtl8188e_mac_init_table[] = {
+ {0x026, 0x41}, {0x027, 0x35}, {0x040, 0x00}, {0x421, 0x0f},
+ {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x01},
+ {0x432, 0x02}, {0x433, 0x04}, {0x434, 0x05}, {0x435, 0x06},
+ {0x436, 0x07}, {0x437, 0x08}, {0x438, 0x00}, {0x439, 0x00},
+ {0x43a, 0x01}, {0x43b, 0x02}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+ {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+ {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x480, 0x08},
+ {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff},
+ {0x4ce, 0x01}, {0x4d3, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x516, 0x0a}, {0x525, 0x4f}, {0x550, 0x10}, {0x551, 0x10},
+ {0x559, 0x02}, {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e},
+ {0x609, 0x2a}, {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff},
+ {0x623, 0xff}, {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff},
+ {0x627, 0xff}, {0x63c, 0x08}, {0x63d, 0x08}, {0x63e, 0x0c},
+ {0x63f, 0x0c}, {0x640, 0x40}, {0x652, 0x20}, {0x66e, 0x05},
+ {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87},
+ {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65}, {0x70b, 0x87},
+ {0xffff, 0xff},
+};
+
+static const struct rtl8xxxu_reg32val rtl8188eu_phy_init_table[] = {
+ {0x800, 0x80040000}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10001331}, {0x814, 0x020c3d10},
+ {0x818, 0x02200385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00390204},
+ {0x828, 0x00000000}, {0x82c, 0x00000000},
+ {0x830, 0x00000000}, {0x834, 0x00000000},
+ {0x838, 0x00000000}, {0x83c, 0x00000000},
+ {0x840, 0x00010000}, {0x844, 0x00000000},
+ {0x848, 0x00000000}, {0x84c, 0x00000000},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x569a11a9}, {0x85c, 0x01000014},
+ {0x860, 0x66f60110}, {0x864, 0x061f0649},
+ {0x868, 0x00000000}, {0x86c, 0x27272700},
+ {0x870, 0x07000760}, {0x874, 0x25004000},
+ {0x878, 0x00000808}, {0x87c, 0x00000000},
+ {0x880, 0xb0000c1c}, {0x884, 0x00000001},
+ {0x888, 0x00000000}, {0x88c, 0xccc000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x89c, 0x00706050},
+ {0x900, 0x00000000}, {0x904, 0x00000023},
+ {0x908, 0x00000000}, {0x90c, 0x81121111},
+ {0x910, 0x00000002}, {0x914, 0x00000201},
+ {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c},
+ {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f},
+ {0xa10, 0x9500bb7e}, {0xa14, 0x1114d028},
+ {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+ {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+ {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+ {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+ {0xa78, 0x00000900}, {0xa7c, 0x225b0606},
+ {0xa80, 0x218075b1}, {0xb2c, 0x80000000},
+ {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+ {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+ {0xc10, 0x08800000}, {0xc14, 0x40000100},
+ {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+ {0xc20, 0x00000000}, {0xc24, 0x00000000},
+ {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+ {0xc30, 0x69e9ac47}, {0xc34, 0x469652af},
+ {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+ {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+ {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+ {0xc50, 0x69553420}, {0xc54, 0x43bc0094},
+ {0xc58, 0x00013169}, {0xc5c, 0x00250492},
+ {0xc60, 0x00000000}, {0xc64, 0x7112848b},
+ {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+ {0xc70, 0x2c7f000d}, {0xc74, 0x020610db},
+ {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+ {0xc80, 0x390000e4}, {0xc84, 0x21f60000},
+ {0xc88, 0x40000100}, {0xc8c, 0x20200000},
+ {0xc90, 0x00091521}, {0xc94, 0x00000000},
+ {0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+ {0xca0, 0x00000000}, {0xca4, 0x000300a0},
+ {0xca8, 0x00000000}, {0xcac, 0x00000000},
+ {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+ {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+ {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+ {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+ {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+ {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+ {0xce0, 0x00222222}, {0xce4, 0x00000000},
+ {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+ {0xd00, 0x00000740}, {0xd04, 0x00020401},
+ {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+ {0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+ {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00127353},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000282}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d},
+ {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d},
+ {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d},
+ {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d},
+ {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+ {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+ {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+ {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+ {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+ {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+ {0xe5c, 0x28160d05}, {0xe60, 0x00000048},
+ {0xe68, 0x001b25a4}, {0xe6c, 0x00c00014},
+ {0xe70, 0x00c00014}, {0xe74, 0x01000014},
+ {0xe78, 0x01000014}, {0xe7c, 0x01000014},
+ {0xe80, 0x01000014}, {0xe84, 0x00c00014},
+ {0xe88, 0x01000014}, {0xe8c, 0x00c00014},
+ {0xed0, 0x00c00014}, {0xed4, 0x00c00014},
+ {0xed8, 0x00c00014}, {0xedc, 0x00000014},
+ {0xee0, 0x00000014}, {0xee8, 0x21555448},
+ {0xeec, 0x01c00014}, {0xf14, 0x00000003},
+ {0xf4c, 0x00000000}, {0xf00, 0x00000300},
+ {0xffff, 0xffffffff},
+};
+
+static const struct rtl8xxxu_reg32val rtl8188e_agc_table[] = {
+ {0xc78, 0xfb000001}, {0xc78, 0xfb010001},
+ {0xc78, 0xfb020001}, {0xc78, 0xfb030001},
+ {0xc78, 0xfb040001}, {0xc78, 0xfb050001},
+ {0xc78, 0xfa060001}, {0xc78, 0xf9070001},
+ {0xc78, 0xf8080001}, {0xc78, 0xf7090001},
+ {0xc78, 0xf60a0001}, {0xc78, 0xf50b0001},
+ {0xc78, 0xf40c0001}, {0xc78, 0xf30d0001},
+ {0xc78, 0xf20e0001}, {0xc78, 0xf10f0001},
+ {0xc78, 0xf0100001}, {0xc78, 0xef110001},
+ {0xc78, 0xee120001}, {0xc78, 0xed130001},
+ {0xc78, 0xec140001}, {0xc78, 0xeb150001},
+ {0xc78, 0xea160001}, {0xc78, 0xe9170001},
+ {0xc78, 0xe8180001}, {0xc78, 0xe7190001},
+ {0xc78, 0xe61a0001}, {0xc78, 0xe51b0001},
+ {0xc78, 0xe41c0001}, {0xc78, 0xe31d0001},
+ {0xc78, 0xe21e0001}, {0xc78, 0xe11f0001},
+ {0xc78, 0x8a200001}, {0xc78, 0x89210001},
+ {0xc78, 0x88220001}, {0xc78, 0x87230001},
+ {0xc78, 0x86240001}, {0xc78, 0x85250001},
+ {0xc78, 0x84260001}, {0xc78, 0x83270001},
+ {0xc78, 0x82280001}, {0xc78, 0x6b290001},
+ {0xc78, 0x6a2a0001}, {0xc78, 0x692b0001},
+ {0xc78, 0x682c0001}, {0xc78, 0x672d0001},
+ {0xc78, 0x662e0001}, {0xc78, 0x652f0001},
+ {0xc78, 0x64300001}, {0xc78, 0x63310001},
+ {0xc78, 0x62320001}, {0xc78, 0x61330001},
+ {0xc78, 0x46340001}, {0xc78, 0x45350001},
+ {0xc78, 0x44360001}, {0xc78, 0x43370001},
+ {0xc78, 0x42380001}, {0xc78, 0x41390001},
+ {0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+ {0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+ {0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+ {0xc78, 0xfb400001}, {0xc78, 0xfb410001},
+ {0xc78, 0xfb420001}, {0xc78, 0xfb430001},
+ {0xc78, 0xfb440001}, {0xc78, 0xfb450001},
+ {0xc78, 0xfb460001}, {0xc78, 0xfb470001},
+ {0xc78, 0xfb480001}, {0xc78, 0xfa490001},
+ {0xc78, 0xf94a0001}, {0xc78, 0xf84b0001},
+ {0xc78, 0xf74c0001}, {0xc78, 0xf64d0001},
+ {0xc78, 0xf54e0001}, {0xc78, 0xf44f0001},
+ {0xc78, 0xf3500001}, {0xc78, 0xf2510001},
+ {0xc78, 0xf1520001}, {0xc78, 0xf0530001},
+ {0xc78, 0xef540001}, {0xc78, 0xee550001},
+ {0xc78, 0xed560001}, {0xc78, 0xec570001},
+ {0xc78, 0xeb580001}, {0xc78, 0xea590001},
+ {0xc78, 0xe95a0001}, {0xc78, 0xe85b0001},
+ {0xc78, 0xe75c0001}, {0xc78, 0xe65d0001},
+ {0xc78, 0xe55e0001}, {0xc78, 0xe45f0001},
+ {0xc78, 0xe3600001}, {0xc78, 0xe2610001},
+ {0xc78, 0xc3620001}, {0xc78, 0xc2630001},
+ {0xc78, 0xc1640001}, {0xc78, 0x8b650001},
+ {0xc78, 0x8a660001}, {0xc78, 0x89670001},
+ {0xc78, 0x88680001}, {0xc78, 0x87690001},
+ {0xc78, 0x866a0001}, {0xc78, 0x856b0001},
+ {0xc78, 0x846c0001}, {0xc78, 0x676d0001},
+ {0xc78, 0x666e0001}, {0xc78, 0x656f0001},
+ {0xc78, 0x64700001}, {0xc78, 0x63710001},
+ {0xc78, 0x62720001}, {0xc78, 0x61730001},
+ {0xc78, 0x60740001}, {0xc78, 0x46750001},
+ {0xc78, 0x45760001}, {0xc78, 0x44770001},
+ {0xc78, 0x43780001}, {0xc78, 0x42790001},
+ {0xc78, 0x417a0001}, {0xc78, 0x407b0001},
+ {0xc78, 0x407c0001}, {0xc78, 0x407d0001},
+ {0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+ {0xc50, 0x69553422}, {0xc50, 0x69553420},
+ {0xffff, 0xffffffff}
+};
+
+static const struct rtl8xxxu_rfregval rtl8188eu_radioa_init_table[] = {
+ {0x00, 0x00030000}, {0x08, 0x00084000},
+ {0x18, 0x00000407}, {0x19, 0x00000012},
+ {0x1e, 0x00080009}, {0x1f, 0x00000880},
+ {0x2f, 0x0001a060}, {0x3f, 0x00000000},
+ {0x42, 0x000060c0}, {0x57, 0x000d0000},
+ {0x58, 0x000be180}, {0x67, 0x00001552},
+ {0x83, 0x00000000}, {0xb0, 0x000ff8fc},
+ {0xb1, 0x00054400}, {0xb2, 0x000ccc19},
+ {0xb4, 0x00043003}, {0xb6, 0x0004953e},
+ {0xb7, 0x0001c718}, {0xb8, 0x000060ff},
+ {0xb9, 0x00080001}, {0xba, 0x00040000},
+ {0xbb, 0x00000400}, {0xbf, 0x000c0000},
+ {0xc2, 0x00002400}, {0xc3, 0x00000009},
+ {0xc4, 0x00040c91}, {0xc5, 0x00099999},
+ {0xc6, 0x000000a3}, {0xc7, 0x00088820},
+ {0xc8, 0x00076c06}, {0xc9, 0x00000000},
+ {0xca, 0x00080000}, {0xdf, 0x00000180},
+ {0xef, 0x000001a0}, {0x51, 0x0006b27d},
+ {0x52, 0x0007e49d}, /* Set to 0x0007e4dd for SDIO */
+ {0x53, 0x00000073}, {0x56, 0x00051ff3},
+ {0x35, 0x00000086}, {0x35, 0x00000186},
+ {0x35, 0x00000286}, {0x36, 0x00001c25},
+ {0x36, 0x00009c25}, {0x36, 0x00011c25},
+ {0x36, 0x00019c25}, {0xb6, 0x00048538},
+ {0x18, 0x00000c07}, {0x5a, 0x0004bd00},
+ {0x19, 0x000739d0}, {0x34, 0x0000adf3},
+ {0x34, 0x00009df0}, {0x34, 0x00008ded},
+ {0x34, 0x00007dea}, {0x34, 0x00006de7},
+ {0x34, 0x000054ee}, {0x34, 0x000044eb},
+ {0x34, 0x000034e8}, {0x34, 0x0000246b},
+ {0x34, 0x00001468}, {0x34, 0x0000006d},
+ {0x00, 0x00030159}, {0x84, 0x00068200},
+ {0x86, 0x000000ce}, {0x87, 0x00048a00},
+ {0x8e, 0x00065540}, {0x8f, 0x00088000},
+ {0xef, 0x000020a0}, {0x3b, 0x000f02b0},
+ {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+ {0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+ {0x3b, 0x000a0080}, {0x3b, 0x00090080},
+ {0x3b, 0x0008f780}, {0x3b, 0x000722b0},
+ {0x3b, 0x0006f7b0}, {0x3b, 0x00054fb0},
+ {0x3b, 0x0004f060}, {0x3b, 0x00030090},
+ {0x3b, 0x00020080}, {0x3b, 0x00010080},
+ {0x3b, 0x0000f780}, {0xef, 0x000000a0},
+ {0x00, 0x00010159}, {0x18, 0x0000f407},
+ {0xFE, 0x00000000}, {0xFE, 0x00000000},
+ {0x1F, 0x00080003}, {0xFE, 0x00000000},
+ {0xFE, 0x00000000}, {0x1E, 0x00000001},
+ {0x1F, 0x00080000}, {0x00, 0x00033e60},
+ {0xff, 0xffffffff}
+};
+
+#define PERENTRY 23
+#define RETRYSIZE 5
+#define RATESIZE 28
+#define TX_RPT2_ITEM_SIZE 8
+
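+/*
+ * Rate adaptation lookup tables for the RTL8188E. In retry_penalty,
+ * columns 0-4 weight the per-retry-level TX counters and column 5
+ * scales the total frame count they are compared against in
+ * rtl8188e_rate_decision(). The bracketed values in the row comments
+ * appear to be the PER percentages the rows were tuned for.
+ */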
+static const u8 retry_penalty[PERENTRY][RETRYSIZE + 1] = {
+ {5, 4, 3, 2, 0, 3}, /* 92 , idx=0 */
+ {6, 5, 4, 3, 0, 4}, /* 86 , idx=1 */
+ {6, 5, 4, 2, 0, 4}, /* 81 , idx=2 */
+ {8, 7, 6, 4, 0, 6}, /* 75 , idx=3 */
+ {10, 9, 8, 6, 0, 8}, /* 71 , idx=4 */
+ {10, 9, 8, 4, 0, 8}, /* 66 , idx=5 */
+ {10, 9, 8, 2, 0, 8}, /* 62 , idx=6 */
+ {10, 9, 8, 0, 0, 8}, /* 59 , idx=7 */
+ {18, 17, 16, 8, 0, 16}, /* 53 , idx=8 */
+ {26, 25, 24, 16, 0, 24}, /* 50 , idx=9 */
+ {34, 33, 32, 24, 0, 32}, /* 47 , idx=0x0a */
+ {34, 31, 28, 20, 0, 32}, /* 43 , idx=0x0b */
+ {34, 31, 27, 18, 0, 32}, /* 40 , idx=0x0c */
+ {34, 31, 26, 16, 0, 32}, /* 37 , idx=0x0d */
+ {34, 30, 22, 16, 0, 32}, /* 32 , idx=0x0e */
+ {34, 30, 24, 16, 0, 32}, /* 26 , idx=0x0f */
+ {49, 46, 40, 16, 0, 48}, /* 20 , idx=0x10 */
+ {49, 45, 32, 0, 0, 48}, /* 17 , idx=0x11 */
+ {49, 45, 22, 18, 0, 48}, /* 15 , idx=0x12 */
+ {49, 40, 24, 16, 0, 48}, /* 12 , idx=0x13 */
+ {49, 32, 18, 12, 0, 48}, /* 9 , idx=0x14 */
+ {49, 22, 18, 14, 0, 48}, /* 6 , idx=0x15 */
+ {49, 16, 16, 0, 0, 48} /* 3, idx=0x16 */
+};
+
+static const u8 pt_penalty[RETRYSIZE + 1] = {34, 31, 30, 24, 0, 32};
+
+static const u8 retry_penalty_idx_normal[2][RATESIZE] = {
+ { /* RSSI>TH */
+ 4, 4, 4, 5,
+ 4, 4, 5, 7, 7, 7, 8, 0x0a,
+ 4, 4, 4, 4, 6, 0x0a, 0x0b, 0x0d,
+ 5, 5, 7, 7, 8, 0x0b, 0x0d, 0x0f
+ },
+ { /* RSSI<TH */
+ 0x0a, 0x0a, 0x0b, 0x0c,
+ 0x0a, 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x13, 0x13,
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x11, 0x13, 0x13,
+ 9, 9, 9, 9, 0x0c, 0x0e, 0x11, 0x13
+ }
+};
+
+static const u8 retry_penalty_idx_cut_i[2][RATESIZE] = {
+ { /* RSSI>TH */
+ 4, 4, 4, 5,
+ 4, 4, 5, 7, 7, 7, 8, 0x0a,
+ 4, 4, 4, 4, 6, 0x0a, 0x0b, 0x0d,
+ 5, 5, 7, 7, 8, 0x0b, 0x0d, 0x0f
+ },
+ { /* RSSI<TH */
+ 0x0a, 0x0a, 0x0b, 0x0c,
+ 0x0a, 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x13, 0x13,
+ 0x06, 0x07, 0x08, 0x0d, 0x0e, 0x11, 0x11, 0x11,
+ 9, 9, 9, 9, 0x0c, 0x0e, 0x11, 0x13
+ }
+};
+
+static const u8 retry_penalty_up_idx_normal[RATESIZE] = {
+ 0x0c, 0x0d, 0x0d, 0x0f,
+ 0x0d, 0x0e, 0x0f, 0x0f, 0x10, 0x12, 0x13, 0x14,
+ 0x0f, 0x10, 0x10, 0x12, 0x12, 0x13, 0x14, 0x15,
+ 0x11, 0x11, 0x12, 0x13, 0x13, 0x13, 0x14, 0x15
+};
+
+static const u8 retry_penalty_up_idx_cut_i[RATESIZE] = {
+ 0x0c, 0x0d, 0x0d, 0x0f,
+ 0x0d, 0x0e, 0x0f, 0x0f, 0x10, 0x12, 0x13, 0x14,
+ 0x0b, 0x0b, 0x11, 0x11, 0x12, 0x12, 0x12, 0x12,
+ 0x11, 0x11, 0x12, 0x13, 0x13, 0x13, 0x14, 0x15
+};
+
+static const u8 rssi_threshold[RATESIZE] = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0x24, 0x26, 0x2a,
+ 0x18, 0x1a, 0x1d, 0x1f, 0x21, 0x27, 0x29, 0x2a,
+ 0, 0, 0, 0x1f, 0x23, 0x28, 0x2a, 0x2c
+};
+
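+/*
+ * Per-rate thresholds for the rate decision: the rate moves down when
+ * the weighted retry score falls below n_threshold_low and up when it
+ * exceeds n_threshold_high (see rtl8188e_rate_decision()).
+ */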
+static const u16 n_threshold_high[RATESIZE] = {
+ 4, 4, 8, 16,
+ 24, 36, 48, 72, 96, 144, 192, 216,
+ 60, 80, 100, 160, 240, 400, 600, 800,
+ 300, 320, 480, 720, 1000, 1200, 1600, 2000
+};
+
+static const u16 n_threshold_low[RATESIZE] = {
+ 2, 2, 4, 8,
+ 12, 18, 24, 36, 48, 72, 96, 108,
+ 30, 40, 50, 80, 120, 200, 300, 400,
+ 150, 160, 240, 360, 500, 600, 800, 1000
+};
+
+static const u8 dropping_necessary[RATESIZE] = {
+ 1, 1, 1, 1,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 5, 6, 7, 8, 9, 10, 11, 12
+};
+
+static const u8 pending_for_rate_up_fail[5] = {2, 10, 24, 40, 60};
+
+static const u16 dynamic_tx_rpt_timing[6] = {
+ 0x186a, 0x30d4, 0x493e, 0x61a8, 0x7a12, 0x927c /* 200ms-1200ms */
+};
+
+enum rtl8188e_tx_rpt_timing {
+ DEFAULT_TIMING = 0,
+ INCREASE_TIMING,
+ DECREASE_TIMING
+};
+
+static int rtl8188eu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 sys_cfg, vendor;
+ int ret = 0;
+
+ strscpy(priv->chip_name, "8188EU", sizeof(priv->chip_name));
+ priv->rtl_chip = RTL8188E;
+ priv->rf_paths = 1;
+ priv->rx_paths = 1;
+ priv->tx_paths = 1;
+ priv->has_wifi = 1;
+
+ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK);
+ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * TODO: At a glance, the I cut requires a different firmware,
+ * different initialisation tables, and no software rate
+ * control. The vendor driver is not configured to handle
+ * I cut chips by default. Are there any in the wild?
+ */
+ if (priv->chip_cut == 8) {
+ dev_info(dev, "RTL8188EU cut I is not supported. Please complain about it at linux-wireless@vger.kernel.org.\n");
+ return -EOPNOTSUPP;
+ }
+
+ vendor = sys_cfg & SYS_CFG_VENDOR_ID;
+ rtl8xxxu_identify_vendor_1bit(priv, vendor);
+
+ ret = rtl8xxxu_config_endpoints_no_sie(priv);
+
+ return ret;
+}
+
+static void rtl8188eu_config_channel(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ u32 val32, rsr;
+ u8 opmode;
+ int sec_ch_above, channel;
+ int i;
+
+ opmode = rtl8xxxu_read8(priv, REG_BW_OPMODE);
+ rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+ channel = hw->conf.chandef.chan->hw_value;
+
+ switch (hw->conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ opmode |= BW_OPMODE_20MHZ;
+ rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ if (hw->conf.chandef.center_freq1 >
+ hw->conf.chandef.chan->center_freq) {
+ sec_ch_above = 1;
+ channel += 2;
+ } else {
+ sec_ch_above = 0;
+ channel -= 2;
+ }
+
+ opmode &= ~BW_OPMODE_20MHZ;
+ rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+ rsr &= ~RSR_RSC_BANDWIDTH_40M;
+ if (sec_ch_above)
+ rsr |= RSR_RSC_LOWER_SUB_CHANNEL;
+ else
+ rsr |= RSR_RSC_UPPER_SUB_CHANNEL;
+ rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, rsr);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 |= FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+ /*
+ * Set the control channel to upper or lower. These settings
+ * are required only for 40MHz.
+ */
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM);
+ val32 &= ~CCK0_SIDEBAND;
+ if (!sec_ch_above)
+ val32 |= CCK0_SIDEBAND;
+ rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+ val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */
+ if (sec_ch_above)
+ val32 |= OFDM_LSTF_PRIME_CH_LOW;
+ else
+ val32 |= OFDM_LSTF_PRIME_CH_HIGH;
+ rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+ val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL);
+ if (sec_ch_above)
+ val32 |= FPGA0_PS_UPPER_CHANNEL;
+ else
+ val32 |= FPGA0_PS_LOWER_CHANNEL;
+ rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = RF_A; i < priv->rf_paths; i++) {
+ val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+ u32p_replace_bits(&val32, channel, MODE_AG_CHANNEL_MASK);
+ rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+ }
+
+ for (i = RF_A; i < priv->rf_paths; i++) {
+ val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+ val32 &= ~MODE_AG_BW_MASK;
+ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+ val32 |= MODE_AG_BW_40MHZ_8723B;
+ else
+ val32 |= MODE_AG_BW_20MHZ_8723B;
+ rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+ }
+}
+
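+/*
+ * RX aggregation is disabled here: both the USB aggregation and the
+ * RX DMA aggregation enable bits are cleared.
+ */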
+static void rtl8188eu_init_aggregation(struct rtl8xxxu_priv *priv)
+{
+ u8 agg_ctrl, usb_spec;
+
+ usb_spec = rtl8xxxu_read8(priv, REG_USB_SPECIAL_OPTION);
+ usb_spec &= ~USB_SPEC_USB_AGG_ENABLE;
+ rtl8xxxu_write8(priv, REG_USB_SPECIAL_OPTION, usb_spec);
+
+ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
+ agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
+}
+
+static int rtl8188eu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8188eu_efuse *efuse = &priv->efuse_wifi.efuse8188eu;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+ sizeof(efuse->tx_power_index_A.cck_base));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+
+ priv->default_crystal_cap = efuse->xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
+ dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
+
+ return 0;
+}
+
+static void rtl8188eu_reset_8051(struct rtl8xxxu_priv *priv)
+{
+ u16 sys_func;
+
+ sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ sys_func &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+
+ sys_func |= SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+}
+
+static int rtl8188eu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ const char *fw_name;
+ int ret;
+
+ fw_name = "rtlwifi/rtl8188eufw.bin";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
+static void rtl8188eu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /*
+ * Per vendor driver, run power sequence before init of RF
+ */
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ val8 = SYS_FUNC_USBA | SYS_FUNC_USBD |
+ SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ rtl8xxxu_init_phy_regs(priv, rtl8188eu_phy_init_table);
+ rtl8xxxu_init_phy_regs(priv, rtl8188e_agc_table);
+}
+
+static int rtl8188eu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ return rtl8xxxu_init_phy_rf(priv, rtl8188eu_radioa_init_table, RF_A);
+}
+
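+/*
+ * One-shot path A TX IQK. The return value is a bitmask: bit 0 is set
+ * when the TX IQK passed. rtl8188eu_rx_iqk_path_a() below additionally
+ * sets bit 1 when the RX IQK passed, which is why the caller checks
+ * for 0x01 and 0x03 respectively.
+ */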
+static int rtl8188eu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_e94, reg_e9c;
+ int result = 0;
+
+ /* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x30008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x8214032a);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+
+ return result;
+}
+
+static int rtl8188eu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32;
+ int result = 0;
+
+ /* Leave IQK mode */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Enable path A PA in TX IQK mode */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf117b);
+
+ /* Enter IQK mode */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0x808000, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* TX IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x81004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x30008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160804);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+ else
+ goto out;
+
+ val32 = 0x80007c00 |
+ (reg_e94 & 0x03ff0000) | ((reg_e9c >> 16) & 0x03ff);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /* Modify RX IQK mode table */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+ /* Enter IQK mode */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0x808000, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* IQK setting */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x30008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c05);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c05);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+ __func__);
+
+out:
+ return result;
+}
+
+static void rtl8188eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32;
+ int path_a_ok;
+ int retry = 2;
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_CCK0_AFE_SETTING
+ };
+
+ /*
+ * Note: IQ calibration must be performed after loading
+ * PHY_REG.txt, radio_a.txt and radio_b.txt.
+ */
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ if (t == 0) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+ priv->pi_enabled = u32_get_bits(val32, FPGA0_HSSI_PARM1_PI);
+ }
+
+ if (!priv->pi_enabled) {
+ /* Switch BB to PI mode to do IQ Calibration. */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100);
+ }
+
+ /* MAC settings */
+ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+ u32p_replace_bits(&val32, 0xf, 0x0f000000);
+ rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+ if (!priv->no_pape) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+ val32 |= (FPGA0_RF_PAPE |
+ (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+ }
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+ val32 &= ~BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+ val32 &= ~BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+ /* Page B init */
+ rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000);
+
+ /* IQ calibration setting */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0x808000, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x81004800);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8188eu_iqk_path_a(priv);
+ if (path_a_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8188eu_rx_iqk_path_a(priv);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+ /* Back to BB mode, load original value */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (t == 0)
+ return;
+
+ if (!priv->pi_enabled) {
+ /* Switch back BB to SI mode after finishing IQ Calibration */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000000);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000000);
+ }
+
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Restore RX initial gain */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00032ed3);
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+}
+
+static void rtl8188eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ bool simu;
+
+ memset(result, 0, sizeof(result));
+ result[3][0] = 0x100;
+ result[3][2] = 0x100;
+ result[3][4] = 0x100;
+ result[3][6] = 0x100;
+
+ candidate = -1;
+
+ path_a_ok = false;
+
+ for (i = 0; i < 3; i++) {
+ rtl8188eu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8xxxu_simularity_compare(priv,
+ result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8xxxu_simularity_compare(priv,
+ result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8xxxu_simularity_compare(priv,
+ result, 1, 2);
+ if (simu)
+ candidate = 1;
+ else
+ candidate = 3;
+ }
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+ dev_dbg(dev,
+ "%s: e94=%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n",
+ __func__, reg_e94, reg_e9c, reg_ea4, reg_eac,
+ reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ } else {
+ reg_e94 = 0x100;
+ reg_eb4 = 0x100;
+ priv->rege94 = 0x100;
+ priv->regeb4 = 0x100;
+ reg_e9c = 0x0;
+ reg_ebc = 0x0;
+ priv->rege9c = 0x0;
+ priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
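+/*
+ * Power state helpers: the power-on/off paths below step the chip
+ * through disabled -> card emulation -> active and back, mirroring
+ * the vendor driver's power sequence.
+ */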
+static void rtl8188e_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+}
+
+static int rtl8188e_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ u16 val16;
+ int count, ret = 0;
+
+ /* Wait until 0x04[17] == 1 (power ready) */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* reset baseband */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~(SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN);
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ /* 0x24[23] = 2b'01 Schmitt trigger */
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+ /* 0x04[15] = 0 disable HWPDN (controlled by driver) */
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 &= ~APS_FSMCO_HW_POWERDOWN;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ /* 0x04[12:11] = 2b'00 disable WL suspend */
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ /* set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* LDO normal mode */
+ val8 = rtl8xxxu_read8(priv, REG_LPLDO_CTRL);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, REG_LPLDO_CTRL, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8188eu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* Turn off RF */
+ val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
+ val8 &= ~RF_ENABLE;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ /* LDO Sleep mode */
+ val8 = rtl8xxxu_read8(priv, REG_LPLDO_CTRL);
+ val8 |= BIT(4);
+ rtl8xxxu_write8(priv, REG_LPLDO_CTRL, val8);
+
+ return 0;
+}
+
+static int rtl8188eu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+ u16 val16;
+ u8 val8;
+
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 &= ~APS_FSMCO_PCIE;
+ val16 |= APS_FSMCO_HW_SUSPEND;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 3, 0x00);
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG + 1);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, REG_GPIO_MUXCFG + 1, val8);
+
+ /* Set USB suspend enable local register 0xfe10[4]=1 */
+ val8 = rtl8xxxu_read8(priv, 0xfe10);
+ val8 |= BIT(4);
+ rtl8xxxu_write8(priv, 0xfe10, val8);
+
+ return 0;
+}
+
+static int rtl8188eu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int retry, retval;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x7f);
+
+ retry = 100;
+ retval = -EBUSY;
+ /* Poll the 32-bit wide REG_SCH_TX_CMD for 0 to ensure no TX is pending. */
+ do {
+ val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD);
+ if (!val32) {
+ retval = 0;
+ break;
+ }
+ } while (retry--);
+
+ if (!retry) {
+ dev_warn(dev, "Failed to flush TX queue\n");
+ retval = -EBUSY;
+ goto out;
+ }
+
+ /* Disable CCK and OFDM, clock gated */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ udelay(2);
+
+ /* Reset MAC TRX */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= 0xff;
+ val16 &= ~(CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | CR_SECURITY_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+ val8 |= DUAL_TSF_TX_OK;
+ rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+out:
+ return retval;
+}
+
+static int rtl8188eu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+ int ret;
+
+ rtl8188e_disabled_to_emu(priv);
+
+ ret = rtl8188e_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ * We do not set CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE here
+ * due to a hardware bug in the 88E, requiring those to be
+ * set after REG_TRXFF_BNDY is set. If not, the RXFF boundary
+ * will be set to a larger buffer size than the real buffer
+ * size.
+ */
+ val16 = (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+exit:
+ return ret;
+}
+
+static void rtl8188eu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+ rtl8188eu_active_to_lps(priv);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ /* 32K_CTRL looks to be very 8188e specific */
+ val8 = rtl8xxxu_read8(priv, REG_32K_CTRL);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_32K_CTRL, val8);
+
+ rtl8188eu_active_to_emu(priv);
+ rtl8188eu_emu_to_disabled(priv);
+
+ /* Reset MCU IO Wrapper */
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 &= ~BIT(3);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ /* Vendor driver refers to GPIO_IN */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_PIN_CTRL);
+ /* Vendor driver refers to GPIO_OUT */
+ rtl8xxxu_write8(priv, REG_GPIO_PIN_CTRL + 1, val8);
+ rtl8xxxu_write8(priv, REG_GPIO_PIN_CTRL + 2, 0xff);
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL, val8 << 4);
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL + 1);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL + 1, val8 | 0x0f);
+
+ /*
+ * Set the LNA, TRSW and EX_PA pins to output mode.
+ * Referred to as REG_BB_PAD_CTRL in the 8188eu vendor driver.
+ */
+ rtl8xxxu_write32(priv, REG_PAD_CTRL1, 0x00080808);
+
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x00);
+
+ rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, 0x00000000);
+}
+
+static void rtl8188e_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ rtl8xxxu_write8(priv, REG_RF_CTRL, RF_ENABLE | RF_RSTB | RF_SDMRSTB);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
+ val32 |= OFDM_RF_PATH_RX_A | OFDM_RF_PATH_TX_A;
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static void rtl8188e_disable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~OFDM_RF_PATH_TX_MASK;
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ /* Power down RF module */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0);
+
+ rtl8188eu_active_to_emu(priv);
+}
+
+static void rtl8188e_usb_quirks(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+
+ /*
+ * Technically this is not a USB quirk, but a chip quirk.
+ * This has to be done after REG_TRXFF_BNDY is set, see
+ * rtl8188eu_power_on() for details.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ rtl8xxxu_gen2_usb_quirks(priv);
+
+ /* Pre-TX enable WEP/TKIP security */
+ rtl8xxxu_write8(priv, REG_EARLY_MODE_CONTROL_8188E + 3, 0x01);
+}
+
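+/*
+ * Convert the CCK AGC report into a signal strength value: take the
+ * gain for the reported LNA index and subtract the VGA attenuation,
+ * presumably 2 dB per VGA index step.
+ */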
+static s8 rtl8188e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
+{
+ /* only use lna 0/1/2/3/7 */
+ static const s8 lna_gain_table_0[8] = {17, -1, -13, -29, -32, -35, -38, -41};
+ /* only use lna 3/7 */
+ static const s8 lna_gain_table_1[8] = {29, 20, 12, 3, -6, -15, -24, -33};
+
+ s8 rx_pwr_all = 0x00;
+ u8 vga_idx, lna_idx;
+ s8 lna_gain = 0;
+
+ lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK);
+ vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK);
+
+ if (priv->chip_cut >= 8) /* cut I */ /* SMIC */
+ lna_gain = lna_gain_table_0[lna_idx];
+ else /* TSMC */
+ lna_gain = lna_gain_table_1[lna_idx];
+
+ rx_pwr_all = lna_gain - (2 * vga_idx);
+
+ return rx_pwr_all;
+}
+
+static int rtl8188eu_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2);
+
+ if (brightness == LED_OFF) {
+ ledcfg &= ~LEDCFG2_HW_LED_CONTROL;
+ ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE;
+ } else if (brightness == LED_ON) {
+ ledcfg &= ~(LEDCFG2_HW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE);
+ ledcfg |= LEDCFG2_SW_LED_CONTROL;
+ } else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
+ ledcfg &= ~LEDCFG2_SW_LED_DISABLE;
+ ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE;
+ }
+
+ rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg);
+
+ return 0;
+}
+
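+/*
+ * Adjust the TX report interval: look up the current interval in
+ * dynamic_tx_rpt_timing, then reset it to the shortest slot or step
+ * it one slot up or down, depending on the requested change.
+ */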
+static void rtl8188e_set_tx_rpt_timing(struct rtl8xxxu_ra_info *ra, u8 timing)
+{
+ u8 idx;
+
+ for (idx = 0; idx < 5; idx++)
+ if (dynamic_tx_rpt_timing[idx] == ra->rpt_time)
+ break;
+
+ if (timing == DEFAULT_TIMING) {
+ idx = 0; /* 200ms */
+ } else if (timing == INCREASE_TIMING) {
+ if (idx < 5)
+ idx++;
+ } else if (timing == DECREASE_TIMING) {
+ if (idx > 0)
+ idx--;
+ }
+
+ ra->rpt_time = dynamic_tx_rpt_timing[idx];
+}
+
+static void rtl8188e_rate_down(struct rtl8xxxu_ra_info *ra)
+{
+ u8 rate_id = ra->pre_rate;
+ u8 lowest_rate = ra->lowest_rate;
+ u8 highest_rate = ra->highest_rate;
+ s8 i;
+
+ if (rate_id > highest_rate) {
+ rate_id = highest_rate;
+ } else if (ra->rate_sgi) {
+ ra->rate_sgi = 0;
+ } else if (rate_id > lowest_rate) {
+ if (rate_id > 0) {
+ for (i = rate_id - 1; i >= lowest_rate; i--) {
+ if (ra->ra_use_rate & BIT(i)) {
+ rate_id = i;
+ goto rate_down_finish;
+ }
+ }
+ }
+ } else if (rate_id <= lowest_rate) {
+ rate_id = lowest_rate;
+ }
+
+rate_down_finish:
+ if (ra->ra_waiting_counter == 1) {
+ ra->ra_waiting_counter++;
+ ra->ra_pending_counter++;
+ } else if (ra->ra_waiting_counter > 1) {
+ ra->ra_waiting_counter = 0;
+ ra->ra_pending_counter = 0;
+ }
+
+ if (ra->ra_pending_counter >= 4)
+ ra->ra_pending_counter = 4;
+
+ ra->ra_drop_after_down = 1;
+
+ ra->decision_rate = rate_id;
+
+ rtl8188e_set_tx_rpt_timing(ra, DECREASE_TIMING);
+}
+
+static void rtl8188e_rate_up(struct rtl8xxxu_ra_info *ra)
+{
+ u8 rate_id = ra->pre_rate;
+ u8 highest_rate = ra->highest_rate;
+ u8 i;
+
+ if (ra->ra_waiting_counter == 1) {
+ ra->ra_waiting_counter = 0;
+ ra->ra_pending_counter = 0;
+ } else if (ra->ra_waiting_counter > 1) {
+ ra->pre_rssi_sta_ra = ra->rssi_sta_ra;
+ goto rate_up_finish;
+ }
+
+ rtl8188e_set_tx_rpt_timing(ra, DEFAULT_TIMING);
+
+ if (rate_id < highest_rate) {
+ for (i = rate_id + 1; i <= highest_rate; i++) {
+ if (ra->ra_use_rate & BIT(i)) {
+ rate_id = i;
+ goto rate_up_finish;
+ }
+ }
+ } else if (rate_id == highest_rate) {
+ if (ra->sgi_enable && !ra->rate_sgi)
+ ra->rate_sgi = 1;
+ else if (!ra->sgi_enable)
+ ra->rate_sgi = 0;
+ } else { /* rate_id > ra->highest_rate */
+ rate_id = highest_rate;
+ }
+
+rate_up_finish:
+ if (ra->ra_waiting_counter == (4 + pending_for_rate_up_fail[ra->ra_pending_counter]))
+ ra->ra_waiting_counter = 0;
+ else
+ ra->ra_waiting_counter++;
+
+ ra->decision_rate = rate_id;
+}
+
+static void rtl8188e_reset_ra_counter(struct rtl8xxxu_ra_info *ra)
+{
+ u8 rate_id = ra->decision_rate;
+
+ ra->nsc_up = (n_threshold_high[rate_id] + n_threshold_low[rate_id]) >> 1;
+ ra->nsc_down = (n_threshold_high[rate_id] + n_threshold_low[rate_id]) >> 1;
+}
+
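+/*
+ * Core rate decision: weight the retry/drop counters from the latest
+ * TX report with the penalty tables, then move the rate down when the
+ * down-score falls below n_threshold_low (or too many frames were
+ * dropped) and up when the up-score exceeds n_threshold_high.
+ */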
+static void rtl8188e_rate_decision(struct rtl8xxxu_ra_info *ra)
+{
+ struct rtl8xxxu_priv *priv = container_of(ra, struct rtl8xxxu_priv, ra_info);
+ const u8 *retry_penalty_idx_0;
+ const u8 *retry_penalty_idx_1;
+ const u8 *retry_penalty_up_idx;
+ u8 rate_id, penalty_id1, penalty_id2;
+ int i;
+
+ if (ra->total == 0)
+ return;
+
+ if (ra->ra_drop_after_down) {
+ ra->ra_drop_after_down--;
+
+ rtl8188e_reset_ra_counter(ra);
+
+ return;
+ }
+
+ if (priv->chip_cut == 8) { /* cut I */
+ retry_penalty_idx_0 = retry_penalty_idx_cut_i[0];
+ retry_penalty_idx_1 = retry_penalty_idx_cut_i[1];
+ retry_penalty_up_idx = retry_penalty_up_idx_cut_i;
+ } else {
+ retry_penalty_idx_0 = retry_penalty_idx_normal[0];
+ retry_penalty_idx_1 = retry_penalty_idx_normal[1];
+ retry_penalty_up_idx = retry_penalty_up_idx_normal;
+ }
+
+ if (ra->rssi_sta_ra < (ra->pre_rssi_sta_ra - 3) ||
+ ra->rssi_sta_ra > (ra->pre_rssi_sta_ra + 3)) {
+ ra->pre_rssi_sta_ra = ra->rssi_sta_ra;
+ ra->ra_waiting_counter = 0;
+ ra->ra_pending_counter = 0;
+ }
+
+ /* Start RA decision */
+ if (ra->pre_rate > ra->highest_rate)
+ rate_id = ra->highest_rate;
+ else
+ rate_id = ra->pre_rate;
+
+ /* rate down */
+ if (ra->rssi_sta_ra > rssi_threshold[rate_id])
+ penalty_id1 = retry_penalty_idx_0[rate_id];
+ else
+ penalty_id1 = retry_penalty_idx_1[rate_id];
+
+ for (i = 0; i < 5; i++)
+ ra->nsc_down += ra->retry[i] * retry_penalty[penalty_id1][i];
+
+ if (ra->nsc_down > (ra->total * retry_penalty[penalty_id1][5]))
+ ra->nsc_down -= ra->total * retry_penalty[penalty_id1][5];
+ else
+ ra->nsc_down = 0;
+
+ /* rate up */
+ penalty_id2 = retry_penalty_up_idx[rate_id];
+
+ for (i = 0; i < 5; i++)
+ ra->nsc_up += ra->retry[i] * retry_penalty[penalty_id2][i];
+
+ if (ra->nsc_up > (ra->total * retry_penalty[penalty_id2][5]))
+ ra->nsc_up -= ra->total * retry_penalty[penalty_id2][5];
+ else
+ ra->nsc_up = 0;
+
+ if (ra->nsc_down < n_threshold_low[rate_id] ||
+ ra->drop > dropping_necessary[rate_id]) {
+ rtl8188e_rate_down(ra);
+
+ rtl8xxxu_update_ra_report(&priv->ra_report, ra->decision_rate,
+ ra->rate_sgi, priv->ra_report.txrate.bw);
+ } else if (ra->nsc_up > n_threshold_high[rate_id]) {
+ rtl8188e_rate_up(ra);
+
+ rtl8xxxu_update_ra_report(&priv->ra_report, ra->decision_rate,
+ ra->rate_sgi, priv->ra_report.txrate.bw);
+ }
+
+ if (ra->decision_rate == ra->pre_rate)
+ ra->dynamic_tx_rpt_timing_counter++;
+ else
+ ra->dynamic_tx_rpt_timing_counter = 0;
+
+ if (ra->dynamic_tx_rpt_timing_counter >= 4) {
+ /* Rate didn't change 4 times, extend RPT timing */
+ rtl8188e_set_tx_rpt_timing(ra, INCREASE_TIMING);
+ ra->dynamic_tx_rpt_timing_counter = 0;
+ }
+
+ ra->pre_rate = ra->decision_rate;
+
+ rtl8188e_reset_ra_counter(ra);
+}
+
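+/*
+ * Power training state machine carried over from the vendor driver.
+ * Note that the if (1) block at the end of this function currently
+ * forces power training off until false alarm statistics are
+ * implemented (see the TODO there).
+ */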
+static void rtl8188e_power_training_try_state(struct rtl8xxxu_ra_info *ra)
+{
+ ra->pt_try_state = 0;
+ switch (ra->pt_mode_ss) {
+ case 3:
+ if (ra->decision_rate >= DESC_RATE_MCS13)
+ ra->pt_try_state = 1;
+ break;
+ case 2:
+ if (ra->decision_rate >= DESC_RATE_MCS5)
+ ra->pt_try_state = 1;
+ break;
+ case 1:
+ if (ra->decision_rate >= DESC_RATE_48M)
+ ra->pt_try_state = 1;
+ break;
+ case 0:
+ if (ra->decision_rate >= DESC_RATE_11M)
+ ra->pt_try_state = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (ra->rssi_sta_ra < 48) {
+ ra->pt_stage = 0;
+ } else if (ra->pt_try_state == 1) {
+ if ((ra->pt_stop_count >= 10) ||
+ (ra->pt_pre_rssi > ra->rssi_sta_ra + 5) ||
+ (ra->pt_pre_rssi < ra->rssi_sta_ra - 5) ||
+ (ra->decision_rate != ra->pt_pre_rate)) {
+ if (ra->pt_stage == 0)
+ ra->pt_stage = 1;
+ else if (ra->pt_stage == 1)
+ ra->pt_stage = 3;
+ else
+ ra->pt_stage = 5;
+
+ ra->pt_pre_rssi = ra->rssi_sta_ra;
+ ra->pt_stop_count = 0;
+ } else {
+ ra->ra_stage = 0;
+ ra->pt_stop_count++;
+ }
+ } else {
+ ra->pt_stage = 0;
+ ra->ra_stage = 0;
+ }
+
+ ra->pt_pre_rate = ra->decision_rate;
+
+ /* TODO: implement the "false alarm" statistics for this */
+ /* Disable power training in a noisy environment */
+ /* if (p_dm_odm->is_disable_power_training) { */
+ if (1) {
+ ra->pt_stage = 0;
+ ra->ra_stage = 0;
+ ra->pt_stop_count = 0;
+ }
+}
+
+static void rtl8188e_power_training_decision(struct rtl8xxxu_ra_info *ra)
+{
+ u8 temp_stage;
+ u32 numsc;
+ u32 num_total;
+ u8 stage_id;
+ u8 j;
+
+ numsc = 0;
+ num_total = ra->total * pt_penalty[5];
+ for (j = 0; j <= 4; j++) {
+ numsc += ra->retry[j] * pt_penalty[j];
+
+ if (numsc > num_total)
+ break;
+ }
+
+ j >>= 1;
+ temp_stage = (ra->pt_stage + 1) >> 1;
+ if (temp_stage > j)
+ stage_id = temp_stage - j;
+ else
+ stage_id = 0;
+
+ ra->pt_smooth_factor = (ra->pt_smooth_factor >> 1) +
+ (ra->pt_smooth_factor >> 2) +
+ stage_id * 16 + 2;
+ if (ra->pt_smooth_factor > 192)
+ ra->pt_smooth_factor = 192;
+ stage_id = ra->pt_smooth_factor >> 6;
+ temp_stage = stage_id * 2;
+ if (temp_stage != 0)
+ temp_stage--;
+ if (ra->drop > 3)
+ temp_stage = 0;
+ ra->pt_stage = temp_stage;
+}
+
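+/*
+ * Parse a TX report type 2 from the RX path. Each 8-byte item holds
+ * the per-macid counters: a little-endian 16-bit counter followed by
+ * four single-byte retry counters and a drop counter. The macid_valid
+ * bitmap in the RX descriptor marks which items carry valid data.
+ */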
+void rtl8188e_handle_ra_tx_report2(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+{
+ u32 *_rx_desc = (u32 *)(skb->data - sizeof(struct rtl8xxxu_rxdesc16));
+ struct rtl8xxxu_rxdesc16 *rx_desc = (struct rtl8xxxu_rxdesc16 *)_rx_desc;
+ struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_ra_info *ra = &priv->ra_info;
+ u32 tx_rpt_len = rx_desc->pktlen & 0x3ff;
+ u32 items = tx_rpt_len / TX_RPT2_ITEM_SIZE;
+ u64 macid_valid = ((u64)_rx_desc[5] << 32) | _rx_desc[4];
+ u32 macid;
+ u8 *rpt = skb->data;
+ bool valid;
+ u16 min_rpt_time = 0x927c;
+
+ dev_dbg(dev, "%s: len: %d items: %d\n", __func__, tx_rpt_len, items);
+
+ for (macid = 0; macid < items; macid++) {
+ valid = false;
+
+ if (macid < 64)
+ valid = macid_valid & BIT(macid);
+
+ if (valid) {
+ ra->retry[0] = le16_to_cpu(*(__le16 *)rpt);
+ ra->retry[1] = rpt[2];
+ ra->retry[2] = rpt[3];
+ ra->retry[3] = rpt[4];
+ ra->retry[4] = rpt[5];
+ ra->drop = rpt[6];
+ ra->total = ra->retry[0] + ra->retry[1] + ra->retry[2] +
+ ra->retry[3] + ra->retry[4] + ra->drop;
+
+ if (ra->total > 0) {
+ if (ra->ra_stage < 5)
+ rtl8188e_rate_decision(ra);
+ else if (ra->ra_stage == 5)
+ rtl8188e_power_training_try_state(ra);
+ else /* ra->ra_stage == 6 */
+ rtl8188e_power_training_decision(ra);
+
+ if (ra->ra_stage <= 5)
+ ra->ra_stage++;
+ else
+ ra->ra_stage = 0;
+ }
+ } else if (macid == 0) {
+ dev_warn(dev, "%s: TX report item 0 not valid\n", __func__);
+ }
+
+ dev_dbg(dev, "%s: valid: %d retry: %d %d %d %d %d drop: %d\n",
+ __func__, valid,
+ ra->retry[0], ra->retry[1], ra->retry[2],
+ ra->retry[3], ra->retry[4], ra->drop);
+
+ if (min_rpt_time > ra->rpt_time)
+ min_rpt_time = ra->rpt_time;
+
+ rpt += TX_RPT2_ITEM_SIZE;
+
+ /*
+ * We only use macid 0, so only the first item is relevant.
+ * AP mode will use more of them if it's ever implemented.
+ */
+ break;
+ }
+
+ if (min_rpt_time != ra->pre_min_rpt_time) {
+ rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, min_rpt_time);
+ ra->pre_min_rpt_time = min_rpt_time;
+ }
+}
+
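+/*
+ * Refresh the usable rate span: derive the highest and lowest rates
+ * set in the rate mask and pick the power training mode (pt_mode_ss)
+ * matching the highest rate.
+ */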
+static void rtl8188e_arfb_refresh(struct rtl8xxxu_ra_info *ra)
+{
+ s8 i;
+
+ ra->ra_use_rate = ra->rate_mask;
+
+ /* Highest rate */
+ if (ra->ra_use_rate) {
+ for (i = RATESIZE; i >= 0; i--) {
+ if (ra->ra_use_rate & BIT(i)) {
+ ra->highest_rate = i;
+ break;
+ }
+ }
+ } else {
+ ra->highest_rate = 0;
+ }
+
+ /* Lowest rate */
+ if (ra->ra_use_rate) {
+ for (i = 0; i < RATESIZE; i++) {
+ if (ra->ra_use_rate & BIT(i)) {
+ ra->lowest_rate = i;
+ break;
+ }
+ }
+ } else {
+ ra->lowest_rate = 0;
+ }
+
+ if (ra->highest_rate > DESC_RATE_MCS7)
+ ra->pt_mode_ss = 3;
+ else if (ra->highest_rate > DESC_RATE_54M)
+ ra->pt_mode_ss = 2;
+ else if (ra->highest_rate > DESC_RATE_11M)
+ ra->pt_mode_ss = 1;
+ else
+ ra->pt_mode_ss = 0;
+}
+
+static void
+rtl8188e_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
+{
+ struct rtl8xxxu_ra_info *ra = &priv->ra_info;
+
+ ra->rate_id = rateid;
+ ra->rate_mask = ramask;
+ ra->sgi_enable = sgi;
+
+ rtl8188e_arfb_refresh(ra);
+}
+
+static void rtl8188e_ra_set_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi)
+{
+ priv->ra_info.rssi_sta_ra = rssi;
+}
+
+void rtl8188e_ra_info_init_all(struct rtl8xxxu_ra_info *ra)
+{
+ ra->decision_rate = DESC_RATE_MCS7;
+ ra->pre_rate = DESC_RATE_MCS7;
+ ra->highest_rate = DESC_RATE_MCS7;
+ ra->lowest_rate = 0;
+ ra->rate_id = 0;
+ ra->rate_mask = 0xfffff;
+ ra->rssi_sta_ra = 0;
+ ra->pre_rssi_sta_ra = 0;
+ ra->sgi_enable = 0;
+ ra->ra_use_rate = 0xfffff;
+ ra->nsc_down = (n_threshold_high[DESC_RATE_MCS7] + n_threshold_low[DESC_RATE_MCS7]) / 2;
+ ra->nsc_up = (n_threshold_high[DESC_RATE_MCS7] + n_threshold_low[DESC_RATE_MCS7]) / 2;
+ ra->rate_sgi = 0;
+ ra->rpt_time = 0x927c;
+ ra->drop = 0;
+ ra->retry[0] = 0;
+ ra->retry[1] = 0;
+ ra->retry[2] = 0;
+ ra->retry[3] = 0;
+ ra->retry[4] = 0;
+ ra->total = 0;
+ ra->ra_waiting_counter = 0;
+ ra->ra_pending_counter = 0;
+ ra->ra_drop_after_down = 0;
+
+ ra->pt_try_state = 0;
+ ra->pt_stage = 5;
+ ra->pt_smooth_factor = 192;
+ ra->pt_stop_count = 0;
+ ra->pt_pre_rate = 0;
+ ra->pt_pre_rssi = 0;
+ ra->pt_mode_ss = 0;
+ ra->ra_stage = 0;
+}
+
+struct rtl8xxxu_fileops rtl8188eu_fops = {
+ .identify_chip = rtl8188eu_identify_chip,
+ .parse_efuse = rtl8188eu_parse_efuse,
+ .load_firmware = rtl8188eu_load_firmware,
+ .power_on = rtl8188eu_power_on,
+ .power_off = rtl8188eu_power_off,
+ .reset_8051 = rtl8188eu_reset_8051,
+ .llt_init = rtl8xxxu_init_llt_table,
+ .init_phy_bb = rtl8188eu_init_phy_bb,
+ .init_phy_rf = rtl8188eu_init_phy_rf,
+ .phy_lc_calibrate = rtl8723a_phy_lc_calibrate,
+ .phy_iq_calibrate = rtl8188eu_phy_iq_calibrate,
+ .config_channel = rtl8188eu_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+ .init_aggregation = rtl8188eu_init_aggregation,
+ .enable_rf = rtl8188e_enable_rf,
+ .disable_rf = rtl8188e_disable_rf,
+ .usb_quirks = rtl8188e_usb_quirks,
+ .set_tx_power = rtl8188f_set_tx_power,
+ .update_rate_mask = rtl8188e_update_rate_mask,
+ .report_connect = rtl8xxxu_gen2_report_connect,
+ .report_rssi = rtl8188e_ra_set_rssi,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v3,
+ .set_crystal_cap = rtl8188f_set_crystal_cap,
+ .cck_rssi = rtl8188e_cck_rssi,
+ .led_classdev_brightness_set = rtl8188eu_led_brightness_set,
+ .writeN_block_size = 128,
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+ .has_tx_report = 1,
+ .gen2_thermal_meter = 1,
+ .adda_1t_init = 0x0b1b25a0,
+ .adda_1t_path_on = 0x0bdb25a0,
+ /*
+ * Use 9K for 8188e normal chip
+ * Max RX buffer = 10K - max(TxReportSize(64*8), WOLPattern(16*24))
+ */
+ .trxff_boundary = 0x25ff,
+ .pbp_rx = PBP_PAGE_SIZE_128,
+ .pbp_tx = PBP_PAGE_SIZE_128,
+ .mactable = rtl8188e_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM_8188E,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ_8188E,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ_8188E,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ_8188E,
+ .last_llt_entry = 175,
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
index 2c4f403ba68f..af6e2c8a5025 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
@@ -370,7 +370,7 @@ static void rtl8188f_channel_to_group(int channel, int *group, int *cck_group)
*cck_group = *group;
}
-static void
+void
rtl8188f_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
{
u32 val32, ofdm, mcs;
@@ -716,7 +716,6 @@ static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv)
static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu;
- int i;
if (efuse->rtl_id != cpu_to_le16(0x8129))
return -EINVAL;
@@ -738,22 +737,12 @@ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
dev_info(&priv->udev->dev, "Product: %.7s\n", efuse->device_name);
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8188fu_efuse));
- for (i = 0; i < sizeof(struct rtl8188fu_efuse); i += 8)
- dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
- }
-
return 0;
}
static int rtl8188fu_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
fw_name = "rtlwifi/rtl8188fufw.bin";
@@ -1122,7 +1111,7 @@ static void rtl8188fu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
if (t == 0) {
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
- priv->pi_enabled = val32 & FPGA0_HSSI_PARM1_PI;
+ priv->pi_enabled = u32_get_bits(val32, FPGA0_HSSI_PARM1_PI);
}
/* save RF path */
@@ -1662,7 +1651,7 @@ static void rtl8188f_usb_quirks(struct rtl8xxxu_priv *priv)
#define XTAL1 GENMASK(22, 17)
#define XTAL0 GENMASK(16, 11)
-static void rtl8188f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap)
+void rtl8188f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap)
{
struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking;
u32 val32;
@@ -1693,8 +1682,8 @@ static s8 rtl8188f_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
s8 rx_pwr_all = 0x00;
u8 vga_idx, lna_idx;
- lna_idx = (cck_agc_rpt & 0xE0) >> 5;
- vga_idx = cck_agc_rpt & 0x1F;
+ lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK);
+ vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK);
switch (lna_idx) {
case 7:
@@ -1743,6 +1732,7 @@ struct rtl8xxxu_fileops rtl8188fu_fops = {
.set_tx_power = rtl8188f_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .report_rssi = rtl8xxxu_gen2_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.set_crystal_cap = rtl8188f_set_crystal_cap,
.cck_rssi = rtl8188f_cck_rssi,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index 3bef9ffc8b02..e61d65c3579b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -386,7 +386,7 @@ out:
static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
if (!priv->vendor_umc)
@@ -404,7 +404,6 @@ static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8192cu_efuse *efuse = &priv->efuse_wifi.efuse8192;
- int i;
if (efuse->rtl_id != cpu_to_le16(0x8129))
return -EINVAL;
@@ -457,15 +456,6 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
priv->power_base = &rtl8188r_power_base;
}
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8192cu_efuse));
- for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8)
- dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
- }
return 0;
}
@@ -619,6 +609,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.set_tx_power = rtl8xxxu_gen1_set_tx_power,
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
+ .report_rssi = rtl8xxxu_gen1_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.cck_rssi = rtl8723a_cck_rssi,
.writeN_block_size = 128,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index a7d76693c02d..5cfc00237f42 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -704,21 +704,12 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8192eu_efuse));
- for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8)
- dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
- }
return 0;
}
static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
fw_name = "rtlwifi/rtl8192eu_nic.bin";
@@ -1744,6 +1735,11 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
val8 &= ~BIT(0);
rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+
+ /*
+ * Fix transmission failure of rtl8192e.
+ */
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
static s8 rtl8192e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
@@ -1755,8 +1751,8 @@ static s8 rtl8192e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
u8 vga_idx, lna_idx;
s8 lna_gain = 0;
- lna_idx = (cck_agc_rpt & 0xE0) >> 5;
- vga_idx = cck_agc_rpt & 0x1F;
+ lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK);
+ vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK);
if (priv->cck_agc_report_type == 0)
lna_gain = lna_gain_table_0[lna_idx];
@@ -1768,6 +1764,29 @@ static s8 rtl8192e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
return rx_pwr_all;
}
+static int rtl8192eu_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG1);
+
+ if (brightness == LED_OFF) {
+ ledcfg &= ~LEDCFG1_HW_LED_CONTROL;
+ ledcfg |= LEDCFG1_LED_DISABLE;
+ } else if (brightness == LED_ON) {
+ ledcfg &= ~(LEDCFG1_HW_LED_CONTROL | LEDCFG1_LED_DISABLE);
+ } else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
+ ledcfg &= ~LEDCFG1_LED_DISABLE;
+ ledcfg |= LEDCFG1_HW_LED_CONTROL;
+ }
+
+ rtl8xxxu_write8(priv, REG_LEDCFG1, ledcfg);
+
+ return 0;
+}
+
struct rtl8xxxu_fileops rtl8192eu_fops = {
.identify_chip = rtl8192eu_identify_chip,
.parse_efuse = rtl8192eu_parse_efuse,
@@ -1788,9 +1807,11 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.set_tx_power = rtl8192e_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .report_rssi = rtl8xxxu_gen2_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.set_crystal_cap = rtl8723a_set_crystal_cap,
.cck_rssi = rtl8192e_cck_rssi,
+ .led_classdev_brightness_set = rtl8192eu_led_brightness_set,
.writeN_block_size = 128,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 707ac48ecc83..5e7b58d395ba 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -231,7 +231,7 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
switch (priv->chip_cut) {
@@ -457,6 +457,30 @@ s8 rtl8723a_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
return rx_pwr_all;
}
+static int rtl8723au_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2);
+
+ if (brightness == LED_OFF) {
+ ledcfg &= ~LEDCFG2_HW_LED_CONTROL;
+ ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE;
+ } else if (brightness == LED_ON) {
+ ledcfg &= ~(LEDCFG2_HW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE);
+ ledcfg |= LEDCFG2_SW_LED_CONTROL;
+ } else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
+ ledcfg &= ~LEDCFG2_SW_LED_DISABLE;
+ ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE;
+ }
+
+ rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg);
+
+ return 0;
+}
+
struct rtl8xxxu_fileops rtl8723au_fops = {
.identify_chip = rtl8723au_identify_chip,
.parse_efuse = rtl8723au_parse_efuse,
@@ -478,9 +502,11 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.set_tx_power = rtl8xxxu_gen1_set_tx_power,
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
+ .report_rssi = rtl8xxxu_gen1_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.set_crystal_cap = rtl8723a_set_crystal_cap,
.cck_rssi = rtl8723a_cck_rssi,
+ .led_classdev_brightness_set = rtl8723au_led_brightness_set,
.writeN_block_size = 1024,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index a0ec895b61a4..21613d60dc22 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -497,23 +497,12 @@ static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name);
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- int i;
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8723bu_efuse));
- for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8)
- dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
- }
-
return 0;
}
static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
if (priv->enable_bluetooth)
@@ -1691,8 +1680,8 @@ static s8 rtl8723b_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
s8 rx_pwr_all = 0x00;
u8 vga_idx, lna_idx;
- lna_idx = (cck_agc_rpt & 0xE0) >> 5;
- vga_idx = cck_agc_rpt & 0x1F;
+ lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK);
+ vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK);
switch (lna_idx) {
case 6:
@@ -1738,6 +1727,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.set_tx_power = rtl8723b_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .report_rssi = rtl8xxxu_gen2_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.set_crystal_cap = rtl8723a_set_crystal_cap,
.cck_rssi = rtl8723b_cck_rssi,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 3ed435401e57..620a5cc2bfdd 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -34,7 +34,7 @@
#define DRIVER_NAME "rtl8xxxu"
-int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+int rtl8xxxu_debug;
static bool rtl8xxxu_ht40_2g;
static bool rtl8xxxu_dma_aggregation;
static int rtl8xxxu_dma_agg_timeout = -1;
@@ -46,6 +46,7 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8188eufw.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
@@ -1581,10 +1582,11 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
cut = 'A' + priv->chip_cut;
dev_info(dev,
- "RTL%s rev %c (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
- priv->chip_name, cut, priv->chip_vendor, priv->tx_paths,
- priv->rx_paths, priv->ep_tx_count, priv->has_wifi,
- priv->has_bluetooth, priv->has_gps, priv->hi_pa);
+ "RTL%s rev %c (%s) romver %d, %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
+ priv->chip_name, cut, priv->chip_vendor, priv->rom_rev,
+ priv->tx_paths, priv->rx_paths, priv->ep_tx_count,
+ priv->has_wifi, priv->has_bluetooth, priv->has_gps,
+ priv->hi_pa);
dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr);
}
@@ -1813,6 +1815,16 @@ exit:
return ret;
}
+static void rtl8xxxu_dump_efuse(struct rtl8xxxu_priv *priv)
+{
+ dev_info(&priv->udev->dev,
+ "Dumping efuse for RTL%s (0x%02x bytes):\n",
+ priv->chip_name, EFUSE_MAP_LEN);
+
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ priv->efuse_wifi.raw, EFUSE_MAP_LEN, true);
+}
+
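For reference, print_hex_dump() with DUMP_PREFIX_OFFSET, a row length of 16 and a group size of 1 emits one line per 16 bytes of the map; the bytes below are purely illustrative, not real efuse content:

00000000: 29 81 00 7c 01 40 03 00 0f 00 10 00 00 00 00 00
00000010: 8c 08 00 00 10 00 00 00 00 00 00 00 00 00 00 00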
void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -1970,7 +1982,7 @@ fw_abort:
return ret;
}
-int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
+int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, const char *fw_name)
{
struct device *dev = &priv->udev->dev;
const struct firmware *fw;
@@ -2000,6 +2012,7 @@ int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
switch (signature & 0xfff0) {
case 0x92e0:
case 0x92c0:
+ case 0x88e0:
case 0x88c0:
case 0x5300:
case 0x2300:
@@ -2071,10 +2084,20 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv)
}
}
- if (priv->rtl_chip != RTL8723B &&
- priv->rtl_chip != RTL8192E &&
- priv->rtl_chip != RTL8188F)
+ switch (priv->rtl_chip) {
+ case RTL8188C:
+ case RTL8188R:
+ case RTL8191C:
+ case RTL8192C:
+ case RTL8723A:
rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
+ break;
+ case RTL8188E:
+ rtl8xxxu_write16(priv, REG_MAX_AGGR_NUM, 0x0707);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -2373,11 +2396,16 @@ static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv)
{
int ret;
- int i;
+ int i, last_entry;
u8 last_tx_page;
last_tx_page = priv->fops->total_page_num;
+ if (priv->fops->last_llt_entry)
+ last_entry = priv->fops->last_llt_entry;
+ else
+ last_entry = 255;
+
for (i = 0; i < last_tx_page; i++) {
ret = rtl8xxxu_llt_write(priv, i, i + 1);
if (ret)
@@ -2389,14 +2417,14 @@ int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv)
goto exit;
/* Mark remaining pages as a ring buffer */
- for (i = last_tx_page + 1; i < 0xff; i++) {
+ for (i = last_tx_page + 1; i < last_entry; i++) {
ret = rtl8xxxu_llt_write(priv, i, (i + 1));
if (ret)
goto exit;
}
/* Let last entry point to the start entry of ring buffer */
- ret = rtl8xxxu_llt_write(priv, 0xff, last_tx_page + 1);
+ ret = rtl8xxxu_llt_write(priv, last_entry, last_tx_page + 1);
if (ret)
goto exit;
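The net effect is two chains inside the link list table; a sketch (the write that terminates the TX-buffer chain with 0xff sits in the unchanged context just above, and last_llt_entry is a new optional fileops override that defaults to 255):

/*
 *  TX buffer pages : 0 -> 1 -> ... -> total_page_num -> 0xff (end mark)
 *  Ring buffer     : total_page_num + 1 -> ... -> last_entry
 *                          ^_______________________________|
 */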
@@ -2704,8 +2732,8 @@ void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv, bool iqk_ok,
#define MAX_TOLERANCE 5
-static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
- int result[][8], int c1, int c2)
+bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2)
{
u32 i, j, diff, simubitmap, bound = 0;
int candidate[2] = {-1, -1}; /* for path A and path B */
@@ -3898,7 +3926,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
goto exit;
/* RFSW Control - clear bit 14 ?? */
- if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
+ if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E &&
+ priv->rtl_chip != RTL8188E)
rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
@@ -3911,7 +3940,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
/* 0x860[6:5]= 00 - why? - this sets antenna B */
- if (priv->rtl_chip != RTL8192E)
+ if (priv->rtl_chip != RTL8192E && priv->rtl_chip != RTL8188E)
rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210);
if (!macpower) {
@@ -3953,7 +3982,25 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* Enable TX report and TX report timer for 8723bu/8188eu/...
*/
if (fops->has_tx_report) {
+ /*
+ * The RTL8188EU has two types of TX reports:
+ * rpt_sel=1:
+ * One report for one frame. We can use this for frames
+ * with IEEE80211_TX_CTL_REQ_TX_STATUS.
+ * rpt_sel=2:
+ * One report for many frames transmitted over a period
+ * of time. (This is what REG_TX_REPORT_TIME is for.) The
+ * report includes the number of frames transmitted
+ * successfully, and the number of unsuccessful
+ * transmissions. We use this for software rate control.
+ *
+ * Bit 0 of REG_TX_REPORT_CTRL is required for both types.
+ * Bit 1 (TX_REPORT_CTRL_TIMER_ENABLE) is required for
+ * type 2.
+ */
val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ if (priv->rtl_chip == RTL8188E)
+ val8 |= BIT(0);
val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
/* Set MAX RPT MACID */
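As a sketch of what the rpt_sel=2 path ultimately feeds the software rate control (the struct and field names below are illustrative, not the driver's real report layout):

struct tx_report2_sketch {
	u16 pkt_ok;	/* frames acked within the reporting window */
	u16 pkt_fail;	/* frames that exhausted their retries */
};

static u8 sketch_per_percent(const struct tx_report2_sketch *rpt)
{
	u32 total = rpt->pkt_ok + rpt->pkt_fail;

	if (!total)
		return 0;

	/* Packet error ratio in percent; an RA state machine would
	 * compare this against thresholds to step the rate up or down. */
	return rpt->pkt_fail * 100 / total;
}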
@@ -3979,6 +4026,15 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
} else if (priv->rtl_chip == RTL8188F) {
rtl8xxxu_write32(priv, REG_HISR0, 0xffffffff);
rtl8xxxu_write32(priv, REG_HISR1, 0xffffffff);
+ } else if (priv->rtl_chip == RTL8188E) {
+ rtl8xxxu_write32(priv, REG_HISR0, 0xffffffff);
+ val32 = IMR0_PSTIMEOUT | IMR0_TBDER | IMR0_CPWM | IMR0_CPWM2;
+ rtl8xxxu_write32(priv, REG_HIMR0, val32);
+ val32 = IMR1_TXERR | IMR1_RXERR | IMR1_TXFOVW | IMR1_RXFOVW;
+ rtl8xxxu_write32(priv, REG_HIMR1, val32);
+ val8 = rtl8xxxu_read8(priv, REG_USB_SPECIAL_OPTION);
+ val8 |= USB_SPEC_INT_BULK_SELECT;
+ rtl8xxxu_write8(priv, REG_USB_SPECIAL_OPTION, val8);
} else {
/*
* Enable all interrupts - not obvious USB needs to do this
@@ -4084,7 +4140,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (fops->init_aggregation)
fops->init_aggregation(priv);
- if (priv->rtl_chip == RTL8188F) {
+ if (priv->rtl_chip == RTL8188F || priv->rtl_chip == RTL8188E) {
rtl8xxxu_write16(priv, REG_PKT_VO_VI_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
rtl8xxxu_write16(priv, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
}
@@ -4118,7 +4174,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* Disable BAR - not sure if this has any effect on USB */
rtl8xxxu_write32(priv, REG_BAR_MODE_CTRL, 0x0201ffff);
- if (priv->rtl_chip != RTL8188F)
+ if (priv->rtl_chip != RTL8188F && priv->rtl_chip != RTL8188E)
rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
if (fops->init_statistics)
@@ -4136,9 +4192,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* Reset USB mode switch setting
*/
rtl8xxxu_write8(priv, REG_ACLK_MON, 0x00);
- } else if (priv->rtl_chip == RTL8188F) {
+ } else if (priv->rtl_chip == RTL8188F || priv->rtl_chip == RTL8188E) {
/*
- * Init GPIO settings for 8188f
+ * Init GPIO settings for 8188f, 8188e
*/
val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
val8 &= ~GPIO_MUXCFG_IO_SEL_ENBT;
@@ -4184,7 +4240,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
val32 |= FPGA_RF_MODE_CCK;
rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
}
- } else if (priv->rtl_chip == RTL8192E) {
+ } else if (priv->rtl_chip == RTL8192E || priv->rtl_chip == RTL8188E) {
rtl8xxxu_write8(priv, REG_USB_HRPWM, 0x00);
}
@@ -4208,10 +4264,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* should be equal or CCK RSSI report may be incorrect
*/
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
- priv->cck_agc_report_type = val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
+ priv->cck_agc_report_type =
+ u32_get_bits(val32, FPGA0_HSSI_PARM2_CCK_HIGH_PWR);
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_HSSI_PARM2);
- if (priv->cck_agc_report_type != (bool)(val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)) {
+ if (priv->cck_agc_report_type !=
+ u32_get_bits(val32, FPGA0_HSSI_PARM2_CCK_HIGH_PWR)) {
if (priv->cck_agc_report_type)
val32 |= FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
else
@@ -4235,6 +4293,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
priv->cfo_tracking.crystal_cap = priv->default_crystal_cap;
}
+ if (priv->rtl_chip == RTL8188E)
+ rtl8188e_ra_info_init_all(&priv->ra_info);
+
exit:
return ret;
}
@@ -4401,6 +4462,37 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
}
+void rtl8xxxu_gen1_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi)
+{
+ struct h2c_cmd h2c;
+ const int h2c_size = 4;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+
+ h2c.rssi_report.cmd = H2C_SET_RSSI;
+ h2c.rssi_report.macid = macid;
+ h2c.rssi_report.rssi = rssi;
+
+ rtl8xxxu_gen1_h2c_cmd(priv, &h2c, h2c_size);
+}
+
+void rtl8xxxu_gen2_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi)
+{
+ struct h2c_cmd h2c;
+ int h2c_size = sizeof(h2c.rssi_report);
+
+ if (priv->rtl_chip == RTL8723B)
+ h2c_size = 4;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+
+ h2c.rssi_report.cmd = H2C_8723B_RSSI_SETTING;
+ h2c.rssi_report.macid = macid;
+ h2c.rssi_report.rssi = rssi;
+
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, h2c_size);
+}
+
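Usage mirrors the watchdog hunk further down in this same patch: once per watchdog tick the averaged signal is converted and handed to the firmware.

	signal = ieee80211_ave_rssi(vif);
	priv->fops->report_rssi(priv, 0, rtl8xxxu_signal_to_snr(signal));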
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
{
u8 agg_ctrl, usb_spec, page_thresh, timeout;
@@ -4598,8 +4690,8 @@ static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
}
}
-static void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt,
- u8 rate, u8 sgi, u8 bw)
+void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt,
+ u8 rate, u8 sgi, u8 bw)
{
u8 mcs, nss;
@@ -5069,6 +5161,98 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
}
}
+/*
+ * Fill in v3 (gen1) specific TX descriptor bits.
+ * This format is a hybrid between the v1 and v2 formats, only seen
+ * on 8188eu devices so far.
+ */
+void
+rtl8xxxu_fill_txdesc_v3(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
+ bool short_preamble, bool ampdu_enable, u32 rts_rate)
+{
+ struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_ra_info *ra = &priv->ra_info;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ u32 rate;
+ u16 rate_flags = tx_info->control.rates[0].flags;
+ u16 seq_number;
+
+ if (rate_flags & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_mgmt(hdr->frame_control))
+ rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+ else
+ rate = tx_rate->hw_value;
+
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ rate = ra->decision_rate;
+ tx_desc->txdw5 = cpu_to_le32(rate);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
+ tx_desc->txdw4 |= le32_encode_bits(ra->pt_stage, TXDESC32_PT_STAGE_MASK);
+ /* Data/RTS rate FB limit */
+ tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+ }
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
+ dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
+ __func__, rate, le16_to_cpu(tx_desc->pkt_size));
+
+ tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
+
+ if (ampdu_enable && test_bit(tid, priv->tid_tx_operational))
+ tx_desc->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
+ else
+ tx_desc->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc->txdw5 = cpu_to_le32(rate);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
+ tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
+ }
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
+
+ if (conf_is_ht40(&hw->conf)) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_DATA_BW);
+
+ if (conf_is_ht40_minus(&hw->conf))
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_PRIME_CH_OFF_UPPER);
+ else
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_PRIME_CH_OFF_LOWER);
+ }
+ }
+
+ if (short_preamble)
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
+
+ if (sgi && ra->rate_sgi)
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
+
+ /*
+ * rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
+ */
+ tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT);
+ if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
+ } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_CTS_SELF_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
+ }
+
+ tx_desc->txdw2 |= cpu_to_le32(TXDESC_ANTENNA_SELECT_A |
+ TXDESC_ANTENNA_SELECT_B);
+ tx_desc->txdw7 |= cpu_to_le16(TXDESC_ANTENNA_SELECT_C >> 16);
+}
+
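The new filler is wired up through the 8188e fileops table, which lives in a file not shown in this excerpt; a sketch of the relevant initializer:

/* in rtl8xxxu_8188e.c (outside this hunk) */
struct rtl8xxxu_fileops rtl8188eu_fops = {
	/* ... */
	.fill_txdesc = rtl8xxxu_fill_txdesc_v3,
	/* ... */
};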
static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -5274,7 +5458,7 @@ static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
pending = priv->rx_urb_pending_count;
} else {
skb = (struct sk_buff *)rx_urb->urb.context;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
usb_free_urb(&rx_urb->urb);
}
@@ -5550,9 +5734,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
btcoex = &priv->bt_coex;
rarpt = &priv->ra_report;
- if (priv->rf_paths > 1)
- goto out;
-
while (!skb_queue_empty(&priv->c2hcmd_queue)) {
skb = skb_dequeue(&priv->c2hcmd_queue);
@@ -5585,10 +5766,9 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
default:
break;
}
- }
-out:
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
+ }
}
static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
@@ -5640,6 +5820,44 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
schedule_work(&priv->c2hcmd_work);
}
+static void rtl8188e_c2hcmd_callback(struct work_struct *work)
+{
+ struct rtl8xxxu_priv *priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
+ struct device *dev = &priv->udev->dev;
+ struct sk_buff *skb = NULL;
+ struct rtl8xxxu_rxdesc16 *rx_desc;
+
+ while (!skb_queue_empty(&priv->c2hcmd_queue)) {
+ skb = skb_dequeue(&priv->c2hcmd_queue);
+
+ rx_desc = (struct rtl8xxxu_rxdesc16 *)(skb->data - sizeof(struct rtl8xxxu_rxdesc16));
+
+ switch (rx_desc->rpt_sel) {
+ case 1:
+ dev_dbg(dev, "C2H TX report type 1\n");
+
+ break;
+ case 2:
+ dev_dbg(dev, "C2H TX report type 2\n");
+
+ rtl8188e_handle_ra_tx_report2(priv, skb);
+
+ break;
+ case 3:
+ dev_dbg(dev, "C2H USB interrupt report\n");
+
+ break;
+ default:
+ dev_warn(dev, "%s: rpt_sel should not be %d\n",
+ __func__, rx_desc->rpt_sel);
+
+ break;
+ }
+
+ dev_kfree_skb(skb);
+ }
+}
+
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hw *hw = priv->hw;
@@ -5695,38 +5913,45 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
- phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+ if (rx_desc->rpt_sel) {
+ skb_queue_tail(&priv->c2hcmd_queue, skb);
+ schedule_work(&priv->c2hcmd_work);
+ } else {
+ phy_stats = (struct rtl8723au_phy_stats *)skb->data;
- skb_pull(skb, drvinfo_sz + desc_shift);
+ skb_pull(skb, drvinfo_sz + desc_shift);
- skb_trim(skb, pkt_len);
+ skb_trim(skb, pkt_len);
- if (rx_desc->phy_stats)
- rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
- rx_desc->rxmcs, (struct ieee80211_hdr *)skb->data,
- rx_desc->crc32 || rx_desc->icverr);
+ if (rx_desc->phy_stats)
+ rtl8xxxu_rx_parse_phystats(
+ priv, rx_status, phy_stats,
+ rx_desc->rxmcs,
+ (struct ieee80211_hdr *)skb->data,
+ rx_desc->crc32 || rx_desc->icverr);
- rx_status->mactime = rx_desc->tsfl;
- rx_status->flag |= RX_FLAG_MACTIME_START;
+ rx_status->mactime = rx_desc->tsfl;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
- if (!rx_desc->swdec)
- rx_status->flag |= RX_FLAG_DECRYPTED;
- if (rx_desc->crc32)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rx_desc->bw)
- rx_status->bw = RATE_INFO_BW_40;
+ if (!rx_desc->swdec)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_desc->bw)
+ rx_status->bw = RATE_INFO_BW_40;
- if (rx_desc->rxht) {
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
- } else {
- rx_status->rate_idx = rx_desc->rxmcs;
- }
+ if (rx_desc->rxht) {
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+ } else {
+ rx_status->rate_idx = rx_desc->rxmcs;
+ }
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
- ieee80211_rx_irqsafe(hw, skb);
+ ieee80211_rx_irqsafe(hw, skb);
+ }
skb = next_skb;
if (skb)
@@ -5956,7 +6181,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
- u16 val16;
int ret = 0, channel;
bool ht40;
@@ -5966,14 +6190,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
__func__, hw->conf.chandef.chan->hw_value,
changed, hw->conf.chandef.width);
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- val16 = ((hw->conf.long_frame_max_tx_count <<
- RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
- ((hw->conf.short_frame_max_tx_count <<
- RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
- rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -6504,6 +6720,9 @@ static void rtl8xxxu_watchdog_callback(struct work_struct *work)
signal = ieee80211_ave_rssi(vif);
+ priv->fops->report_rssi(priv, 0,
+ rtl8xxxu_signal_to_snr(signal));
+
if (priv->fops->set_crystal_cap)
rtl8xxxu_track_cfo(priv);
@@ -6587,7 +6806,10 @@ exit:
rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);
- rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
+ if (priv->rtl_chip == RTL8188E)
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6955341e);
+ else
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
return ret;
@@ -6733,6 +6955,40 @@ exit:
return ret;
}
+static void rtl8xxxu_init_led(struct rtl8xxxu_priv *priv)
+{
+ struct led_classdev *led = &priv->led_cdev;
+
+ if (!priv->fops->led_classdev_brightness_set)
+ return;
+
+ led->brightness_set_blocking = priv->fops->led_classdev_brightness_set;
+
+ snprintf(priv->led_name, sizeof(priv->led_name),
+ "rtl8xxxu-usb%s", dev_name(&priv->udev->dev));
+ led->name = priv->led_name;
+ led->max_brightness = RTL8XXXU_HW_LED_CONTROL;
+
+ if (led_classdev_register(&priv->udev->dev, led))
+ return;
+
+ priv->led_registered = true;
+
+ led->brightness = led->max_brightness;
+ priv->fops->led_classdev_brightness_set(led, led->brightness);
+}
+
+static void rtl8xxxu_deinit_led(struct rtl8xxxu_priv *priv)
+{
+ struct led_classdev *led = &priv->led_cdev;
+
+ if (!priv->led_registered)
+ return;
+
+ priv->fops->led_classdev_brightness_set(led, LED_OFF);
+ led_classdev_unregister(led);
+}
+
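Once registered, the LED shows up under /sys/class/leds/ with three meaningful brightness levels; a sketch of the resulting userspace contract, assuming RTL8XXXU_HW_LED_CONTROL is the max_brightness value of 2:

/*
 * echo 0 > /sys/class/leds/rtl8xxxu-usb<dev>/brightness   LED forced off
 * echo 1 > /sys/class/leds/rtl8xxxu-usb<dev>/brightness   LED forced on
 * echo 2 > /sys/class/leds/rtl8xxxu-usb<dev>/brightness   hardware control
 */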
static int rtl8xxxu_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -6754,6 +7010,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
case 0x817f:
case 0x818b:
case 0xf179:
+ case 0x8179:
untested = 0;
break;
}
@@ -6809,7 +7066,6 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
- INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback);
skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -6827,6 +7083,11 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->wiphy->available_antennas_tx = BIT(priv->tx_paths) - 1;
hw->wiphy->available_antennas_rx = BIT(priv->rx_paths) - 1;
+ if (priv->rtl_chip == RTL8188E)
+ INIT_WORK(&priv->c2hcmd_work, rtl8188e_c2hcmd_callback);
+ else
+ INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback);
+
ret = rtl8xxxu_read_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
@@ -6839,6 +7100,9 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
goto err_set_intfdata;
}
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE)
+ rtl8xxxu_dump_efuse(priv);
+
rtl8xxxu_print_chipinfo(priv);
ret = priv->fops->load_firmware(priv);
@@ -6887,8 +7151,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->extra_tx_headroom = priv->fops->tx_desc_size;
ieee80211_hw_set(hw, SIGNAL_DBM);
+
/*
- * The firmware handles rate control
+ * The firmware handles rate control, except for RTL8188EU,
+ * where we handle the rate control in the driver.
*/
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
@@ -6903,6 +7169,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
goto err_set_intfdata;
}
+ rtl8xxxu_init_led(priv);
+
return 0;
err_set_intfdata:
@@ -6927,6 +7195,8 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
hw = usb_get_intfdata(interface);
priv = hw->priv;
+ rtl8xxxu_deinit_led(priv);
+
ieee80211_unregister_hw(hw);
priv->fops->power_off(priv);
@@ -6973,6 +7243,50 @@ static const struct usb_device_id dev_table[] = {
/* RTL8188FU */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xf179, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8188fu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8179, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Tested by Hans de Goede - rtl8188etv */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0179, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Sitecom rtl8188eus */
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0076, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* D-Link USB-GO-N150 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3311, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* D-Link DWA-125 REV D1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330f, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* D-Link DWA-123 REV D1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3310, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* D-Link DWA-121 rev B1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Abocom - Abocom */
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8179, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Elecom WDC-150SU2M */
+{USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4008, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* TP-Link TL-WN722N v2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* TP-Link TL-WN727N v5.21 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0111, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* MERCUSYS MW150US v2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0102, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* ASUS USB-N10 Nano B1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x18f0, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Edimax EW-7811Un V2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb811, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Rosewill USB-N150 Nano */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xffef, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 3e79efdfb4c2..5849fa4e1566 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -147,7 +147,13 @@
#define REG_LEDCFG0 0x004c
#define LEDCFG0_DPDT_SELECT BIT(23)
#define REG_LEDCFG1 0x004d
+#define LEDCFG1_HW_LED_CONTROL BIT(1)
+#define LEDCFG1_LED_DISABLE BIT(7)
#define REG_LEDCFG2 0x004e
+#define LEDCFG2_HW_LED_CONTROL BIT(1)
+#define LEDCFG2_HW_LED_ENABLE BIT(5)
+#define LEDCFG2_SW_LED_DISABLE BIT(3)
+#define LEDCFG2_SW_LED_CONTROL BIT(5)
#define LEDCFG2_DPDT_SELECT BIT(7)
#define REG_LEDCFG3 0x004f
#define REG_LEDCFG REG_LEDCFG2
@@ -371,6 +377,11 @@
#define PBP_PAGE_SIZE_512 0x3
#define PBP_PAGE_SIZE_1024 0x4
+/* 8188eu IOL magic */
+#define REG_PKT_BUF_ACCESS_CTRL 0x0106
+#define PKT_BUF_ACCESS_CTRL_TX 0x69
+#define PKT_BUF_ACCESS_CTRL_RX 0xa5
+
#define REG_TRXDMA_CTRL 0x010c
#define TRXDMA_CTRL_RXDMA_AGG_EN BIT(2)
#define TRXDMA_CTRL_VOQ_SHIFT 4
@@ -407,6 +418,8 @@
#define REG_MBIST_START 0x0174
#define REG_MBIST_DONE 0x0178
#define REG_MBIST_FAIL 0x017c
+/* 8188EU */
+#define REG_32K_CTRL 0x0194
#define REG_C2HEVT_MSG_NORMAL 0x01a0
/* 8192EU/8723BU/8812 */
#define REG_C2HEVT_CMD_ID_8723B 0x01ae
@@ -942,6 +955,16 @@
#define REG_FPGA1_RF_MODE 0x0900
#define REG_FPGA1_TX_INFO 0x090c
+#define FPGA1_TX_ANT_MASK 0x0000000f
+#define FPGA1_TX_ANT_L_MASK 0x000000f0
+#define FPGA1_TX_ANT_NON_HT_MASK 0x00000f00
+#define FPGA1_TX_ANT_HT1_MASK 0x0000f000
+#define FPGA1_TX_ANT_HT2_MASK 0x000f0000
+#define FPGA1_TX_ANT_HT_S1_MASK 0x00f00000
+#define FPGA1_TX_ANT_NON_HT_S1_MASK 0x0f000000
+#define FPGA1_TX_OFDM_TXSC_MASK 0x30000000
+
+#define REG_ANT_MAPPING1 0x0914
#define REG_DPDT_CTRL 0x092c /* 8723BU */
#define REG_RFE_CTRL_ANTA_SRC 0x0930 /* 8723BU */
#define REG_RFE_PATH_SELECT 0x0940 /* 8723BU */
@@ -954,9 +977,25 @@
#define REG_CCK0_AFE_SETTING 0x0a04
#define CCK0_AFE_RX_MASK 0x0f000000
-#define CCK0_AFE_RX_ANT_AB BIT(24)
+#define CCK0_AFE_TX_MASK 0xf0000000
#define CCK0_AFE_RX_ANT_A 0
-#define CCK0_AFE_RX_ANT_B (BIT(24) | BIT(26))
+#define CCK0_AFE_RX_ANT_B BIT(26)
+#define CCK0_AFE_RX_ANT_C BIT(27)
+#define CCK0_AFE_RX_ANT_D (BIT(26) | BIT(27))
+#define CCK0_AFE_RX_ANT_OPTION_A 0
+#define CCK0_AFE_RX_ANT_OPTION_B BIT(24)
+#define CCK0_AFE_RX_ANT_OPTION_C BIT(25)
+#define CCK0_AFE_RX_ANT_OPTION_D (BIT(24) | BIT(25))
+#define CCK0_AFE_TX_ANT_A BIT(31)
+#define CCK0_AFE_TX_ANT_B BIT(30)
+
+#define REG_CCK_ANTDIV_PARA2 0x0a04
+#define REG_BB_POWER_SAVE4 0x0a74
+
+/* 8188eu */
+#define REG_LNA_SWITCH 0x0b2c
+#define LNA_SWITCH_DISABLE_CSCG BIT(22)
+#define LNA_SWITCH_OUTPUT_CG BIT(31)
#define REG_CCK_PD_THRESH 0x0a0a
#define CCK_PD_TYPE1_LV0_TH 0x40
@@ -1020,6 +1059,9 @@
#define REG_OFDM0_RX_IQ_EXT_ANTA 0x0ca0
+/* 8188eu */
+#define REG_ANTDIV_PARA1 0x0ca4
+
/* 8723bu */
#define REG_OFDM0_TX_PSDO_NOISE_WEIGHT 0x0ce4
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 58c2ab3d44be..de61c9c0ddec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -68,8 +68,10 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ struct sk_buff_head free_list;
unsigned long flags;
+ skb_queue_head_init(&free_list);
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
@@ -79,10 +81,12 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
true, HW_DESC_TXBUFF_ADDR),
skb->len, DMA_TO_DEVICE);
- kfree_skb(skb);
+ __skb_queue_tail(&free_list, skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ __skb_queue_purge(&free_list);
}
static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
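The same transformation is applied to 8723be and 8821ae below. kfree_skb() must not be called with interrupts disabled, so each skb is parked on a local list while the ring lock is held and freed afterwards. The pattern in isolation (lock and queue stand in for the driver's ring lock and beacon queue):

	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long flags;

	skb_queue_head_init(&free_list);

	spin_lock_irqsave(&lock, flags);
	while ((skb = __skb_dequeue(&queue)))
		__skb_queue_tail(&free_list, skb);	/* defer, don't free here */
	spin_unlock_irqrestore(&lock, flags);

	__skb_queue_purge(&free_list);			/* free with IRQs enabled */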
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
index 0455a3712f3e..12cdecdafc32 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
@@ -116,7 +116,7 @@ void rtl8723e_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
long rtl8723e_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
void rtl8723e_dm_bt_balance(struct ieee80211_hw *hw,
bool balance_on, u8 ms0, u8 ms1);
-void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 tyep);
+void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
void rtl8723e_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type);
u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
u8 level_num, u8 rssi_thresh,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 189cc6437600..0ba3bbed6ed3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -30,8 +30,10 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ struct sk_buff_head free_list;
unsigned long flags;
+ skb_queue_head_init(&free_list);
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
@@ -41,10 +43,12 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
true, HW_DESC_TXBUFF_ADDR),
skb->len, DMA_TO_DEVICE);
- kfree_skb(skb);
+ __skb_queue_tail(&free_list, skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ __skb_queue_purge(&free_list);
}
static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 7e0f62d59fe1..a7e3250957dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -26,8 +26,10 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ struct sk_buff_head free_list;
unsigned long flags;
+ skb_queue_head_init(&free_list);
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
@@ -37,10 +39,12 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
true, HW_DESC_TXBUFF_ADDR),
skb->len, DMA_TO_DEVICE);
- kfree_skb(skb);
+ __skb_queue_tail(&free_list, skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ __skb_queue_purge(&free_list);
}
static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index a29321e2fa72..5323ead30db0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -1598,18 +1598,6 @@ static bool _rtl8812ae_get_integer_from_string(const char *str, u8 *pint)
return true;
}
-static bool _rtl8812ae_eq_n_byte(const char *str1, const char *str2, u32 num)
-{
- if (num == 0)
- return false;
- while (num > 0) {
- num--;
- if (str1[num] != str2[num])
- return false;
- }
- return true;
-}
-
static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
u8 band, u8 channel)
{
@@ -1659,42 +1647,42 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
power_limit = power_limit > MAX_POWER_INDEX ?
MAX_POWER_INDEX : power_limit;
- if (_rtl8812ae_eq_n_byte(pregulation, "FCC", 3))
+ if (strcmp(pregulation, "FCC") == 0)
regulation = 0;
- else if (_rtl8812ae_eq_n_byte(pregulation, "MKK", 3))
+ else if (strcmp(pregulation, "MKK") == 0)
regulation = 1;
- else if (_rtl8812ae_eq_n_byte(pregulation, "ETSI", 4))
+ else if (strcmp(pregulation, "ETSI") == 0)
regulation = 2;
- else if (_rtl8812ae_eq_n_byte(pregulation, "WW13", 4))
+ else if (strcmp(pregulation, "WW13") == 0)
regulation = 3;
- if (_rtl8812ae_eq_n_byte(prate_section, "CCK", 3))
+ if (strcmp(prate_section, "CCK") == 0)
rate_section = 0;
- else if (_rtl8812ae_eq_n_byte(prate_section, "OFDM", 4))
+ else if (strcmp(prate_section, "OFDM") == 0)
rate_section = 1;
- else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) &&
- _rtl8812ae_eq_n_byte(prf_path, "1T", 2))
+ else if (strcmp(prate_section, "HT") == 0 &&
+ strcmp(prf_path, "1T") == 0)
rate_section = 2;
- else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) &&
- _rtl8812ae_eq_n_byte(prf_path, "2T", 2))
+ else if (strcmp(prate_section, "HT") == 0 &&
+ strcmp(prf_path, "2T") == 0)
rate_section = 3;
- else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) &&
- _rtl8812ae_eq_n_byte(prf_path, "1T", 2))
+ else if (strcmp(prate_section, "VHT") == 0 &&
+ strcmp(prf_path, "1T") == 0)
rate_section = 4;
- else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) &&
- _rtl8812ae_eq_n_byte(prf_path, "2T", 2))
+ else if (strcmp(prate_section, "VHT") == 0 &&
+ strcmp(prf_path, "2T") == 0)
rate_section = 5;
- if (_rtl8812ae_eq_n_byte(pbandwidth, "20M", 3))
+ if (strcmp(pbandwidth, "20M") == 0)
bandwidth = 0;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, "40M", 3))
+ else if (strcmp(pbandwidth, "40M") == 0)
bandwidth = 1;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, "80M", 3))
+ else if (strcmp(pbandwidth, "80M") == 0)
bandwidth = 2;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, "160M", 4))
+ else if (strcmp(pbandwidth, "160M") == 0)
bandwidth = 3;
- if (_rtl8812ae_eq_n_byte(pband, "2.4G", 4)) {
+ if (strcmp(pband, "2.4G") == 0) {
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
BAND_ON_2_4G,
channel);
@@ -1718,7 +1706,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
regulation, bandwidth, rate_section, channel_index,
rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A]);
- } else if (_rtl8812ae_eq_n_byte(pband, "5G", 2)) {
+ } else if (strcmp(pband, "5G") == 0) {
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
BAND_ON_5G,
channel);
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index 038a30b170ef..c827c4a2814b 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -49,19 +49,23 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
sta = ieee80211_find_sta(vif, bssid);
if (!sta) {
+ rcu_read_unlock();
+
rtw_warn(rtwdev, "failed to find station entry for bss %pM\n",
bssid);
- goto out_unlock;
+ return;
}
ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
vht_cap = &sta->deflink.vht_cap;
+ rcu_read_unlock();
+
if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
(vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
if (bfinfo->bfer_mu_cnt >= chip->bfer_mu_max_num) {
rtw_dbg(rtwdev, RTW_DBG_BF, "mu bfer number over limit\n");
- goto out_unlock;
+ return;
}
ether_addr_copy(bfee->mac_addr, bssid);
@@ -75,7 +79,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
(vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) {
rtw_dbg(rtwdev, RTW_DBG_BF, "su bfer number over limit\n");
- goto out_unlock;
+ return;
}
sound_dim = vht_cap->cap &
@@ -98,9 +102,6 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true);
}
-
-out_unlock:
- rcu_read_unlock();
}
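Reduced to its essentials, the fix makes every early-exit path drop the RCU read lock itself and takes everything it needs from the station before the slow path begins:

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, bssid);
	if (!sta) {
		rcu_read_unlock();	/* each early exit unlocks for itself */
		return;
	}
	vht_cap = &sta->deflink.vht_cap;
	rcu_read_unlock();
	/* the longer configuration work below no longer runs under RCU */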
void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 38697237ee5f..86467d2f8888 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -4056,7 +4056,7 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
rtwdev->stats.tx_throughput, rtwdev->stats.rx_throughput);
seq_printf(m, "%-40s = %u/ %u/ %u\n",
"IPS/ Low Power/ PS mode",
- test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags),
+ !test_bit(RTW_FLAG_POWERON, rtwdev->flags),
test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags),
rtwdev->lps_conf.mode);
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 98777f294945..dae64901bac5 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -217,10 +217,10 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
cut_mask = cut_version_to_mask(cut);
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
- intf_mask = BIT(2);
+ intf_mask = RTW_PWR_INTF_PCI_MSK;
break;
case RTW_HCI_TYPE_USB:
- intf_mask = BIT(1);
+ intf_mask = RTW_PWR_INTF_USB_MSK;
break;
default:
return -EINVAL;
@@ -273,6 +273,11 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
return -EINVAL;
+ if (pwr_on)
+ set_bit(RTW_FLAG_POWERON, rtwdev->flags);
+ else
+ clear_bit(RTW_FLAG_POWERON, rtwdev->flags);
+
return 0;
}
@@ -335,6 +340,11 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev)
ret = rtw_mac_power_switch(rtwdev, true);
if (ret == -EALREADY) {
rtw_mac_power_switch(rtwdev, false);
+
+ ret = rtw_mac_pre_system_cfg(rtwdev);
+ if (ret)
+ goto err;
+
ret = rtw_mac_power_switch(rtwdev, true);
if (ret)
goto err;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 776a9a9884b5..3b92ac611d3f 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -737,7 +737,7 @@ static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
br_data.rtwdev = rtwdev;
br_data.vif = vif;
br_data.mask = mask;
- rtw_iterate_stas_atomic(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
+ rtw_iterate_stas(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
}
static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
@@ -746,7 +746,9 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
+ mutex_lock(&rtwdev->mutex);
rtw_ra_mask_info_update(rtwdev, vif, mask);
+ mutex_unlock(&rtwdev->mutex);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 888427cf3bdf..b2e78737bd5d 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -241,8 +241,10 @@ static void rtw_watch_dog_work(struct work_struct *work)
rtw_phy_dynamic_mechanism(rtwdev);
data.rtwdev = rtwdev;
- /* use atomic version to avoid taking local->iflist_mtx mutex */
- rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data);
+ /* rtw_iterate_vifs internally uses an atomic iterator, which is needed
+ * to avoid taking the local->iflist_mtx mutex

+ */
+ rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data);
/* fw supports only one station associated to enter lps, if there are
* more than two stations associated to the AP, then we can not enter
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 165f299e8e1f..d4a53d556745 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -356,7 +356,7 @@ enum rtw_flags {
RTW_FLAG_RUNNING,
RTW_FLAG_FW_RUNNING,
RTW_FLAG_SCANNING,
- RTW_FLAG_INACTIVE_PS,
+ RTW_FLAG_POWERON,
RTW_FLAG_LEISURE_PS,
RTW_FLAG_LEISURE_PS_DEEP,
RTW_FLAG_DIG_DISABLE,
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 0975d27240e4..b4bd831c9845 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -30,7 +30,8 @@ static u32 rtw_pci_tx_queue_idx_addr[] = {
[RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
};
-static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
+static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
+ enum rtw_tx_queue_type queue)
{
switch (queue) {
case RTW_TX_QUEUE_BCN:
@@ -542,7 +543,7 @@ static int rtw_pci_setup(struct rtw_dev *rtwdev)
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
struct rtw_pci_tx_ring *tx_ring;
- u8 queue;
+ enum rtw_tx_queue_type queue;
rtw_pci_reset_trx_ring(rtwdev);
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
@@ -608,8 +609,8 @@ static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *tx_ring;
+ enum rtw_tx_queue_type queue;
bool tx_empty = true;
- u8 queue;
if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
goto enter_deep_ps;
@@ -669,37 +670,6 @@ static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
spin_unlock_bh(&rtwpci->irq_lock);
}
-static u8 ac_to_hwq[] = {
- [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
- [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
- [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
- [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
-};
-
-static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
-
-static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
- u8 q_mapping = skb_get_queue_mapping(skb);
- u8 queue;
-
- if (unlikely(ieee80211_is_beacon(fc)))
- queue = RTW_TX_QUEUE_BCN;
- else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
- queue = RTW_TX_QUEUE_MGMT;
- else if (is_broadcast_ether_addr(hdr->addr1) ||
- is_multicast_ether_addr(hdr->addr1))
- queue = RTW_TX_QUEUE_HI0;
- else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
- queue = ac_to_hwq[IEEE80211_AC_BE];
- else
- queue = ac_to_hwq[q_mapping];
-
- return queue;
-}
-
static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
struct rtw_pci_tx_ring *ring)
{
@@ -797,13 +767,14 @@ static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
} else {
for (i = 0; i < rtwdev->hw->queues; i++)
if (queues & BIT(i))
- pci_queues |= BIT(ac_to_hwq[i]);
+ pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
}
__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}
-static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
+static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
+ enum rtw_tx_queue_type queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *ring;
@@ -822,7 +793,7 @@ static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- u8 queue;
+ enum rtw_tx_queue_type queue;
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
if (test_and_clear_bit(queue, rtwpci->tx_queued))
@@ -831,7 +802,8 @@ static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb, u8 queue)
+ struct sk_buff *skb,
+ enum rtw_tx_queue_type queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
const struct rtw_chip_info *chip = rtwdev->chip;
@@ -949,9 +921,9 @@ static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb)
{
+ enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *ring;
- u8 queue = rtw_hw_queue_mapping(skb);
int ret;
ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 11594940d6b0..996365575f44 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -25,7 +25,7 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
- if (test_and_set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags))
return 0;
rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
@@ -50,7 +50,7 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
{
int ret;
- if (!test_and_clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ if (test_bit(RTW_FLAG_POWERON, rtwdev->flags))
return 0;
rtw_hci_link_ps(rtwdev, false);
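Taken together with the mac.c hunk above, the flag now has a single writer and a direct meaning; roughly:

/*
 * rtw_mac_power_switch(rtwdev, true)  -> set_bit(RTW_FLAG_POWERON)
 * rtw_mac_power_switch(rtwdev, false) -> clear_bit(RTW_FLAG_POWERON)
 *
 * rtw_enter_ips(): if (!test_bit(RTW_FLAG_POWERON, ...)) return 0;  already off
 * rtw_leave_ips(): if (test_bit(RTW_FLAG_POWERON, ...))  return 0;  already on
 */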
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index ab39245e9c2f..bb5c7492c98b 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -682,3 +682,44 @@ void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
list_del_init(&rtwtxq->list);
spin_unlock_bh(&rtwdev->txq_lock);
}
+
+static const enum rtw_tx_queue_type ac_to_hwq[] = {
+ [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
+ [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
+ [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
+ [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
+};
+
+static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
+
+enum rtw_tx_queue_type rtw_tx_ac_to_hwq(enum ieee80211_ac_numbers ac)
+{
+ if (WARN_ON(unlikely(ac >= IEEE80211_NUM_ACS)))
+ return RTW_TX_QUEUE_BE;
+
+ return ac_to_hwq[ac];
+}
+EXPORT_SYMBOL(rtw_tx_ac_to_hwq);
+
+enum rtw_tx_queue_type rtw_tx_queue_mapping(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u8 q_mapping = skb_get_queue_mapping(skb);
+ enum rtw_tx_queue_type queue;
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ queue = RTW_TX_QUEUE_BCN;
+ else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
+ queue = RTW_TX_QUEUE_MGMT;
+ else if (is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1))
+ queue = RTW_TX_QUEUE_HI0;
+ else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
+ queue = ac_to_hwq[IEEE80211_AC_BE];
+ else
+ queue = ac_to_hwq[q_mapping];
+
+ return queue;
+}
+EXPORT_SYMBOL(rtw_tx_queue_mapping);
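With the helpers exported, the HCI backends share one mapping; the pci.c hunk above reduces to:

	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);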
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index a2f3ac326041..197d5868c8ad 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -131,6 +131,9 @@ rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *buf, u32 size);
+enum rtw_tx_queue_type rtw_tx_ac_to_hwq(enum ieee80211_ac_numbers ac);
+enum rtw_tx_queue_type rtw_tx_queue_mapping(struct sk_buff *skb);
+
static inline
void fill_txdesc_checksum_common(u8 *txdesc, size_t words)
{
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index 4ef38279b64c..2a8336b1847a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -271,6 +271,7 @@ static int rtw_usb_write_port(struct rtw_dev *rtwdev, u8 qsel, struct sk_buff *s
return -ENOMEM;
usb_fill_bulk_urb(urb, usbd, pipe, skb->data, skb->len, cb, context);
+ urb->transfer_flags |= URB_ZERO_PACKET;
ret = usb_submit_urb(urb, GFP_ATOMIC);
usb_free_urb(urb);
@@ -413,24 +414,11 @@ static int rtw_usb_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
u32 size)
{
const struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_usb *rtwusb;
struct rtw_tx_pkt_info pkt_info = {0};
- u32 len, desclen;
-
- rtwusb = rtw_get_usb_priv(rtwdev);
pkt_info.tx_pkt_size = size;
pkt_info.qsel = TX_DESC_QSEL_BEACON;
-
- desclen = chip->tx_pkt_desc_sz;
- len = desclen + size;
- if (len % rtwusb->bulkout_size == 0) {
- len += RTW_USB_PACKET_OFFSET_SZ;
- pkt_info.offset = desclen + RTW_USB_PACKET_OFFSET_SZ;
- pkt_info.pkt_offset = 1;
- } else {
- pkt_info.offset = desclen;
- }
+ pkt_info.offset = chip->tx_pkt_desc_sz;
return rtw_usb_write_data(rtwdev, &pkt_info, buf);
}
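The removed padding existed so a bulk-out transfer would never be an exact multiple of the endpoint's max packet size, which would otherwise leave the device waiting for more data. URB_ZERO_PACKET, set in the write_port hunk above, has the USB core terminate such transfers with a zero-length packet instead:

	usb_fill_bulk_urb(urb, usbd, pipe, skb->data, skb->len, cb, context);
	urb->transfer_flags |= URB_ZERO_PACKET;	/* core appends the ZLP when needed */
	ret = usb_submit_urb(urb, GFP_ATOMIC);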
@@ -471,9 +459,9 @@ static int rtw_usb_tx_write(struct rtw_dev *rtwdev,
u8 *pkt_desc;
int ep;
+ pkt_info->qsel = rtw_usb_tx_queue_mapping_to_qsel(skb);
pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
- pkt_info->qsel = rtw_usb_tx_queue_mapping_to_qsel(skb);
ep = qsel_to_ep(rtwusb, pkt_info->qsel);
rtw_tx_fill_tx_desc(pkt_info, skb);
rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
index 89dc595094d5..16ddee577efe 100644
--- a/drivers/net/wireless/realtek/rtw88/wow.c
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -592,7 +592,7 @@ static int rtw_wow_leave_no_link_ps(struct rtw_dev *rtwdev)
if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
rtw_leave_lps_deep(rtwdev);
} else {
- if (test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags)) {
+ if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags)) {
rtw_wow->ips_enabled = true;
ret = rtw_leave_ips(rtwdev);
if (ret)
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index f21c73310fdb..bcf483cafd20 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -9,7 +9,7 @@
#include "ps.h"
#include "reg.h"
-#define RTW89_COEX_VERSION 0x06030013
+#define RTW89_COEX_VERSION 0x07000013
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw */
enum btc_fbtc_tdma_template {
@@ -63,7 +63,7 @@ struct btc_fbtc_1slot {
static const struct rtw89_btc_fbtc_tdma t_def[] = {
[CXTD_OFF] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
[CXTD_OFF_B2] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 1, 0, 0},
- [CXTD_OFF_EXT] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 3, 0, 0},
+ [CXTD_OFF_EXT] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 2, 0, 0},
[CXTD_FIX] = { CXTDMA_FIX, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
[CXTD_PFIX] = { CXTDMA_FIX, CXFLC_NULLP, CXTPS_ON, 0, 5, 0, 0, 0},
[CXTD_AUTO] = { CXTDMA_AUTO, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
@@ -80,21 +80,21 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_OFF] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
[CXST_B2W] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_ISO),
[CXST_W1] = __DEF_FBTC_SLOT(70, 0xea5a5a5a, SLOT_ISO),
- [CXST_W2] = __DEF_FBTC_SLOT(70, 0xea5a5aaa, SLOT_ISO),
+ [CXST_W2] = __DEF_FBTC_SLOT(15, 0xea5a5a5a, SLOT_ISO),
[CXST_W2B] = __DEF_FBTC_SLOT(15, 0xea5a5a5a, SLOT_ISO),
- [CXST_B1] = __DEF_FBTC_SLOT(100, 0xe5555555, SLOT_MIX),
+ [CXST_B1] = __DEF_FBTC_SLOT(250, 0xe5555555, SLOT_MIX),
[CXST_B2] = __DEF_FBTC_SLOT(7, 0xea5a5a5a, SLOT_MIX),
[CXST_B3] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
[CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX),
[CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO),
- [CXST_BLK] = __DEF_FBTC_SLOT(250, 0x55555555, SLOT_MIX),
- [CXST_E2G] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_MIX),
- [CXST_E5G] = __DEF_FBTC_SLOT(20, 0xffffffff, SLOT_MIX),
- [CXST_EBT] = __DEF_FBTC_SLOT(20, 0xe5555555, SLOT_MIX),
- [CXST_ENULL] = __DEF_FBTC_SLOT(7, 0xaaaaaaaa, SLOT_ISO),
+ [CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
+ [CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX),
+ [CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO),
+ [CXST_EBT] = __DEF_FBTC_SLOT(0, 0xe5555555, SLOT_MIX),
+ [CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
- [CXST_W1FDD] = __DEF_FBTC_SLOT(35, 0xfafafafa, SLOT_ISO),
- [CXST_B1FDD] = __DEF_FBTC_SLOT(100, 0xffffffff, SLOT_MIX),
+ [CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
+ [CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO),
};
static const u32 cxtbl[] = {
@@ -117,9 +117,78 @@ static const u32 cxtbl[] = {
0xfafafafa, /* 16 */
0xffffddff, /* 17 */
0xdaffdaff, /* 18 */
- 0xfafadafa /* 19 */
+ 0xfafadafa, /* 19 */
+ 0xea6a6a6a, /* 20 */
+ 0xea55556a, /* 21 */
+ 0xaafafafa, /* 22 */
+ 0xfafaaafa, /* 23 */
+ 0xfafffaff /* 24 */
};
+static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
+ /* firmware version must be in decreasing order for each chip */
+ {RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
+ .fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
+ .info_buf = 1800, .max_role_num = 6,
+ },
+ {RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 1, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0),
+ .fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
+ .fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
+ .info_buf = 1024, .max_role_num = 5,
+ },
+
+ /* keep this last as the default entry */
+ {0, RTW89_FW_VER_CODE(0, 0, 0, 0),
+ .fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
+ .fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
+ .info_buf = 1024, .max_role_num = 5,
+ },
+};
+
+#define RTW89_DEFAULT_BTC_VER_IDX (ARRAY_SIZE(rtw89_btc_ver_defs) - 1)
+
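Selection over this table is presumably a first-match scan from the top, which is why each chip's entries must be in decreasing version order and the zeroed entry must stay last; a hypothetical sketch (the field names chip_id and fw_ver_code are assumed, and the real selection helper lives outside this hunk):

static const struct rtw89_btc_ver *btc_ver_lookup(u8 chip_id, u32 fw_ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rtw89_btc_ver_defs); i++) {
		const struct rtw89_btc_ver *v = &rtw89_btc_ver_defs[i];

		if (v->chip_id && v->chip_id != chip_id)
			continue;	/* entry is for another chip */
		if (fw_ver_code >= v->fw_ver_code)
			return v;	/* newest entry not newer than fw */
	}

	return &rtw89_btc_ver_defs[RTW89_DEFAULT_BTC_VER_IDX];
}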
struct rtw89_btc_btf_tlv {
u8 type;
u8 len;
@@ -127,16 +196,20 @@ struct rtw89_btc_btf_tlv {
} __packed;
enum btc_btf_set_report_en {
- RPT_EN_TDMA = BIT(0),
- RPT_EN_CYCLE = BIT(1),
- RPT_EN_MREG = BIT(2),
- RPT_EN_BT_VER_INFO = BIT(3),
- RPT_EN_BT_SCAN_INFO = BIT(4),
- RPT_EN_BT_AFH_MAP = BIT(5),
- RPT_EN_BT_DEVICE_INFO = BIT(6),
- RPT_EN_WL_ALL = GENMASK(2, 0),
- RPT_EN_BT_ALL = GENMASK(6, 3),
- RPT_EN_ALL = GENMASK(6, 0),
+ RPT_EN_TDMA,
+ RPT_EN_CYCLE,
+ RPT_EN_MREG,
+ RPT_EN_BT_VER_INFO,
+ RPT_EN_BT_SCAN_INFO,
+ RPT_EN_BT_DEVICE_INFO,
+ RPT_EN_BT_AFH_MAP,
+ RPT_EN_BT_AFH_MAP_LE,
+ RPT_EN_FW_STEP_INFO,
+ RPT_EN_TEST,
+ RPT_EN_WL_ALL,
+ RPT_EN_BT_ALL,
+ RPT_EN_ALL,
+ RPT_EN_MONITER,
};
#define BTF_SET_REPORT_VER 1
@@ -786,17 +859,18 @@ static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt)
static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_a2dp_desc *a2dp = &bt_linfo->a2dp_desc;
struct rtw89_btc_fbtc_btver *pver = NULL;
struct rtw89_btc_fbtc_btscan *pscan = NULL;
- struct rtw89_btc_fbtc_btafh *pafh = NULL;
+ struct rtw89_btc_fbtc_btafh *pafh_v1 = NULL;
+ struct rtw89_btc_fbtc_btafh_v2 *pafh_v2 = NULL;
struct rtw89_btc_fbtc_btdevinfo *pdev = NULL;
pver = (struct rtw89_btc_fbtc_btver *)pfinfo;
pscan = (struct rtw89_btc_fbtc_btscan *)pfinfo;
- pafh = (struct rtw89_btc_fbtc_btafh *)pfinfo;
pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -813,9 +887,23 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
memcpy(bt->scan_info, pscan->scan, BTC_SCAN_MAX1);
break;
case BTC_RPT_TYPE_BT_AFH:
- memcpy(&bt_linfo->afh_map[0], pafh->afh_l, 4);
- memcpy(&bt_linfo->afh_map[4], pafh->afh_m, 4);
- memcpy(&bt_linfo->afh_map[8], pafh->afh_h, 2);
+ if (ver->fcxbtafh == 2) {
+ pafh_v2 = (struct rtw89_btc_fbtc_btafh_v2 *)pfinfo;
+ if (pafh_v2->map_type & RPT_BT_AFH_SEQ_LEGACY) {
+ memcpy(&bt_linfo->afh_map[0], pafh_v2->afh_l, 4);
+ memcpy(&bt_linfo->afh_map[4], pafh_v2->afh_m, 4);
+ memcpy(&bt_linfo->afh_map[8], pafh_v2->afh_h, 2);
+ }
+ if (pafh_v2->map_type & RPT_BT_AFH_SEQ_LE) {
+ memcpy(&bt_linfo->afh_map_le[0], pafh_v2->afh_le_a, 4);
+ memcpy(&bt_linfo->afh_map_le[4], pafh_v2->afh_le_b, 1);
+ }
+ } else if (ver->fcxbtafh == 1) {
+ pafh_v1 = (struct rtw89_btc_fbtc_btafh *)pfinfo;
+ memcpy(&bt_linfo->afh_map[0], pafh_v1->afh_l, 4);
+ memcpy(&bt_linfo->afh_map[4], pafh_v1->afh_m, 4);
+ memcpy(&bt_linfo->afh_map[8], pafh_v1->afh_h, 2);
+ }
break;
case BTC_RPT_TYPE_BT_DEVICE:
a2dp->device_name = le32_to_cpu(pdev->dev_name);
@@ -827,76 +915,6 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
}
}
-struct rtw89_btc_fbtc_cysta_cpu {
- u8 fver;
- u8 rsvd;
- u16 cycles;
- u16 cycles_a2dp[CXT_FLCTRL_MAX];
- u16 a2dpept;
- u16 a2dpeptto;
- u16 tavg_cycle[CXT_MAX];
- u16 tmax_cycle[CXT_MAX];
- u16 tmaxdiff_cycle[CXT_MAX];
- u16 tavg_a2dp[CXT_FLCTRL_MAX];
- u16 tmax_a2dp[CXT_FLCTRL_MAX];
- u16 tavg_a2dpept;
- u16 tmax_a2dpept;
- u16 tavg_lk;
- u16 tmax_lk;
- u32 slot_cnt[CXST_MAX];
- u32 bcn_cnt[CXBCN_MAX];
- u32 leakrx_cnt;
- u32 collision_cnt;
- u32 skip_cnt;
- u32 exception;
- u32 except_cnt;
- u16 tslot_cycle[BTC_CYCLE_SLOT_MAX];
-};
-
-static void rtw89_btc_fbtc_cysta_to_cpu(const struct rtw89_btc_fbtc_cysta *src,
- struct rtw89_btc_fbtc_cysta_cpu *dst)
-{
- static_assert(sizeof(*src) == sizeof(*dst));
-
-#define __CPY_U8(_x) ({dst->_x = src->_x; })
-#define __CPY_LE16(_x) ({dst->_x = le16_to_cpu(src->_x); })
-#define __CPY_LE16S(_x) ({int _i; for (_i = 0; _i < ARRAY_SIZE(dst->_x); _i++) \
- dst->_x[_i] = le16_to_cpu(src->_x[_i]); })
-#define __CPY_LE32(_x) ({dst->_x = le32_to_cpu(src->_x); })
-#define __CPY_LE32S(_x) ({int _i; for (_i = 0; _i < ARRAY_SIZE(dst->_x); _i++) \
- dst->_x[_i] = le32_to_cpu(src->_x[_i]); })
-
- __CPY_U8(fver);
- __CPY_U8(rsvd);
- __CPY_LE16(cycles);
- __CPY_LE16S(cycles_a2dp);
- __CPY_LE16(a2dpept);
- __CPY_LE16(a2dpeptto);
- __CPY_LE16S(tavg_cycle);
- __CPY_LE16S(tmax_cycle);
- __CPY_LE16S(tmaxdiff_cycle);
- __CPY_LE16S(tavg_a2dp);
- __CPY_LE16S(tmax_a2dp);
- __CPY_LE16(tavg_a2dpept);
- __CPY_LE16(tmax_a2dpept);
- __CPY_LE16(tavg_lk);
- __CPY_LE16(tmax_lk);
- __CPY_LE32S(slot_cnt);
- __CPY_LE32S(bcn_cnt);
- __CPY_LE32(leakrx_cnt);
- __CPY_LE32(collision_cnt);
- __CPY_LE32(skip_cnt);
- __CPY_LE32(exception);
- __CPY_LE32(except_cnt);
- __CPY_LE16S(tslot_cycle);
-
-#undef __CPY_U8
-#undef __CPY_LE16
-#undef __CPY_LE16S
-#undef __CPY_LE32
-#undef __CPY_LE32S
-}
-
#define BTC_LEAK_AP_TH 10
#define BTC_CYSTA_CHK_PERIOD 100
@@ -910,19 +928,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *prptbuf, u32 index)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
- struct rtw89_btc_fbtc_rpt_ctrl *prpt;
- struct rtw89_btc_fbtc_rpt_ctrl_v1 *prpt_v1;
- struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
- struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1 = NULL;
- struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
+ union rtw89_btc_fbtc_rpt_ctrl_ver_info *prpt = NULL;
+ union rtw89_btc_fbtc_cysta_info *pcysta = NULL;
struct rtw89_btc_prpt *btc_prpt = NULL;
- struct rtw89_btc_fbtc_slot *rtp_slot = NULL;
void *rpt_content = NULL, *pfinfo = NULL;
u8 rpt_type = 0;
u16 wl_slot_set = 0, wl_slot_real = 0;
@@ -951,137 +965,141 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
switch (rpt_type) {
case BTC_RPT_TYPE_CTRL:
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
- if (chip->chip_id == RTL8852A) {
- pfinfo = &pfwinfo->rpt_ctrl.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
+ prpt = &pfwinfo->rpt_ctrl.finfo;
+ if (ver->fcxbtcrpt == 1) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v1);
+ } else if (ver->fcxbtcrpt == 4) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v4;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v4);
+ } else if (ver->fcxbtcrpt == 5) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v5;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v5);
} else {
- pfinfo = &pfwinfo->rpt_ctrl.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo_v1);
+ goto err;
}
- pcinfo->req_fver = chip->fcxbtcrpt_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxbtcrpt;
break;
case BTC_RPT_TYPE_TDMA:
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
- if (chip->chip_id == RTL8852A) {
- pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
+ if (ver->fcxtdma == 1) {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v1);
+ } else if (ver->fcxtdma == 3) {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v3;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v3);
} else {
- pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo_v1);
+ goto err;
}
- pcinfo->req_fver = chip->fcxtdma_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxtdma;
break;
case BTC_RPT_TYPE_SLOT:
pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_slots.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo);
- pcinfo->req_fver = chip->fcxslots_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxslots;
break;
case BTC_RPT_TYPE_CYSTA:
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
- if (chip->chip_id == RTL8852A) {
- pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo;
- pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
- rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ if (ver->fcxcysta == 2) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v2;
+ pcysta->v2 = pfwinfo->rpt_fbtc_cysta.finfo.v2;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v2);
+ } else if (ver->fcxcysta == 3) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v3;
+ pcysta->v3 = pfwinfo->rpt_fbtc_cysta.finfo.v3;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v3);
+ } else if (ver->fcxcysta == 4) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v4;
+ pcysta->v4 = pfwinfo->rpt_fbtc_cysta.finfo.v4;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v4);
} else {
- pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
- pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo_v1);
+ goto err;
}
- pcinfo->req_fver = chip->fcxcysta_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxcysta;
break;
case BTC_RPT_TYPE_STEP:
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
- if (chip->chip_id == RTL8852A) {
- pfinfo = &pfwinfo->rpt_fbtc_step.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
+ if (ver->fcxstep == 2) {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo.v2;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v2.step[0]) *
trace_step +
- offsetof(struct rtw89_btc_fbtc_steps, step);
- } else {
- pfinfo = &pfwinfo->rpt_fbtc_step.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo_v1.step[0]) *
+ offsetof(struct rtw89_btc_fbtc_steps_v2, step);
+ } else if (ver->fcxstep == 3) {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo.v3;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v3.step[0]) *
trace_step +
- offsetof(struct rtw89_btc_fbtc_steps_v1, step);
+ offsetof(struct rtw89_btc_fbtc_steps_v3, step);
+ } else {
+ goto err;
}
- pcinfo->req_fver = chip->fcxstep_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxstep;
break;
case BTC_RPT_TYPE_NULLSTA:
pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
- if (chip->chip_id == RTL8852A) {
+ if (ver->fcxnullsta == 1) {
pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v1);
+ } else if (ver->fcxnullsta == 2) {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v2;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v2);
} else {
- pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo_v1);
+ goto err;
}
- pcinfo->req_fver = chip->fcxnullsta_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxnullsta;
break;
case BTC_RPT_TYPE_MREG:
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo);
- pcinfo->req_fver = chip->fcxmreg_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxmreg;
break;
case BTC_RPT_TYPE_GPIO_DBG:
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo);
- pcinfo->req_fver = chip->fcxgpiodbg_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxgpiodbg;
break;
case BTC_RPT_TYPE_BT_VER:
pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_btver.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo);
- pcinfo->req_fver = chip->fcxbtver_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxbtver;
break;
case BTC_RPT_TYPE_BT_SCAN:
pcinfo = &pfwinfo->rpt_fbtc_btscan.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo);
- pcinfo->req_fver = chip->fcxbtscan_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxbtscan;
break;
case BTC_RPT_TYPE_BT_AFH:
pcinfo = &pfwinfo->rpt_fbtc_btafh.cinfo;
- pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo);
- pcinfo->req_fver = chip->fcxbtafh_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ if (ver->fcxbtafh == 1) {
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v1);
+ } else if (ver->fcxbtafh == 2) {
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v2;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v2);
+ } else {
+ goto err;
+ }
+ pcinfo->req_fver = ver->fcxbtafh;
break;
case BTC_RPT_TYPE_BT_DEVICE:
pcinfo = &pfwinfo->rpt_fbtc_btdev.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_btdev.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btdev.finfo);
- pcinfo->req_fver = chip->fcxbtdevinfo_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxbtdevinfo;
break;
default:
pfwinfo->err[BTFRE_UNDEF_TYPE]++;
return 0;
}
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+
if (rpt_len != pcinfo->req_len) {
if (rpt_type < BTC_RPT_TYPE_MAX)
pfwinfo->len_mismch |= (0x1 << rpt_type);
@@ -1102,235 +1120,257 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
memcpy(pfinfo, rpt_content, pcinfo->req_len);
pcinfo->valid = 1;
- if (rpt_type == BTC_RPT_TYPE_TDMA && chip->chip_id == RTL8852A) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): check %d %zu\n", __func__,
- BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
-
- if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo,
- sizeof(dm->tdma_now)) != 0) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n",
- __func__, BTC_DCNT_TDMA_NONSYNC,
- dm->tdma_now.type, dm->tdma_now.rxflctrl,
- dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
- dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
- dm->tdma_now.rxflctrl_role,
- dm->tdma_now.option_ctrl);
-
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
- __func__, BTC_DCNT_TDMA_NONSYNC,
- pfwinfo->rpt_fbtc_tdma.finfo.type,
- pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl,
- pfwinfo->rpt_fbtc_tdma.finfo.txpause,
- pfwinfo->rpt_fbtc_tdma.finfo.wtgle_n,
- pfwinfo->rpt_fbtc_tdma.finfo.leak_n,
- pfwinfo->rpt_fbtc_tdma.finfo.ext_ctrl,
- pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl_role,
- pfwinfo->rpt_fbtc_tdma.finfo.option_ctrl);
- }
+ switch (rpt_type) {
+ case BTC_RPT_TYPE_CTRL:
+ if (ver->fcxbtcrpt == 1) {
+ prpt->v1 = pfwinfo->rpt_ctrl.finfo.v1;
+ btc->fwinfo.rpt_en_map = prpt->v1.rpt_enable;
+ wl->ver_info.fw_coex = prpt->v1.wl_fw_coex_ver;
+ wl->ver_info.fw = prpt->v1.wl_fw_ver;
+ dm->wl_fw_cx_offload = !!prpt->v1.wl_fw_cx_offload;
+
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
+
+			/* To avoid I/O if WL is in LPS or powered off */
+ if (wl->status.map.lps != BTC_LPS_RF_OFF &&
+ !wl->status.map.rf_off) {
+ rtwdev->chip->ops->btc_update_bt_cnt(rtwdev);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] =
+ rtw89_mac_get_plt_cnt(rtwdev,
+ RTW89_MAC_0);
+ }
+ } else if (ver->fcxbtcrpt == 4) {
+ prpt->v4 = pfwinfo->rpt_ctrl.finfo.v4;
+ btc->fwinfo.rpt_en_map = le32_to_cpu(prpt->v4.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v4.wl_fw_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v4.wl_fw_info.fw_ver);
+ dm->wl_fw_cx_offload = !!le32_to_cpu(prpt->v4.wl_fw_info.cx_offload);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v4.gnt_val[i],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_HI_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_HI_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_LO_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_LO_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_POLLUTED]);
- _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
- memcmp(&dm->tdma_now,
- &pfwinfo->rpt_fbtc_tdma.finfo,
- sizeof(dm->tdma_now)));
- } else if (rpt_type == BTC_RPT_TYPE_TDMA) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): check %d %zu\n", __func__,
- BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
-
- if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
- sizeof(dm->tdma_now)) != 0) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n",
- __func__, BTC_DCNT_TDMA_NONSYNC,
- dm->tdma_now.type, dm->tdma_now.rxflctrl,
- dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
- dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
- dm->tdma_now.rxflctrl_role,
- dm->tdma_now.option_ctrl);
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
- __func__, BTC_DCNT_TDMA_NONSYNC,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.type,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.txpause,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.wtgle_n,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.leak_n,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.ext_ctrl,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl_role,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.option_ctrl);
- }
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
- _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
- memcmp(&dm->tdma_now,
- &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
- sizeof(dm->tdma_now)));
- }
+ if (le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else if (ver->fcxbtcrpt == 5) {
+ prpt->v5 = pfwinfo->rpt_ctrl.finfo.v5;
+ pfwinfo->rpt_en_map = le32_to_cpu(prpt->v5.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v5.rpt_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v5.rpt_info.fw_ver);
+ dm->wl_fw_cx_offload = 0;
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v5.gnt_val[i][0],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_HI_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_HI_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_LO_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_LO_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_POLLUTED]);
+
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
- if (rpt_type == BTC_RPT_TYPE_SLOT) {
+ dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else {
+ goto err;
+ }
+ break;
+ case BTC_RPT_TYPE_TDMA:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n", __func__,
+ BTC_DCNT_TDMA_NONSYNC,
+ sizeof(dm->tdma_now));
+ if (ver->fcxtdma == 1)
+ _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
+ memcmp(&dm->tdma_now,
+ &pfwinfo->rpt_fbtc_tdma.finfo.v1,
+ sizeof(dm->tdma_now)));
+ else if (ver->fcxtdma == 3)
+ _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
+ memcmp(&dm->tdma_now,
+ &pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma,
+ sizeof(dm->tdma_now)));
+ else
+ goto err;
+ break;
+ case BTC_RPT_TYPE_SLOT:
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): check %d %zu\n",
__func__, BTC_DCNT_SLOT_NONSYNC,
sizeof(dm->slot_now));
-
- if (memcmp(dm->slot_now, pfwinfo->rpt_fbtc_slots.finfo.slot,
- sizeof(dm->slot_now)) != 0) {
- for (i = 0; i < CXST_MAX; i++) {
- rtp_slot =
- &pfwinfo->rpt_fbtc_slots.finfo.slot[i];
- if (memcmp(&dm->slot_now[i], rtp_slot,
- sizeof(dm->slot_now[i])) != 0) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d slot_now[%d] dur=0x%04x tbl=%08x type=0x%04x\n",
- __func__,
- BTC_DCNT_SLOT_NONSYNC, i,
- dm->slot_now[i].dur,
- dm->slot_now[i].cxtbl,
- dm->slot_now[i].cxtype);
-
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d rpt_fbtc_slots[%d] dur=0x%04x tbl=%08x type=0x%04x\n",
- __func__,
- BTC_DCNT_SLOT_NONSYNC, i,
- rtp_slot->dur,
- rtp_slot->cxtbl,
- rtp_slot->cxtype);
- }
- }
- }
_chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
memcmp(dm->slot_now,
pfwinfo->rpt_fbtc_slots.finfo.slot,
sizeof(dm->slot_now)));
- }
+ break;
+ case BTC_RPT_TYPE_CYSTA:
+ if (ver->fcxcysta == 2) {
+ if (le16_to_cpu(pcysta->v2.cycles) < BTC_CYSTA_CHK_PERIOD)
+ break;
+ /* Check Leak-AP */
+ if (le32_to_cpu(pcysta->v2.slot_cnt[CXST_LK]) != 0 &&
+ le32_to_cpu(pcysta->v2.leakrx_cnt) != 0 && dm->tdma_now.rxflctrl) {
+ if (le32_to_cpu(pcysta->v2.slot_cnt[CXST_LK]) <
+ BTC_LEAK_AP_TH * le32_to_cpu(pcysta->v2.leakrx_cnt))
+ dm->leak_ap = 1;
+ }
- if (rpt_type == BTC_RPT_TYPE_CYSTA && chip->chip_id == RTL8852A &&
- pcysta->cycles >= BTC_CYSTA_CHK_PERIOD) {
- /* Check Leak-AP */
- if (pcysta->slot_cnt[CXST_LK] != 0 &&
- pcysta->leakrx_cnt != 0 && dm->tdma_now.rxflctrl) {
- if (pcysta->slot_cnt[CXST_LK] <
- BTC_LEAK_AP_TH * pcysta->leakrx_cnt)
- dm->leak_ap = 1;
- }
+ /* Check diff time between WL slot and W1/E2G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT)
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur);
+ else
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
- /* Check diff time between WL slot and W1/E2G slot */
- if (dm->tdma_now.type == CXTDMA_OFF &&
- dm->tdma_now.ext_ctrl == CXECTL_EXT)
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur);
- else
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ if (le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) > wl_slot_set) {
+ diff_t = le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) - wl_slot_set;
+ _chk_btc_err(rtwdev,
+ BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
- if (pcysta->tavg_cycle[CXT_WL] > wl_slot_set) {
- diff_t = pcysta->tavg_cycle[CXT_WL] - wl_slot_set;
- _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
- }
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta->v2.slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta->v2.slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ le16_to_cpu(pcysta->v2.cycles));
+ } else if (ver->fcxcysta == 3) {
+ if (le16_to_cpu(pcysta->v3.cycles) < BTC_CYSTA_CHK_PERIOD)
+ break;
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_B1]);
- _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles);
- } else if (rpt_type == BTC_RPT_TYPE_CYSTA && pcysta_v1 &&
- le16_to_cpu(pcysta_v1->cycles) >= BTC_CYSTA_CHK_PERIOD) {
- cnt_leak_slot = le32_to_cpu(pcysta_v1->slot_cnt[CXST_LK]);
- cnt_rx_imr = le32_to_cpu(pcysta_v1->leak_slot.cnt_rximr);
- /* Check Leak-AP */
- if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
- dm->tdma_now.rxflctrl) {
- if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
- dm->leak_ap = 1;
- }
+ cnt_leak_slot = le32_to_cpu(pcysta->v3.slot_cnt[CXST_LK]);
+ cnt_rx_imr = le32_to_cpu(pcysta->v3.leak_slot.cnt_rximr);
- /* Check diff time between real WL slot and W1 slot */
- if (dm->tdma_now.type == CXTDMA_OFF) {
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
- wl_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_WL]);
- if (wl_slot_real > wl_slot_set) {
- diff_t = wl_slot_real - wl_slot_set;
- _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ /* Check Leak-AP */
+ if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
+ dm->tdma_now.rxflctrl) {
+ if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
+ dm->leak_ap = 1;
}
- }
- /* Check diff time between real BT slot and EBT/E5G slot */
- if (dm->tdma_now.type == CXTDMA_OFF &&
- dm->tdma_now.ext_ctrl == CXECTL_EXT &&
- btc->bt_req_len != 0) {
- bt_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_BT]);
+ /* Check diff time between real WL slot and W1 slot */
+ if (dm->tdma_now.type == CXTDMA_OFF) {
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ wl_slot_real = le16_to_cpu(pcysta->v3.cycle_time.tavg[CXT_WL]);
+ if (wl_slot_real > wl_slot_set) {
+ diff_t = wl_slot_real - wl_slot_set;
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
+ }
- if (btc->bt_req_len > bt_slot_real) {
- diff_t = btc->bt_req_len - bt_slot_real;
- _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ /* Check diff time between real BT slot and EBT/E5G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT &&
+ btc->bt_req_len != 0) {
+ bt_slot_real = le16_to_cpu(pcysta->v3.cycle_time.tavg[CXT_BT]);
+ if (btc->bt_req_len > bt_slot_real) {
+ diff_t = btc->bt_req_len - bt_slot_real;
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ }
}
- }
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
- le32_to_cpu(pcysta_v1->slot_cnt[CXST_W1]));
- _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
- le32_to_cpu(pcysta_v1->slot_cnt[CXST_B1]));
- _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
- (u32)le16_to_cpu(pcysta_v1->cycles));
- }
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta->v3.slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
+ le32_to_cpu(pcysta->v3.slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ le16_to_cpu(pcysta->v3.cycles));
+ } else if (ver->fcxcysta == 4) {
+ if (le16_to_cpu(pcysta->v4.cycles) < BTC_CYSTA_CHK_PERIOD)
+ break;
- if (rpt_type == BTC_RPT_TYPE_CTRL && chip->chip_id == RTL8852A) {
- prpt = &pfwinfo->rpt_ctrl.finfo;
- btc->fwinfo.rpt_en_map = prpt->rpt_enable;
- wl->ver_info.fw_coex = prpt->wl_fw_coex_ver;
- wl->ver_info.fw = prpt->wl_fw_ver;
- dm->wl_fw_cx_offload = !!prpt->wl_fw_cx_offload;
+ cnt_leak_slot = le16_to_cpu(pcysta->v4.slot_cnt[CXST_LK]);
+ cnt_rx_imr = le32_to_cpu(pcysta->v4.leak_slot.cnt_rximr);
- _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
- pfwinfo->event[BTF_EVNT_RPT]);
+ /* Check Leak-AP */
+ if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
+ dm->tdma_now.rxflctrl) {
+ if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
+ dm->leak_ap = 1;
+ }
- /* To avoid I/O if WL LPS or power-off */
- if (wl->status.map.lps != BTC_LPS_RF_OFF && !wl->status.map.rf_off) {
- rtwdev->chip->ops->btc_update_bt_cnt(rtwdev);
- _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ /* Check diff time between real WL slot and W1 slot */
+ if (dm->tdma_now.type == CXTDMA_OFF) {
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ wl_slot_real = le16_to_cpu(pcysta->v4.cycle_time.tavg[CXT_WL]);
+ if (wl_slot_real > wl_slot_set) {
+ diff_t = wl_slot_real - wl_slot_set;
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
+ }
- btc->cx.cnt_bt[BTC_BCNT_POLUT] =
- rtw89_mac_get_plt_cnt(rtwdev, RTW89_MAC_0);
- }
- } else if (rpt_type == BTC_RPT_TYPE_CTRL) {
- prpt_v1 = &pfwinfo->rpt_ctrl.finfo_v1;
- btc->fwinfo.rpt_en_map = le32_to_cpu(prpt_v1->rpt_info.en);
- wl->ver_info.fw_coex = le32_to_cpu(prpt_v1->wl_fw_info.cx_ver);
- wl->ver_info.fw = le32_to_cpu(prpt_v1->wl_fw_info.fw_ver);
- dm->wl_fw_cx_offload = !!le32_to_cpu(prpt_v1->wl_fw_info.cx_offload);
-
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
- memcpy(&dm->gnt.band[i], &prpt_v1->gnt_val[i],
- sizeof(dm->gnt.band[i]));
-
- btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_TX]);
- btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_RX]);
- btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_TX]);
- btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_RX]);
- btc->cx.cnt_bt[BTC_BCNT_POLUT] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_POLLUTED]);
-
- _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
- _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
- pfwinfo->event[BTF_EVNT_RPT]);
-
- if (le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
- bt->rfk_info.map.timeout = 1;
- else
- bt->rfk_info.map.timeout = 0;
+ /* Check diff time between real BT slot and EBT/E5G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT &&
+ btc->bt_req_len != 0) {
+ bt_slot_real = le16_to_cpu(pcysta->v4.cycle_time.tavg[CXT_BT]);
- dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
- }
+ if (btc->bt_req_len > bt_slot_real) {
+ diff_t = btc->bt_req_len - bt_slot_real;
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ }
+ }
- if (rpt_type >= BTC_RPT_TYPE_BT_VER &&
- rpt_type <= BTC_RPT_TYPE_BT_DEVICE)
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le16_to_cpu(pcysta->v4.slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
+ le16_to_cpu(pcysta->v4.slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ le16_to_cpu(pcysta->v4.cycles));
+ } else {
+ goto err;
+ }
+ break;
+ case BTC_RPT_TYPE_BT_VER:
+ case BTC_RPT_TYPE_BT_SCAN:
+ case BTC_RPT_TYPE_BT_AFH:
+ case BTC_RPT_TYPE_BT_DEVICE:
_update_bt_report(rtwdev, rpt_type, pfinfo);
-
+ break;
+ }
return (rpt_len + BTC_RPT_HDR_SIZE);
+
+err:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): Undefined version for type=%d\n", __func__, rpt_type);
+ return 0;
}
static void _parse_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *pbuf, u32 buf_len)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
struct rtw89_btc_prpt *btc_prpt = NULL;
u32 index = 0, rpt_len = 0;
@@ -1340,7 +1380,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
while (pbuf) {
btc_prpt = (struct rtw89_btc_prpt *)&pbuf[index];
- if (index + 2 >= chip->btc_fwinfo_buf)
+ if (index + 2 >= ver->info_buf)
break;
/* At least 3 bytes: type(1) & len(2) */
rpt_len = le16_to_cpu(btc_prpt->len);
@@ -1358,12 +1398,12 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
static void _append_tdma(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_btf_tlv *tlv;
struct rtw89_btc_fbtc_tdma *v;
- struct rtw89_btc_fbtc_tdma_v1 *v1;
+ struct rtw89_btc_fbtc_tdma_v3 *v3;
u16 len = btc->policy_len;
if (!btc->update_policy_force &&
@@ -1376,17 +1416,17 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len];
tlv->type = CXPOLICY_TDMA;
- if (chip->chip_id == RTL8852A) {
+ if (ver->fcxtdma == 1) {
v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
tlv->len = sizeof(*v);
memcpy(v, &dm->tdma, sizeof(*v));
- btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
} else {
- tlv->len = sizeof(*v1);
- v1 = (struct rtw89_btc_fbtc_tdma_v1 *)&tlv->val[0];
- v1->fver = chip->fcxtdma_ver;
- v1->tdma = dm->tdma;
- btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v1);
+ tlv->len = sizeof(*v3);
+ v3 = (struct rtw89_btc_fbtc_tdma_v3 *)&tlv->val[0];
+ v3->fver = ver->fcxtdma;
+ memcpy(&v3->tdma, &dm->tdma, sizeof(v3->tdma));
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v3);
}
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -1441,22 +1481,169 @@ static void _append_slot(struct rtw89_dev *rtwdev)
__func__, cnt);
}
+static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ u32 bit_map = 0;
+
+ switch (rpt_map) {
+ case RPT_EN_TDMA:
+ bit_map = BIT(0);
+ break;
+ case RPT_EN_CYCLE:
+ bit_map = BIT(1);
+ break;
+ case RPT_EN_MREG:
+ bit_map = BIT(2);
+ break;
+ case RPT_EN_BT_VER_INFO:
+ bit_map = BIT(3);
+ break;
+ case RPT_EN_BT_SCAN_INFO:
+ bit_map = BIT(4);
+ break;
+ case RPT_EN_BT_DEVICE_INFO:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ case 2:
+ bit_map = BIT(6);
+ break;
+ case 3:
+ bit_map = BIT(5);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_BT_AFH_MAP:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ case 2:
+ bit_map = BIT(5);
+ break;
+ case 3:
+ bit_map = BIT(6);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_BT_AFH_MAP_LE:
+ switch (ver->frptmap) {
+ case 2:
+ bit_map = BIT(8);
+ break;
+ case 3:
+ bit_map = BIT(7);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_FW_STEP_INFO:
+ switch (ver->frptmap) {
+ case 1:
+ case 2:
+ bit_map = BIT(7);
+ break;
+ case 3:
+ bit_map = BIT(8);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_TEST:
+ bit_map = BIT(31);
+ break;
+ case RPT_EN_WL_ALL:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ case 2:
+ bit_map = GENMASK(2, 0);
+ break;
+ case 3:
+ bit_map = GENMASK(2, 0) | BIT(8);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_BT_ALL:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ bit_map = GENMASK(6, 3);
+ break;
+ case 2:
+ bit_map = GENMASK(6, 3) | BIT(8);
+ break;
+ case 3:
+ bit_map = GENMASK(7, 3);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_ALL:
+ switch (ver->frptmap) {
+ case 0:
+ bit_map = GENMASK(6, 0);
+ break;
+ case 1:
+ bit_map = GENMASK(7, 0);
+ break;
+ case 2:
+ case 3:
+ bit_map = GENMASK(8, 0);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_MONITER:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ bit_map = GENMASK(6, 2);
+ break;
+ case 2:
+ bit_map = GENMASK(6, 2) | BIT(8);
+ break;
+ case 3:
+ bit_map = GENMASK(8, 2);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return bit_map;
+}
+
static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
u32 rpt_map, bool rpt_state)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo;
struct rtw89_btc_btf_set_report r = {0};
- u32 val = 0;
+ u32 val, bit_map;
+
+ bit_map = rtw89_btc_fw_rpt_ver(rtwdev, rpt_map);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): rpt_map=%x, rpt_state=%x\n",
__func__, rpt_map, rpt_state);
if (rpt_state)
- val = fwinfo->rpt_en_map | rpt_map;
+ val = fwinfo->rpt_en_map | bit_map;
else
- val = fwinfo->rpt_en_map & ~rpt_map;
+ val = fwinfo->rpt_en_map & ~bit_map;
if (val == fwinfo->rpt_en_map)
return;
@@ -1593,16 +1780,17 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
switch (type) {
case CXDRVINFO_INIT:
rtw89_fw_h2c_cxdrv_init(rtwdev);
break;
case CXDRVINFO_ROLE:
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
rtw89_fw_h2c_cxdrv_role(rtwdev);
- else
+ else if (ver->fwlrole == 1)
rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
break;
case CXDRVINFO_CTRL:
@@ -1892,6 +2080,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *b = &bt->link_info;
@@ -1905,7 +2094,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
if (btc->ctrl.manual || wl->status.map.scan)
return;
- if (chip->chip_id == RTL8852A) {
+ if (ver->fwlrole == 0) {
mode = wl_rinfo->link_mode;
connect_cnt = wl_rinfo->connect_cnt;
} else {
@@ -1924,13 +2113,13 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
- if (chip->chip_id == RTL8852A &&
+ if (ver->fwlrole == 0 &&
(r->role == RTW89_WIFI_ROLE_P2P_GO ||
r->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
ch = r->ch;
bw = r->bw;
break;
- } else if (chip->chip_id != RTL8852A &&
+ } else if (ver->fwlrole == 1 &&
(r1->role == RTW89_WIFI_ROLE_P2P_GO ||
r1->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
ch = r1->ch;
@@ -1945,12 +2134,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
- if (chip->chip_id == RTL8852A &&
+ if (ver->fwlrole == 0 &&
r->connected && r->band == RTW89_BAND_2G) {
ch = r->ch;
bw = r->bw;
break;
- } else if (chip->chip_id != RTL8852A &&
+ } else if (ver->fwlrole == 1 &&
r1->connected && r1->band == RTW89_BAND_2G) {
ch = r1->ch;
bw = r1->bw;
@@ -2517,15 +2706,16 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
break;
case BTC_CXP_OFF_EQ0:
_slot_set_tbl(btc, CXST_OFF, cxtbl[0]);
+ _slot_set_type(btc, CXST_OFF, SLOT_ISO);
break;
case BTC_CXP_OFF_EQ1:
_slot_set_tbl(btc, CXST_OFF, cxtbl[16]);
break;
case BTC_CXP_OFF_EQ2:
- _slot_set_tbl(btc, CXST_OFF, cxtbl[17]);
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[0]);
break;
case BTC_CXP_OFF_EQ3:
- _slot_set_tbl(btc, CXST_OFF, cxtbl[18]);
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[24]);
break;
case BTC_CXP_OFF_BWB0:
_slot_set_tbl(btc, CXST_OFF, cxtbl[5]);
@@ -2581,6 +2771,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
default:
break;
}
+ s[CXST_OFF] = s_def[CXST_OFF];
break;
case BTC_CXP_FIX: /* TDMA Fix-Slot */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
@@ -2607,6 +2798,10 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
_slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_ISO);
_slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
break;
+ case BTC_CXP_FIX_TD4020:
+ _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_MIX);
+ _slot_set(btc, CXST_B1, 20, tbl_b1, SLOT_MIX);
+ break;
case BTC_CXP_FIX_TD7010:
_slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO);
_slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
@@ -3398,8 +3593,8 @@ static void _action_wl_rfk(struct rtw89_dev *rtwdev)
static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
@@ -3410,7 +3605,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
if (btc->ctrl.manual)
return;
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -3503,8 +3698,8 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
@@ -3524,7 +3719,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
if (btc->ctrl.manual)
return;
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -3572,8 +3767,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
@@ -3581,7 +3776,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
bool bt_hi_lna_rx = false;
u8 mode;
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -3623,6 +3818,7 @@ static void _action_common(struct rtw89_dev *rtwdev)
wl->scbd_change = false;
btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
}
+ btc->dm.tdma_instant_excute = 0;
}
static void _action_by_bt(struct rtw89_dev *rtwdev)
@@ -3651,7 +3847,7 @@ static void _action_by_bt(struct rtw89_dev *rtwdev)
case BTC_BT_NOPROFILE:
if (_check_freerun(rtwdev))
_action_freerun(rtwdev);
- else if (a2dp.active || pan.active)
+ else if (pan.active)
_action_bt_pan(rtwdev);
else
_action_bt_idle(rtwdev);
@@ -4406,7 +4602,7 @@ static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update)
bt->whql_test = !!(val & BTC_BSCB_WHQL);
bt->btg_type = val & BTC_BSCB_BT_S1 ? BTC_BT_BTG : BTC_BT_ALONE;
- bt->link_info.a2dp_desc.active = !!(val & BTC_BSCB_A2DP_ACT);
+ bt->link_info.a2dp_desc.exist = !!(val & BTC_BSCB_A2DP_ACT);
/* if rfk run 1->0 */
if (bt->rfk_info.map.run && !(val & BTC_BSCB_RFK_RUN))
@@ -4445,8 +4641,8 @@ static bool _chk_wl_rfk_request(struct rtw89_dev *rtwdev)
static
void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
@@ -4461,7 +4657,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_update_dm_step(rtwdev, reason);
_update_btc_state_map(rtwdev);
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -4567,6 +4763,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_nc(rtwdev);
break;
case BTC_WLINK_2G_STA:
+ if (wl->status.map.traffic_dir & BIT(RTW89_TFC_DL))
+ bt->scan_rx_low_pri = true;
_action_wl_2g_sta(rtwdev);
break;
case BTC_WLINK_2G_AP:
@@ -4583,9 +4781,9 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
break;
case BTC_WLINK_2G_SCC:
bt->scan_rx_low_pri = true;
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
_action_wl_2g_scc(rtwdev);
- else if (chip->chip_id == RTL8852C)
+ else if (ver->fwlrole == 1)
_action_wl_2g_scc_v1(rtwdev);
break;
case BTC_WLINK_2G_MCC:
@@ -4690,7 +4888,7 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
_write_scbd(rtwdev,
BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG, true);
_update_bt_scbd(rtwdev, true);
- if (rtw89_mac_get_ctrl_path(rtwdev) && chip->chip_id == RTL8852A) {
+ if (rtw89_mac_get_ctrl_path(rtwdev)) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): PTA owner warning!!\n",
__func__);
@@ -5000,11 +5198,6 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
a2dp->sink = btinfo.hb3.a2dp_sink;
- if (b->profile_cnt.now || b->status.map.ble_connect)
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, 1);
- else
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, 0);
-
if (!a2dp->exist_last && a2dp->exist) {
a2dp->vendor_id = 0;
a2dp->flush_time = 0;
@@ -5014,12 +5207,6 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
RTW89_COEX_BT_DEVINFO_WORK_PERIOD);
}
- if (a2dp->exist && (a2dp->flush_time == 0 || a2dp->vendor_id == 0 ||
- a2dp->play_latency == 1))
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, 1);
- else
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, 0);
-
_run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO);
}
@@ -5034,10 +5221,10 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_link_info r = {0};
struct rtw89_btc_wl_link_info *wlinfo = NULL;
@@ -5101,7 +5288,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
wlinfo = &wl->link_info[r.pid];
memcpy(wlinfo, &r, sizeof(*wlinfo));
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
_update_wl_info(rtwdev);
else
_update_wl_info_v1(rtwdev);
@@ -5151,9 +5338,7 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
}
if (rf_state == BTC_RFCTRL_WL_ON) {
- btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0;
- rtw89_btc_fw_en_rpt(rtwdev,
- RPT_EN_MREG | RPT_EN_BT_VER_INFO, true);
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, true);
val = BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG;
_write_scbd(rtwdev, val, true);
_update_bt_scbd(rtwdev, true);
@@ -5164,6 +5349,13 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
_write_scbd(rtwdev, BTC_WSCB_ALL, false);
}
+ btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0;
+ if (wl->status.map.lps_pre == BTC_LPS_OFF &&
+ wl->status.map.lps_pre != wl->status.map.lps)
+ btc->dm.tdma_instant_excute = 1;
+ else
+ btc->dm.tdma_instant_excute = 0;
+
_run_coex(rtwdev, BTC_RSN_NTFY_RADIO_STATE);
wl->status.map.rf_off_pre = wl->status.map.rf_off;
@@ -5618,8 +5810,8 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
@@ -5631,7 +5823,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_puts(m, "========== [WL Status] ==========\n");
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -5714,12 +5906,14 @@ static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_module *module = &btc->mdinfo;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
u8 *afh = bt_linfo->afh_map;
+ u8 *afh_le = bt_linfo->afh_map_le;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
return;
@@ -5769,6 +5963,12 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
afh[0], afh[1], afh[2], afh[3], afh[4],
afh[5], afh[6], afh[7], afh[8], afh[9]);
+ if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect)
+ seq_printf(m,
+ "LE[%02x%02x_%02x_%02x%02x]",
+ afh_le[0], afh_le[1], afh_le[2],
+ afh_le[3], afh_le[4]);
+
seq_printf(m, "wl_ch_map[en:%d/ch:%d/bw:%d]\n",
wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw);
@@ -5800,6 +6000,29 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
"[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX],
cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX],
cx->cnt_bt[BTC_BCNT_LOPRI_TX], cx->cnt_bt[BTC_BCNT_POLUT]);
+
+ if (bt->enable.now && bt->ver_info.fw == 0)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, false);
+
+ if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, false);
+
+ if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP_LE, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP_LE, false);
+
+ if (bt_linfo->a2dp_desc.exist &&
+ (bt_linfo->a2dp_desc.flush_time == 0 ||
+ bt_linfo->a2dp_desc.vendor_id == 0 ||
+ bt_linfo->a2dp_desc.play_latency == 1))
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, false);
}
#define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e
@@ -5807,6 +6030,7 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
#define CASE_BTC_POLICY_STR(e) \
case BTC_CXP_ ## e | BTC_POLICY_EXT_BIT: return #e
#define CASE_BTC_SLOT_STR(e) case CXST_ ## e: return #e
+#define CASE_BTC_EVT_STR(e) case CXEVNT_## e: return #e
static const char *steps_to_str(u16 step)
{
@@ -5947,6 +6171,40 @@ static const char *id_to_slot(u32 id)
}
}
+static const char *id_to_evt(u32 id)
+{
+ switch (id) {
+ CASE_BTC_EVT_STR(TDMA_ENTRY);
+ CASE_BTC_EVT_STR(WL_TMR);
+ CASE_BTC_EVT_STR(B1_TMR);
+ CASE_BTC_EVT_STR(B2_TMR);
+ CASE_BTC_EVT_STR(B3_TMR);
+ CASE_BTC_EVT_STR(B4_TMR);
+ CASE_BTC_EVT_STR(W2B_TMR);
+ CASE_BTC_EVT_STR(B2W_TMR);
+ CASE_BTC_EVT_STR(BCN_EARLY);
+ CASE_BTC_EVT_STR(A2DP_EMPTY);
+ CASE_BTC_EVT_STR(LK_END);
+ CASE_BTC_EVT_STR(RX_ISR);
+ CASE_BTC_EVT_STR(RX_FC0);
+ CASE_BTC_EVT_STR(RX_FC1);
+ CASE_BTC_EVT_STR(BT_RELINK);
+ CASE_BTC_EVT_STR(BT_RETRY);
+ CASE_BTC_EVT_STR(E2G);
+ CASE_BTC_EVT_STR(E5G);
+ CASE_BTC_EVT_STR(EBT);
+ CASE_BTC_EVT_STR(ENULL);
+ CASE_BTC_EVT_STR(DRV_WLK);
+ CASE_BTC_EVT_STR(BCN_OK);
+ CASE_BTC_EVT_STR(BT_CHANGE);
+ CASE_BTC_EVT_STR(EBT_EXTEND);
+ CASE_BTC_EVT_STR(E2G_NULL1);
+ CASE_BTC_EVT_STR(B1FDD_TMR);
+ default:
+ return "unknown";
+ }
+}
+
static
void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
@@ -6045,21 +6303,27 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_fbtc_cysta *pcysta;
- struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1;
+ union rtw89_btc_fbtc_cysta_info *pcysta;
u32 except_cnt, exception_map;
- if (chip->chip_id == RTL8852A) {
- pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
- except_cnt = le32_to_cpu(pcysta->except_cnt);
- exception_map = le32_to_cpu(pcysta->exception);
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ if (ver->fcxcysta == 2) {
+ pcysta->v2 = pfwinfo->rpt_fbtc_cysta.finfo.v2;
+ except_cnt = le32_to_cpu(pcysta->v2.except_cnt);
+ exception_map = le32_to_cpu(pcysta->v2.exception);
+ } else if (ver->fcxcysta == 3) {
+ pcysta->v3 = pfwinfo->rpt_fbtc_cysta.finfo.v3;
+ except_cnt = le32_to_cpu(pcysta->v3.except_cnt);
+ exception_map = le32_to_cpu(pcysta->v3.except_map);
+ } else if (ver->fcxcysta == 4) {
+ pcysta->v4 = pfwinfo->rpt_fbtc_cysta.finfo.v4;
+ except_cnt = pcysta->v4.except_cnt;
+ exception_map = le32_to_cpu(pcysta->v4.except_map);
} else {
- pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
- except_cnt = le32_to_cpu(pcysta_v1->except_cnt);
- exception_map = le32_to_cpu(pcysta_v1->except_map);
+ return;
}
if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && except_cnt == 0 &&
@@ -6097,23 +6361,20 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_tdma *t = NULL;
- struct rtw89_btc_fbtc_slot *s = NULL;
- struct rtw89_btc_dm *dm = &btc->dm;
- u8 i, cnt = 0;
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
if (!pcinfo->valid)
return;
- if (chip->chip_id == RTL8852A)
- t = &pfwinfo->rpt_fbtc_tdma.finfo;
+ if (ver->fcxtdma == 1)
+ t = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
else
- t = &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma;
+ t = &pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma;
seq_printf(m,
" %-15s : ", "[tdma_policy]");
@@ -6130,75 +6391,43 @@ static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
"policy_type:%d",
(u32)btc->policy_type);
- s = pfwinfo->rpt_fbtc_slots.finfo.slot;
-
- for (i = 0; i < CXST_MAX; i++) {
- if (dm->update_slot_map == BIT(CXST_MAX) - 1)
- break;
-
- if (!(dm->update_slot_map & BIT(i)))
- continue;
-
- if (cnt % 6 == 0)
- seq_printf(m,
- " %-15s : %d[%d/0x%x/%d]",
- "[slot_policy]",
- (u32)i,
- s[i].dur, s[i].cxtbl, s[i].cxtype);
- else
- seq_printf(m,
- ", %d[%d/0x%x/%d]",
- (u32)i,
- s[i].dur, s[i].cxtbl, s[i].cxtype);
- if (cnt % 6 == 5)
- seq_puts(m, "\n");
- cnt++;
- }
seq_puts(m, "\n");
}
static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_slots *pslots = NULL;
- struct rtw89_btc_fbtc_slot s;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_slot *s;
u8 i = 0;
- pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
- if (!pcinfo->valid)
- return;
-
- pslots = &pfwinfo->rpt_fbtc_slots.finfo;
-
for (i = 0; i < CXST_MAX; i++) {
- s = pslots->slot[i];
- if (i % 6 == 0)
+ s = &dm->slot_now[i];
+ if (i % 5 == 0)
seq_printf(m,
- " %-15s : %02d[%03d/0x%x/%d]",
+ " %-15s : %5s[%03d/0x%x/%d]",
"[slot_list]",
- (u32)i,
- s.dur, s.cxtbl, s.cxtype);
+ id_to_slot((u32)i),
+ s->dur, s->cxtbl, s->cxtype);
else
seq_printf(m,
- ", %02d[%03d/0x%x/%d]",
- (u32)i,
- s.dur, s.cxtbl, s.cxtype);
- if (i % 6 == 5)
+ ", %5s[%03d/0x%x/%d]",
+ id_to_slot((u32)i),
+ s->dur, s->cxtbl, s->cxtype);
+ if (i % 5 == 4)
seq_puts(m, "\n");
}
+ seq_puts(m, "\n");
}
-static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
- struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
+ struct rtw89_btc_fbtc_cysta_v2 *pcysta_le32 = NULL;
union rtw89_btc_fbtc_rxflct r;
u8 i, cnt = 0, slot_pair;
u16 cycle, c_begin, c_end, store_index;
@@ -6207,65 +6436,66 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
- rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
+ pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo.v2;
seq_printf(m,
" %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]", pcysta->cycles, pcysta->bcn_cnt[CXBCN_ALL],
- pcysta->bcn_cnt[CXBCN_ALL_OK],
- pcysta->bcn_cnt[CXBCN_BT_SLOT],
- pcysta->bcn_cnt[CXBCN_BT_OK]);
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta_le32->cycles),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
- if (!pcysta->slot_cnt[i])
+ if (!le32_to_cpu(pcysta_le32->slot_cnt[i]))
continue;
- seq_printf(m,
- ", %d:%d", (u32)i, pcysta->slot_cnt[i]);
+ seq_printf(m, ", %s:%d", id_to_slot((u32)i),
+ le32_to_cpu(pcysta_le32->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl) {
- seq_printf(m,
- ", leak_rx:%d", pcysta->leakrx_cnt);
+ seq_printf(m, ", leak_rx:%d",
+ le32_to_cpu(pcysta_le32->leakrx_cnt));
}
- if (pcysta->collision_cnt) {
- seq_printf(m,
- ", collision:%d", pcysta->collision_cnt);
+ if (le32_to_cpu(pcysta_le32->collision_cnt)) {
+ seq_printf(m, ", collision:%d",
+ le32_to_cpu(pcysta_le32->collision_cnt));
}
- if (pcysta->skip_cnt) {
- seq_printf(m,
- ", skip:%d", pcysta->skip_cnt);
+ if (le32_to_cpu(pcysta_le32->skip_cnt)) {
+ seq_printf(m, ", skip:%d",
+ le32_to_cpu(pcysta_le32->skip_cnt));
}
seq_puts(m, "\n");
seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
"[cycle_time]",
- pcysta->tavg_cycle[CXT_WL],
- pcysta->tavg_cycle[CXT_BT],
- pcysta->tavg_lk / 1000, pcysta->tavg_lk % 1000);
- seq_printf(m,
- ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
- pcysta->tmax_cycle[CXT_WL],
- pcysta->tmax_cycle[CXT_BT],
- pcysta->tmax_lk / 1000, pcysta->tmax_lk % 1000);
- seq_printf(m,
- ", maxdiff_t[wl:%d/bt:%d]\n",
- pcysta->tmaxdiff_cycle[CXT_WL],
- pcysta->tmaxdiff_cycle[CXT_BT]);
-
- if (pcysta->cycles == 0)
+ le16_to_cpu(pcysta_le32->tavg_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tavg_cycle[CXT_BT]),
+ le16_to_cpu(pcysta_le32->tavg_lk) / 1000,
+ le16_to_cpu(pcysta_le32->tavg_lk) % 1000);
+ seq_printf(m, ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta_le32->tmax_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tmax_cycle[CXT_BT]),
+ le16_to_cpu(pcysta_le32->tmax_lk) / 1000,
+ le16_to_cpu(pcysta_le32->tmax_lk) % 1000);
+ seq_printf(m, ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_BT]));
+
+ if (le16_to_cpu(pcysta_le32->cycles) <= 1)
return;
/* 1 cycle records 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
- if (pcysta->cycles <= slot_pair)
+ if (le16_to_cpu(pcysta_le32->cycles) <= slot_pair)
c_begin = 1;
else
- c_begin = pcysta->cycles - slot_pair + 1;
+ c_begin = le16_to_cpu(pcysta_le32->cycles) - slot_pair + 1;
- c_end = pcysta->cycles;
+ c_end = le16_to_cpu(pcysta_le32->cycles);
for (cycle = c_begin; cycle <= c_end; cycle++) {
cnt++;
@@ -6274,13 +6504,13 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 1)
seq_printf(m,
" %-15s : ->b%02d->w%02d", "[cycle_step]",
- pcysta->tslot_cycle[store_index],
- pcysta->tslot_cycle[store_index + 1]);
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
else
seq_printf(m,
"->b%02d->w%02d",
- pcysta->tslot_cycle[store_index],
- pcysta->tslot_cycle[store_index + 1]);
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
seq_puts(m, "\n");
}
@@ -6289,41 +6519,43 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m,
" %-15s : a2dp_ept:%d, a2dp_late:%d",
"[a2dp_t_sta]",
- pcysta->a2dpept, pcysta->a2dpeptto);
+ le16_to_cpu(pcysta_le32->a2dpept),
+ le16_to_cpu(pcysta_le32->a2dpeptto));
seq_printf(m,
", avg_t:%d, max_t:%d",
- pcysta->tavg_a2dpept, pcysta->tmax_a2dpept);
+ le16_to_cpu(pcysta_le32->tavg_a2dpept),
+ le16_to_cpu(pcysta_le32->tmax_a2dpept));
r.val = dm->tdma_now.rxflctrl;
if (r.type && r.tgln_n) {
seq_printf(m,
", cycle[PSTDMA:%d/TDMA:%d], ",
- pcysta->cycles_a2dp[CXT_FLCTRL_ON],
- pcysta->cycles_a2dp[CXT_FLCTRL_OFF]);
+ le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_OFF]));
seq_printf(m,
"avg_t[PSTDMA:%d/TDMA:%d], ",
- pcysta->tavg_a2dp[CXT_FLCTRL_ON],
- pcysta->tavg_a2dp[CXT_FLCTRL_OFF]);
+ le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_OFF]));
seq_printf(m,
"max_t[PSTDMA:%d/TDMA:%d]",
- pcysta->tmax_a2dp[CXT_FLCTRL_ON],
- pcysta->tmax_a2dp[CXT_FLCTRL_OFF]);
+ le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_OFF]));
}
seq_puts(m, "\n");
}
}
-static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_a2dp_trx_stat *a2dp_trx;
- struct rtw89_btc_fbtc_cysta_v1 *pcysta;
+ struct rtw89_btc_fbtc_cysta_v3 *pcysta;
struct rtw89_btc_rpt_cmn_info *pcinfo;
u8 i, cnt = 0, slot_pair, divide_cnt;
u16 cycle, c_begin, c_end, store_index;
@@ -6332,7 +6564,7 @@ static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- pcysta = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v3;
seq_printf(m,
" %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
"[cycle_cnt]",
@@ -6379,7 +6611,7 @@ static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
cycle = le16_to_cpu(pcysta->cycles);
- if (cycle == 0)
+ if (cycle <= 1)
return;
/* 1 cycle records 1 wl-slot and 1 bt-slot */
@@ -6401,40 +6633,171 @@ static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt++;
store_index = ((cycle - 1) % slot_pair) * 2;
- if (cnt % divide_cnt == 1) {
- seq_printf(m, "\n\r %-15s : ", "[cycle_step]");
- } else {
- seq_printf(m, "->b%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index]));
- if (a2dp->exist) {
- a2dp_trx = &pcysta->a2dp_trx[store_index];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
- }
- seq_printf(m, "->w%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
- if (a2dp->exist) {
- a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
- }
+ if (cnt % divide_cnt == 1)
+ seq_printf(m, " %-15s : ", "[cycle_step]");
+
+ seq_printf(m, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
- if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
+ seq_printf(m, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ if (cnt % divide_cnt == 0 || cnt == c_end)
seq_puts(m, "\n");
}
if (a2dp->exist) {
- seq_printf(m, "%-15s : a2dp_ept:%d, a2dp_late:%d",
+ seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+
+ seq_printf(m, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
+
+ seq_puts(m, "\n");
+ }
+}
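As the comment in these printers notes, each coexistence cycle records one wl-slot and one bt-slot, so a buffer of BTC_CYCLE_SLOT_MAX slot times covers only the last BTC_CYCLE_SLOT_MAX / 2 cycles. A small sketch of the c_begin/c_end window and the store_index mapping under that assumption:

    #include <stdio.h>

    #define BTC_CYCLE_SLOT_MAX 48    /* must be even, as in the driver */

    int main(void)
    {
        unsigned int slot_pair = BTC_CYCLE_SLOT_MAX / 2;
        unsigned int cycles = 100;   /* total cycles reported by firmware */
        unsigned int c_begin, c_end, cycle;

        /* replay at most the last slot_pair cycles */
        c_begin = (cycles <= slot_pair) ? 1 : cycles - slot_pair + 1;
        c_end = cycles;

        for (cycle = c_begin; cycle <= c_end; cycle++) {
            /* each cycle owns two consecutive slots: bt then wl */
            unsigned int store_index = ((cycle - 1) % slot_pair) * 2;

            printf("cycle %u -> slots [%u, %u]\n",
                   cycle, store_index, store_index + 1);
        }
        return 0;
    }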
+
+static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_a2dp_trx_stat_v4 *a2dp_trx;
+ struct rtw89_btc_fbtc_cysta_v4 *pcysta;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ u8 i, cnt = 0, slot_pair, divide_cnt;
+ u16 cycle, c_begin, c_end, store_index;
+
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v4;
+ seq_printf(m,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!le16_to_cpu(pcysta->slot_cnt[i]))
+ continue;
+
+ seq_printf(m, ", %s:%d", id_to_slot(i),
+ le16_to_cpu(pcysta->slot_cnt[i]));
+ }
+
+ if (dm->tdma_now.rxflctrl)
+ seq_printf(m, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+
+ if (pcysta->collision_cnt)
+ seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+
+ if (le16_to_cpu(pcysta->skip_cnt))
+ seq_printf(m, ", skip:%d",
+ le16_to_cpu(pcysta->skip_cnt));
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ seq_printf(m,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ seq_printf(m,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+
+ cycle = le16_to_cpu(pcysta->cycles);
+ if (cycle <= 1)
+ return;
+
+ /* 1 cycle records 1 wl-slot and 1 bt-slot */
+ slot_pair = BTC_CYCLE_SLOT_MAX / 2;
+
+ if (cycle <= slot_pair)
+ c_begin = 1;
+ else
+ c_begin = cycle - slot_pair + 1;
+
+ c_end = cycle;
+
+ if (a2dp->exist)
+ divide_cnt = 3;
+ else
+ divide_cnt = BTC_CYCLE_SLOT_MAX / 4;
+
+ for (cycle = c_begin; cycle <= c_end; cycle++) {
+ cnt++;
+ store_index = ((cycle - 1) % slot_pair) * 2;
+
+ if (cnt % divide_cnt == 1)
+ seq_printf(m, " %-15s : ", "[cycle_step]");
+
+ seq_printf(m, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ seq_printf(m, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ if (cnt % divide_cnt == 0 || cnt == c_end)
+ seq_puts(m, "\n");
+ }
+
+ if (a2dp->exist) {
+ seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
"[a2dp_t_sta]",
le16_to_cpu(pcysta->a2dp_ept.cnt),
le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
@@ -6449,12 +6812,11 @@ static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo;
- struct rtw89_btc_fbtc_cynullsta *ns;
- struct rtw89_btc_fbtc_cynullsta_v1 *ns_v1;
+ union rtw89_btc_fbtc_cynullsta_info *ns;
u8 i = 0;
if (!btc->dm.tdma_now.rxflctrl)
@@ -6464,68 +6826,56 @@ static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- if (chip->chip_id == RTL8852A) {
- ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
-
- seq_printf(m, " %-15s : ", "[null_sta]");
-
+ ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
+ if (ver->fcxnullsta == 1) {
for (i = 0; i < 2; i++) {
- if (i != 0)
- seq_printf(m, ", null-%d", i);
- else
- seq_printf(m, "null-%d", i);
+ seq_printf(m, " %-15s : ", "[NULL-STA]");
+ seq_printf(m, "null-%d", i);
seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns->result[i][1]));
+ le32_to_cpu(ns->v1.result[i][1]));
seq_printf(m, "fail:%d/",
- le32_to_cpu(ns->result[i][0]));
+ le32_to_cpu(ns->v1.result[i][0]));
seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns->result[i][2]));
+ le32_to_cpu(ns->v1.result[i][2]));
seq_printf(m, "retry:%d/",
- le32_to_cpu(ns->result[i][3]));
+ le32_to_cpu(ns->v1.result[i][3]));
seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->avg_t[i]) / 1000,
- le32_to_cpu(ns->avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]",
- le32_to_cpu(ns->max_t[i]) / 1000,
- le32_to_cpu(ns->max_t[i]) % 1000);
+ le32_to_cpu(ns->v1.avg_t[i]) / 1000,
+ le32_to_cpu(ns->v1.avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v1.max_t[i]) / 1000,
+ le32_to_cpu(ns->v1.max_t[i]) % 1000);
}
} else {
- ns_v1 = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
-
- seq_printf(m, " %-15s : ", "[null_sta]");
-
for (i = 0; i < 2; i++) {
- if (i != 0)
- seq_printf(m, ", null-%d", i);
- else
- seq_printf(m, "null-%d", i);
+ seq_printf(m, " %-15s : ", "[NULL-STA]");
+ seq_printf(m, "null-%d", i);
seq_printf(m, "[Tx:%d/",
- le32_to_cpu(ns_v1->result[i][4]));
+ le32_to_cpu(ns->v2.result[i][4]));
seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns_v1->result[i][1]));
+ le32_to_cpu(ns->v2.result[i][1]));
seq_printf(m, "fail:%d/",
- le32_to_cpu(ns_v1->result[i][0]));
+ le32_to_cpu(ns->v2.result[i][0]));
seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns_v1->result[i][2]));
+ le32_to_cpu(ns->v2.result[i][2]));
seq_printf(m, "retry:%d/",
- le32_to_cpu(ns_v1->result[i][3]));
+ le32_to_cpu(ns->v2.result[i][3]));
seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns_v1->avg_t[i]) / 1000,
- le32_to_cpu(ns_v1->avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]",
- le32_to_cpu(ns_v1->max_t[i]) / 1000,
- le32_to_cpu(ns_v1->max_t[i]) % 1000);
+ le32_to_cpu(ns->v2.avg_t[i]) / 1000,
+ le32_to_cpu(ns->v2.avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v2.max_t[i]) / 1000,
+ le32_to_cpu(ns->v2.max_t[i]) % 1000);
}
}
- seq_puts(m, "\n");
}
-static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_steps *pstep = NULL;
+ struct rtw89_btc_fbtc_steps_v2 *pstep = NULL;
u8 type, val, cnt = 0, state = 0;
bool outloop = false;
u16 i, diff_t, n_start = 0, n_stop = 0;
@@ -6535,7 +6885,7 @@ static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- pstep = &pfwinfo->rpt_fbtc_step.finfo;
+ pstep = &pfwinfo->rpt_fbtc_step.finfo.v2;
pos_old = le16_to_cpu(pstep->pos_old);
pos_new = le16_to_cpu(pstep->pos_new);
@@ -6589,10 +6939,67 @@ static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
} while (!outloop);
}
+static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_fbtc_steps_v3 *pstep;
+ u32 i, n_begin, n_end, array_idx, cnt = 0;
+ u8 type, val;
+ u16 diff_t;
+
+ if ((pfwinfo->rpt_en_map &
+ rtw89_btc_fw_rpt_ver(rtwdev, RPT_EN_FW_STEP_INFO)) == 0)
+ return;
+
+ pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pstep = &pfwinfo->rpt_fbtc_step.finfo.v3;
+ if (pcinfo->req_fver != pstep->fver)
+ return;
+
+ if (le32_to_cpu(pstep->cnt) <= FCXDEF_STEP)
+ n_begin = 1;
+ else
+ n_begin = le32_to_cpu(pstep->cnt) - FCXDEF_STEP + 1;
+
+ n_end = le32_to_cpu(pstep->cnt);
+
+ if (n_begin > n_end)
+ return;
+
+ /* restore step info by using a ring buffer instead of a FIFO */
+ for (i = n_begin; i <= n_end; i++) {
+ array_idx = (i - 1) % FCXDEF_STEP;
+ type = pstep->step[array_idx].type;
+ val = pstep->step[array_idx].val;
+ diff_t = le16_to_cpu(pstep->step[array_idx].difft);
+
+ if (type == CXSTEP_NONE || type >= CXSTEP_MAX)
+ continue;
+
+ if (cnt % 10 == 0)
+ seq_printf(m, " %-15s : ", "[steps]");
+
+ seq_printf(m, "-> %s(%02d)",
+ (type == CXSTEP_SLOT ?
+ id_to_slot((u32)val) :
+ id_to_evt((u32)val)), diff_t);
+
+ if (cnt % 10 == 9)
+ seq_puts(m, "\n");
+
+ cnt++;
+ }
+}
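_show_fbtc_step_v3() replays the firmware's step log the same way: only the newest FCXDEF_STEP entries survive, (i - 1) % FCXDEF_STEP locates each one in the ring, and cnt % 10 starts a fresh labelled row every ten steps. A compact userspace sketch of that replay loop (the FCXDEF_STEP value is assumed here; the real constant lives in the driver headers):

    #include <stdio.h>

    #define FCXDEF_STEP 50        /* assumed ring depth for the sketch */

    int main(void)
    {
        unsigned int total = 123; /* steps logged by firmware */
        unsigned int n_begin, n_end, i, cnt = 0;

        n_begin = (total <= FCXDEF_STEP) ? 1 : total - FCXDEF_STEP + 1;
        n_end = total;

        for (i = n_begin; i <= n_end; i++) {
            unsigned int array_idx = (i - 1) % FCXDEF_STEP;

            if (cnt % 10 == 0)
                printf(" %-15s : ", "[steps]");
            printf("-> step[%02u]", array_idx);
            if (cnt % 10 == 9)
                printf("\n");
            cnt++;
        }
        printf("\n");
        return 0;
    }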
+
static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM))
return;
@@ -6601,13 +7008,20 @@ static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_fbtc_tdma(rtwdev, m);
_show_fbtc_slots(rtwdev, m);
- if (chip->chip_id == RTL8852A)
- _show_fbtc_cysta(rtwdev, m);
- else
- _show_fbtc_cysta_v1(rtwdev, m);
+ if (ver->fcxcysta == 2)
+ _show_fbtc_cysta_v2(rtwdev, m);
+ else if (ver->fcxcysta == 3)
+ _show_fbtc_cysta_v3(rtwdev, m);
+ else if (ver->fcxcysta == 4)
+ _show_fbtc_cysta_v4(rtwdev, m);
_show_fbtc_nullsta(rtwdev, m);
- _show_fbtc_step(rtwdev, m);
+
+ if (ver->fcxstep == 2)
+ _show_fbtc_step_v2(rtwdev, m);
+ else if (ver->fcxstep == 3)
+ _show_fbtc_step_v3(rtwdev, m);
+
}
static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt_cfg)
@@ -6680,10 +7094,7 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
/* To avoid I/O if WL LPS or power-off */
if (!wl->status.map.lps && !wl->status.map.rf_off) {
- if (chip->chip_id == RTL8852A)
- btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
- else if (chip->chip_id == RTL8852C)
- btc->dm.pta_owner = 0;
+ btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
_get_gnt(rtwdev, &gnt_cfg);
gnt = gnt_cfg.band[0];
@@ -6729,6 +7140,9 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
if (cnt % 6 == 5)
seq_puts(m, "\n");
cnt++;
+
+ if (i >= pmreg->reg_num)
+ seq_puts(m, "\n");
}
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
@@ -6754,12 +7168,12 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_puts(m, "\n");
}
-static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_rpt_ctrl *prptctrl = NULL;
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 *prptctrl = NULL;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
@@ -6774,7 +7188,7 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
- prptctrl = &pfwinfo->rpt_ctrl.finfo;
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v1;
seq_printf(m,
" %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
@@ -6858,11 +7272,11 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
-static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_fbtc_rpt_ctrl_v1 *prptctrl;
+ struct rtw89_btc_fbtc_rpt_ctrl_v4 *prptctrl;
struct rtw89_btc_rpt_cmn_info *pcinfo;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -6878,7 +7292,7 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
- prptctrl = &pfwinfo->rpt_ctrl.finfo_v1;
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v4;
seq_printf(m,
" %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
@@ -6970,11 +7384,127 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v5 *prptctrl;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_puts(m, "========== [Statistics] ==========\n");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v5;
+
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
+ "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h));
+
+ seq_printf(m,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ seq_printf(m,
+ ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ seq_printf(m,
+ ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ } else {
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
+ }
+
+ if (!pcinfo->valid || pfwinfo->len_mismch || pfwinfo->fver_mismch ||
+ pfwinfo->err[BTFRE_EXCEPTION]) {
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
+ "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
+ "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
+ pfwinfo->fver_mismch, pfwinfo->err[BTFRE_EXCEPTION],
+ wl->status.map.lps, wl->status.map.rf_off);
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m,
+ "timer=%d, control=%d, customerize=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
+}
+
void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
@@ -7003,8 +7533,41 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_dm_info(rtwdev, m);
_show_fw_dm_msg(rtwdev, m);
_show_mreg(rtwdev, m);
- if (chip->chip_id == RTL8852A)
- _show_summary(rtwdev, m);
- else
+ if (ver->fcxbtcrpt == 1)
_show_summary_v1(rtwdev, m);
+ else if (ver->fcxbtcrpt == 4)
+ _show_summary_v4(rtwdev, m);
+ else if (ver->fcxbtcrpt == 5)
+ _show_summary_v5(rtwdev, m);
+}
+
+void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *btc_ver_def;
+ const struct rtw89_fw_suit *fw_suit;
+ u32 suit_ver_code;
+ int i;
+
+ fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+ suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
+
+ for (i = 0; i < ARRAY_SIZE(rtw89_btc_ver_defs); i++) {
+ btc_ver_def = &rtw89_btc_ver_defs[i];
+
+ if (chip->chip_id != btc_ver_def->chip_id)
+ continue;
+
+ if (suit_ver_code >= btc_ver_def->fw_ver_code) {
+ btc->ver = btc_ver_def;
+ goto out;
+ }
+ }
+
+ btc->ver = &rtw89_btc_ver_defs[RTW89_DEFAULT_BTC_VER_IDX];
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] use version def[%d] = 0x%08x\n",
+ (int)(btc->ver - rtw89_btc_ver_defs), btc->ver->fw_ver_code);
}
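rtw89_coex_recognize_ver() scans a table where the first matching entry wins: an entry applies when its chip_id matches and the loaded firmware's version code is at least the entry's fw_ver_code, with a default entry as fallback. A sketch of that selection, with the table contents invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    struct btc_ver_sketch {
        int chip_id;               /* which chip the entry applies to */
        unsigned int fw_ver_code;  /* minimum firmware version */
    };

    /* newest entries first, as the lookup takes the first match */
    static const struct btc_ver_sketch defs[] = {
        { .chip_id = 2, .fw_ver_code = 0x00290000 },
        { .chip_id = 2, .fw_ver_code = 0x00270000 },
        { .chip_id = 1, .fw_ver_code = 0x00130000 },
    };

    #define DEFAULT_IDX 2          /* fallback when nothing matches */

    static const struct btc_ver_sketch *recognize(int chip_id,
                                                  unsigned int suit_ver)
    {
        size_t i;

        for (i = 0; i < sizeof(defs) / sizeof(defs[0]); i++) {
            if (defs[i].chip_id != chip_id)
                continue;
            if (suit_ver >= defs[i].fw_ver_code)
                return &defs[i];
        }
        return &defs[DEFAULT_IDX];
    }

    int main(void)
    {
        const struct btc_ver_sketch *v = recognize(2, 0x00280000);

        printf("use def with fw_ver_code=0x%08x\n", v->fw_ver_code);
        return 0;
    }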
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index ca16afa97ec0..401fb55df82b 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -164,6 +164,7 @@ void rtw89_coex_rfk_chk_work(struct work_struct *work);
void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type);
void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type);
+void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 931aff8b5dc9..f09361bc4a4d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -498,7 +498,8 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u16 lowest_rate;
- if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE || vif->p2p)
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE ||
+ (vif && vif->p2p))
lowest_rate = RTW89_HW_RATE_OFDM6;
else if (chan->band_type == RTW89_BAND_2G)
lowest_rate = RTW89_HW_RATE_CCK1;
@@ -511,6 +512,21 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
}
+static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta;
+
+ if (!sta)
+ return rtwvif->mac_id;
+
+ rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ return rtwsta->mac_id;
+}
+
static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -527,6 +543,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+ desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
@@ -669,27 +686,14 @@ desc_bk:
desc_info->bk = true;
}
-static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
- struct rtw89_core_tx_request *tx_req)
-{
- struct ieee80211_vif *vif = tx_req->vif;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
- struct ieee80211_sta *sta = tx_req->sta;
- struct rtw89_sta *rtwsta;
-
- if (!sta)
- return rtwvif->mac_id;
-
- rtwsta = (struct rtw89_sta *)sta->drv_priv;
- return rtwsta->mac_id;
-}
-
static void
rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct ieee80211_vif *vif = tx_req->vif;
+ struct ieee80211_sta *sta = tx_req->sta;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
@@ -707,6 +711,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
desc_info->qsel = qsel;
desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+ desc_info->er_cap = rtwsta ? rtwsta->er_cap : false;
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
@@ -1006,7 +1011,9 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
- FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_ER, desc_info->er_cap) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_BW_ER, 0);
return cpu_to_le32(dword);
}
@@ -1757,7 +1764,8 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
- if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED))
+ if ((chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED)) &&
+ !RTW89_CHK_FW_FEATURE(NO_LPS_PG, &rtwdev->fw))
return RTW89_PS_MODE_PWR_GATED;
if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_CLK_GATED))
@@ -2199,8 +2207,9 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
- rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
+ if ((rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT) ||
+ rtwvif->tdls_peer)
return;
if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE &&
@@ -2426,6 +2435,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int i;
+ int ret;
rtwsta->rtwdev = rtwdev;
rtwsta->rtwvif = rtwvif;
@@ -2450,6 +2460,21 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
RTW89_MAX_MAC_ID_NUM);
if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
return -ENOSPC;
+
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
+ if (ret) {
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
+ RTW89_ROLE_CREATE);
+ if (ret) {
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
}
return 0;
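The new sta_add path follows the usual kernel unwind discipline: the mac_id bit is claimed first, and every later H2C failure releases it before returning, so the id is never leaked. A tiny sketch of the pattern, with the resource reduced to a bitmap slot and the commands stubbed out:

    #include <stdio.h>

    static unsigned long id_map;   /* one bit per allocated mac_id */

    static void claim_id(int id)   { id_map |=  (1UL << id); }
    static void release_id(int id) { id_map &= ~(1UL << id); }

    /* stand-in for an H2C command that can fail */
    static int send_h2c(int fail) { return fail ? -5 /* -EIO */ : 0; }

    static int sta_add_sketch(int id, int fail_role)
    {
        int ret;

        claim_id(id);

        ret = send_h2c(0);         /* macid pause */
        if (ret)
            goto err_release;

        ret = send_h2c(fail_role); /* role create */
        if (ret)
            goto err_release;

        return 0;

    err_release:
        release_id(id);            /* undo on any later failure */
        return ret;
    }

    int main(void)
    {
        printf("ok=%d map=%#lx\n", sta_add_sketch(3, 0), id_map);
        printf("err=%d map=%#lx\n", sta_add_sketch(4, 1), id_map);
        return 0;
    }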
@@ -2459,9 +2484,12 @@ int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
rtwdev->total_sta_assoc--;
+ if (sta->tdls)
+ rtwvif->tdls_peer--;
rtwsta->disassoc = true;
return 0;
@@ -2484,8 +2512,10 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
if (sta->tdls)
rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
rtw89_vif_type_mapping(vif, false);
+ rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, true);
+ }
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
@@ -2499,14 +2529,6 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
- if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
- ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE);
- if (ret) {
- rtw89_warn(rtwdev, "failed to send h2c role info\n");
- return ret;
- }
- }
-
/* update cam aid mac_id net_type */
ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
@@ -2527,18 +2549,6 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
int ret;
if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
- ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
- if (ret) {
- rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
- return ret;
- }
-
- ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_CREATE);
- if (ret) {
- rtw89_warn(rtwdev, "failed to send h2c role info\n");
- return ret;
- }
-
if (sta->tdls) {
ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, sta->addr);
if (ret) {
@@ -2573,22 +2583,30 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwsta->mac_id);
- if (ret) {
- rtw89_warn(rtwdev, "failed to send h2c general packet\n");
- return ret;
- }
-
rtwdev->total_sta_assoc++;
+ if (sta->tdls)
+ rtwvif->tdls_peer++;
rtw89_phy_ra_assoc(rtwdev, sta);
rtw89_mac_bf_assoc(rtwdev, vif, sta);
rtw89_mac_bf_monitor_calc(rtwdev, sta, false);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if (bss_conf->he_support &&
+ !(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE))
+ rtwsta->er_cap = true;
+
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_END);
rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template);
rtw89_phy_ul_tb_assoc(rtwdev, rtwvif);
+
+ ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c general packet\n");
+ return ret;
+ }
}
return ret;
@@ -2600,13 +2618,22 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ int ret;
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
- else if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
+ else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
+ RTW89_ROLE_REMOVE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -3113,7 +3140,6 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
continue;
INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
}
- INIT_LIST_HEAD(&rtwdev->wow.pkt_list);
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -3124,6 +3150,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!rtwdev->txq_wq)
+ return -ENOMEM;
spin_lock_init(&rtwdev->ba_lock);
spin_lock_init(&rtwdev->rpwm_lock);
mutex_init(&rtwdev->mutex);
@@ -3138,7 +3166,6 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_core_ppdu_sts_init(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
- rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
@@ -3149,6 +3176,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
ret = rtw89_load_firmware(rtwdev);
if (ret) {
rtw89_warn(rtwdev, "no firmware loaded\n");
+ destroy_workqueue(rtwdev->txq_wq);
return ret;
}
rtw89_ser_init(rtwdev);
@@ -3293,6 +3321,8 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
+
return 0;
}
EXPORT_SYMBOL(rtw89_chip_info_setup);
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 2badb96d2ae3..41365ffb7e5e 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -816,6 +816,7 @@ struct rtw89_tx_desc_info {
#define RTW89_MGMT_HW_SEQ_MODE 1
bool hiq;
u8 port;
+ bool er_cap;
};
struct rtw89_core_tx_request {
@@ -1263,6 +1264,7 @@ union rtw89_btc_bt_state_map {
#define BTC_BT_RSSI_THMAX 4
#define BTC_BT_AFH_GROUP 12
+#define BTC_BT_AFH_LE_GROUP 5
struct rtw89_btc_bt_link_info {
struct rtw89_btc_u8_sta_chg profile_cnt;
@@ -1278,6 +1280,7 @@ struct rtw89_btc_bt_link_info {
u8 golden_rx_shift[BTC_PROFILE_MAX];
u8 rssi_state[BTC_BT_RSSI_THMAX];
u8 afh_map[BTC_BT_AFH_GROUP];
+ u8 afh_map_le[BTC_BT_AFH_LE_GROUP];
u32 role_sw: 1;
u32 slave_role: 1;
@@ -1437,7 +1440,7 @@ struct rtw89_btc_cx {
};
struct rtw89_btc_fbtc_tdma {
- u8 type; /* chip_info::fcxtdma_ver */
+ u8 type; /* btc_ver::fcxtdma */
u8 rxflctrl;
u8 txpause;
u8 wtgle_n;
@@ -1447,13 +1450,18 @@ struct rtw89_btc_fbtc_tdma {
u8 option_ctrl;
} __packed;
-struct rtw89_btc_fbtc_tdma_v1 {
- u8 fver; /* chip_info::fcxtdma_ver */
+struct rtw89_btc_fbtc_tdma_v3 {
+ u8 fver; /* btc_ver::fcxtdma */
u8 rsvd;
__le16 rsvd1;
struct rtw89_btc_fbtc_tdma tdma;
} __packed;
+union rtw89_btc_fbtc_tdma_le32 {
+ struct rtw89_btc_fbtc_tdma v1;
+ struct rtw89_btc_fbtc_tdma_v3 v3;
+};
+
#define CXMREG_MAX 30
#define FCXMAX_STEP 255 /* STEP trace record cnt, Max:65535, default:255 */
#define BTC_CYCLE_SLOT_MAX 48 /* must be even number, non-zero */
@@ -1472,8 +1480,8 @@ enum rtw89_btc_bt_sta_counter {
BTC_BCNT_STA_MAX
};
-struct rtw89_btc_fbtc_rpt_ctrl {
- u16 fver; /* chip_info::fcxbtcrpt_ver */
+struct rtw89_btc_fbtc_rpt_ctrl_v1 {
+ u16 fver; /* btc_ver::fcxbtcrpt */
u16 rpt_cnt; /* tmr counters */
u32 wl_fw_coex_ver; /* match which driver's coex version */
u32 wl_fw_cx_offload;
@@ -1504,6 +1512,20 @@ struct rtw89_btc_fbtc_rpt_ctrl_info {
__le32 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_info_v5 {
+ __le32 cx_ver; /* match which driver's coex version */
+ __le32 fw_ver;
+ __le32 en; /* report map */
+
+ __le16 cnt; /* fw report counter */
+ __le16 cnt_c2h; /* fw send c2h counter */
+ __le16 cnt_h2c; /* fw recv h2c counter */
+ __le16 len_c2h; /* The total length of the last C2H */
+
+ __le16 cnt_aoac_rf_on; /* rf-on counter for aoac switch notify */
+ __le16 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
+} __packed;
+
struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info {
__le32 cx_ver; /* match which driver's coex version */
__le32 cx_offload;
@@ -1525,7 +1547,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox {
struct rtw89_btc_fbtc_rpt_ctrl_a2dp_empty a2dp;
} __packed;
-struct rtw89_btc_fbtc_rpt_ctrl_v1 {
+struct rtw89_btc_fbtc_rpt_ctrl_v4 {
u8 fver;
u8 rsvd;
__le16 rsvd1;
@@ -1536,6 +1558,24 @@ struct rtw89_btc_fbtc_rpt_ctrl_v1 {
struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_MAX];
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_v5 {
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd1;
+
+ u8 gnt_val[RTW89_PHY_MAX][4];
+ __le16 bt_cnt[BTC_BCNT_STA_MAX];
+
+ struct rtw89_btc_fbtc_rpt_ctrl_info_v5 rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+} __packed;
+
+union rtw89_btc_fbtc_rpt_ctrl_ver_info {
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 v1;
+ struct rtw89_btc_fbtc_rpt_ctrl_v4 v4;
+ struct rtw89_btc_fbtc_rpt_ctrl_v5 v5;
+};
+
enum rtw89_fbtc_ext_ctrl_type {
CXECTL_OFF = 0x0, /* tdma off */
CXECTL_B2 = 0x1, /* allow B2 (beacon-early) */
@@ -1571,6 +1611,36 @@ enum rtw89_btc_cxst_state {
CXST_MAX = 0x12,
};
+enum rtw89_btc_cxevnt {
+ CXEVNT_TDMA_ENTRY = 0x0,
+ CXEVNT_WL_TMR,
+ CXEVNT_B1_TMR,
+ CXEVNT_B2_TMR,
+ CXEVNT_B3_TMR,
+ CXEVNT_B4_TMR,
+ CXEVNT_W2B_TMR,
+ CXEVNT_B2W_TMR,
+ CXEVNT_BCN_EARLY,
+ CXEVNT_A2DP_EMPTY,
+ CXEVNT_LK_END,
+ CXEVNT_RX_ISR,
+ CXEVNT_RX_FC0,
+ CXEVNT_RX_FC1,
+ CXEVNT_BT_RELINK,
+ CXEVNT_BT_RETRY,
+ CXEVNT_E2G,
+ CXEVNT_E5G,
+ CXEVNT_EBT,
+ CXEVNT_ENULL,
+ CXEVNT_DRV_WLK,
+ CXEVNT_BCN_OK,
+ CXEVNT_BT_CHANGE,
+ CXEVNT_EBT_EXTEND,
+ CXEVNT_E2G_NULL1,
+ CXEVNT_B1FDD_TMR,
+ CXEVNT_MAX
+};
+
enum {
CXBCN_ALL = 0x0,
CXBCN_ALL_OK,
@@ -1604,9 +1674,14 @@ enum { /* STEP TYPE */
CXSTEP_MAX,
};
+enum rtw89_btc_afh_map_type { /* AFH MAP TYPE */
+ RPT_BT_AFH_SEQ_LEGACY = 0x10,
+ RPT_BT_AFH_SEQ_LE = 0x20
+};
+
#define BTC_DBG_MAX1 32
struct rtw89_btc_fbtc_gpio_dbg {
- u8 fver; /* chip_info::fcxgpiodbg_ver */
+ u8 fver; /* btc_ver::fcxgpiodbg */
u8 rsvd;
u16 rsvd2;
u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enable */
@@ -1615,7 +1690,7 @@ struct rtw89_btc_fbtc_gpio_dbg {
} __packed;
struct rtw89_btc_fbtc_mreg_val {
- u8 fver; /* chip_info::fcxmreg_ver */
+ u8 fver; /* btc_ver::fcxmreg */
u8 reg_num;
__le16 rsvd;
__le32 mreg_val[CXMREG_MAX];
@@ -1638,7 +1713,7 @@ struct rtw89_btc_fbtc_slot {
} __packed;
struct rtw89_btc_fbtc_slots {
- u8 fver; /* chip_info::fcxslots_ver */
+ u8 fver; /* btc_ver::fcxslots */
u8 tbl_num;
__le16 rsvd;
__le32 update_map;
@@ -1651,8 +1726,8 @@ struct rtw89_btc_fbtc_step {
__le16 difft;
} __packed;
-struct rtw89_btc_fbtc_steps {
- u8 fver; /* chip_info::fcxstep_ver */
+struct rtw89_btc_fbtc_steps_v2 {
+ u8 fver; /* btc_ver::fcxstep */
u8 rsvd;
__le16 cnt;
__le16 pos_old;
@@ -1660,7 +1735,7 @@ struct rtw89_btc_fbtc_steps {
struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
} __packed;
-struct rtw89_btc_fbtc_steps_v1 {
+struct rtw89_btc_fbtc_steps_v3 {
u8 fver;
u8 en;
__le16 rsvd;
@@ -1668,8 +1743,13 @@ struct rtw89_btc_fbtc_steps_v1 {
struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
} __packed;
-struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
- u8 fver; /* chip_info::fcxcysta_ver */
+union rtw89_btc_fbtc_steps_info {
+ struct rtw89_btc_fbtc_steps_v2 v2;
+ struct rtw89_btc_fbtc_steps_v3 v3;
+};
+
+struct rtw89_btc_fbtc_cysta_v2 { /* statistics for cycles */
+ u8 fver; /* btc_ver::fcxcysta */
u8 rsvd;
__le16 cycles; /* total cycle number */
__le16 cycles_a2dp[CXT_FLCTRL_MAX];
@@ -1717,6 +1797,17 @@ struct rtw89_btc_fbtc_a2dp_trx_stat {
u8 rsvd2;
} __packed;
+struct rtw89_btc_fbtc_a2dp_trx_stat_v4 {
+ u8 empty_cnt;
+ u8 retry_cnt;
+ u8 tx_rate;
+ u8 tx_cnt;
+ u8 ack_cnt;
+ u8 nack_cnt;
+ u8 no_empty_cnt;
+ u8 rsvd;
+} __packed;
+
struct rtw89_btc_fbtc_cycle_a2dp_empty_info {
__le16 cnt; /* a2dp empty cnt */
__le16 cnt_timeout; /* a2dp empty timeout cnt */
@@ -1730,7 +1821,35 @@ struct rtw89_btc_fbtc_cycle_leak_info {
__le16 tmax; /* max leak-slot time */
} __packed;
-struct rtw89_btc_fbtc_cysta_v1 { /* statistics for cycles */
+#define RTW89_BTC_FDDT_PHASE_CYCLE GENMASK(9, 0)
+#define RTW89_BTC_FDDT_TRAIN_STEP GENMASK(15, 10)
+
+struct rtw89_btc_fbtc_cycle_fddt_info {
+ __le16 train_cycle;
+ __le16 tp;
+
+ s8 tx_power; /* absolute Tx power (dBm), 0xff-> no BTC control */
+ s8 bt_tx_power; /* decrease Tx power (dB) */
+ s8 bt_rx_gain; /* LNA constrain level */
+ u8 no_empty_cnt;
+
+ u8 rssi; /* [7:4] -> bt_rssi_level, [3:0]-> wl_rssi_level */
+ u8 cn; /* condition_num */
+ u8 train_status; /* [7:4]-> train-state, [3:0]-> train-phase */
+ u8 train_result; /* refer to enum btc_fddt_check_map */
+} __packed;
+
+#define RTW89_BTC_FDDT_CELL_TRAIN_STATE GENMASK(3, 0)
+#define RTW89_BTC_FDDT_CELL_TRAIN_PHASE GENMASK(7, 4)
+
+struct rtw89_btc_fbtc_fddt_cell_status {
+ s8 wl_tx_pwr;
+ s8 bt_tx_pwr;
+ s8 bt_rx_gain;
+ u8 state_phase; /* [0:3] train state, [4:7] train phase */
+} __packed;
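state_phase packs two four-bit fields, and the RTW89_BTC_FDDT_CELL_* masks above exist so they can be pulled apart with FIELD_GET(). A userspace sketch of the decode, with local stand-ins for the kernel's GENMASK()/FIELD_GET() macros:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-ins for <linux/bits.h> / <linux/bitfield.h> helpers */
    #define GENMASK_SK(h, l)  (((1U << ((h) - (l) + 1)) - 1) << (l))
    #define FIELD_GET_SK(mask, val)  (((val) & (mask)) >> __builtin_ctz(mask))

    #define FDDT_CELL_TRAIN_STATE  GENMASK_SK(3, 0)
    #define FDDT_CELL_TRAIN_PHASE  GENMASK_SK(7, 4)

    int main(void)
    {
        uint8_t state_phase = 0x53;    /* phase=5, state=3 */

        printf("state=%u phase=%u\n",
               FIELD_GET_SK(FDDT_CELL_TRAIN_STATE, state_phase),
               FIELD_GET_SK(FDDT_CELL_TRAIN_PHASE, state_phase));
        return 0;
    }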
+
+struct rtw89_btc_fbtc_cysta_v3 { /* statistics for cycles */
u8 fver;
u8 rsvd;
__le16 cycles; /* total cycle number */
@@ -1748,8 +1867,41 @@ struct rtw89_btc_fbtc_cysta_v1 { /* statistics for cycles */
__le32 except_map;
} __packed;
-struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
- u8 fver; /* chip_info::fcxnullsta_ver */
+#define FDD_TRAIN_WL_DIRECTION 2
+#define FDD_TRAIN_WL_RSSI_LEVEL 5
+#define FDD_TRAIN_BT_RSSI_LEVEL 5
+
+struct rtw89_btc_fbtc_cysta_v4 { /* statistics for cycles */
+ u8 fver;
+ u8 rsvd;
+ u8 collision_cnt; /* counter for event/timer occurring at the same time */
+ u8 except_cnt;
+
+ __le16 skip_cnt;
+ __le16 cycles; /* total cycle number */
+
+ __le16 slot_step_time[BTC_CYCLE_SLOT_MAX]; /* record the wl/bt slot time */
+ __le16 slot_cnt[CXST_MAX]; /* slot count */
+ __le16 bcn_cnt[CXBCN_MAX];
+ struct rtw89_btc_fbtc_cycle_time_info cycle_time;
+ struct rtw89_btc_fbtc_cycle_leak_info leak_slot;
+ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept;
+ struct rtw89_btc_fbtc_a2dp_trx_stat_v4 a2dp_trx[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_fddt_info fddt_trx[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_fddt_cell_status fddt_cells[FDD_TRAIN_WL_DIRECTION]
+ [FDD_TRAIN_WL_RSSI_LEVEL]
+ [FDD_TRAIN_BT_RSSI_LEVEL];
+ __le32 except_map;
+} __packed;
+
+union rtw89_btc_fbtc_cysta_info {
+ struct rtw89_btc_fbtc_cysta_v2 v2;
+ struct rtw89_btc_fbtc_cysta_v3 v3;
+ struct rtw89_btc_fbtc_cysta_v4 v4;
+};
+
+struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
+ u8 fver; /* btc_ver::fcxnullsta */
u8 rsvd;
__le16 rsvd2;
__le32 max_t[2]; /* max_t for 0:null0/1:null1 */
@@ -1757,8 +1909,8 @@ struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
__le32 result[2][4]; /* 0:fail, 1:ok, 2:on_time, 3:retry */
} __packed;
-struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
- u8 fver; /* chip_info::fcxnullsta_ver */
+struct rtw89_btc_fbtc_cynullsta_v2 { /* cycle null statistics */
+ u8 fver; /* btc_ver::fcxnullsta */
u8 rsvd;
__le16 rsvd2;
__le32 max_t[2]; /* max_t for 0:null0/1:null1 */
@@ -1766,8 +1918,13 @@ struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
__le32 result[2][5]; /* 0:fail, 1:ok, 2:on_time, 3:retry, 4:tx */
} __packed;
+union rtw89_btc_fbtc_cynullsta_info {
+ struct rtw89_btc_fbtc_cynullsta_v1 v1; /* info from fw */
+ struct rtw89_btc_fbtc_cynullsta_v2 v2;
+};
+
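These unions replace the ad-hoc finfo/finfo_v1 pairs: the driver stores whichever layout the firmware sent into one buffer and picks the member from the btc_ver table, with each layout carrying its own fver field for cross-checking. A sketch of that access pattern with invented v1/v2 layouts:

    #include <stdint.h>
    #include <stdio.h>

    struct rpt_v1 { uint8_t fver; uint8_t a; }  __attribute__((packed));
    struct rpt_v2 { uint8_t fver; uint16_t a; } __attribute__((packed));

    union rpt_info {               /* one buffer, several firmware layouts */
        struct rpt_v1 v1;
        struct rpt_v2 v2;
    };

    static unsigned int read_a(const union rpt_info *r, int ver)
    {
        /* the driver-side version table decides which member is live */
        return ver == 1 ? r->v1.a : r->v2.a;
    }

    int main(void)
    {
        union rpt_info r = { .v2 = { .fver = 2, .a = 513 } };

        printf("a=%u\n", read_a(&r, 2));
        return 0;
    }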
struct rtw89_btc_fbtc_btver {
- u8 fver; /* chip_info::fcxbtver_ver */
+ u8 fver; /* btc_ver::fcxbtver */
u8 rsvd;
__le16 rsvd2;
__le32 coex_ver; /*bit[15:8]->shared, bit[7:0]->non-shared */
@@ -1776,14 +1933,14 @@ struct rtw89_btc_fbtc_btver {
} __packed;
struct rtw89_btc_fbtc_btscan {
- u8 fver; /* chip_info::fcxbtscan_ver */
+ u8 fver; /* btc_ver::fcxbtscan */
u8 rsvd;
__le16 rsvd2;
u8 scan[6];
} __packed;
struct rtw89_btc_fbtc_btafh {
- u8 fver; /* chip_info::fcxbtafh_ver */
+ u8 fver; /* btc_ver::fcxbtafh */
u8 rsvd;
__le16 rsvd2;
u8 afh_l[4]; /*bit0:2402, bit1: 2403.... bit31:2433 */
@@ -1791,8 +1948,20 @@ struct rtw89_btc_fbtc_btafh {
u8 afh_h[4]; /*bit0:2466, bit1:2467......bit14:2480 */
} __packed;
+struct rtw89_btc_fbtc_btafh_v2 {
+ u8 fver; /* btc_ver::fcxbtafh */
+ u8 rsvd;
+ u8 rsvd2;
+ u8 map_type;
+ u8 afh_l[4];
+ u8 afh_m[4];
+ u8 afh_h[4];
+ u8 afh_le_a[4];
+ u8 afh_le_b[4];
+} __packed;
+
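The AFH maps are plain channel bitmaps: per the comments on struct rtw89_btc_fbtc_btafh, bit 0 of afh_l stands for 2402 MHz and each following bit for the next 1 MHz BT channel, with afh_m and afh_h continuing the sequence. A sketch that walks one 4-byte map and prints the in-use channel frequencies, assuming that layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* afh_l as the firmware might report it: bit0 -> 2402 MHz */
        const uint8_t afh_l[4] = { 0x05, 0x00, 0x00, 0x80 };
        unsigned int i;

        for (i = 0; i < 32; i++) {
            if (afh_l[i / 8] & (1U << (i % 8)))
                printf("channel in use: %u MHz\n", 2402 + i);
        }
        return 0;
    }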
struct rtw89_btc_fbtc_btdevinfo {
- u8 fver; /* chip_info::fcxbtdevinfo_ver */
+ u8 fver; /* btc_ver::fcxbtdevinfo */
u8 rsvd;
__le16 vendor_id;
__le32 dev_name; /* only 24 bits valid */
@@ -1911,20 +2080,19 @@ struct rtw89_btc_rpt_cmn_info {
u8 valid;
} __packed;
+union rtw89_btc_fbtc_btafh_info {
+ struct rtw89_btc_fbtc_btafh v1;
+ struct rtw89_btc_fbtc_btafh_v2 v2;
+};
+
struct rtw89_btc_report_ctrl_state {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw for 52A*/
- struct rtw89_btc_fbtc_rpt_ctrl_v1 finfo_v1; /* info from fw for 52C*/
- };
+ union rtw89_btc_fbtc_rpt_ctrl_ver_info finfo;
};
struct rtw89_btc_rpt_fbtc_tdma {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
- struct rtw89_btc_fbtc_tdma_v1 finfo_v1; /* info from fw for 52C*/
- };
+ union rtw89_btc_fbtc_tdma_le32 finfo;
};
struct rtw89_btc_rpt_fbtc_slots {
@@ -1934,26 +2102,17 @@ struct rtw89_btc_rpt_fbtc_slots {
struct rtw89_btc_rpt_fbtc_cysta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_cysta finfo; /* info from fw for 52A*/
- struct rtw89_btc_fbtc_cysta_v1 finfo_v1; /* info from fw for 52C*/
- };
+ union rtw89_btc_fbtc_cysta_info finfo;
};
struct rtw89_btc_rpt_fbtc_step {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_steps finfo; /* info from fw */
- struct rtw89_btc_fbtc_steps_v1 finfo_v1; /* info from fw */
- };
+ union rtw89_btc_fbtc_steps_info finfo; /* info from fw */
};
struct rtw89_btc_rpt_fbtc_nullsta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
- struct rtw89_btc_fbtc_cynullsta_v1 finfo_v1; /* info from fw */
- };
+ union rtw89_btc_fbtc_cynullsta_info finfo;
};
struct rtw89_btc_rpt_fbtc_mreg {
@@ -1978,7 +2137,7 @@ struct rtw89_btc_rpt_fbtc_btscan {
struct rtw89_btc_rpt_fbtc_btafh {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_btafh finfo; /* info from fw */
+ union rtw89_btc_fbtc_btafh_info finfo;
};
struct rtw89_btc_rpt_fbtc_btdev {
@@ -2018,9 +2177,35 @@ struct rtw89_btc_btf_fwinfo {
struct rtw89_btc_rpt_fbtc_btdev rpt_fbtc_btdev;
};
+struct rtw89_btc_ver {
+ enum rtw89_core_chip_id chip_id;
+ u32 fw_ver_code;
+
+ u8 fcxbtcrpt;
+ u8 fcxtdma;
+ u8 fcxslots;
+ u8 fcxcysta;
+ u8 fcxstep;
+ u8 fcxnullsta;
+ u8 fcxmreg;
+ u8 fcxgpiodbg;
+ u8 fcxbtver;
+ u8 fcxbtscan;
+ u8 fcxbtafh;
+ u8 fcxbtdevinfo;
+ u8 fwlrole;
+ u8 frptmap;
+ u8 fcxctrl;
+
+ u16 info_buf;
+ u8 max_role_num;
+};
+
#define RTW89_BTC_POLICY_MAXLEN 512
struct rtw89_btc {
+ const struct rtw89_btc_ver *ver;
+
struct rtw89_btc_cx cx;
struct rtw89_btc_dm dm;
struct rtw89_btc_ctrl ctrl;
@@ -2194,6 +2379,7 @@ struct rtw89_sec_cam_entry {
struct rtw89_sta {
u8 mac_id;
bool disassoc;
+ bool er_cap;
struct rtw89_dev *rtwdev;
struct rtw89_vif *rtwvif;
struct rtw89_ra_info ra;
@@ -2266,6 +2452,7 @@ struct rtw89_vif {
bool last_a_ctrl;
bool dyn_tb_bedge_en;
u8 def_tri_idx;
+ u32 tdls_peer;
struct work_struct update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
@@ -2274,6 +2461,7 @@ struct rtw89_vif {
struct rtw89_phy_rate_pattern rate_pattern;
struct cfg80211_scan_request *scan_req;
struct ieee80211_scan_ies *scan_ies;
+ struct list_head general_pkt_list;
};
enum rtw89_lv1_rcvy_step {
@@ -2660,6 +2848,7 @@ struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
const char *fw_name;
+ bool try_ce_fw;
u32 fifo_size;
u32 dle_scc_rsvd_size;
u16 max_amsdu_limit;
@@ -2728,20 +2917,6 @@ struct rtw89_chip_info {
u8 btcx_desired;
u8 scbd;
u8 mailbox;
- u16 btc_fwinfo_buf;
-
- u8 fcxbtcrpt_ver;
- u8 fcxtdma_ver;
- u8 fcxslots_ver;
- u8 fcxcysta_ver;
- u8 fcxstep_ver;
- u8 fcxnullsta_ver;
- u8 fcxmreg_ver;
- u8 fcxgpiodbg_ver;
- u8 fcxbtver_ver;
- u8 fcxbtscan_ver;
- u8 fcxbtafh_ver;
- u8 fcxbtdevinfo_ver;
u8 afh_guard_ch;
const u8 *wl_rssi_thres;
@@ -2771,6 +2946,7 @@ struct rtw89_chip_info {
u8 dcfo_comp_sft;
const struct rtw89_imr_info *imr_info;
const struct rtw89_rrsr_cfgs *rrsr_cfgs;
+ u32 bss_clr_map_reg;
u32 dma_ch_mask;
const struct wiphy_wowlan_support *wowlan_stub;
};
@@ -2839,6 +3015,7 @@ static inline void rtw89_init_wait(struct rtw89_wait_info *wait)
enum rtw89_fw_type {
RTW89_FW_NORMAL = 1,
RTW89_FW_WOWLAN = 3,
+ RTW89_FW_NORMAL_CE = 5,
};
enum rtw89_fw_feature {
@@ -2848,6 +3025,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_CRASH_TRIGGER,
RTW89_FW_FEATURE_PACKET_DROP,
RTW89_FW_FEATURE_NO_DEEP_PS,
+ RTW89_FW_FEATURE_NO_LPS_PG,
};
struct rtw89_fw_suit {
@@ -3550,7 +3728,6 @@ struct rtw89_wow_param {
DECLARE_BITMAP(flags, RTW89_WOW_FLAG_NUM);
struct rtw89_wow_cam_info patterns[RTW89_MAX_PATTERN_NUM];
u8 pattern_cnt;
- struct list_head pkt_list;
};
struct rtw89_mcc_info {
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 8297e35bfa52..0e0e1483c099 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -615,6 +615,7 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
struct seq_file *m = (struct seq_file *)filp->private_data;
struct rtw89_debugfs_priv *debugfs_priv = m->private;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
char buf[32];
size_t buf_size;
int sel;
@@ -634,6 +635,12 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
return -EINVAL;
}
+ if (sel == RTW89_DBG_SEL_MAC_30 && chip->chip_id != RTL8852C) {
+ rtw89_info(rtwdev, "sel %d is address hole on chip %d\n", sel,
+ chip->chip_id);
+ return -EINVAL;
+ }
+
debugfs_priv->cb_data = sel;
rtw89_info(rtwdev, "select mac page dump %d\n", debugfs_priv->cb_data);
@@ -3347,6 +3354,31 @@ static void rtw89_dump_addr_cam(struct seq_file *m,
}
}
+__printf(3, 4)
+static void rtw89_dump_pkt_offload(struct seq_file *m, struct list_head *pkt_list,
+ const char *fmt, ...)
+{
+ struct rtw89_pktofld_info *info;
+ struct va_format vaf;
+ va_list args;
+
+ if (list_empty(pkt_list))
+ return;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ vaf.fmt = fmt;
+
+ seq_printf(m, "%pV", &vaf);
+
+ va_end(args);
+
+ list_for_each_entry(info, pkt_list, list)
+ seq_printf(m, "%d ", info->id);
+
+ seq_puts(m, "\n");
+}
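rtw89_dump_pkt_offload() forwards its variadic arguments through a struct va_format and the kernel's %pV specifier, so a single seq_printf() emits the caller's whole label before the list body. Outside the kernel the same shape is usually built on vprintf(); a sketch assuming plain stdio:

    #include <stdarg.h>
    #include <stdio.h>

    /* print a label, then a list body, only if the list is non-empty */
    __attribute__((format(printf, 2, 3)))
    static void dump_list(int n_items, const char *fmt, ...)
    {
        va_list args;
        int i;

        if (n_items == 0)
            return;                /* mirrors the list_empty() early out */

        va_start(args, fmt);
        vprintf(fmt, args);        /* userspace stand-in for "%pV" */
        va_end(args);

        for (i = 0; i < n_items; i++)
            printf("%d ", i);
        printf("\n");
    }

    int main(void)
    {
        dump_list(3, "\tpkt_ofld[%s]: ", "GENERAL");
        return 0;
    }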
+
static
void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
@@ -3357,6 +3389,7 @@ void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
+ rtw89_dump_pkt_offload(m, &rtwvif->general_pkt_list, "\tpkt_ofld[GENERAL]: ");
}
static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
@@ -3395,6 +3428,7 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
struct rtw89_debugfs_priv *debugfs_priv = m->private;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ u8 idx;
mutex_lock(&rtwdev->mutex);
@@ -3409,6 +3443,15 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
cam_info->sec_cam_map);
seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map),
cam_info->ba_cam_map);
+ seq_printf(m, "\tpkt_ofld: %*ph\n", (int)sizeof(rtwdev->pkt_offload),
+ rtwdev->pkt_offload);
+
+ for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
+ if (!(rtwdev->chip->support_bands & BIT(idx)))
+ continue;
+ rtw89_dump_pkt_offload(m, &rtwdev->scan_info.pkt_list[idx],
+ "\t\t[SCAN %u]: ", idx);
+ }
ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index d1de5e600836..079269bb5251 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -28,6 +28,7 @@ enum rtw89_debug_mask {
RTW89_DBG_STATE = BIT(17),
RTW89_DBG_WOW = BIT(18),
RTW89_DBG_UL_TB = BIT(19),
+ RTW89_DBG_CHAN = BIT(20),
RTW89_DBG_UNEXP = BIT(31),
};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index de1f23779fc6..0b73dc2e9ad7 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -10,6 +10,7 @@
#include "mac.h"
#include "phy.h"
#include "reg.h"
+#include "util.h"
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
struct sk_buff *skb);
@@ -91,6 +92,7 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
const u8 *fwdynhdr;
const u8 *bin;
u32 base_hdr_len;
+ u32 mssc_len = 0;
u32 i;
if (!info)
@@ -120,6 +122,14 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
fw += RTW89_FW_HDR_SIZE;
section_info = info->section_info;
for (i = 0; i < info->section_num; i++) {
+ section_info->type = GET_FWSECTION_HDR_SECTIONTYPE(fw);
+ if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
+ section_info->mssc = GET_FWSECTION_HDR_MSSC(fw);
+ mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
+ } else {
+ section_info->mssc = 0;
+ }
+
section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
if (GET_FWSECTION_HDR_CHECKSUM(fw))
section_info->len += FWDL_SECTION_CHKSUM_LEN;
@@ -132,7 +142,7 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
section_info++;
}
- if (fw_end != bin) {
+ if (fw_end != bin + mssc_len) {
rtw89_err(rtwdev, "[ERR]fw bin size\n");
return -EINVAL;
}
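The parser fix accounts for security sections: each one carries mssc extra signature blocks of FWDL_SECURITY_SIGLEN bytes that sit past the last section body, so the end-of-file check must be fw_end == bin + mssc_len rather than fw_end == bin. A sketch of the size accounting, with the constants invented:

    #include <stdio.h>

    #define SIGLEN 32              /* assumed signature block size */

    struct section { int is_security; unsigned int mssc, len; };

    int main(void)
    {
        const struct section secs[] = {
            { .is_security = 0, .mssc = 0, .len = 4096 },
            { .is_security = 1, .mssc = 2, .len = 1024 },
        };
        unsigned int body = 0, mssc_len = 0, i;

        for (i = 0; i < 2; i++) {
            body += secs[i].len;
            if (secs[i].is_security)
                mssc_len += secs[i].mssc * SIGLEN;
        }

        /* file is valid when its size is body + trailing signatures */
        printf("expect file size = %u + %u\n", body, mssc_len);
        return 0;
    }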
@@ -142,7 +152,7 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
- struct rtw89_fw_suit *fw_suit)
+ struct rtw89_fw_suit *fw_suit, bool nowarn)
{
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const u8 *mfw = fw_info->firmware->data;
@@ -173,7 +183,8 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
return 0;
}
- rtw89_err(rtwdev, "no suitable firmware found\n");
+ if (!nowarn)
+ rtw89_err(rtwdev, "no suitable firmware found\n");
return -ENOENT;
}
@@ -201,12 +212,13 @@ static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
}
static
-int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
+int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ bool nowarn)
{
struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
int ret;
- ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
+ ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
if (ret)
return ret;
@@ -245,6 +257,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
@@ -332,17 +345,27 @@ out:
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
int ret;
- ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL);
+ if (chip->try_ce_fw) {
+ ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
+ if (!ret)
+ goto normal_done;
+ }
+
+ ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
if (ret)
return ret;
+normal_done:
/* It still works if wowlan firmware isn't existing. */
- __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN);
+ __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
rtw89_fw_recognize_features(rtwdev);
+ rtw89_coex_recognize_ver(rtwdev);
+
return 0;
}
@@ -902,13 +925,12 @@ fail:
return ret;
}
-static int rtw89_fw_h2c_add_wow_fw_ofld(struct rtw89_dev *rtwdev,
+static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
enum rtw89_fw_pkt_ofld_type type,
u8 *id)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_pktofld_info *info;
struct sk_buff *skb;
int ret;
@@ -937,13 +959,13 @@ static int rtw89_fw_h2c_add_wow_fw_ofld(struct rtw89_dev *rtwdev,
if (!skb)
goto err;
- list_add_tail(&info->list, &rtw_wow->pkt_list);
ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
kfree_skb(skb);
if (ret)
- return ret;
+ goto err;
+ list_add_tail(&info->list, &rtwvif->general_pkt_list);
*id = info->id;
return 0;
@@ -952,13 +974,48 @@ err:
return -ENOMEM;
}
+void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool notify_fw)
+{
+ struct list_head *pkt_list = &rtwvif->general_pkt_list;
+ struct rtw89_pktofld_info *info, *tmp;
+
+ list_for_each_entry_safe(info, tmp, pkt_list, list) {
+ if (notify_fw)
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ rtw89_core_release_bit_map(rtwdev->pkt_offload,
+ info->id);
+ list_del(&info->list);
+ kfree(info);
+ }
+}
+
+void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
+{
+ struct rtw89_vif *rtwvif;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
+}
+
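The release helpers use list_for_each_entry_safe() because each node is freed inside the walk; the _safe variant caches the next pointer before the current entry is unlinked. The same idea in a freestanding sketch with a minimal singly linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    static void release_all(struct node **head)
    {
        struct node *cur = *head, *tmp;

        while (cur) {
            tmp = cur->next;       /* grab next before freeing */
            printf("releasing pkt id %d\n", cur->id);
            free(cur);
            cur = tmp;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            n->id = i;
            n->next = head;
            head = n;
        }
        release_all(&head);
        return 0;
    }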
#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
-int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
+int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, u8 macid)
{
+ u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
+ u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
+ u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
struct sk_buff *skb;
int ret;
+ rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
+ rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
+ rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
@@ -967,9 +1024,9 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
skb_put(skb, H2C_GENERAL_PKT_LEN);
SET_GENERAL_PKT_MACID(skb->data, macid);
SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
- SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
- SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
- SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
+ SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
+ SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
+ SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -1807,33 +1864,36 @@ fail:
#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
-#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
-#define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \
- H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \
- H2C_LEN_CXDRVHDR)
+#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
+ (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
+
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
struct rtw89_btc_wl_active_role *active = role_info->active_role;
struct sk_buff *skb;
+ u32 len;
u8 offset = 0;
u8 *cmd;
int ret;
int i;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
+ len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
return -ENOMEM;
}
- skb_put(skb, H2C_LEN_CXDRVINFO_ROLE);
+ skb_put(skb, len);
cmd = skb->data;
RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
- RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
@@ -1870,7 +1930,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, BTFC_SET,
SET_DRV_INFO, 0, 0,
- H2C_LEN_CXDRVINFO_ROLE);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1885,28 +1945,35 @@ fail:
return ret;
}
+#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
+ (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
+
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
struct sk_buff *skb;
+ u32 len;
u8 *cmd, offset;
int ret;
int i;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1);
+ len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
return -ENOMEM;
}
- skb_put(skb, H2C_LEN_CXDRVINFO_ROLE_V1);
+ skb_put(skb, len);
cmd = skb->data;
RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
- RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
@@ -1942,7 +2009,7 @@ int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
}
- offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
+ offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
@@ -1953,7 +2020,7 @@ int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, BTFC_SET,
SET_DRV_INFO, 0, 0,
- H2C_LEN_CXDRVINFO_ROLE_V1);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1971,8 +2038,8 @@ fail:
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
struct sk_buff *skb;
u8 *cmd;
@@ -1992,7 +2059,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
- if (chip->chip_id == RTL8852A)
+ if (ver->fcxctrl == 0)
RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -2112,6 +2179,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+ rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
return -ENOMEM;
}
skb_put(skb, H2C_LEN_PKT_OFLD);
@@ -2130,6 +2198,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
+ rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
goto fail;
}
@@ -2663,11 +2732,14 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
goto out;
}
- list_add_tail(&info->list, &scan_info->pkt_list[band]);
ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
- if (ret)
+ if (ret) {
+ kfree_skb(new);
+ kfree(info);
goto out;
+ }
+ list_add_tail(&info->list, &scan_info->pkt_list[band]);
kfree_skb(new);
}
out:
@@ -2738,7 +2810,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
if (ssid_num == 1 && req->ssids[0].ssid_len == 0) {
ch_info->tx_pkt = false;
if (!req->duration_mandatory)
- ch_info->period -= RTW89_DWELL_TIME;
+ ch_info->period -= RTW89_DWELL_TIME_6G;
}
}
@@ -2791,7 +2863,8 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
if (req->duration_mandatory)
ch_info->period = req->duration;
else if (channel->band == NL80211_BAND_6GHZ)
- ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME;
+ ch_info->period = RTW89_CHANNEL_TIME_6G +
+ RTW89_DWELL_TIME_6G;
else
ch_info->period = RTW89_CHANNEL_TIME;
@@ -3072,8 +3145,9 @@ int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
int ret;
if (enable) {
- ret = rtw89_fw_h2c_add_wow_fw_ofld(rtwdev, rtwvif,
- RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id);
+ ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_NULL_DATA,
+ &pkt_id);
if (ret)
return -EPERM;
}
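
The fw.c hunks above rework the packet-offload bookkeeping: an entry is linked into general_pkt_list only after the firmware has accepted the H2C command, and the failure paths now release the offload ID bitmap. A minimal sketch of that register-then-link ordering, with hypothetical names (fw_register_pkt and struct pkt_entry are illustrations, not driver API):

struct pkt_entry {
	struct list_head list;
	u8 id;
};

static int add_pkt_entry(struct list_head *pkt_list, struct pkt_entry *info,
			 struct sk_buff *skb)
{
	int ret;

	/* commit the packet to firmware first; this may fail */
	ret = fw_register_pkt(&info->id, skb);
	if (ret) {
		kfree(info);	/* never linked, so nothing to unlink */
		return ret;
	}

	/* only a fully registered entry ever enters the list */
	list_add_tail(&info->list, pkt_list);
	return 0;
}

With this ordering the release helpers (rtw89_fw_release_general_pkt_list_vif() above) can assume every list entry holds a valid firmware-side ID.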
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 4d2f9ea9e002..cae07e325326 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -171,6 +171,8 @@ struct rtw89_fw_hdr_section_info {
const u8 *addr;
u32 len;
u32 dladdr;
+ u32 mssc;
+ u8 type;
};
struct rtw89_fw_bin_info {
@@ -203,6 +205,7 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_DFS_CHAN_TIME 105
#define RTW89_OFF_CHAN_TIME 100
#define RTW89_DWELL_TIME 20
+#define RTW89_DWELL_TIME_6G 10
#define RTW89_SCAN_WIDTH 0
#define RTW89_SCANOFLD_MAX_SSID 8
#define RTW89_SCANOFLD_MAX_IE_LEN 512
@@ -480,14 +483,21 @@ static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
#define FW_EDCA_PARAM_CWMIN_MSK GENMASK(11, 8)
#define FW_EDCA_PARAM_AIFS_MSK GENMASK(7, 0)
+#define FWDL_SECURITY_SECTION_TYPE 9
+#define FWDL_SECURITY_SIGLEN 512
+
+#define GET_FWSECTION_HDR_DL_ADDR(fwhdr) \
+ le32_get_bits(*((const __le32 *)(fwhdr)), GENMASK(31, 0))
+#define GET_FWSECTION_HDR_SECTIONTYPE(fwhdr) \
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(27, 24))
#define GET_FWSECTION_HDR_SEC_SIZE(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(23, 0))
#define GET_FWSECTION_HDR_CHECKSUM(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), BIT(28))
#define GET_FWSECTION_HDR_REDL(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), BIT(29))
-#define GET_FWSECTION_HDR_DL_ADDR(fwhdr) \
- le32_get_bits(*((const __le32 *)(fwhdr)), GENMASK(31, 0))
+#define GET_FWSECTION_HDR_MSSC(fwhdr) \
+ le32_get_bits(*((const __le32 *)(fwhdr) + 2), GENMASK(31, 0))
#define GET_FW_HDR_MAJOR_VERSION(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(7, 0))
@@ -3209,16 +3219,16 @@ static inline struct rtw89_fw_c2h_attr *RTW89_SKB_C2H_CB(struct sk_buff *skb)
le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(1, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_H2C_FUNC(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_GROUP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(1, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_RETURN(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 2))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 2))
#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_FUNC(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
struct rtw89_mac_mcc_tsf_rpt {
u32 macid_x;
@@ -3232,30 +3242,30 @@ struct rtw89_mac_mcc_tsf_rpt {
static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE);
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_X(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_Y(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_GROUP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(17, 16))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(17, 16))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_X(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_X(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 6), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_STATUS(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(5, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(5, 0))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_GROUP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 6))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 6))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_MACID(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_LOW(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
#define RTW89_FW_HDR_SIZE 32
#define RTW89_FW_SECTION_HDR_SIZE 16
@@ -3508,7 +3518,11 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
+int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u8 macid);
+void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool notify_fw);
+void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev);
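
The fw.h changes shift every MCC C2H accessor from dword 0/1 to dword 2 and up, i.e. the report fields are parsed two dwords further in, apparently past the C2H header that the old macros skipped. Each accessor treats the message as an array of little-endian dwords and pulls one field with le32_get_bits(); a minimal sketch of the same shape (standalone function name is illustrative):

#include <linux/bitfield.h>

static u8 mcc_rpt_macid_x(const void *c2h)
{
	/* dword 2, bits 7:0 — the layout used by the macros above */
	return le32_get_bits(*((const __le32 *)c2h + 2), GENMASK(7, 0));
}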
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index cf9a0a3120a7..2e2a2b6eab09 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -623,7 +623,8 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
if (err != MAC_AX_ERR_L1_ERR_DMAC &&
err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
err != MAC_AX_ERR_L0_ERR_CMAC0 &&
- err != MAC_AX_ERR_L0_ERR_CMAC1)
+ err != MAC_AX_ERR_L0_ERR_CMAC1 &&
+ err != MAC_AX_ERR_RXI300)
return;
rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
@@ -663,6 +664,8 @@ u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
err = MAC_AX_ERR_CPU_EXCEPTION;
else if (err_scnr == RTW89_WCPU_ASSERTION)
err = MAC_AX_ERR_ASSERTION;
+ else if (err_scnr == RTW89_RXI300_ERROR)
+ err = MAC_AX_ERR_RXI300;
rtw89_fw_st_dbg_dump(rtwdev);
rtw89_mac_dump_err_status(rtwdev, err);
@@ -3411,6 +3414,11 @@ int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw)
val |= B_AX_WCPU_FWDL_EN;
rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val);
+
+ if (rtwdev->chip->chip_id == RTL8852B)
+ rtw89_write32_mask(rtwdev, R_AX_SEC_CTRL,
+ B_AX_SEC_IDMEM_SIZE_CONFIG_MASK, 0x2);
+
rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK,
boot_reason);
rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
@@ -3918,19 +3926,14 @@ static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
B_AX_TBTT_SHIFT_OFST_MASK, val);
}
-static void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- struct rtw89_vif *rtwvif_src, u8 offset,
- int *n_offset)
+void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_vif *rtwvif_src,
+ u16 offset_tu)
{
u32 val, reg;
- if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif == rtwvif_src)
- return;
-
- /* adjust offset randomly to avoid beacon conflict */
- offset = offset - offset / 4 + get_random_u32() % (offset / 2);
- val = RTW89_PORT_OFFSET_MS_TO_32US((*n_offset)++, offset);
+ val = RTW89_PORT_OFFSET_TU_TO_32US(offset_tu);
reg = rtw89_mac_reg_by_idx(R_AX_PORT0_TSF_SYNC + rtwvif->port * 4,
rtwvif->mac_idx);
@@ -3939,6 +3942,22 @@ static void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
rtw89_write32_set(rtwdev, reg, B_AX_SYNC_NOW);
}
+static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_vif *rtwvif_src,
+ u8 offset, int *n_offset)
+{
+ if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif == rtwvif_src)
+ return;
+
+ /* adjust offset randomly to avoid beacon conflict */
+ offset = offset - offset / 4 + get_random_u32() % (offset / 2);
+ rtw89_mac_port_tsf_sync(rtwdev, rtwvif, rtwvif_src,
+ (*n_offset) * offset);
+
+ (*n_offset)++;
+}
+
static void rtw89_mac_port_tsf_resync_all(struct rtw89_dev *rtwdev)
{
struct rtw89_vif *src = NULL, *tmp;
@@ -3958,7 +3977,7 @@ static void rtw89_mac_port_tsf_resync_all(struct rtw89_dev *rtwdev)
offset /= (vif_aps + 1);
rtw89_for_each_rtwvif(rtwdev, tmp)
- rtw89_mac_port_tsf_sync(rtwdev, tmp, src, offset, &n_offset);
+ rtw89_mac_port_tsf_sync_rand(rtwdev, tmp, src, offset, &n_offset);
}
int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -4050,6 +4069,24 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
return 0;
}
+int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u64 *tsf)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u32 tsf_low, tsf_high;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, rtwvif->mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ tsf_low = rtw89_read32_port(rtwdev, rtwvif, p->tsftr_l);
+ tsf_high = rtw89_read32_port(rtwdev, rtwvif, p->tsftr_h);
+ *tsf = (u64)tsf_high << 32 | tsf_low;
+
+ return 0;
+}
+
static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
struct cfg80211_bss *bss,
void *data)
@@ -4263,12 +4300,12 @@ rtw89_mac_c2h_mcc_rcv_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
case H2C_FUNC_MCC_SET_DURATION:
break;
default:
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"invalid MCC C2H RCV ACK: func %d\n", func);
return;
}
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC C2H RCV ACK: group %d, func %d\n", group, func);
}
@@ -4296,12 +4333,12 @@ rtw89_mac_c2h_mcc_req_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
case H2C_FUNC_DEL_MCC_GROUP:
case H2C_FUNC_RESET_MCC_GROUP:
default:
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"invalid MCC C2H REQ ACK: func %d\n", func);
return;
}
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC C2H REQ ACK: group %d, func %d, return code %d\n",
group, func, retcode);
@@ -4329,6 +4366,11 @@ rtw89_mac_c2h_mcc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
rpt->tsf_y_low = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h->data);
rpt->tsf_y_high = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h->data);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC C2H TSF RPT: macid %d> %llu, macid %d> %llu\n",
+ rpt->macid_x, (u64)rpt->tsf_x_high << 32 | rpt->tsf_x_low,
+ rpt->macid_y, (u64)rpt->tsf_y_high << 32 | rpt->tsf_y_low);
+
cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_REQ_TSF);
rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
}
@@ -4386,14 +4428,14 @@ rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rsp = false;
break;
default:
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"invalid MCC C2H STS RPT: status %d\n", status);
return;
}
- rtw89_debug(rtwdev, RTW89_DBG_FW,
- "MCC C2H STS RPT: group %d, macid %d, status %d, tsf {%d, %d}\n",
- group, macid, status, tsf_low, tsf_high);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC C2H STS RPT: group %d, macid %d, status %d, tsf %llu\n",
+ group, macid, status, (u64)tsf_high << 32 | tsf_low);
if (!rsp)
return;
@@ -4512,7 +4554,7 @@ EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx);
- int ret = 0;
+ int ret;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret)
@@ -4520,7 +4562,7 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
if (!enable) {
rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN);
- return ret;
+ return 0;
}
rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN |
@@ -4530,7 +4572,7 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK,
RTW89_PRPT_DEST_HOST);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
@@ -4865,9 +4907,16 @@ EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
{
- u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 val = 0;
+
+ if (chip->chip_id == RTL8852C)
+ return false;
+ else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
+ val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3,
+ B_AX_LTE_MUX_CTRL_PATH >> 24);
- return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val);
+ return !!val;
}
u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
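
rtw89_mac_port_get_tsf() above stitches the 64-bit TSF together from two 32-bit port registers. The composition itself is a plain shift-or; note the helper reads the low half first, so a caller needing a torn-read-proof value would have to re-read around a low-word wrap. A standalone sketch of the composition only:

static u64 tsf_from_halves(u32 tsf_high, u32 tsf_low)
{
	return (u64)tsf_high << 32 | tsf_low;
}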
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index f0b684b205f1..8064d3953d7f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -168,7 +168,7 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
-#define RTW89_PORT_OFFSET_MS_TO_32US(n, shift_ms) ((n) * (shift_ms) * 1000 / 32)
+#define RTW89_PORT_OFFSET_TU_TO_32US(shift_tu) ((shift_tu) * 1024 / 32)
enum rtw89_mac_dbg_port_sel {
/* CMAC 0 related */
@@ -623,6 +623,7 @@ struct rtw89_mac_dle_dfi_qempty {
};
enum rtw89_mac_error_scenario {
+ RTW89_RXI300_ERROR = 1,
RTW89_WCPU_CPU_EXCEPTION = 2,
RTW89_WCPU_ASSERTION = 3,
};
@@ -769,6 +770,7 @@ enum mac_ax_err_info {
MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT = 0x2599,
MAC_AX_ERR_CPU_EXCEPTION = 0x3000,
MAC_AX_ERR_ASSERTION = 0x4000,
+ MAC_AX_ERR_RXI300 = 0x5000,
MAC_AX_GET_ERR_MAX,
MAC_AX_DUMP_SHAREBUFF_INDICATOR = 0x80000000,
@@ -828,6 +830,15 @@ static inline u32 rtw89_mac_reg_by_port(u32 base, u8 port, u8 mac_idx)
}
static inline u32
+rtw89_read32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ return rtw89_read32(rtwdev, reg);
+}
+
+static inline u32
rtw89_read32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u32 base, u32 mask)
{
@@ -906,6 +917,12 @@ int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val);
int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_vif *rtwvif_src,
+ u16 offset_tu);
+int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u64 *tsf);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
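
The replaced macro changes the offset unit from milliseconds to TUs. Since 1 TU = 1024 us and the hardware field counts 32 us ticks, the conversion reduces to a multiply by 32; for example, a 100 TU shift is 100 * 1024 / 32 = 3200 ticks. As a compile-time check:

#define RTW89_PORT_OFFSET_TU_TO_32US(shift_tu) ((shift_tu) * 1024 / 32)

_Static_assert(RTW89_PORT_OFFSET_TU_TO_32US(100) == 3200,
	       "100 TU must be 3200 ticks of 32 us");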
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index f9b95c52916b..d43281f7335b 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -135,6 +135,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
rtwvif->hit_rule = 0;
ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ INIT_LIST_HEAD(&rtwvif->general_pkt_list);
ret = rtw89_mac_add_vif(rtwdev, rtwvif);
if (ret) {
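
The INIT_LIST_HEAD() added above matters because the new release helpers walk general_pkt_list with list_for_each_entry_safe(); iterating a never-initialized head is undefined behavior. An empty kernel list is essentially a head pointing at itself, as in this sketch:

static inline void init_list_head_sketch(struct list_head *head)
{
	/* roughly what INIT_LIST_HEAD() does: empty list is self-referential */
	head->next = head;
	head->prev = head;
}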
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 1c4500ba777c..ec8bb5f10482 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -1384,7 +1384,7 @@ static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx
return 0;
}
-static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
+const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
[RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
@@ -1399,11 +1399,24 @@ static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
+EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
+
+const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
+ [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1},
+ [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1},
+ [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
+};
+EXPORT_SYMBOL(rtw89_bd_ram_table_single);
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
@@ -3385,7 +3398,7 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
- if (chip_id == RTL8852A) {
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
if (enable)
ret = rtw89_pci_config_byte_set(rtwdev,
RTW89_PCIE_L1_CTRL,
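
The two exported tables carve one shared TX descriptor RAM into per-channel windows, each entry being (start_idx, max_num, min_num): the dual-port layout ends at 60 + 4 = 64 entries, the single-port one at 28 + 4 = 32. A quick sanity-check sketch for such a layout (hypothetical helper, not driver code):

static bool bd_ram_layout_ok(const struct rtw89_pci_bd_ram *tbl,
			     int n, u8 ram_size)
{
	u8 next = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].max_num)
			continue;		/* channel unused on this chip */
		if (tbl[i].start_idx < next)
			return false;		/* overlaps previous window */
		next = tbl[i].start_idx + tbl[i].max_num;
	}

	return next <= ram_size;
}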
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 7d033501d4d9..1e19740db8c5 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -750,6 +750,12 @@ struct rtw89_pci_ch_dma_addr_set {
struct rtw89_pci_ch_dma_addr rx[RTW89_RXCH_NUM];
};
+struct rtw89_pci_bd_ram {
+ u8 start_idx;
+ u8 max_num;
+ u8 min_num;
+};
+
struct rtw89_pci_info {
enum mac_ax_bd_trunc_mode txbd_trunc_mode;
enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
@@ -785,6 +791,7 @@ struct rtw89_pci_info {
u32 tx_dma_ch_mask;
const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
+ const struct rtw89_pci_bd_ram (*bd_ram_table)[RTW89_TXCH_NUM];
int (*ltr_set)(struct rtw89_dev *rtwdev, bool en);
u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev,
@@ -798,12 +805,6 @@ struct rtw89_pci_info {
struct rtw89_pci_isrs *isrs);
};
-struct rtw89_pci_bd_ram {
- u8 start_idx;
- u8 max_num;
- u8 min_num;
-};
-
struct rtw89_pci_tx_data {
dma_addr_t dma;
};
@@ -1057,6 +1058,8 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
extern const struct dev_pm_ops rtw89_pm_ops;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
+extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM];
+extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM];
struct pci_device_id;
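
The new bd_ram_table member is declared as a pointer to an array of RTW89_TXCH_NUM entries rather than a plain element pointer, so the table length stays part of the type and a chip file must hand over a whole table (&rtw89_bd_ram_table_single), never a truncated one. Dereferencing it yields the element pointer again, exactly as rtw89_pci_reset_trx_rings() does above:

static void bd_ram_table_example(const struct rtw89_pci_info *info)
{
	/* *info->bd_ram_table decays the array back to an element pointer */
	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;

	(void)bd_ram_table[RTW89_TXCH_ACH0].max_num;
}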
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 017710c580c7..d9f61ba3d176 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -367,6 +367,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
}
ra->bw_cap = bw_mode;
+ ra->er_cap = rtwsta->er_cap;
ra->mode_ctrl = mode;
ra->macid = rtwsta->mac_id;
ra->stbc_cap = stbc_en;
@@ -2041,6 +2042,7 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 max_nss_num = rtwdev->chip->rf_path_num;
static const u8 rs[] = {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -2063,7 +2065,7 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
BUILD_BUG_ON(rtw89_rs_idx_max[RTW89_RS_HEDCM] % 4);
addr = R_AX_PWR_BY_RATE;
- for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) {
+ for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
for (i = 0; i < ARRAY_SIZE(rs); i++) {
if (cur.nss >= rtw89_rs_nss_max[rs[i]])
continue;
@@ -2126,6 +2128,7 @@ void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 max_ntx_num = rtwdev->chip->rf_path_num;
struct rtw89_txpwr_limit lmt;
u8 ch = chan->channel;
u8 bw = chan->band_width;
@@ -2140,7 +2143,7 @@ void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
RTW89_TXPWR_LMT_PAGE_SIZE);
addr = R_AX_PWR_LMT;
- for (i = 0; i < RTW89_NTX_NUM; i++) {
+ for (i = 0; i < max_ntx_num; i++) {
rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt, i);
ptr = (s8 *)&lmt;
@@ -2161,6 +2164,7 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 max_ntx_num = rtwdev->chip->rf_path_num;
struct rtw89_txpwr_limit_ru lmt_ru;
u8 ch = chan->channel;
u8 bw = chan->band_width;
@@ -2175,7 +2179,7 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
RTW89_TXPWR_LMT_RU_PAGE_SIZE);
addr = R_AX_PWR_RU_LMT;
- for (i = 0; i < RTW89_NTX_NUM; i++) {
+ for (i = 0; i < max_ntx_num; i++) {
rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru, i);
ptr = (s8 *)&lmt_ru;
@@ -4116,6 +4120,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
u8 bss_color;
@@ -4124,11 +4129,11 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
bss_color = vif->bss_conf.he_bss_color.color;
- rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0, 0x1,
- phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_TGT, bss_color,
+ rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_VLD0, 0x1,
phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
+ rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
+ bss_color, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
vif->cfg.aid, phy_idx);
}
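
The three loop changes in phy.c share one idea: bound the per-NSS and per-TX-path register programming by the chip's real RF path count (rtwdev->chip->rf_path_num) instead of the driver-wide maxima RTW89_NSS_2 and RTW89_NTX_NUM, so a chip with fewer paths never writes rows it does not own. A generic sketch of the shape (write_row stands in for the per-row register writes):

static void set_txpwr_rows(u8 rf_path_num, void (*write_row)(u8 path))
{
	u8 path;

	/* never program rows beyond what the chip actually has */
	for (path = 0; path < rf_path_num; path++)
		write_row(path);
}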
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 5324e645728b..600257909df2 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -275,6 +275,9 @@
#define B_AX_S1_LDO2PWRCUT_F BIT(23)
#define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21)
+#define R_AX_SEC_CTRL 0x0C00
+#define B_AX_SEC_IDMEM_SIZE_CONFIG_MASK GENMASK(17, 16)
+
#define R_AX_FILTER_MODEL_ADDR 0x0C04
#define R_AX_HAXI_INIT_CFG1 0x1000
@@ -3559,6 +3562,7 @@
#define RR_MOD_IQK GENMASK(19, 4)
#define RR_MOD_DPK GENMASK(19, 5)
#define RR_MOD_MASK GENMASK(19, 16)
+#define RR_MOD_DCK GENMASK(14, 10)
#define RR_MOD_RGM GENMASK(13, 4)
#define RR_MOD_V_DOWN 0x0
#define RR_MOD_V_STANDBY 0x1
@@ -3572,6 +3576,7 @@
#define RR_MOD_NBW GENMASK(15, 14)
#define RR_MOD_M_RXG GENMASK(13, 4)
#define RR_MOD_M_RXBB GENMASK(9, 5)
+#define RR_MOD_LO_SEL BIT(1)
#define RR_MODOPT 0x01
#define RR_MODOPT_M_TXPWR GENMASK(5, 0)
#define RR_WLSEL 0x02
@@ -3638,6 +3643,7 @@
#define RR_LUTWA_M2 GENMASK(4, 0)
#define RR_LUTWD1 0x3e
#define RR_LUTWD0 0x3f
+#define RR_LUTWD0_MB GENMASK(11, 6)
#define RR_LUTWD0_LB GENMASK(5, 0)
#define RR_TM 0x42
#define RR_TM_TRI BIT(19)
@@ -3671,6 +3677,8 @@
#define RR_TXRSV_GAPK BIT(19)
#define RR_BIAS 0x5e
#define RR_BIAS_GAPK BIT(19)
+#define RR_TXAC 0x5f
+#define RR_TXAC_IQG GENMASK(3, 0)
#define RR_BIASA 0x60
#define RR_BIASA_TXG GENMASK(15, 12)
#define RR_BIASA_TXA GENMASK(19, 16)
@@ -3729,10 +3737,14 @@
#define RR_XALNA2_SW2 GENMASK(9, 8)
#define RR_XALNA2_SW GENMASK(1, 0)
#define RR_DCK 0x92
+#define RR_DCK_S1 GENMASK(19, 16)
+#define RR_DCK_TIA GENMASK(15, 9)
#define RR_DCK_DONE GENMASK(7, 5)
#define RR_DCK_FINE BIT(1)
#define RR_DCK_LV BIT(0)
#define RR_DCK1 0x93
+#define RR_DCK1_S1 GENMASK(19, 16)
+#define RR_DCK1_TIA GENMASK(15, 9)
#define RR_DCK1_DONE BIT(5)
#define RR_DCK1_CLR GENMASK(3, 0)
#define RR_DCK1_SEL BIT(3)
@@ -3781,11 +3793,14 @@
#define RR_LUTDBG 0xdf
#define RR_LUTDBG_TIA BIT(12)
#define RR_LUTDBG_LOK BIT(2)
+#define RR_LUTPLL 0xec
+#define RR_CAL_RW BIT(19)
#define RR_LUTWE2 0xee
#define RR_LUTWE2_RTXBW BIT(2)
#define RR_LUTWE 0xef
#define RR_LUTWE_LOK BIT(2)
#define RR_RFC 0xf0
+#define RR_WCAL BIT(16)
#define RR_RFC_CKEN BIT(1)
#define R_UPD_P0 0x0000
@@ -4090,12 +4105,13 @@
#define R_MUIC 0x40F8
#define B_MUIC_EN BIT(0)
#define R_DCFO 0x4264
-#define B_DCFO GENMASK(1, 0)
+#define B_DCFO GENMASK(7, 0)
#define R_SEG0CSI 0x42AC
-#define B_SEG0CSI_IDX GENMASK(11, 0)
+#define B_SEG0CSI_IDX GENMASK(10, 0)
#define R_SEG0CSI_EN 0x42C4
#define B_SEG0CSI_EN BIT(23)
#define R_BSS_CLR_MAP 0x43ac
+#define R_BSS_CLR_MAP_V1 0x43B0
#define B_BSS_CLR_MAP_VLD0 BIT(28)
#define B_BSS_CLR_MAP_TGT GENMASK(27, 22)
#define B_BSS_CLR_MAP_STAID GENMASK(21, 11)
@@ -4725,6 +4741,7 @@
#define R_DRCK_FH 0xC094
#define B_DRCK_LAT BIT(9)
#define R_DRCK 0xC0C4
+#define B_DRCK_MUL GENMASK(21, 17)
#define B_DRCK_IDLE BIT(9)
#define B_DRCK_EN BIT(6)
#define B_DRCK_VAL GENMASK(4, 0)
@@ -4742,9 +4759,13 @@
#define B_PATH0_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26)
#define R_P0_CFCH_BW0 0xC0D4
#define B_P0_CFCH_BW0 GENMASK(27, 26)
+#define B_P0_CFCH_EN GENMASK(14, 11)
+#define B_P0_CFCH_CTL GENMASK(10, 7)
#define R_P0_CFCH_BW1 0xC0D8
#define B_P0_CFCH_EX BIT(13)
#define B_P0_CFCH_BW1 GENMASK(8, 5)
+#define R_ADCMOD 0xC0E8
+#define B_ADCMOD_LP GENMASK(31, 16)
#define R_ADDCK0D 0xC0F0
#define B_ADDCK0D_VAL2 GENMASK(31, 26)
#define B_ADDCK0D_VAL GENMASK(25, 16)
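
All the new register definitions follow the driver's GENMASK()/BIT() field convention, and values are packed or extracted with the bitfield helpers; the RX DCK rework below uses exactly this with the new RR_LUTWD0_MB field. A minimal sketch:

#include <linux/bitfield.h>

static u32 pack_lutwd0(u8 q_6bit, u8 i_6bit)
{
	return u32_encode_bits(q_6bit, RR_LUTWD0_MB) |	/* bits 11:6 */
	       u32_encode_bits(i_6bit, RR_LUTWD0_LB);	/* bits 5:0  */
}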
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index eff6519cf019..9c42b6abd223 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -1035,7 +1035,7 @@ static void rtw8852a_spur_elimination(struct rtw89_dev *rtwdev, u8 central_ch)
0x210);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
0x210);
- rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x7c0);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, 0x7c0);
rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
B_P0_NBIIDX_NOTCH_EN, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
@@ -1047,7 +1047,7 @@ static void rtw8852a_spur_elimination(struct rtw89_dev *rtwdev, u8 central_ch)
0x210);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
0x210);
- rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x40);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, 0x40);
rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
B_P0_NBIIDX_NOTCH_EN, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
@@ -1059,7 +1059,7 @@ static void rtw8852a_spur_elimination(struct rtw89_dev *rtwdev, u8 central_ch)
0x2d0);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
0x2d0);
- rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x740);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, 0x740);
rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
B_P0_NBIIDX_NOTCH_EN, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
@@ -1878,9 +1878,13 @@ static
void rtw8852a_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
u32 val;
+ if (ver->fcxbtcrpt != 1)
+ return;
+
val = rtw89_read32(rtwdev, R_AX_BT_STAST_HIGH);
cx->cnt_bt[BTC_BCNT_HIPRI_TX] = FIELD_GET(B_AX_STATIS_BT_HI_TX_MASK, val);
cx->cnt_bt[BTC_BCNT_HIPRI_RX] = FIELD_GET(B_AX_STATIS_BT_HI_RX_MASK, val);
@@ -2051,6 +2055,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.chip_id = RTL8852A,
.ops = &rtw8852a_chip_ops,
.fw_name = "rtw89/rtw8852a_fw.bin",
+ .try_ce_fw = false,
.fifo_size = 458752,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 3500,
@@ -2106,20 +2111,6 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
- .btc_fwinfo_buf = 1024,
-
- .fcxbtcrpt_ver = 1,
- .fcxtdma_ver = 1,
- .fcxslots_ver = 1,
- .fcxcysta_ver = 2,
- .fcxstep_ver = 2,
- .fcxnullsta_ver = 1,
- .fcxmreg_ver = 1,
- .fcxgpiodbg_ver = 1,
- .fcxbtver_ver = 1,
- .fcxbtscan_ver = 1,
- .fcxbtafh_ver = 1,
- .fcxbtdevinfo_ver = 1,
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852a_wl_rssi_thres,
@@ -2149,6 +2140,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dcfo_comp_sft = 3,
.imr_info = &rtw8852a_imr_info,
.rrsr_cfgs = &rtw8852a_rrsr_cfgs,
+ .bss_clr_map_reg = R_BSS_CLR_MAP,
.dma_ch_mask = 0,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852a,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index 582ff0d3a9ea..cd6c39b7f802 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -1643,7 +1643,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[IQK]==========IQK strat!!!!!==========\n");
+ "[IQK]==========IQK start!!!!!==========\n");
iqk_info->iqk_times++;
iqk_info->kcount = 0;
iqk_info->version = RTW8852A_IQK_VER;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 0cd8c0c44d19..d835a44a1d0d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -44,6 +44,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+ .bd_ram_table = &rtw89_bd_ram_table_dual,
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index b635ac1d1ca2..ee8dba7e0074 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -1618,6 +1618,7 @@ static void rtw8852b_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
u8 tx_shape_idx,
enum rtw89_phy_idx phy_idx)
{
@@ -1637,7 +1638,6 @@ static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
__DECL_DFIR_PARAM(sharp_14,
0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A);
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
const u32 *param;
u32 addr;
@@ -1678,7 +1678,7 @@ static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev,
u8 tx_shape_ofdm = rtw89_8852b_tx_shape[band][RTW89_RS_OFDM][regd];
if (band == RTW89_BAND_2G)
- rtw8852b_bb_set_tx_shape_dfir(rtwdev, tx_shape_cck, phy_idx);
+ rtw8852b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);
rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG,
tx_shape_ofdm);
@@ -1720,7 +1720,7 @@ void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
pw_ofst = max_t(s8, pw_ofst - 3, -16);
reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst);
}
static int
@@ -2423,13 +2423,14 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.btc_update_bt_cnt = rtw8852b_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852b_btc_wl_s1_standby,
.btc_set_wl_rx_gain = rtw8852b_btc_set_wl_rx_gain,
- .btc_set_policy = rtw89_btc_set_policy,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
};
const struct rtw89_chip_info rtw8852b_chip_info = {
.chip_id = RTL8852B,
.ops = &rtw8852b_chip_ops,
.fw_name = "rtw89/rtw8852b_fw.bin",
+ .try_ce_fw = true,
.fifo_size = 196608,
.dle_scc_rsvd_size = 98304,
.max_amsdu_limit = 3500,
@@ -2437,6 +2438,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.rsvd_ple_ofst = 0x2f800,
.hfc_param_ini = rtw8852b_hfc_param_ini_pcie,
.dle_mem = rtw8852b_dle_mem_pcie,
+ .wde_qempty_acq_num = 4,
+ .wde_qempty_mgq_sel = 4,
.rf_base_addr = {0xe000, 0xf000},
.pwr_on_seq = NULL,
.pwr_off_seq = NULL,
@@ -2483,20 +2486,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.btcx_desired = 0x5,
.scbd = 0x1,
.mailbox = 0x1,
- .btc_fwinfo_buf = 1024,
-
- .fcxbtcrpt_ver = 1,
- .fcxtdma_ver = 1,
- .fcxslots_ver = 1,
- .fcxcysta_ver = 2,
- .fcxstep_ver = 2,
- .fcxnullsta_ver = 1,
- .fcxmreg_ver = 1,
- .fcxgpiodbg_ver = 1,
- .fcxbtver_ver = 1,
- .fcxbtscan_ver = 1,
- .fcxbtafh_ver = 1,
- .fcxbtdevinfo_ver = 1,
+
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852b_wl_rssi_thres,
.bt_rssi_thres = rtw89_btc_8852b_bt_rssi_thres,
@@ -2525,6 +2515,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dcfo_comp_sft = 3,
.imr_info = &rtw8852b_imr_info,
.rrsr_cfgs = &rtw8852b_rrsr_cfgs,
+ .bss_clr_map_reg = R_BSS_CLR_MAP_V1,
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index 0ef2ca8efeb0..ecf39d2d9f81 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+ .bd_ram_table = &rtw89_bd_ram_table_single,
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index a87482cc25f5..d2dde21d3daf 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1968,6 +1968,7 @@ static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
u8 tx_shape_idx,
enum rtw89_phy_idx phy_idx)
{
@@ -1991,7 +1992,6 @@ static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
__DECL_DFIR_ADDR(filter,
0x45BC, 0x45CC, 0x45D0, 0x45D4, 0x45D8, 0x45C0,
0x45C4, 0x45C8);
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
const u32 *param;
int i;
@@ -2032,7 +2032,7 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd];
if (band == RTW89_BAND_2G)
- rtw8852c_bb_set_tx_shape_dfir(rtwdev, tx_shape_cck, phy_idx);
+ rtw8852c_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);
rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev,
(enum rtw89_mac_idx)phy_idx,
@@ -2857,6 +2857,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.chip_id = RTL8852C,
.ops = &rtw8852c_chip_ops,
.fw_name = "rtw89/rtw8852c_fw.bin",
+ .try_ce_fw = false,
.fifo_size = 458752,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 8000,
@@ -2915,20 +2916,6 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
- .btc_fwinfo_buf = 1280,
-
- .fcxbtcrpt_ver = 4,
- .fcxtdma_ver = 3,
- .fcxslots_ver = 1,
- .fcxcysta_ver = 3,
- .fcxstep_ver = 3,
- .fcxnullsta_ver = 2,
- .fcxmreg_ver = 1,
- .fcxgpiodbg_ver = 1,
- .fcxbtver_ver = 1,
- .fcxbtscan_ver = 1,
- .fcxbtafh_ver = 1,
- .fcxbtdevinfo_ver = 1,
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852c_wl_rssi_thres,
@@ -2959,6 +2946,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dcfo_comp_sft = 5,
.imr_info = &rtw8852c_imr_info,
.rrsr_cfgs = &rtw8852c_rrsr_cfgs,
+ .bss_clr_map_reg = R_BSS_CLR_MAP,
.dma_ch_mask = 0,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852c,
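
As in the 8852B version above, rtw8852c_bb_set_tx_shape_dfir() now receives the channel descriptor from its caller instead of re-reading the RTW89_SUB_ENTITY_0 global, so helper and caller always agree on the same channel snapshot:

	rtw8852c_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);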
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 60cd676fe22c..2c0bc3a4ab3b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -11,6 +11,15 @@
#include "rtw8852c_rfk_table.h"
#include "rtw8852c_table.h"
+struct rxck_def {
+ u32 ctl;
+ u32 en;
+ u32 bw0;
+ u32 bw1;
+ u32 mul;
+ u32 lp;
+};
+
#define _TSSI_DE_MASK GENMASK(21, 12)
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
@@ -26,7 +35,7 @@ static const u32 rtw8852c_backup_bb_regs[] = {
};
static const u32 rtw8852c_backup_rf_regs[] = {
- 0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005
+ 0xdf, 0x5f, 0x8f, 0x97, 0xa3, 0x5, 0x10005
};
#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
@@ -59,6 +68,13 @@ static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
{0x81a8, 0x81c4, 0x81c8, 0x81e8},
};
+static const u8 _dck_addr_bs[RF_PATH_NUM_8852C] = {0x0, 0x10};
+static const u8 _dck_addr[RF_PATH_NUM_8852C] = {0xc, 0x1c};
+
+static const struct rxck_def _ck480M = {0x8, 0x2, 0x3, 0xf, 0x0, 0x9};
+static const struct rxck_def _ck960M = {0x8, 0x2, 0x2, 0x8, 0x0, 0x9};
+static const struct rxck_def _ck1920M = {0x8, 0x0, 0x2, 0x4, 0x6, 0x9};
+
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
@@ -337,7 +353,7 @@ static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
(dack->dadck_d[path][index] << 14);
addr = 0xc210 + offset;
rtw89_phy_write32(rtwdev, addr, val32);
- rtw89_phy_write32_set(rtwdev, addr, BIT(1));
+ rtw89_phy_write32_set(rtwdev, addr, BIT(0));
}
static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
@@ -437,6 +453,8 @@ static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
enum adc_ck ck)
{
+ const struct rxck_def *def;
+
rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
if (!force)
@@ -444,6 +462,26 @@ static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
+
+ switch (ck) {
+ case ADC_480M:
+ def = &_ck480M;
+ break;
+ case ADC_960M:
+ def = &_ck960M;
+ break;
+ case ADC_1920M:
+ default:
+ def = &_ck1920M;
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_CTL, def->ctl);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_EN, def->en);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, def->bw0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, def->bw1);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK | (path << 8), B_DRCK_MUL, def->mul);
+ rtw89_phy_write32_mask(rtwdev, R_ADCMOD | (path << 8), B_ADCMOD_LP, def->lp);
}
static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
@@ -627,8 +665,6 @@ static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf);
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
break;
@@ -636,8 +672,6 @@ static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd);
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
break;
@@ -645,8 +679,6 @@ static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
break;
@@ -1410,8 +1442,6 @@ static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
@@ -1536,6 +1566,155 @@ static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool forc
}
}
+static void _rx_dck_value_rewrite(struct rtw89_dev *rtwdev, u8 path, u8 addr,
+ u8 val_i, u8 val_q)
+{
+ u32 ofst_val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] rewrite val_i = 0x%x, val_q = 0x%x\n", val_i, val_q);
+
+ /* val_i and val_q are 7 bits, and the target fields are 6 bits. */
+ ofst_val = u32_encode_bits(val_q >> 1, RR_LUTWD0_MB) |
+ u32_encode_bits(val_i >> 1, RR_LUTWD0_LB);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, MASKBYTE0, addr);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] Final val_i = 0x%x, val_q = 0x%x\n",
+ u32_get_bits(ofst_val, RR_LUTWD0_LB) << 1,
+ u32_get_bits(ofst_val, RR_LUTWD0_MB) << 1);
+}
+
+static bool _rx_dck_rek_check(struct rtw89_dev *rtwdev, u8 path)
+{
+ u8 i_even_bs, q_even_bs;
+ u8 i_odd_bs, q_odd_bs;
+ u8 i_even, q_even;
+ u8 i_odd, q_odd;
+ const u8 th = 10;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
+ i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
+ _dck_addr_bs[i], i_even_bs, q_even_bs);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
+ i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
+ _dck_addr[i], i_even, q_even);
+
+ if (abs(i_even_bs - i_even) > th || abs(q_even_bs - q_even) > th)
+ return true;
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
+ i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
+ _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
+ i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
+ _dck_addr[i] + 1, i_odd, q_odd);
+
+ if (abs(i_odd_bs - i_odd) > th || abs(q_odd_bs - q_odd) > th)
+ return true;
+ }
+
+ return false;
+}
+
+static void _rx_dck_fix_if_need(struct rtw89_dev *rtwdev, u8 path, u8 addr,
+ u8 val_i_bs, u8 val_q_bs, u8 val_i, u8 val_q)
+{
+ const u8 th = 10;
+
+ if ((abs(val_i_bs - val_i) <= th) && (abs(val_q_bs - val_q) <= th)) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] offset check PASS!!\n");
+ return;
+ }
+
+ if (abs(val_i_bs - val_i) > th) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] val_i over TH (0x%x / 0x%x)\n", val_i_bs, val_i);
+ val_i = val_i_bs;
+ }
+
+ if (abs(val_q_bs - val_q) > th) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] val_q over TH (0x%x / 0x%x)\n", val_q_bs, val_q);
+ val_q = val_q_bs;
+ }
+
+ _rx_dck_value_rewrite(rtwdev, path, addr, val_i, val_q);
+}
+
+static void _rx_dck_recover(struct rtw89_dev *rtwdev, u8 path)
+{
+ u8 i_even_bs, q_even_bs;
+ u8 i_odd_bs, q_odd_bs;
+ u8 i_even, q_even;
+ u8 i_odd, q_odd;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] ===> recovery\n");
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
+ i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
+ i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
+ _dck_addr_bs[i], i_even_bs, q_even_bs);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
+ i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
+ _dck_addr[i], i_even, q_even);
+ _rx_dck_fix_if_need(rtwdev, path, _dck_addr[i],
+ i_even_bs, q_even_bs, i_even, q_even);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
+ _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
+ i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
+ _dck_addr[i] + 1, i_odd, q_odd);
+ _rx_dck_fix_if_need(rtwdev, path, _dck_addr[i] + 1,
+ i_odd_bs, q_odd_bs, i_odd, q_odd);
+ }
+}
+
static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
{
int ret;
@@ -1573,11 +1752,42 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 pat
}
}
+static
+u8 _rx_dck_channel_calc(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan)
+{
+ u8 target_ch = 0;
+
+ if (chan->band_type == RTW89_BAND_5G) {
+ if (chan->channel >= 36 && chan->channel <= 64) {
+ target_ch = 100;
+ } else if (chan->channel >= 100 && chan->channel <= 144) {
+ target_ch = chan->channel + 32;
+ if (target_ch > 144)
+ target_ch = chan->channel + 33;
+ } else if (chan->channel >= 149 && chan->channel <= 177) {
+ target_ch = chan->channel - 33;
+ }
+ } else if (chan->band_type == RTW89_BAND_6G) {
+ if (chan->channel >= 1 && chan->channel <= 125)
+ target_ch = chan->channel + 32;
+ else
+ target_ch = chan->channel - 32;
+ } else {
+ target_ch = chan->channel;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] cur_ch / target_ch = %d / %d\n",
+ chan->channel, target_ch);
+
+ return target_ch;
+}
+
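
Worked examples of the mapping in _rx_dck_channel_calc() just above, which apparently picks a reference channel in a neighbouring block for the DCK comparison (the block boundaries come straight from the code):

	5 GHz ch  52 (36..64 block)    -> 100
	5 GHz ch 112 (100..144 block)  -> 112 + 32 = 144
	5 GHz ch 116                   -> 116 + 32 = 148 > 144, so 116 + 33 = 149
	5 GHz ch 157 (149..177 block)  -> 157 - 33 = 124
	6 GHz ch  37 (<= 125)          ->  37 + 32 = 69
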
#define RTW8852C_RF_REL_VERSION 34
-#define RTW8852C_DPK_VER 0x10
+#define RTW8852C_DPK_VER 0xf
#define RTW8852C_DPK_TH_AVG_NUM 4
#define RTW8852C_DPK_RF_PATH 2
-#define RTW8852C_DPK_KIP_REG_NUM 5
+#define RTW8852C_DPK_KIP_REG_NUM 7
#define RTW8852C_DPK_RXSRAM_DBG 0
enum rtw8852c_dpk_id {
@@ -1614,6 +1824,12 @@ enum dpk_agc_step {
DPK_AGC_STEP_SET_TX_GAIN,
};
+enum dpk_pas_result {
+ DPK_PAS_NOR,
+ DPK_PAS_GT,
+ DPK_PAS_LT,
+};
+
static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path, bool is_bybb)
{
@@ -1730,8 +1946,6 @@ static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
/*4. Set ADC clk*/
rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
B_P0_NRBW_DBG, 0x1);
rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
@@ -1872,12 +2086,11 @@ static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
0x50101 | BIT(rtwdev->dbcc_en));
rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
- if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161) {
+ if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161)
rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
- rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
- } else {
- rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
- }
+
+ rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_TXAC, RR_TXAC_IQG, 0x8);
rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
@@ -2024,9 +2237,10 @@ static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
return _dpk_gainloss_read(rtwdev);
}
-static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
+static enum dpk_pas_result _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+ u32 val1_sqrt_sum, val2_sqrt_sum;
u8 i;
rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
@@ -2057,15 +2271,25 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
}
}
- if (val1_i * val1_i + val1_q * val1_q >= (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
- return true;
+ val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q;
+ val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q;
+
+ if (val1_sqrt_sum < val2_sqrt_sum)
+ return DPK_PAS_LT;
+ else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5)
+ return DPK_PAS_GT;
else
- return false;
+ return DPK_PAS_NOR;
}
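
[Editor's sketch] The tri-state comparison above, isolated as a compilable fragment; the 8/5 ratio is the original threshold, and the names are illustrative:

    enum pas_result { PAS_NOR, PAS_GT, PAS_LT };

    /* val1/val2 are I/Q power sums (i*i + q*q) read from the KIP report */
    static enum pas_result pas_classify(unsigned int val1_sqrt_sum,
                                        unsigned int val2_sqrt_sum)
    {
        if (val1_sqrt_sum < val2_sqrt_sum)
            return PAS_LT;
        /* "greater" must clear the 8/5 margin, not just a bare > */
        if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5)
            return PAS_GT;
        return PAS_NOR;
    }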
static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx)
{
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+ _dpk_kip_control_rfc(rtwdev, path, true);
+
_dpk_one_shot(rtwdev, phy, path, D_RXAGC);
return _dpk_sync_check(rtwdev, path, kidx);
@@ -2103,6 +2327,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
u8 tmp_rxbb;
u8 goout = 0, agc_cnt = 0;
+ enum dpk_pas_result pas;
u16 dgain = 0;
bool is_fail = false;
int limit = 200;
@@ -2138,9 +2363,13 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
case DPK_AGC_STEP_GAIN_LOSS_IDX:
tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);
+ pas = _dpk_pas_read(rtwdev, true);
- if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
- tmp_gl_idx >= 7)
+ if (pas == DPK_PAS_LT && tmp_gl_idx > 0)
+ step = DPK_AGC_STEP_GL_LT_CRITERION;
+ else if (pas == DPK_PAS_GT && tmp_gl_idx == 0)
+ step = DPK_AGC_STEP_GL_GT_CRITERION;
+ else if (tmp_gl_idx >= 7)
step = DPK_AGC_STEP_GL_GT_CRITERION;
else if (tmp_gl_idx == 0)
step = DPK_AGC_STEP_GL_LT_CRITERION;
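
[Editor's sketch] The branch ordering above matters: the PAS result is consulted before the raw gain-loss index. A compact sketch of the decision; enum names are illustrative, and the in-range fallback to a set-TX-gain step is assumed from the enum earlier in this file, since the final else is not shown in this hunk:

    enum agc_step { STEP_GL_LT, STEP_GL_GT, STEP_SET_TX_GAIN };
    enum pas { PAS_NOR, PAS_GT, PAS_LT };

    static enum agc_step next_agc_step(enum pas pas, unsigned int gl_idx)
    {
        if (pas == PAS_LT && gl_idx > 0)
            return STEP_GL_LT;
        if (pas == PAS_GT && gl_idx == 0)
            return STEP_GL_GT;
        if (gl_idx >= 7)            /* index saturated high */
            return STEP_GL_GT;
        if (gl_idx == 0)            /* index saturated low */
            return STEP_GL_LT;
        return STEP_SET_TX_GAIN;    /* assumed in-range fallback */
    }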
@@ -2467,12 +2696,14 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
enum rtw89_phy_idx phy, u8 kpath)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8};
+ static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
u8 path;
bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};
+ static_assert(ARRAY_SIZE(kip_reg) == RTW8852C_DPK_KIP_REG_NUM);
+
if (dpk->is_dpk_reload_en) {
for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
if (!(kpath & BIT(path)))
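
[Editor's sketch] The static_assert added above pins the register list to its count macro at compile time, so the two cannot silently drift apart. A minimal self-contained version of the idiom (userspace C11 here; the kernel carries its own static_assert macro):

    #include <assert.h>    /* C11 static_assert */

    #define KIP_REG_NUM   7
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned int kip_reg[] = {
        0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8,
    };

    static_assert(ARRAY_SIZE(kip_reg) == KIP_REG_NUM,
                  "kip_reg[] and KIP_REG_NUM drifted apart");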
@@ -3875,11 +4106,14 @@ void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
#define RXDCK_VER_8852C 0xe
-void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool is_afe, u8 retry_limit)
{
struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
u8 path, kpath;
u32 rf_reg5;
+ bool is_fail;
+ u8 rek_cnt;
kpath = _kpath(rtwdev, phy);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -3896,7 +4130,27 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
B_P0_TSSI_TRK_EN, 0x1);
rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
- _set_rx_dck(rtwdev, phy, path, is_afe);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_LO_SEL, rtwdev->dbcc_en);
+
+ for (rek_cnt = 0; rek_cnt < retry_limit; rek_cnt++) {
+ _set_rx_dck(rtwdev, phy, path, is_afe);
+
+ /* To reduce the IO of _rx_dck_rek_check(), the last try is
+ * always treated as a failure, after which the recovery
+ * procedure runs unconditionally.
+ */
+ if (rek_cnt == retry_limit - 1) {
+ _rx_dck_recover(rtwdev, path);
+ break;
+ }
+
+ is_fail = _rx_dck_rek_check(rtwdev, path);
+ if (!is_fail)
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] rek_cnt[%d]=%d\n",
+ path, rek_cnt);
+
rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
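
[Editor's sketch] The retry loop above in generic form: the final permitted attempt skips the verification read entirely and goes straight to recovery, trading one possibly-good result for a guaranteed saving of the check's IO. Names below are illustrative:

    #include <stdbool.h>

    static void calibrate_once(void)      { /* one DCK pass */ }
    static bool calibration_failed(void)  { return false; /* IO check */ }
    static void recover(void)             { /* apply fallback values */ }

    static unsigned int calibrate_with_retry(unsigned int retry_limit)
    {
        unsigned int n;

        for (n = 0; n < retry_limit; n++) {
            calibrate_once();

            /* last attempt: assume failure, skip the check */
            if (n == retry_limit - 1) {
                recover();
                break;
            }

            if (!calibration_failed())
                break;    /* verified good, keep it */
        }
        return n;    /* attempts used, as the driver logs */
    }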
@@ -3906,15 +4160,31 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
}
}
-#define RTW8852C_RX_DCK_TH 8
+void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+{
+ _rx_dck(rtwdev, phy, is_afe, 1);
+}
+
+#define RTW8852C_RX_DCK_TH 12
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 dck_channel;
u8 cur_thermal;
+ u32 tx_en;
int delta;
int path;
+ if (chan->band_type == RTW89_BAND_2G)
+ return;
+
+ if (rtwdev->scanning)
+ return;
+
for (path = 0; path < RF_PATH_NUM_8852C; path++) {
cur_thermal =
ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
@@ -3924,11 +4194,28 @@ void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
"[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
path, cur_thermal, delta);
- if (delta >= RTW8852C_RX_DCK_TH) {
- rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
- return;
- }
+ if (delta >= RTW8852C_RX_DCK_TH)
+ goto trigger_rx_dck;
}
+
+ return;
+
+trigger_rx_dck:
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+
+ for (path = 0; path < RF_PATH_NUM_8852C; path++) {
+ dck_channel = _rx_dck_channel_calc(rtwdev, chan);
+ _ctrl_ch(rtwdev, RTW89_PHY_0, dck_channel, chan->band_type);
+ }
+
+ _rx_dck(rtwdev, RTW89_PHY_0, false, 20);
+
+ for (path = 0; path < RF_PATH_NUM_8852C; path++)
+ _ctrl_ch(rtwdev, RTW89_PHY_0, chan->channel, chan->band_type);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
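
[Editor's sketch] Sketch of the thermal-drift trigger that now gates the heavier re-calibration path (2 GHz and scanning are excluded by the early returns above); the threshold, path count, and absolute-delta computation are illustrative:

    #define RX_DCK_TH  12
    #define NUM_PATHS  2

    static unsigned char last_cal_thermal[NUM_PATHS];

    static int rx_dck_needed(const unsigned char *cur_thermal)
    {
        int path, delta;

        for (path = 0; path < NUM_PATHS; path++) {
            delta = cur_thermal[path] - last_cal_thermal[path];
            if (delta < 0)
                delta = -delta;
            if (delta >= RX_DCK_TH)
                return 1;    /* any one path drifting is enough */
        }
        return 0;
    }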
void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 35901f64d17d..80490a5437df 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -53,6 +53,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
+ .bd_ram_table = &rtw89_bd_ram_table_dual,
.ltr_set = rtw89_pci_ltr_set_v1,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index c1a4bc1c64d1..61db7189fdab 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -611,6 +611,7 @@ bottom:
ser_reset_mac_binding(rtwdev);
rtw89_core_stop(rtwdev);
rtw89_entity_init(rtwdev);
+ rtw89_fw_release_general_pkt_list(rtwdev, false);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 9d4c6b6fa125..98eb9607cd21 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -75,7 +75,9 @@
#define RTW89_TXWD_INFO0_DATA_BW GENMASK(29, 28)
#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
+#define RTW89_TXWD_INFO0_DATA_ER BIT(15)
#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
+#define RTW89_TXWD_INFO0_DATA_BW_ER BIT(8)
#define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4)
/* TX WD INFO DWORD 1 */
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index b2b826b2e09a..c78ee2ab732c 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -490,21 +490,6 @@ static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable)
return ret;
}
-static void rtw89_wow_release_pkt_list(struct rtw89_dev *rtwdev)
-{
- struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
- struct list_head *pkt_list = &rtw_wow->pkt_list;
- struct rtw89_pktofld_info *info, *tmp;
-
- list_for_each_entry_safe(info, tmp, pkt_list, list) {
- rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
- rtw89_core_release_bit_map(rtwdev->pkt_offload,
- info->id);
- list_del(&info->list);
- kfree(info);
- }
-}
-
static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
{
enum rtw89_fw_type fw_type = wow ? RTW89_FW_WOWLAN : RTW89_FW_NORMAL;
@@ -561,6 +546,11 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
}
if (is_conn) {
+ ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c general packet\n");
+ return ret;
+ }
rtw89_phy_ra_assoc(rtwdev, wow_sta);
rtw89_phy_set_bss_color(rtwdev, wow_vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, wow_vif);
@@ -708,8 +698,6 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
- rtw89_wow_release_pkt_list(rtwdev);
-
ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n");
@@ -722,6 +710,8 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+
ret = rtw89_wow_check_fw_status(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check disable fw ready\n");
@@ -744,6 +734,8 @@ static int rtw89_wow_enable(struct rtw89_dev *rtwdev)
goto out;
}
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+
ret = rtw89_wow_swap_fw(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "wow: failed to swap to wow fw\n");
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index 8a3d86897ea8..45ac9371f262 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -160,6 +160,7 @@ int rsi_coex_attach(struct rsi_common *common)
rsi_coex_scheduler_thread,
"Coex-Tx-Thread")) {
rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
+ kfree(coex_cb);
return -EINVAL;
}
return 0;
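
[Editor's sketch] The kfree() added above closes a leak on the thread-creation error path; the general shape is to keep owning an allocation until handoff succeeds. A userspace sketch with illustrative names:

    #include <stdlib.h>

    struct coex_ctx { int dummy; };

    static int start_tx_thread(struct coex_ctx *c) { (void)c; return -1; }

    static int coex_attach(struct coex_ctx **out)
    {
        struct coex_ctx *c = calloc(1, sizeof(*c));

        if (!c)
            return -1;
        if (start_tx_thread(c)) {
            free(c);    /* we still own it: undo on failure */
            return -1;
        }
        *out = c;       /* ownership transferred on success */
        return 0;
    }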
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index c7460fbba014..d4489b943873 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -894,7 +894,7 @@ static int rsi_load_9113_firmware(struct rsi_hw *adapter)
struct ta_metadata *metadata_p;
int status;
- status = bl_cmd(adapter, CONFIG_AUTO_READ_MODE, CMD_PASS,
+ status = bl_cmd(adapter, AUTO_READ_MODE, CMD_PASS,
"AUTO_READ_CMD");
if (status < 0)
return status;
@@ -984,7 +984,7 @@ fw_upgrade:
}
rsi_dbg(ERR_ZONE, "Firmware upgrade failed\n");
- status = bl_cmd(adapter, CONFIG_AUTO_READ_MODE, CMD_PASS,
+ status = bl_cmd(adapter, AUTO_READ_MODE, CMD_PASS,
"AUTO_READ_MODE");
if (status)
goto fail;
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index 5b07262a9740..479b1b0b57a6 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -69,7 +69,7 @@
#define EOF_REACHED 'E'
#define CHECK_CRC 'K'
#define POLLING_MODE 'P'
-#define CONFIG_AUTO_READ_MODE 'R'
+#define AUTO_READ_MODE 'R'
#define JUMP_TO_ZERO_PC 'J'
#define FW_LOADING_SUCCESSFUL 'S'
#define LOADING_INITIATED '1'
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index a19cce3a7e6f..5663f197ea69 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -373,7 +373,7 @@ int wl1251_hw_init(struct wl1251 *wl)
if (ret < 0)
goto out_free_data_path;
- /* Beacons and boradcast settings */
+ /* Beacons and broadcast settings */
ret = wl1251_hw_init_beacon_broadcast(wl);
if (ret < 0)
goto out_free_data_path;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 1b532e00a56f..7fb2f9513476 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1328,7 +1328,7 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
} else {
++dev->stats.tx_packets;
dev->stats.tx_bytes += skb->len;
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
if (this->tx_buffer_cnt < 2)
netif_stop_queue(dev);
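
[Editor's note] Context behind the one-line fix above: kfree_skb() must not run in hard-IRQ context, while dev_kfree_skb_irq() defers the free to softirq time; the kernel also provides dev_kfree_skb_any(), which picks per context. A kernel-style sketch, not a standalone program:

    #include <linux/netdevice.h>
    #include <linux/preempt.h>

    static void free_tx_skb(struct sk_buff *skb)
    {
        if (in_hardirq())
            dev_kfree_skb_irq(skb);  /* defer to softirq */
        else
            dev_kfree_skb(skb);      /* may free immediately */
    }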
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_rf.h b/drivers/net/wireless/zydas/zd1211rw/zd_rf.h
index 8bfec9e75125..9ca69df3d288 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf.h
@@ -85,9 +85,6 @@ static inline int zd_rf_should_patch_cck_gain(struct zd_rf *rf)
return rf->patch_cck_gain;
}
-int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel);
-int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel);
-
/* Functions for individual RF chips */
int zd_rf_init_rf2959(struct zd_rf *rf);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 12b074286df9..47d54d8ea59d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1741,6 +1741,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
* negotiate with the backend regarding supported features.
*/
netdev->features |= netdev->hw_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
netdev->ethtool_ops = &xennet_ethtool_ops;
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d6100a787d39..1955c0ec209e 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -14,6 +14,7 @@
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>
+#include <trace/events/sock.h>
#include "nvme.h"
#include "fabrics.h"
@@ -905,6 +906,8 @@ static void nvme_tcp_data_ready(struct sock *sk)
{
struct nvme_tcp_queue *queue;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (likely(queue && queue->rd_enabled) &&
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index c5759eb503d0..66e8f9fd0ca7 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -14,6 +14,7 @@
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
+#include <trace/events/sock.h>
#include "nvmet.h"
@@ -1469,6 +1470,8 @@ static void nvmet_tcp_data_ready(struct sock *sk)
{
struct nvmet_tcp_queue *queue;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (likely(queue))
@@ -1666,6 +1669,8 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
struct nvmet_tcp_port *port;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
port = sk->sk_user_data;
if (!port)
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index 08f4cf0ad9e3..61530167efe4 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -48,6 +48,29 @@ static void tmr_cnt_write(struct ptp_qoriq *ptp_qoriq, u64 ns)
ptp_qoriq->write(&regs->ctrl_regs->tmr_cnt_h, hi);
}
+static u64 tmr_offset_read(struct ptp_qoriq *ptp_qoriq)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 lo, hi;
+ u64 ns;
+
+ lo = ptp_qoriq->read(&regs->ctrl_regs->tmroff_l);
+ hi = ptp_qoriq->read(&regs->ctrl_regs->tmroff_h);
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ return ns;
+}
+
+static void tmr_offset_write(struct ptp_qoriq *ptp_qoriq, u64 delta_ns)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 lo = delta_ns & 0xffffffff;
+ u32 hi = delta_ns >> 32;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmroff_l, lo);
+ ptp_qoriq->write(&regs->ctrl_regs->tmroff_h, hi);
+}
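
[Editor's sketch] The new helpers split one logical 64-bit register across two 32-bit accesses; a generic sketch of the pattern, which is only safe while the register is stable (here, under the clock's spinlock, as in the adjtime/gettime hunks below):

    #include <stdint.h>

    static uint32_t reg_read32(volatile uint32_t *r)          { return *r; }
    static void reg_write32(volatile uint32_t *r, uint32_t v) { *r = v; }

    static uint64_t reg64_read(volatile uint32_t *lo, volatile uint32_t *hi)
    {
        return ((uint64_t)reg_read32(hi) << 32) | reg_read32(lo);
    }

    static void reg64_write(volatile uint32_t *lo, volatile uint32_t *hi,
                            uint64_t v)
    {
        reg_write32(lo, (uint32_t)v);
        reg_write32(hi, (uint32_t)(v >> 32));
    }

With the offset register in play, the reported time becomes counter + offset: adjtime() moves the offset (on non-eTSEC parts), gettime() sums both, and settime() clears the offset before rewriting the counter, as the hunks below show.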
+
/* Caller must hold ptp_qoriq->lock. */
static void set_alarm(struct ptp_qoriq *ptp_qoriq)
{
@@ -55,7 +78,9 @@ static void set_alarm(struct ptp_qoriq *ptp_qoriq)
u64 ns;
u32 lo, hi;
- ns = tmr_cnt_read(ptp_qoriq) + 1500000000ULL;
+ ns = tmr_cnt_read(ptp_qoriq) + tmr_offset_read(ptp_qoriq)
+ + 1500000000ULL;
+
ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
ns -= ptp_qoriq->tclk_period;
hi = ns >> 32;
@@ -207,15 +232,24 @@ EXPORT_SYMBOL_GPL(ptp_qoriq_adjfine);
int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- s64 now;
- unsigned long flags;
struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ s64 now, curr_delta;
+ unsigned long flags;
spin_lock_irqsave(&ptp_qoriq->lock, flags);
- now = tmr_cnt_read(ptp_qoriq);
- now += delta;
- tmr_cnt_write(ptp_qoriq, now);
+ /* On LS1021A, eTSEC2 and eTSEC3 do not take into account the TMR_OFF
+ * adjustment
+ */
+ if (ptp_qoriq->etsec) {
+ now = tmr_cnt_read(ptp_qoriq);
+ now += delta;
+ tmr_cnt_write(ptp_qoriq, now);
+ } else {
+ curr_delta = tmr_offset_read(ptp_qoriq);
+ curr_delta += delta;
+ tmr_offset_write(ptp_qoriq, curr_delta);
+ }
set_fipers(ptp_qoriq);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
@@ -232,7 +266,7 @@ int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
spin_lock_irqsave(&ptp_qoriq->lock, flags);
- ns = tmr_cnt_read(ptp_qoriq);
+ ns = tmr_cnt_read(ptp_qoriq) + tmr_offset_read(ptp_qoriq);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
@@ -253,6 +287,7 @@ int ptp_qoriq_settime(struct ptp_clock_info *ptp,
spin_lock_irqsave(&ptp_qoriq->lock, flags);
+ tmr_offset_write(ptp_qoriq, 0);
tmr_cnt_write(ptp_qoriq, ns);
set_fipers(ptp_qoriq);
@@ -488,6 +523,7 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
/* The eTSEC uses a different memory map than DPAA/ENETC */
if (of_device_is_compatible(node, "fsl,etsec-ptp")) {
+ ptp_qoriq->etsec = true;
ptp_qoriq->regs.ctrl_regs = base + ETSEC_CTRL_REGS_OFFSET;
ptp_qoriq->regs.alarm_regs = base + ETSEC_ALARM_REGS_OFFSET;
ptp_qoriq->regs.fiper_regs = base + ETSEC_FIPER_REGS_OFFSET;
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index dfb84bb03d32..90ec477386a8 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -370,7 +370,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
CTCM_FUNTAIL, dev->name, len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
- goto again;
+ goto again;
}
if (len > ch->max_bufsize) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
@@ -378,7 +378,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
- goto again;
+ goto again;
}
/*
@@ -403,7 +403,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
*((__u16 *)skb->data) = len;
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
- goto again;
+ goto again;
}
if (block_len > 2) {
*((__u16 *)skb->data) = block_len - 2;
@@ -1006,7 +1006,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
use gptr as mpc indicator */
if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
ctcm_chx_restart(fi, event, arg);
- goto done;
+ goto done;
}
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
@@ -1024,7 +1024,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
CTCM_FUNTAIL, ch->id);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
ctcm_chx_restart(fi, event, arg);
- goto done;
+ goto done;
}
fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
@@ -1251,12 +1251,12 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
spin_unlock(&ch->collect_lock);
fsm_newstate(fi, CTC_STATE_TXIDLE);
- goto done;
+ goto done;
}
if (ctcm_checkalloc_buffer(ch)) {
spin_unlock(&ch->collect_lock);
- goto done;
+ goto done;
}
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
@@ -1389,7 +1389,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): TRANS_SKB = NULL",
CTCM_FUNTAIL, dev->name);
- goto again;
+ goto again;
}
if (len < TH_HEADER_LENGTH) {
@@ -1409,7 +1409,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
"%s(%s): skb allocation failed",
CTCM_FUNTAIL, dev->name);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
- goto again;
+ goto again;
}
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_RESET:
@@ -1441,9 +1441,9 @@ again:
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
ch->ccw[1].count = ch->max_bufsize;
- if (do_debug_ccw)
+ if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0],
- sizeof(struct ccw1) * 3);
+ sizeof(struct ccw1) * 3);
dolock = !in_hardirq();
if (dolock)
spin_lock_irqsave(
@@ -1562,7 +1562,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
if (rc != 0) {
fsm_newstate(fi, CTC_STATE_RXINIT);
ctcm_ccw_check_rc(ch, rc, "initial RX");
- goto done;
+ goto done;
}
break;
default:
@@ -1677,10 +1677,10 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
fsm_newstate(ch->fsm, CH_XID0_PENDING) ;
fsm_deltimer(&grp->timer);
- goto done;
+ goto done;
}
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
case MPCG_STATE_XID2INITX:
/* XID2 was received before ATTN Busy for second
channel.Send yside xid for second channel.
@@ -1768,7 +1768,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
/* give the previous IO time to complete */
fsm_addtimer(&wch->sweep_timer,
200, CTC_EVENT_RSWEEP_TIMER, wch);
- goto done;
+ goto done;
}
skb = skb_dequeue(&wch->sweep_queue);
@@ -1780,7 +1780,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
ctcm_clear_busy_do(dev);
dev_kfree_skb_any(skb);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
} else {
refcount_inc(&skb->users);
skb_queue_tail(&wch->io_queue, skb);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index bdfab9ea0046..28db69d91f17 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -494,7 +494,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->collect_len += l;
}
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
- goto done;
+ goto done;
}
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
/*
@@ -685,7 +685,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->collect_len += skb->len;
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
- goto done;
+ goto done;
}
/*
@@ -885,7 +885,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
"%s(%s): NULL sk_buff passed",
CTCM_FUNTAIL, dev->name);
priv->stats.tx_dropped++;
- goto done;
+ goto done;
}
if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
@@ -908,7 +908,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
}
newskb->protocol = skb->protocol;
skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
@@ -931,7 +931,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
- goto done;
+ goto done;
}
if (ctcm_test_and_set_busy(dev)) {
@@ -943,7 +943,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
}
netif_trans_update(dev);
@@ -957,7 +957,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_carrier_errors++;
ctcm_clear_busy(dev);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
}
ctcm_clear_busy(dev);
done:
@@ -1421,7 +1421,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
"%s (%s) already in list, using old entry",
__func__, (*c)->id);
- goto free_return;
+ goto free_return;
}
spin_lock_init(&ch->collect_lock);
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 8ac213a55141..b8a226c6e1a9 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -481,7 +481,7 @@ void ctc_mpc_establish_connectivity(int port_num,
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer);
- goto done;
+ goto done;
}
if ((wch->in_mpcgroup) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
@@ -495,7 +495,7 @@ void ctc_mpc_establish_connectivity(int port_num,
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer);
- goto done;
+ goto done;
}
break;
case MPCG_STATE_XID0IOWAIT:
@@ -896,8 +896,9 @@ void mpc_group_ready(unsigned long adev)
grp->estconnfunc(grp->port_num, 0,
grp->group_max_buflen);
grp->estconnfunc = NULL;
- } else if (grp->allochanfunc)
+ } else if (grp->allochanfunc) {
grp->allochanfunc(grp->port_num, grp->group_max_buflen);
+ }
grp->send_qllc_disc = 1;
grp->changed_side = 0;
@@ -1109,7 +1110,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
- goto done;
+ goto done;
}
skb_reset_mac_header(pskb);
new_len = curr_pdu->pdu_offset;
@@ -1132,7 +1133,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
}
skb_put_data(skb, pskb->data, new_len);
@@ -1543,7 +1544,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): xid = NULL",
CTCM_FUNTAIL, ch->id);
- goto done;
+ goto done;
}
CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
@@ -1556,7 +1557,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): r/w channel pairing mismatch",
CTCM_FUNTAIL, ch->id);
- goto done;
+ goto done;
}
if (xid->xid2_dlc_type == XID2_READ_SIDE) {
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 38fe90c2597d..70c5bbda0fea 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/ism.h>
#include <net/smc.h>
#include <asm/pci_insn.h>
@@ -15,7 +16,6 @@
*/
#define ISM_DMB_WORD_OFFSET 1
#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32)
-#define ISM_NR_DMBS 1920
#define ISM_IDENT_MASK 0x00FFFF
#define ISM_REG_SBA 0x1
@@ -177,7 +177,7 @@ struct ism_eq_header {
struct ism_eq {
struct ism_eq_header header;
- struct smcd_event entry[15];
+ struct ism_event entry[15];
};
struct ism_sba {
@@ -189,21 +189,6 @@ struct ism_sba {
u16 dmbe_mask[ISM_NR_DMBS];
};
-struct ism_dev {
- spinlock_t lock;
- struct pci_dev *pdev;
- struct smcd_dev *smcd;
-
- struct ism_sba *sba;
- dma_addr_t sba_dma_addr;
- DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
-
- struct ism_eq *ieq;
- dma_addr_t ieq_dma_addr;
-
- int ieq_idx;
-};
-
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index dfd401d9e362..eb7e13486087 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -15,9 +15,6 @@
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>
-#include <net/smc.h>
-
-#include <asm/debug.h>
#include "ism.h"
@@ -34,6 +31,84 @@ static const struct pci_device_id ism_device_table[] = {
MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
+static const struct smcd_ops ism_ops;
+
+#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
+static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
+ /* a list for fast mapping */
+static u8 max_client;
+static DEFINE_SPINLOCK(clients_lock);
+struct ism_dev_list {
+ struct list_head list;
+ struct mutex mutex; /* protects ism device list */
+};
+
+static struct ism_dev_list ism_dev_list = {
+ .list = LIST_HEAD_INIT(ism_dev_list.list),
+ .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
+};
+
+int ism_register_client(struct ism_client *client)
+{
+ struct ism_dev *ism;
+ unsigned long flags;
+ int i, rc = -ENOSPC;
+
+ mutex_lock(&ism_dev_list.mutex);
+ spin_lock_irqsave(&clients_lock, flags);
+ for (i = 0; i < MAX_CLIENTS; ++i) {
+ if (!clients[i]) {
+ clients[i] = client;
+ client->id = i;
+ if (i == max_client)
+ max_client++;
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&clients_lock, flags);
+ if (i < MAX_CLIENTS) {
+ /* initialize with all devices that we got so far */
+ list_for_each_entry(ism, &ism_dev_list.list, list) {
+ ism->priv[i] = NULL;
+ client->add(ism);
+ }
+ }
+ mutex_unlock(&ism_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ism_register_client);
+
+int ism_unregister_client(struct ism_client *client)
+{
+ struct ism_dev *ism;
+ unsigned long flags;
+ int rc = 0;
+
+ mutex_lock(&ism_dev_list.mutex);
+ spin_lock_irqsave(&clients_lock, flags);
+ clients[client->id] = NULL;
+ if (client->id + 1 == max_client)
+ max_client--;
+ spin_unlock_irqrestore(&clients_lock, flags);
+ list_for_each_entry(ism, &ism_dev_list.list, list) {
+ for (int i = 0; i < ISM_NR_DMBS; ++i) {
+ if (ism->sba_client_arr[i] == client->id) {
+ pr_err("%s: attempt to unregister client '%s'"
+ " with registered dmb(s)\n", __func__,
+ client->name);
+ rc = -EBUSY;
+ goto out;
+ }
+ }
+ }
+out:
+ mutex_unlock(&ism_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ism_unregister_client);
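
[Editor's sketch] A minimal model of the slot-based registry introduced above: a fixed array gives O(1) dispatch from a stored client id (as the IRQ handler does via sba_client_arr), at the cost of a hard client limit. Locking and the per-device walk are omitted; names are illustrative:

    #define MAX_CLIENTS  8
    #define NO_CLIENT    0xff    /* sentinel, must be >= MAX_CLIENTS */

    struct client {
        void (*handle_irq)(unsigned int vector);
        unsigned char id;
    };

    static struct client *clients[MAX_CLIENTS];
    static unsigned char max_client;    /* highest live slot + 1 */

    static int client_register(struct client *c)
    {
        unsigned char i;

        for (i = 0; i < MAX_CLIENTS; i++) {
            if (!clients[i]) {
                clients[i] = c;
                c->id = i;
                if (i == max_client)
                    max_client++;
                return 0;
            }
        }
        return -1;    /* -ENOSPC in the driver */
    }

    static void client_unregister(struct client *c)
    {
        clients[c->id] = NULL;
        if (c->id + 1 == max_client)
            max_client--;
    }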
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
@@ -193,15 +268,14 @@ static int ism_read_local_gid(struct ism_dev *ism)
if (ret)
goto out;
- ism->smcd->local_gid = cmd.response.gid;
+ ism->local_gid = cmd.response.gid;
out:
return ret;
}
-static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
u32 vid)
{
- struct ism_dev *ism = smcd->priv;
union ism_query_rgid cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -215,14 +289,14 @@ static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
return ism_cmd(ism, &cmd);
}
-static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
dmb->cpu_addr, dmb->dma_addr);
}
-static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
unsigned long bit;
@@ -251,9 +325,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
return dmb->cpu_addr ? 0 : -ENOMEM;
}
-static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
+ struct ism_client *client)
{
- struct ism_dev *ism = smcd->priv;
union ism_reg_dmb cmd;
int ret;
@@ -278,13 +352,14 @@ static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
goto out;
}
dmb->dmb_tok = cmd.response.dmb_tok;
+ ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ism_register_dmb);
-static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
- struct ism_dev *ism = smcd->priv;
union ism_unreg_dmb cmd;
int ret;
@@ -294,6 +369,8 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
cmd.request.dmb_tok = dmb->dmb_tok;
+ ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
+
ret = ism_cmd(ism, &cmd);
if (ret && ret != ISM_ERROR)
goto out;
@@ -302,10 +379,10 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ism_unregister_dmb);
-static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
- struct ism_dev *ism = smcd->priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -317,9 +394,8 @@ static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
- struct ism_dev *ism = smcd->priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -331,20 +407,9 @@ static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_set_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
-}
-
-static int ism_reset_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
-}
-
-static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
- struct ism_dev *ism = smcd->priv;
union ism_sig_ieq cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -365,10 +430,9 @@ static unsigned int max_bytes(unsigned int start, unsigned int len,
return min(boundary - (start & (boundary - 1)), len);
}
-static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
- bool sf, unsigned int offset, void *data, unsigned int size)
+int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
+ unsigned int offset, void *data, unsigned int size)
{
- struct ism_dev *ism = smcd->priv;
unsigned int bytes;
u64 dmb_req;
int ret;
@@ -389,6 +453,7 @@ static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
return 0;
}
+EXPORT_SYMBOL_GPL(ism_move);
static struct ism_systemeid SYSTEM_EID = {
.seid_string = "IBM-SYSZ-ISMSEID00000000",
@@ -410,15 +475,14 @@ static void ism_create_system_eid(void)
memcpy(&SYSTEM_EID.type, tmp, 4);
}
-static u8 *ism_get_system_eid(void)
+u8 *ism_get_seid(void)
{
return SYSTEM_EID.seid_string;
}
+EXPORT_SYMBOL_GPL(ism_get_seid);
-static u16 ism_get_chid(struct smcd_dev *smcd)
+static u16 ism_get_chid(struct ism_dev *ism)
{
- struct ism_dev *ism = (struct ism_dev *)smcd->priv;
-
if (!ism || !ism->pdev)
return 0;
@@ -427,7 +491,8 @@ static u16 ism_get_chid(struct smcd_dev *smcd)
static void ism_handle_event(struct ism_dev *ism)
{
- struct smcd_event *entry;
+ struct ism_event *entry;
+ int i;
while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
@@ -435,13 +500,18 @@ static void ism_handle_event(struct ism_dev *ism)
entry = &ism->ieq->entry[ism->ieq_idx];
debug_event(ism_debug_info, 2, entry, sizeof(*entry));
- smcd_handle_event(ism->smcd, entry);
+ spin_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i)
+ if (clients[i])
+ clients[i]->handle_event(ism, entry);
+ spin_unlock(&clients_lock);
}
}
static irqreturn_t ism_handle_irq(int irq, void *data)
{
struct ism_dev *ism = data;
+ struct ism_client *clt;
unsigned long bit, end;
unsigned long *bv;
u16 dmbemask;
@@ -461,7 +531,8 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
barrier();
- smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET, dmbemask);
+ clt = clients[ism->sba_client_arr[bit]];
+ clt->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
}
if (ism->sba->e) {
@@ -473,33 +544,40 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct smcd_ops ism_ops = {
- .query_remote_gid = ism_query_rgid,
- .register_dmb = ism_register_dmb,
- .unregister_dmb = ism_unregister_dmb,
- .add_vlan_id = ism_add_vlan_id,
- .del_vlan_id = ism_del_vlan_id,
- .set_vlan_required = ism_set_vlan_required,
- .reset_vlan_required = ism_reset_vlan_required,
- .signal_event = ism_signal_ieq,
- .move_data = ism_move,
- .get_system_eid = ism_get_system_eid,
- .get_chid = ism_get_chid,
-};
+static u64 ism_get_local_gid(struct ism_dev *ism)
+{
+ return ism->local_gid;
+}
+
+static void ism_dev_add_work_func(struct work_struct *work)
+{
+ struct ism_client *client = container_of(work, struct ism_client,
+ add_work);
+
+ client->add(client->tgt_ism);
+ atomic_dec(&client->tgt_ism->add_dev_cnt);
+ wake_up(&client->tgt_ism->waitq);
+}
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
- int ret;
+ unsigned long flags;
+ int i, ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret <= 0)
goto out;
+ ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
+ if (!ism->sba_client_arr)
+ goto free_vectors;
+ memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);
+
ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
pci_name(pdev), ism);
if (ret)
- goto free_vectors;
+ goto free_client_arr;
ret = register_sba(ism);
if (ret)
@@ -513,13 +591,31 @@ static int ism_dev_init(struct ism_dev *ism)
if (ret)
goto unreg_ieq;
- if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID))
+ if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
/* hardware is V2 capable */
ism_create_system_eid();
- ret = smcd_register_dev(ism->smcd);
- if (ret)
- goto unreg_ieq;
+ init_waitqueue_head(&ism->waitq);
+ atomic_set(&ism->free_clients_cnt, 0);
+ atomic_set(&ism->add_dev_cnt, 0);
+
+ wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
+ spin_lock_irqsave(&clients_lock, flags);
+ for (i = 0; i < max_client; ++i)
+ if (clients[i]) {
+ INIT_WORK(&clients[i]->add_work,
+ ism_dev_add_work_func);
+ clients[i]->tgt_ism = ism;
+ atomic_inc(&ism->add_dev_cnt);
+ schedule_work(&clients[i]->add_work);
+ }
+ spin_unlock_irqrestore(&clients_lock, flags);
+
+ wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
+
+ mutex_lock(&ism_dev_list.mutex);
+ list_add(&ism->list, &ism_dev_list.list);
+ mutex_unlock(&ism_dev_list.mutex);
query_info(ism);
return 0;
@@ -530,6 +626,8 @@ unreg_sba:
unregister_sba(ism);
free_irq:
free_irq(pci_irq_vector(pdev, 0), ism);
+free_client_arr:
+ kfree(ism->sba_client_arr);
free_vectors:
pci_free_irq_vectors(pdev);
out:
@@ -548,6 +646,12 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ism->lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
+ ism->dev.parent = &pdev->dev;
+ device_initialize(&ism->dev);
+ dev_set_name(&ism->dev, dev_name(&pdev->dev));
+ ret = device_add(&ism->dev);
+ if (ret)
+ goto err_dev;
ret = pci_enable_device_mem(pdev);
if (ret)
@@ -565,55 +669,80 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
- ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
- ISM_NR_DMBS);
- if (!ism->smcd) {
- ret = -ENOMEM;
- goto err_resource;
- }
-
- ism->smcd->priv = ism;
ret = ism_dev_init(ism);
if (ret)
- goto err_free;
+ goto err_resource;
return 0;
-err_free:
- smcd_free_dev(ism->smcd);
err_resource:
+ pci_clear_master(pdev);
pci_release_mem_regions(pdev);
err_disable:
pci_disable_device(pdev);
err:
- kfree(ism);
+ device_del(&ism->dev);
+err_dev:
dev_set_drvdata(&pdev->dev, NULL);
+ kfree(ism);
+
return ret;
}
+static void ism_dev_remove_work_func(struct work_struct *work)
+{
+ struct ism_client *client = container_of(work, struct ism_client,
+ remove_work);
+
+ client->remove(client->tgt_ism);
+ atomic_dec(&client->tgt_ism->free_clients_cnt);
+ wake_up(&client->tgt_ism->waitq);
+}
+
+/* Callers must hold ism_dev_list.mutex */
static void ism_dev_exit(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
+ unsigned long flags;
+ int i;
+
+ wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
+ spin_lock_irqsave(&clients_lock, flags);
+ for (i = 0; i < max_client; ++i)
+ if (clients[i]) {
+ INIT_WORK(&clients[i]->remove_work,
+ ism_dev_remove_work_func);
+ clients[i]->tgt_ism = ism;
+ atomic_inc(&ism->free_clients_cnt);
+ schedule_work(&clients[i]->remove_work);
+ }
+ spin_unlock_irqrestore(&clients_lock, flags);
+
+ wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
- smcd_unregister_dev(ism->smcd);
if (SYSTEM_EID.serial_number[0] != '0' ||
SYSTEM_EID.type[0] != '0')
- ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID);
+ ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
unregister_ieq(ism);
unregister_sba(ism);
free_irq(pci_irq_vector(pdev, 0), ism);
+ kfree(ism->sba_client_arr);
pci_free_irq_vectors(pdev);
+ list_del_init(&ism->list);
}
static void ism_remove(struct pci_dev *pdev)
{
struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+ mutex_lock(&ism_dev_list.mutex);
ism_dev_exit(ism);
+ mutex_unlock(&ism_dev_list.mutex);
- smcd_free_dev(ism->smcd);
+ pci_clear_master(pdev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
+ device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
kfree(ism);
}
@@ -633,6 +762,8 @@ static int __init ism_init(void)
if (!ism_debug_info)
return -ENODEV;
+ memset(clients, 0, sizeof(clients));
+ max_client = 0;
debug_register_view(ism_debug_info, &debug_hex_ascii_view);
ret = pci_register_driver(&ism_driver);
if (ret)
@@ -643,9 +774,110 @@ static int __init ism_init(void)
static void __exit ism_exit(void)
{
+ struct ism_dev *ism;
+
+ mutex_lock(&ism_dev_list.mutex);
+ list_for_each_entry(ism, &ism_dev_list.list, list) {
+ ism_dev_exit(ism);
+ }
+ mutex_unlock(&ism_dev_list.mutex);
+
pci_unregister_driver(&ism_driver);
debug_unregister(ism_debug_info);
}
module_init(ism_init);
module_exit(ism_exit);
+
+/*************************** SMC-D Implementation *****************************/
+
+#if IS_ENABLED(CONFIG_SMC)
+static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+ u32 vid)
+{
+ return ism_query_rgid(smcd->priv, rgid, vid_valid, vid);
+}
+
+static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
+ struct ism_client *client)
+{
+ return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
+}
+
+static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+ return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
+}
+
+static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+ return ism_add_vlan_id(smcd->priv, vlan_id);
+}
+
+static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+ return ism_del_vlan_id(smcd->priv, vlan_id);
+}
+
+static int smcd_set_vlan_required(struct smcd_dev *smcd)
+{
+ return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
+}
+
+static int smcd_reset_vlan_required(struct smcd_dev *smcd)
+{
+ return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
+}
+
+static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info)
+{
+ return ism_signal_ieq(smcd->priv, rgid, trigger_irq, event_code, info);
+}
+
+static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size)
+{
+ return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
+}
+
+static u64 smcd_get_local_gid(struct smcd_dev *smcd)
+{
+ return ism_get_local_gid(smcd->priv);
+}
+
+static u16 smcd_get_chid(struct smcd_dev *smcd)
+{
+ return ism_get_chid(smcd->priv);
+}
+
+static inline struct device *smcd_get_dev(struct smcd_dev *dev)
+{
+ struct ism_dev *ism = dev->priv;
+
+ return &ism->dev;
+}
+
+static const struct smcd_ops ism_ops = {
+ .query_remote_gid = smcd_query_rgid,
+ .register_dmb = smcd_register_dmb,
+ .unregister_dmb = smcd_unregister_dmb,
+ .add_vlan_id = smcd_add_vlan_id,
+ .del_vlan_id = smcd_del_vlan_id,
+ .set_vlan_required = smcd_set_vlan_required,
+ .reset_vlan_required = smcd_reset_vlan_required,
+ .signal_event = smcd_signal_ieq,
+ .move_data = smcd_move,
+ .get_system_eid = ism_get_seid,
+ .get_local_gid = smcd_get_local_gid,
+ .get_chid = smcd_get_chid,
+ .get_dev = smcd_get_dev,
+};
+
+const struct smcd_ops *ism_get_smcd_ops(void)
+{
+ return &ism_ops;
+}
+EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
+#endif
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8bd9fd51208c..1d5b207c2b9e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2801,9 +2801,11 @@ static void qeth_print_status_message(struct qeth_card *card)
* of the level OSA sets the first character to zero
* */
if (!card->info.mcl_level[0]) {
- sprintf(card->info.mcl_level, "%02x%02x",
- card->info.mcl_level[2],
- card->info.mcl_level[3]);
+ scnprintf(card->info.mcl_level,
+ sizeof(card->info.mcl_level),
+ "%02x%02x",
+ card->info.mcl_level[2],
+ card->info.mcl_level[3]);
break;
}
fallthrough;
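
[Editor's note] On the snprintf()-to-scnprintf() conversions in this file: snprintf() returns the length that would have been written given unlimited space, so accumulating its return value can index past the buffer; scnprintf() returns the bytes actually stored. A userspace stand-in to illustrate (the kernel provides scnprintf() in lib/vsprintf.c):

    #include <stdarg.h>
    #include <stdio.h>

    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (i < 0)
            return 0;
        /* clamp to what was actually stored, minus the NUL */
        return (size_t)i < size ? i : (size ? (int)size - 1 : 0);
    }

    int main(void)
    {
        char buf[8];

        printf("snprintf:  %d\n", snprintf(buf, sizeof(buf), "0123456789"));
        printf("scnprintf: %d\n", my_scnprintf(buf, sizeof(buf), "0123456789"));
        /* prints 10 vs 7: only the second is safe to accumulate */
        return 0;
    }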
@@ -6090,7 +6092,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
if (!debug_level_enabled(id, level))
return;
va_start(args, fmt);
- vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
+ vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
debug_text_event(id, level, dbf_txt_buf);
}
@@ -6330,8 +6332,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_dev;
}
- snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
- dev_name(&gdev->dev));
+ scnprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
+ dev_name(&gdev->dev));
card->debug = qeth_get_dbf_entry(dbf_name);
if (!card->debug) {
rc = qeth_add_dbf_entry(card, dbf_name);
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index d1adc4b83193..eea93f8f106f 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -23,15 +23,15 @@ static ssize_t qeth_dev_state_show(struct device *dev,
switch (card->state) {
case CARD_STATE_DOWN:
- return sprintf(buf, "DOWN\n");
+ return sysfs_emit(buf, "DOWN\n");
case CARD_STATE_SOFTSETUP:
if (card->dev->flags & IFF_UP)
- return sprintf(buf, "UP (LAN %s)\n",
- netif_carrier_ok(card->dev) ? "ONLINE" :
- "OFFLINE");
- return sprintf(buf, "SOFTSETUP\n");
+ return sysfs_emit(buf, "UP (LAN %s)\n",
+ netif_carrier_ok(card->dev) ?
+ "ONLINE" : "OFFLINE");
+ return sysfs_emit(buf, "SOFTSETUP\n");
default:
- return sprintf(buf, "UNKNOWN\n");
+ return sysfs_emit(buf, "UNKNOWN\n");
}
}
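
[Editor's note] The sprintf()-to-sysfs_emit() conversions through the rest of this file follow one pattern: sysfs_emit() checks that buf is the page-sized sysfs buffer and clamps output at PAGE_SIZE, where sprintf() trusts the caller blindly; sysfs_emit_at() is the accumulating variant used below for multi-part output. A kernel-style sketch with illustrative attribute names:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "%d\n", 1);
    }
    static DEVICE_ATTR_RO(example);

    static ssize_t caps_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
        int len = 0;

        len += sysfs_emit_at(buf, len, "802.1");
        len += sysfs_emit_at(buf, len, " rr\n");
        return len;
    }
    static DEVICE_ATTR_RO(caps);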
@@ -42,7 +42,7 @@ static ssize_t qeth_dev_chpid_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%02X\n", card->info.chpid);
+ return sysfs_emit(buf, "%02X\n", card->info.chpid);
}
static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
@@ -52,7 +52,7 @@ static ssize_t qeth_dev_if_name_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", netdev_name(card->dev));
+ return sysfs_emit(buf, "%s\n", netdev_name(card->dev));
}
static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
@@ -62,7 +62,7 @@ static ssize_t qeth_dev_card_type_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
+ return sysfs_emit(buf, "%s\n", qeth_get_cardname_short(card));
}
static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
@@ -86,7 +86,7 @@ static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
+ return sysfs_emit(buf, "%s\n", qeth_get_bufsize_str(card));
}
static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
@@ -96,7 +96,7 @@ static ssize_t qeth_dev_portno_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->dev->dev_port);
+ return sysfs_emit(buf, "%i\n", card->dev->dev_port);
}
static ssize_t qeth_dev_portno_store(struct device *dev,
@@ -134,7 +134,7 @@ static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
static ssize_t qeth_dev_portname_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "no portname required\n");
+ return sysfs_emit(buf, "no portname required\n");
}
static ssize_t qeth_dev_portname_store(struct device *dev,
@@ -157,18 +157,18 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_PREC:
- return sprintf(buf, "%s\n", "by precedence");
+ return sysfs_emit(buf, "%s\n", "by precedence");
case QETH_PRIO_Q_ING_TOS:
- return sprintf(buf, "%s\n", "by type of service");
+ return sysfs_emit(buf, "%s\n", "by type of service");
case QETH_PRIO_Q_ING_SKB:
- return sprintf(buf, "%s\n", "by skb-priority");
+ return sysfs_emit(buf, "%s\n", "by skb-priority");
case QETH_PRIO_Q_ING_VLAN:
- return sprintf(buf, "%s\n", "by VLAN headers");
+ return sysfs_emit(buf, "%s\n", "by VLAN headers");
case QETH_PRIO_Q_ING_FIXED:
- return sprintf(buf, "always queue %i\n",
+ return sysfs_emit(buf, "always queue %i\n",
card->qdio.default_out_queue);
default:
- return sprintf(buf, "disabled\n");
+ return sysfs_emit(buf, "disabled\n");
}
}
@@ -242,7 +242,7 @@ static ssize_t qeth_dev_bufcnt_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
+ return sysfs_emit(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
}
static ssize_t qeth_dev_bufcnt_store(struct device *dev,
@@ -298,7 +298,7 @@ static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
static ssize_t qeth_dev_performance_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "1\n");
+ return sysfs_emit(buf, "1\n");
}
static ssize_t qeth_dev_performance_stats_store(struct device *dev,
@@ -335,7 +335,7 @@ static ssize_t qeth_dev_layer2_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->options.layer);
+ return sysfs_emit(buf, "%i\n", card->options.layer);
}
static ssize_t qeth_dev_layer2_store(struct device *dev,
@@ -470,23 +470,25 @@ static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
int rc = 0;
if (!qeth_card_hw_is_reachable(card))
- return sprintf(buf, "n/a\n");
+ return sysfs_emit(buf, "n/a\n");
rc = qeth_query_switch_attributes(card, &sw_info);
if (rc)
return rc;
if (!sw_info.capabilities)
- rc = sprintf(buf, "unknown");
+ rc = sysfs_emit(buf, "unknown");
if (sw_info.capabilities & QETH_SWITCH_FORW_802_1)
- rc = sprintf(buf, (sw_info.settings & QETH_SWITCH_FORW_802_1 ?
- "[802.1]" : "802.1"));
+ rc = sysfs_emit(buf,
+ (sw_info.settings & QETH_SWITCH_FORW_802_1 ?
+ "[802.1]" : "802.1"));
if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY)
- rc += sprintf(buf + rc,
- (sw_info.settings & QETH_SWITCH_FORW_REFL_RELAY ?
- " [rr]" : " rr"));
- rc += sprintf(buf + rc, "\n");
+ rc += sysfs_emit_at(buf, rc,
+ (sw_info.settings &
+ QETH_SWITCH_FORW_REFL_RELAY ?
+ " [rr]" : " rr"));
+ rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
@@ -573,7 +575,7 @@ static ssize_t qeth_dev_blkt_total_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->info.blkt.time_total);
+ return sysfs_emit(buf, "%i\n", card->info.blkt.time_total);
}
static ssize_t qeth_dev_blkt_total_store(struct device *dev,
@@ -593,7 +595,7 @@ static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->info.blkt.inter_packet);
+ return sysfs_emit(buf, "%i\n", card->info.blkt.inter_packet);
}
static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
@@ -613,7 +615,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
+ return sysfs_emit(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
}
static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index e250f49535fa..c1caf7734c3e 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -172,7 +172,7 @@ static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
qeth_add_stat_strings(&data, prefix, card_stats,
CARD_STATS_LEN);
for (i = 0; i < card->qdio.no_out_queues; i++) {
- snprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i);
+ scnprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i);
qeth_add_stat_strings(&data, prefix, txq_stats,
TXQ_STATS_LEN);
}
@@ -192,8 +192,8 @@ static void qeth_get_drvinfo(struct net_device *dev,
sizeof(info->driver));
strscpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
- snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
- CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
+ scnprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
+ CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
}
static void qeth_get_channels(struct net_device *dev,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c6ded3fdd715..9f13ed170a43 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1255,37 +1255,38 @@ static void qeth_bridge_emit_host_event(struct qeth_card *card,
switch (evtype) {
case anev_reg_unreg:
- snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
- (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
- ? "deregister" : "register");
+ scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
+ (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
+ ? "deregister" : "register");
env[i] = str[i]; i++;
if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
- snprintf(str[i], sizeof(str[i]), "VLAN=%d",
- addr_lnid->lnid);
+ scnprintf(str[i], sizeof(str[i]), "VLAN=%d",
+ addr_lnid->lnid);
env[i] = str[i]; i++;
}
if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
- snprintf(str[i], sizeof(str[i]), "MAC=%pM",
- addr_lnid->mac);
+ scnprintf(str[i], sizeof(str[i]), "MAC=%pM",
+ addr_lnid->mac);
env[i] = str[i]; i++;
}
- snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
- token->cssid, token->ssid, token->devnum);
+ scnprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
+ token->cssid, token->ssid, token->devnum);
env[i] = str[i]; i++;
- snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
+ scnprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
env[i] = str[i]; i++;
- snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
- token->chpid);
+ scnprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
+ token->chpid);
env[i] = str[i]; i++;
- snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid);
+ scnprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x",
+ token->chid);
env[i] = str[i]; i++;
break;
case anev_abort:
- snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
+ scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
env[i] = str[i]; i++;
break;
case anev_reset:
- snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
+ scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
env[i] = str[i]; i++;
break;
}
@@ -1314,17 +1315,17 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
NULL
};
- snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
- snprintf(env_role, sizeof(env_role), "ROLE=%s",
- (data->role == QETH_SBP_ROLE_NONE) ? "none" :
- (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
- (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
- "<INVALID>");
- snprintf(env_state, sizeof(env_state), "STATE=%s",
- (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
- (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
- (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
- "<INVALID>");
+ scnprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
+ scnprintf(env_role, sizeof(env_role), "ROLE=%s",
+ (data->role == QETH_SBP_ROLE_NONE) ? "none" :
+ (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+ (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+ "<INVALID>");
+ scnprintf(env_state, sizeof(env_state), "STATE=%s",
+ (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+ (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+ (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+ "<INVALID>");
kobject_uevent_env(&data->card->gdev->dev.kobj,
KOBJ_CHANGE, env);
kfree(data);
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index a617351fff57..7f592f912517 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -19,7 +19,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
char *word;
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
mutex_lock(&card->sbp_lock);
if (qeth_card_hw_is_reachable(card) &&
@@ -53,7 +53,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x",
card->options.sbp.role, state);
else
- rc = sprintf(buf, "%s\n", word);
+ rc = sysfs_emit(buf, "%s\n", word);
}
mutex_unlock(&card->sbp_lock);
@@ -66,7 +66,7 @@ static ssize_t qeth_bridge_port_role_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
}
@@ -117,7 +117,7 @@ static ssize_t qeth_bridge_port_state_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
}
@@ -132,11 +132,11 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
int enabled;
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
enabled = card->options.sbp.hostnotification;
- return sprintf(buf, "%d\n", enabled);
+ return sysfs_emit(buf, "%d\n", enabled);
}
static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
@@ -180,7 +180,7 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
char *state;
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
if (card->options.sbp.reflect_promisc) {
if (card->options.sbp.reflect_promisc_primary)
@@ -190,7 +190,7 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
} else
state = "none";
- return sprintf(buf, "%s\n", state);
+ return sysfs_emit(buf, "%s\n", state);
}
static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
@@ -280,10 +280,10 @@ static ssize_t qeth_vnicc_timeout_show(struct device *dev,
rc = qeth_l2_vnicc_get_timeout(card, &timeout);
if (rc == -EBUSY)
- return sprintf(buf, "n/a (BridgePort)\n");
+ return sysfs_emit(buf, "n/a (BridgePort)\n");
if (rc == -EOPNOTSUPP)
- return sprintf(buf, "n/a\n");
- return rc ? rc : sprintf(buf, "%d\n", timeout);
+ return sysfs_emit(buf, "n/a\n");
+ return rc ? rc : sysfs_emit(buf, "%d\n", timeout);
}
/* change timeout setting */
@@ -318,10 +318,10 @@ static ssize_t qeth_vnicc_char_show(struct device *dev,
rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
if (rc == -EBUSY)
- return sprintf(buf, "n/a (BridgePort)\n");
+ return sysfs_emit(buf, "n/a (BridgePort)\n");
if (rc == -EOPNOTSUPP)
- return sprintf(buf, "n/a\n");
- return rc ? rc : sprintf(buf, "%d\n", state);
+ return sysfs_emit(buf, "n/a\n");
+ return rc ? rc : sysfs_emit(buf, "%d\n", state);
}
/* change setting of characteristic */
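These show() conversions replace sprintf() with sysfs_emit(). sysfs_emit() knows that the buffer handed to a sysfs ->show() callback is a full page, verifies that buf is page-aligned, and refuses to write past PAGE_SIZE, so the unbounded sprintf() calls go away. The shape of a converted callback, as a minimal sketch (foo_show is hypothetical):

        static ssize_t foo_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
        {
                /* bounded to the PAGE_SIZE buffer sysfs provides */
                return sysfs_emit(buf, "%d\n", 42);
        }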
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d8487a10cd55..af4e60d2917e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -19,6 +19,7 @@
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/in.h>
+#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
@@ -46,9 +47,9 @@ int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
char *buf)
{
if (proto == QETH_PROT_IPV4)
- return sprintf(buf, "%pI4", addr);
+ return scnprintf(buf, INET_ADDRSTRLEN, "%pI4", addr);
else
- return sprintf(buf, "%pI6", addr);
+ return scnprintf(buf, INET6_ADDRSTRLEN, "%pI6", addr);
}
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
@@ -158,7 +159,7 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
struct qeth_ipaddr *addr;
- char buf[40];
+ char buf[INET6_ADDRSTRLEN];
if (tmp_addr->type == QETH_IP_TYPE_RXIP)
QETH_CARD_TEXT(card, 2, "addrxip");
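The magic buffer length 40 becomes INET6_ADDRSTRLEN from <linux/inet.h>, which is defined to hold the longest textual IPv6 address plus the terminating NUL, and the unbounded sprintf() becomes a bounded scnprintf(). A minimal sketch of the sizing pattern (addr stands in for a hypothetical IPv6 address pointer):

        #include <linux/inet.h>

        char buf[INET6_ADDRSTRLEN];

        /* %pI6 never needs more than INET6_ADDRSTRLEN bytes including NUL */
        scnprintf(buf, sizeof(buf), "%pI6", addr);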
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 1082380b21f8..9f90a860ca2c 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -32,26 +32,26 @@ static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
{
switch (route->type) {
case PRIMARY_ROUTER:
- return sprintf(buf, "%s\n", "primary router");
+ return sysfs_emit(buf, "%s\n", "primary router");
case SECONDARY_ROUTER:
- return sprintf(buf, "%s\n", "secondary router");
+ return sysfs_emit(buf, "%s\n", "secondary router");
case MULTICAST_ROUTER:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
- return sprintf(buf, "%s\n", "multicast router+");
+ return sysfs_emit(buf, "%s\n", "multicast router+");
else
- return sprintf(buf, "%s\n", "multicast router");
+ return sysfs_emit(buf, "%s\n", "multicast router");
case PRIMARY_CONNECTOR:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
- return sprintf(buf, "%s\n", "primary connector+");
+ return sysfs_emit(buf, "%s\n", "primary connector+");
else
- return sprintf(buf, "%s\n", "primary connector");
+ return sysfs_emit(buf, "%s\n", "primary connector");
case SECONDARY_CONNECTOR:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
- return sprintf(buf, "%s\n", "secondary connector+");
+ return sysfs_emit(buf, "%s\n", "secondary connector+");
else
- return sprintf(buf, "%s\n", "secondary connector");
+ return sysfs_emit(buf, "%s\n", "secondary connector");
default:
- return sprintf(buf, "%s\n", "no");
+ return sysfs_emit(buf, "%s\n", "no");
}
}
@@ -138,7 +138,7 @@ static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
+ return sysfs_emit(buf, "%i\n", card->options.sniffer ? 1 : 0);
}
static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
@@ -200,7 +200,7 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
EBCASC(tmp_hsuid, 8);
- return sprintf(buf, "%s\n", tmp_hsuid);
+ return sysfs_emit(buf, "%s\n", tmp_hsuid);
}
static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
@@ -252,8 +252,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
goto out;
}
- snprintf(card->options.hsuid, sizeof(card->options.hsuid),
- "%-8s", tmp);
+ scnprintf(card->options.hsuid, sizeof(card->options.hsuid),
+ "%-8s", tmp);
ASCEBC(card->options.hsuid, 8);
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
@@ -285,7 +285,7 @@ static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", card->ipato.enabled ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", card->ipato.enabled ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
@@ -330,7 +330,7 @@ static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", card->ipato.invert4 ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", card->ipato.invert4 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
@@ -367,35 +367,21 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
- int str_len = 0;
+ char addr_str[INET6_ADDRSTRLEN];
+ int offset = 0;
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
- char addr_str[40];
- int entry_len;
-
if (ipatoe->proto != proto)
continue;
- entry_len = qeth_l3_ipaddr_to_string(proto, ipatoe->addr,
- addr_str);
- if (entry_len < 0)
- continue;
-
- /* Append /%mask to the entry: */
- entry_len += 1 + ((proto == QETH_PROT_IPV4) ? 2 : 3);
- /* Enough room to format %entry\n into null terminated page? */
- if (entry_len + 1 > PAGE_SIZE - str_len - 1)
- break;
-
- entry_len = scnprintf(buf, PAGE_SIZE - str_len,
- "%s/%i\n", addr_str, ipatoe->mask_bits);
- str_len += entry_len;
- buf += entry_len;
+ qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
+ offset += sysfs_emit_at(buf, offset, "%s/%i\n",
+ addr_str, ipatoe->mask_bits);
}
mutex_unlock(&card->ip_lock);
- return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
+ return offset ? offset : sysfs_emit(buf, "\n");
}
static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
@@ -413,7 +399,7 @@ static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
int rc;
/* Expected input pattern: %addr/%mask */
- sep = strnchr(buf, 40, '/');
+ sep = strnchr(buf, INET6_ADDRSTRLEN, '/');
if (!sep)
return -EINVAL;
@@ -501,7 +487,7 @@ static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", card->ipato.invert6 ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", card->ipato.invert6 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
@@ -586,35 +572,22 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
enum qeth_ip_types type)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ char addr_str[INET6_ADDRSTRLEN];
struct qeth_ipaddr *ipaddr;
- int str_len = 0;
+ int offset = 0;
int i;
mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
- char addr_str[40];
- int entry_len;
-
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
- entry_len = qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u,
- addr_str);
- if (entry_len < 0)
- continue;
-
- /* Enough room to format %addr\n into null terminated page? */
- if (entry_len + 1 > PAGE_SIZE - str_len - 1)
- break;
-
- entry_len = scnprintf(buf, PAGE_SIZE - str_len, "%s\n",
- addr_str);
- str_len += entry_len;
- buf += entry_len;
+ qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u, addr_str);
+ offset += sysfs_emit_at(buf, offset, "%s\n", addr_str);
}
mutex_unlock(&card->ip_lock);
- return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
+ return offset ? offset : sysfs_emit(buf, "\n");
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
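For the list-style attributes, the manual PAGE_SIZE bookkeeping is replaced by sysfs_emit_at(buf, at, fmt, ...), which appends at offset at, clamps the write to the sysfs page, and returns the number of bytes added. A minimal sketch of the accumulation pattern (items_show and the loop bound are hypothetical):

        static ssize_t items_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
        {
                int offset = 0;
                int i;

                for (i = 0; i < 3; i++)
                        /* each call appends at 'offset', bounded by PAGE_SIZE */
                        offset += sysfs_emit_at(buf, offset, "item%d\n", i);

                return offset;
        }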
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0454d94e8cf0..c76f82fb8b63 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -36,6 +36,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <trace/events/iscsi.h>
+#include <trace/events/sock.h>
#include "iscsi_tcp.h"
@@ -170,6 +171,8 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_conn *conn;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (!conn) {
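This and the following hunks hook up the new common sk_data_ready tracepoint: each subsystem that installs its own ->sk_data_ready callback now fires trace_sk_data_ready(sk) on entry, so socket data readiness can be traced uniformly across protocols. The wiring, as a minimal sketch (my_data_ready is hypothetical):

        #include <trace/events/sock.h>

        static void my_data_ready(struct sock *sk)
        {
                /* common tracepoint first, then protocol-specific work */
                trace_sk_data_ready(sk);

                read_lock_bh(&sk->sk_callback_lock);
                /* ... handle the newly available data ... */
                read_unlock_bh(&sk->sk_callback_lock);
        }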
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 25ba20e42825..389a35308be3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/sched/clock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 57052726299d..820bdd9f8e46 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <net/sock.h>
#include <linux/workqueue.h>
+#include <trace/events/sock.h>
#include <linux/soc/qcom/qmi.h>
static struct socket *qmi_sock_create(struct qmi_handle *qmi,
@@ -569,6 +570,8 @@ static void qmi_data_ready(struct sock *sk)
{
struct qmi_handle *qmi = sk->sk_user_data;
+ trace_sk_data_ready(sk);
+
/*
* This will be NULL if we receive data while being in
* qmi_handle_release()
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index ff49c8f3fe24..24040c118e49 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
+#include <trace/events/sock.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -384,6 +385,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
struct iscsit_conn *conn = sk->sk_user_data;
bool rc;
+ trace_sk_data_ready(sk);
pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
write_lock_bh(&sk->sk_callback_lock);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a2b374372363..1f3b89c885cc 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -51,8 +51,7 @@ struct vhost_vsock {
struct hlist_node hash;
struct vhost_work send_pkt_work;
- spinlock_t send_pkt_list_lock;
- struct list_head send_pkt_list; /* host->guest pending packets */
+ struct sk_buff_head send_pkt_queue; /* host->guest pending packets */
atomic_t queued_replies;
@@ -108,40 +107,31 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
vhost_disable_notify(&vsock->dev, vq);
do {
- struct virtio_vsock_pkt *pkt;
+ struct virtio_vsock_hdr *hdr;
+ size_t iov_len, payload_len;
struct iov_iter iov_iter;
+ u32 flags_to_restore = 0;
+ struct sk_buff *skb;
unsigned out, in;
size_t nbytes;
- size_t iov_len, payload_len;
int head;
- u32 flags_to_restore = 0;
- spin_lock_bh(&vsock->send_pkt_list_lock);
- if (list_empty(&vsock->send_pkt_list)) {
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
+
+ if (!skb) {
vhost_enable_notify(&vsock->dev, vq);
break;
}
- pkt = list_first_entry(&vsock->send_pkt_list,
- struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
&out, &in, NULL, NULL);
if (head < 0) {
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
break;
}
if (head == vq->num) {
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
/* We cannot finish yet if more buffers snuck in while
* re-enabling notify.
*/
@@ -153,26 +143,27 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
}
if (out) {
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
vq_err(vq, "Expected 0 output buffers, got %u\n", out);
break;
}
iov_len = iov_length(&vq->iov[out], in);
- if (iov_len < sizeof(pkt->hdr)) {
- virtio_transport_free_pkt(pkt);
+ if (iov_len < sizeof(*hdr)) {
+ kfree_skb(skb);
vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
break;
}
iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
- payload_len = pkt->len - pkt->off;
+ payload_len = skb->len;
+ hdr = virtio_vsock_hdr(skb);
/* If the packet is greater than the space available in the
* buffer, we split it using multiple buffers.
*/
- if (payload_len > iov_len - sizeof(pkt->hdr)) {
- payload_len = iov_len - sizeof(pkt->hdr);
+ if (payload_len > iov_len - sizeof(*hdr)) {
+ payload_len = iov_len - sizeof(*hdr);
/* As we are copying pieces of large packet's buffer to
* small rx buffers, headers of packets in rx queue are
@@ -185,31 +176,30 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* bits set. After initialized header will be copied to
* rx buffer, these required bits will be restored.
*/
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
}
}
}
/* Set the correct length in the header */
- pkt->hdr.len = cpu_to_le32(payload_len);
+ hdr->len = cpu_to_le32(payload_len);
- nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
- if (nbytes != sizeof(pkt->hdr)) {
- virtio_transport_free_pkt(pkt);
+ nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
+ if (nbytes != sizeof(*hdr)) {
+ kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt hdr\n");
break;
}
- nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
- &iov_iter);
+ nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
if (nbytes != payload_len) {
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt buf\n");
break;
}
@@ -217,31 +207,28 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
/* Deliver to monitoring devices all packets that we
* will transmit.
*/
- virtio_transport_deliver_tap_pkt(pkt);
+ virtio_transport_deliver_tap_pkt(skb);
- vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
+ vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
added = true;
- pkt->off += payload_len;
+ skb_pull(skb, payload_len);
total_len += payload_len;
/* If we didn't send all the payload we can requeue the packet
* to send it with the next available buffer.
*/
- if (pkt->off < pkt->len) {
- pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
+ if (skb->len > 0) {
+ hdr->flags |= cpu_to_le32(flags_to_restore);
- /* We are queueing the same virtio_vsock_pkt to handle
+ /* We are queueing the same skb to handle
* the remaining bytes, and we want to deliver it
* to monitoring devices in the next iteration.
*/
- pkt->tap_delivered = false;
-
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtio_vsock_skb_clear_tap_delivered(skb);
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
} else {
- if (pkt->reply) {
+ if (virtio_vsock_skb_reply(skb)) {
int val;
val = atomic_dec_return(&vsock->queued_replies);
@@ -253,7 +240,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
restart_tx = true;
}
- virtio_transport_free_pkt(pkt);
+ consume_skb(skb);
}
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
@@ -278,28 +265,26 @@ static void vhost_transport_send_pkt_work(struct vhost_work *work)
}
static int
-vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+vhost_transport_send_pkt(struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vhost_vsock *vsock;
- int len = pkt->len;
+ int len = skb->len;
rcu_read_lock();
/* Find the vhost_vsock according to guest context id */
- vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
if (!vsock) {
rcu_read_unlock();
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
return -ENODEV;
}
- if (pkt->reply)
+ if (virtio_vsock_skb_reply(skb))
atomic_inc(&vsock->queued_replies);
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add_tail(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
+ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
rcu_read_unlock();
@@ -310,10 +295,8 @@ static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
struct vhost_vsock *vsock;
- struct virtio_vsock_pkt *pkt, *n;
int cnt = 0;
int ret = -ENODEV;
- LIST_HEAD(freeme);
rcu_read_lock();
@@ -322,20 +305,7 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
if (!vsock)
goto out;
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
- if (pkt->vsk != vsk)
- continue;
- list_move(&pkt->list, &freeme);
- }
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
- list_for_each_entry_safe(pkt, n, &freeme, list) {
- if (pkt->reply)
- cnt++;
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
+ cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
if (cnt) {
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
@@ -352,12 +322,14 @@ out:
return ret;
}
-static struct virtio_vsock_pkt *
-vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+static struct sk_buff *
+vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
unsigned int out, unsigned int in)
{
- struct virtio_vsock_pkt *pkt;
+ struct virtio_vsock_hdr *hdr;
struct iov_iter iov_iter;
+ struct sk_buff *skb;
+ size_t payload_len;
size_t nbytes;
size_t len;
@@ -366,50 +338,48 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
+ len = iov_length(vq->iov, out);
+
+ /* len contains both payload and hdr */
+ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
+ if (!skb)
return NULL;
- len = iov_length(vq->iov, out);
iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
- nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
- if (nbytes != sizeof(pkt->hdr)) {
+ hdr = virtio_vsock_hdr(skb);
+ nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
+ if (nbytes != sizeof(*hdr)) {
vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
- sizeof(pkt->hdr), nbytes);
- kfree(pkt);
+ sizeof(*hdr), nbytes);
+ kfree_skb(skb);
return NULL;
}
- pkt->len = le32_to_cpu(pkt->hdr.len);
+ payload_len = le32_to_cpu(hdr->len);
/* No payload */
- if (!pkt->len)
- return pkt;
+ if (!payload_len)
+ return skb;
- /* The pkt is too big */
- if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
- kfree(pkt);
+ /* The pkt is too big or the length in the header is invalid */
+ if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
+ payload_len + sizeof(*hdr) > len) {
+ kfree_skb(skb);
return NULL;
}
- pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
- if (!pkt->buf) {
- kfree(pkt);
- return NULL;
- }
+ virtio_vsock_skb_rx_put(skb);
- pkt->buf_len = pkt->len;
-
- nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
- if (nbytes != pkt->len) {
- vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
- pkt->len, nbytes);
- virtio_transport_free_pkt(pkt);
+ nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
+ if (nbytes != payload_len) {
+ vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
+ payload_len, nbytes);
+ kfree_skb(skb);
return NULL;
}
- return pkt;
+ return skb;
}
/* Is there space left for replies to rx packets? */
@@ -496,9 +466,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
poll.work);
struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
dev);
- struct virtio_vsock_pkt *pkt;
int head, pkts = 0, total_len = 0;
unsigned int out, in;
+ struct sk_buff *skb;
bool added = false;
mutex_lock(&vq->mutex);
@@ -511,6 +481,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
vhost_disable_notify(&vsock->dev, vq);
do {
+ struct virtio_vsock_hdr *hdr;
+
if (!vhost_vsock_more_replies(vsock)) {
/* Stop tx until the device processes already
* pending replies. Leave tx virtqueue
@@ -532,24 +504,26 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
break;
}
- pkt = vhost_vsock_alloc_pkt(vq, out, in);
- if (!pkt) {
+ skb = vhost_vsock_alloc_skb(vq, out, in);
+ if (!skb) {
vq_err(vq, "Faulted on pkt\n");
continue;
}
- total_len += sizeof(pkt->hdr) + pkt->len;
+ total_len += sizeof(*hdr) + skb->len;
/* Deliver to monitoring devices all received packets */
- virtio_transport_deliver_tap_pkt(pkt);
+ virtio_transport_deliver_tap_pkt(skb);
+
+ hdr = virtio_vsock_hdr(skb);
/* Only accept correctly addressed packets */
- if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
- le64_to_cpu(pkt->hdr.dst_cid) ==
+ if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
+ le64_to_cpu(hdr->dst_cid) ==
vhost_transport_get_local_cid())
- virtio_transport_recv_pkt(&vhost_transport, pkt);
+ virtio_transport_recv_pkt(&vhost_transport, skb);
else
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
vhost_add_used(vq, head, 0);
added = true;
@@ -693,8 +667,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
VHOST_VSOCK_WEIGHT, true, NULL);
file->private_data = vsock;
- spin_lock_init(&vsock->send_pkt_list_lock);
- INIT_LIST_HEAD(&vsock->send_pkt_list);
+ skb_queue_head_init(&vsock->send_pkt_queue);
vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
return 0;
@@ -760,16 +733,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
vhost_vsock_flush(vsock);
vhost_dev_stop(&vsock->dev);
- spin_lock_bh(&vsock->send_pkt_list_lock);
- while (!list_empty(&vsock->send_pkt_list)) {
- struct virtio_vsock_pkt *pkt;
-
- pkt = list_first_entry(&vsock->send_pkt_list,
- struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
vhost_dev_cleanup(&vsock->dev);
kfree(vsock->dev.vqs);
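The vhost-vsock conversion replaces the driver-private packet list and its spinlock with an sk_buff_head, whose built-in lock serializes producers and consumers; the virtio_vsock_skb_*() helpers used above wrap this queue plus the vsock header stashed in front of the skb payload. The underlying sk_buff_head pattern, as a minimal sketch (skb stands in for a packet allocated elsewhere):

        struct sk_buff_head q;
        struct sk_buff *skb;

        skb_queue_head_init(&q);    /* initializes the list and its spinlock */
        skb_queue_tail(&q, skb);    /* producer: q.lock is taken internally */
        skb = skb_dequeue(&q);      /* consumer: NULL when the queue is empty */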
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 7a464a541d91..dcb00938de61 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -14,6 +14,7 @@
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>
+#include <trace/events/sock.h>
#include <xen/events.h>
#include <xen/grant_table.h>
@@ -301,6 +302,8 @@ static void pvcalls_sk_data_ready(struct sock *sock)
struct sock_mapping *map = sock->sk_user_data;
struct pvcalls_ioworker *iow;
+ trace_sk_data_ready(sock);
+
if (map == NULL)
return;
@@ -589,6 +592,8 @@ static void pvcalls_pass_sk_data_ready(struct sock *sock)
unsigned long flags;
int notify;
+ trace_sk_data_ready(sock);
+
if (mappass == NULL)
return;
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 61cd6c2628fa..a9b14f81d655 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -54,6 +54,7 @@
#include <net/ipv6.h>
#include <trace/events/dlm.h>
+#include <trace/events/sock.h>
#include "dlm_internal.h"
#include "lowcomms.h"
@@ -502,6 +503,8 @@ static void lowcomms_data_ready(struct sock *sk)
{
struct connection *con = sock2con(sk);
+ trace_sk_data_ready(sk);
+
set_bit(CF_RECV_INTR, &con->flags);
lowcomms_queue_rwork(con);
}
@@ -533,6 +536,8 @@ static void lowcomms_state_change(struct sock *sk)
static void lowcomms_listen_data_ready(struct sock *sk)
{
+ trace_sk_data_ready(sk);
+
queue_work(io_workqueue, &listen_con.rwork);
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index a07b24d170f2..aecbd712a00c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -46,6 +46,7 @@
#include <linux/net.h>
#include <linux/export.h>
#include <net/tcp.h>
+#include <trace/events/sock.h>
#include <linux/uaccess.h>
@@ -585,6 +586,8 @@ static void o2net_data_ready(struct sock *sk)
void (*ready)(struct sock *sk);
struct o2net_sock_container *sc;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
sc = sk->sk_user_data;
if (sc) {
@@ -1931,6 +1934,8 @@ static void o2net_listen_data_ready(struct sock *sk)
{
void (*ready)(struct sock *sk);
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
ready = sk->sk_user_data;
if (ready == NULL) { /* check for teardown race */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 5e6a876e17ba..4b12dad5a8a4 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -950,6 +950,12 @@ static inline bool acpi_driver_match_device(struct device *dev,
return false;
}
+static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 funcs)
+{
+ return false;
+}
+
static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
const guid_t *guid,
u64 rev, u64 func,
@@ -958,6 +964,15 @@ static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
return NULL;
}
+static inline union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle,
+ const guid_t *guid,
+ u64 rev, u64 func,
+ union acpi_object *argv4,
+ acpi_object_type type)
+{
+ return NULL;
+}
+
static inline int acpi_device_uevent_modalias(struct device *dev,
struct kobj_uevent_env *env)
{
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index d91af50ac58d..c15221dcb75e 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -1,21 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright (c) 2013-2022, Intel Corporation. */
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
@@ -29,8 +21,8 @@
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
@@ -122,9 +114,13 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ /* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
@@ -162,19 +158,6 @@ enum virtchnl_ops {
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum virtchnl_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-
/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
@@ -235,7 +218,9 @@ enum virtchnl_vsi_type {
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
- enum virtchnl_vsi_type vsi_type;
+
+ /* see enum virtchnl_vsi_type */
+ s32 vsi_type;
u16 qset_handle;
u8 default_mac_addr[ETH_ALEN];
};
@@ -247,7 +232,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
-#define VIRTCHNL_VF_OFFLOAD_IWARP BIT(1)
+#define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
+#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
@@ -324,7 +310,9 @@ struct virtchnl_rxq_info {
u8 rxdid;
u8 pad1[2];
u64 dma_ring_addr;
- enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+ /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+ s32 rx_split_pos;
u32 pad2;
};
@@ -336,6 +324,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
@@ -373,8 +364,13 @@ struct virtchnl_vf_res_request {
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
@@ -401,6 +397,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
@@ -965,8 +964,12 @@ enum virtchnl_flow_type {
struct virtchnl_filter {
union virtchnl_flow_spec data;
union virtchnl_flow_spec mask;
- enum virtchnl_flow_type flow_type;
- enum virtchnl_action action;
+
+ /* see enum virtchnl_flow_type */
+ s32 flow_type;
+
+ /* see enum virtchnl_action */
+ s32 action;
u32 action_meta;
u8 field_flags;
u8 pad[3];
@@ -994,7 +997,8 @@ enum virtchnl_event_codes {
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
- enum virtchnl_event_codes event;
+ /* see enum virtchnl_event_codes */
+ s32 event;
union {
/* If the PF driver does not support the new speed reporting
* capabilities then use link_event else use link_event_adv to
@@ -1007,6 +1011,7 @@ struct virtchnl_pf_event {
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
+ u8 pad[3];
} link_event;
struct {
/* link_speed provided in Mbps */
@@ -1016,39 +1021,41 @@ struct virtchnl_pf_event {
} link_event_adv;
} event_data;
- int severity;
+ s32 severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
-/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
+/* used to specify if a ceq_idx or aeq_idx is invalid */
+#define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
+/* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
+ * VF uses this message to request PF to map RDMA vectors to RDMA queues.
+ * The request for this originates from the VF RDMA driver through
+ * a client interface between VF LAN and VF RDMA driver.
* A vector could have an AEQ and CEQ attached to it although
- * there is a single AEQ per VF IWARP instance in which case
- * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
+ * there is a single AEQ per VF RDMA instance in which case
+ * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and a valid
+ * idx for ceqs. There will never be a case where there will be multiple CEQs
+ * attached to a single vector.
* PF configures interrupt mapping and returns status.
*/
-struct virtchnl_iwarp_qv_info {
+struct virtchnl_rdma_qv_info {
u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
+ u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
+ u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
u8 itr_idx;
u8 pad[3];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
-struct virtchnl_iwarp_qvlist_info {
+struct virtchnl_rdma_qvlist_info {
u32 num_vectors;
- struct virtchnl_iwarp_qv_info qv_info[1];
+ struct virtchnl_rdma_qv_info qv_info[1];
};
-VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rdma_qvlist_info);
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
@@ -1107,7 +1114,7 @@ enum virtchnl_rss_algorithm {
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
- ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
+ ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
(VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
@@ -1203,7 +1210,8 @@ enum virtchnl_proto_hdr_field {
};
struct virtchnl_proto_hdr {
- enum virtchnl_proto_hdr_type type;
+ /* see enum virtchnl_proto_hdr_type */
+ s32 type;
u32 field_selector; /* a bit mask to select field for header type */
u8 buffer[64];
/**
@@ -1233,15 +1241,18 @@ VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
struct virtchnl_rss_cfg {
struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
- enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */
- u8 reserved[128]; /* reserve for future */
+
+ /* see enum virtchnl_rss_algorithm; rss algorithm type */
+ s32 rss_algorithm;
+ u8 reserved[128]; /* reserve for future */
};
VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
/* action configuration for FDIR */
struct virtchnl_filter_action {
- enum virtchnl_action type;
+ /* see enum virtchnl_action type */
+ s32 type;
union {
/* used for queue and qgroup action */
struct {
@@ -1283,7 +1294,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
/* Status returned to VF after VF requests FDIR commands
* VIRTCHNL_FDIR_SUCCESS
* VF FDIR related request is successfully done by PF
- * The request can be OP_ADD/DEL.
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
*
* VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
* OP_ADD_FDIR_FILTER request is failed due to no Hardware resource.
@@ -1304,6 +1315,10 @@ VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
* VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
* OP_ADD/DEL_FDIR_FILTER request is failed due to timing out
* for programming.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request fails due to parameter validation,
+ * for example, the VF queries the counter of a rule that has no counter action.
*/
enum virtchnl_fdir_prgm_status {
VIRTCHNL_FDIR_SUCCESS = 0,
@@ -1313,6 +1328,7 @@ enum virtchnl_fdir_prgm_status {
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
};
/* VIRTCHNL_OP_ADD_FDIR_FILTER
@@ -1329,7 +1345,9 @@ struct virtchnl_fdir_add {
u16 validate_only; /* INPUT */
u32 flow_id; /* OUTPUT */
struct virtchnl_fdir_rule rule_cfg; /* INPUT */
- enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
};
VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
@@ -1342,7 +1360,9 @@ struct virtchnl_fdir_del {
u16 vsi_id; /* INPUT */
u16 pad;
u32 flow_id; /* INPUT */
- enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
@@ -1361,7 +1381,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len = 0;
+ u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
@@ -1436,7 +1456,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
@@ -1446,19 +1466,16 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
else
err_msg_format = true;
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_rdma_qvlist_info);
if (msglen >= valid_len) {
- struct virtchnl_iwarp_qvlist_info *qv =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
- err_msg_format = true;
- break;
- }
+ struct virtchnl_rdma_qvlist_info *qv =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+
valid_len += ((qv->num_vectors - 1) *
- sizeof(struct virtchnl_iwarp_qv_info));
+ sizeof(struct virtchnl_rdma_qv_info));
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
@@ -1502,8 +1519,6 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DISABLE_CHANNELS:
break;
case VIRTCHNL_OP_ADD_CLOUD_FILTER:
- valid_len = sizeof(struct virtchnl_filter);
- break;
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
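A recurring change in this header is replacing enum-typed structure members with s32 plus a "see enum ..." comment. These structures form the VF-PF wire format, and the C standard lets each compiler pick an enum's underlying integer type, so a fixed-width field keeps the layout identical on both ends while the named enum survives for in-driver use. A minimal sketch of the idiom (example_msg is hypothetical):

        struct example_msg {
                /* see enum virtchnl_vsi_type */
                s32 vsi_type;   /* always 4 bytes, whatever the compiler */
        };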
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index c9be1657f03d..ebfa12f69501 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -115,6 +115,32 @@
((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
})
+#define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
+
+/**
+ * FIELD_PREP_CONST() - prepare a constant bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_CONST() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ *
+ * Unlike FIELD_PREP() this is a constant expression and can therefore
+ * be used in initializers. Error checking is less convenient in this
+ * version, and non-constant masks cannot be used.
+ */
+#define FIELD_PREP_CONST(_mask, _val) \
+ ( \
+ /* mask must be non-zero */ \
+ BUILD_BUG_ON_ZERO((_mask) == 0) + \
+ /* check if value fits */ \
+ BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
+ /* check if mask is contiguous */ \
+ __BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) + \
+ /* and create the value */ \
+ (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)) \
+ )
+
/**
* FIELD_GET() - extract a bitfield element
* @_mask: shifted mask defining the field's length and position
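FIELD_PREP_CONST() mirrors FIELD_PREP() but expands to a constant expression, so it is legal in static initializers where a statement expression is not; the BUILD_BUG_ON_ZERO() terms fail the build for a zero mask, a non-contiguous mask, or a value that does not fit the field. Usage, as a minimal sketch (the register layout is hypothetical):

        #include <linux/bitfield.h>

        #define MY_MODE_MASK    GENMASK(5, 4)   /* hypothetical 2-bit field */

        /* valid where FIELD_PREP() is not: a constant initializer */
        static const u32 my_reg_default = FIELD_PREP_CONST(MY_MODE_MASK, 0x2);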
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 634d37a599fa..520b238abd5a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -28,6 +28,7 @@
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
+#include <linux/memcontrol.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -180,6 +181,10 @@ enum btf_field_type {
BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
BPF_LIST_HEAD = (1 << 4),
BPF_LIST_NODE = (1 << 5),
+ BPF_RB_ROOT = (1 << 6),
+ BPF_RB_NODE = (1 << 7),
+ BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
+ BPF_RB_NODE | BPF_RB_ROOT,
};
struct btf_field_kptr {
@@ -189,7 +194,7 @@ struct btf_field_kptr {
u32 btf_id;
};
-struct btf_field_list_head {
+struct btf_field_graph_root {
struct btf *btf;
u32 value_btf_id;
u32 node_offset;
@@ -201,7 +206,7 @@ struct btf_field {
enum btf_field_type type;
union {
struct btf_field_kptr kptr;
- struct btf_field_list_head list_head;
+ struct btf_field_graph_root graph_root;
};
};
@@ -283,6 +288,10 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "bpf_list_head";
case BPF_LIST_NODE:
return "bpf_list_node";
+ case BPF_RB_ROOT:
+ return "bpf_rb_root";
+ case BPF_RB_NODE:
+ return "bpf_rb_node";
default:
WARN_ON_ONCE(1);
return "unknown";
@@ -303,6 +312,10 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_list_head);
case BPF_LIST_NODE:
return sizeof(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return sizeof(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return sizeof(struct bpf_rb_node);
default:
WARN_ON_ONCE(1);
return 0;
@@ -323,6 +336,10 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_list_head);
case BPF_LIST_NODE:
return __alignof__(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return __alignof__(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return __alignof__(struct bpf_rb_node);
default:
WARN_ON_ONCE(1);
return 0;
@@ -346,6 +363,13 @@ static inline void bpf_obj_init(const struct btf_field_offs *foffs, void *obj)
memset(obj + foffs->field_off[i], 0, foffs->field_sz[i]);
}
+/* 'dst' must be a temporary buffer and should not point to memory that is being
+ * used in parallel by a bpf program or bpf syscall, otherwise the access from
+ * the bpf program or bpf syscall may be corrupted by the reinitialization,
+ * leading to hard-to-debug problems. Even if 'dst' is newly allocated from
+ * the bpf memory allocator, it is still possible for 'dst' to be used in
+ * parallel by a bpf program or bpf syscall.
+ */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
bpf_obj_init(map->field_offs, dst);
@@ -433,6 +457,9 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
void bpf_timer_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock);
+void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
+ struct bpf_spin_lock *spin_lock);
+
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
@@ -575,6 +602,11 @@ enum bpf_type_flag {
/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
+ /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
+ * Currently only valid for linked-list and rbtree nodes.
+ */
+ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
+
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
@@ -899,8 +931,12 @@ enum bpf_cgroup_storage_type {
/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG BIT(0)
+/* The argument is signed. */
+#define BTF_FMODEL_SIGNED_ARG BIT(1)
+
struct btf_func_model {
u8 ret_size;
+ u8 ret_flags;
u8 nr_args;
u8 arg_size[MAX_BPF_FUNC_ARGS];
u8 arg_flags[MAX_BPF_FUNC_ARGS];
@@ -939,7 +975,13 @@ struct btf_func_model {
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86.
*/
-#define BPF_MAX_TRAMP_LINKS 38
+enum {
+#if defined(__s390x__)
+ BPF_MAX_TRAMP_LINKS = 27,
+#else
+ BPF_MAX_TRAMP_LINKS = 38,
+#endif
+};
struct bpf_tramp_links {
struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
@@ -1261,7 +1303,8 @@ struct bpf_prog_aux {
enum bpf_prog_type saved_dst_prog_type;
enum bpf_attach_type saved_dst_attach_type;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
- bool offload_requested;
+ bool dev_bound; /* Program is bound to the netdev. */
+ bool offload_requested; /* Program is bound and offloaded to the netdev. */
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
bool func_proto_unreliable;
bool sleepable;
@@ -1421,7 +1464,8 @@ struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
int (*init)(struct btf *btf);
int (*check_member)(const struct btf_type *t,
- const struct btf_member *member);
+ const struct btf_member *member,
+ const struct bpf_prog *prog);
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
@@ -1472,6 +1516,7 @@ struct bpf_dummy_ops {
int (*test_1)(struct bpf_dummy_ops_state *cb);
int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
char a3, unsigned long a4);
+ int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
};
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
@@ -1523,9 +1568,9 @@ struct bpf_array {
u32 index_mask;
struct bpf_array_aux *aux;
union {
- char value[0] __aligned(8);
- void *ptrs[0] __aligned(8);
- void __percpu *pptrs[0] __aligned(8);
+ DECLARE_FLEX_ARRAY(char, value) __aligned(8);
+ DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
+ DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
};
};
@@ -1833,7 +1878,7 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_free_id(struct bpf_prog *prog);
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map);
struct btf_field *btf_record_find(const struct btf_record *rec,
u32 offset, enum btf_field_type type);
@@ -1873,6 +1918,8 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+ gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
size_t align, gfp_t flags);
#else
@@ -1889,6 +1936,12 @@ bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
return kzalloc(size, flags);
}
+static inline void *
+bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
+{
+ return kvcalloc(n, size, flags);
+}
+
static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
gfp_t flags)
@@ -2186,6 +2239,14 @@ struct bpf_core_ctx {
const struct btf *btf;
};
+bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off);
+
+bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
+ const struct btf *reg_btf, u32 reg_id,
+ const struct btf *arg_btf, u32 arg_id);
+
int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
int relo_idx, void *insn);
@@ -2451,7 +2512,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
-void bpf_prog_offload_destroy(struct bpf_prog *prog);
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
struct bpf_prog *prog);
@@ -2479,14 +2540,26 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
void unpriv_ebpf_notify(int new_state);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
-int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux);
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
+void bpf_dev_bound_netdev_unregister(struct net_device *dev);
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
+ return aux->dev_bound;
+}
+
+static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
+{
return aux->offload_requested;
}
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
return unlikely(map->ops == &bpf_map_offload_ops);
}
@@ -2507,18 +2580,50 @@ void sock_map_unhash(struct sock *sk);
void sock_map_destroy(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
-static inline int bpf_prog_offload_init(struct bpf_prog *prog,
- union bpf_attr *attr)
+static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux)
{
return -EOPNOTSUPP;
}
-static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
+static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
+ u32 func_id)
+{
+ return NULL;
+}
+
+static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
+ union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+}
+
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
return false;
}
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
return false;
}
@@ -2795,10 +2900,18 @@ struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
#define MAX_BPRINTF_VARARGS 12
+#define MAX_BPRINTF_BUF 1024
+
+struct bpf_bprintf_data {
+ u32 *bin_args;
+ char *buf;
+ bool get_bin_args;
+ bool get_buf;
+};
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
- u32 **bin_buf, u32 num_args);
-void bpf_bprintf_cleanup(void);
+ u32 num_args, struct bpf_bprintf_data *data);
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
/* the implementation of the opaque uapi struct bpf_dynptr */
struct bpf_dynptr_kern {
@@ -2852,4 +2965,11 @@ static inline bool type_is_alloc(u32 type)
return type & MEM_ALLOC;
}
+static inline gfp_t bpf_memcg_flags(gfp_t flags)
+{
+ if (memcg_bpf_enabled())
+ return flags | __GFP_ACCOUNT;
+ return flags;
+}
+
#endif /* _LINUX_BPF_H */
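bpf_memcg_flags() gates memcg accounting for BPF allocations: when BPF memcg accounting is active it ORs in __GFP_ACCOUNT, and with the new cgroup.memory=nobpf parameter memcg_bpf_enabled() is false and the flags pass through untouched. A one-line sketch of a call site:

        /* charged to the memory cgroup only when BPF accounting is enabled */
        ptr = kzalloc(size, bpf_memcg_flags(GFP_KERNEL));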
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 53d175cbaa02..cf1bb1cf4a7b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -43,6 +43,22 @@ enum bpf_reg_liveness {
REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
+/* For every reg representing a map value or allocated object pointer,
+ * we consider the tuple (ptr, id) to be unique in the verifier
+ * context and consider them not to alias each other for the purposes of
+ * tracking lock state.
+ */
+struct bpf_active_lock {
+ /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
+ * there's no active lock held, and other fields have no
+ * meaning. If non-NULL, it indicates that a lock is held and
+ * id member has the reg->id of the register which can be >= 0.
+ */
+ void *ptr;
+ /* This will be reg->id */
+ u32 id;
+};
+
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
@@ -70,7 +86,10 @@ struct bpf_reg_state {
u32 btf_id;
};
- u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ u32 mem_size;
+ u32 dynptr_id; /* for dynptr slices */
+ };
/* For dynptr stack slots */
struct {
@@ -92,6 +111,26 @@ struct bpf_reg_state {
u32 subprogno; /* for PTR_TO_FUNC */
};
+ /* For scalar types (SCALAR_VALUE), this represents our knowledge of
+ * the actual value.
+ * For pointer types, this represents the variable part of the offset
+ * from the pointed-to object, and is shared with all bpf_reg_states
+ * with the same id as us.
+ */
+ struct tnum var_off;
+ /* Used to determine if any memory access using this register will
+ * result in a bad access.
+ * These refer to the same value as var_off, not necessarily the actual
+ * contents of the register.
+ */
+ s64 smin_value; /* minimum possible (s64)value */
+ s64 smax_value; /* maximum possible (s64)value */
+ u64 umin_value; /* minimum possible (u64)value */
+ u64 umax_value; /* maximum possible (u64)value */
+ s32 s32_min_value; /* minimum possible (s32)value */
+ s32 s32_max_value; /* maximum possible (s32)value */
+ u32 u32_min_value; /* minimum possible (u32)value */
+ u32 u32_max_value; /* maximum possible (u32)value */
/* For PTR_TO_PACKET, used to find other pointers with the same variable
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
@@ -144,26 +183,6 @@ struct bpf_reg_state {
* allowed and has the same effect as bpf_sk_release(sk).
*/
u32 ref_obj_id;
- /* For scalar types (SCALAR_VALUE), this represents our knowledge of
- * the actual value.
- * For pointer types, this represents the variable part of the offset
- * from the pointed-to object, and is shared with all bpf_reg_states
- * with the same id as us.
- */
- struct tnum var_off;
- /* Used to determine if any memory access using this register will
- * result in a bad access.
- * These refer to the same value as var_off, not necessarily the actual
- * contents of the register.
- */
- s64 smin_value; /* minimum possible (s64)value */
- s64 smax_value; /* maximum possible (s64)value */
- u64 umin_value; /* minimum possible (u64)value */
- u64 umax_value; /* maximum possible (u64)value */
- s32 s32_min_value; /* minimum possible (s32)value */
- s32 s32_max_value; /* maximum possible (s32)value */
- u32 u32_min_value; /* minimum possible (u32)value */
- u32 u32_max_value; /* maximum possible (u32)value */
/* parentage chain for liveness checking */
struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
@@ -223,11 +242,6 @@ struct bpf_reference_state {
* exiting a callback function.
*/
int callback_ref;
- /* Mark the reference state to release the registers sharing the same id
- * on bpf_spin_unlock (for nodes that we will lose ownership to but are
- * safe to access inside the critical section).
- */
- bool release_on_unlock;
};
/* state of the program:
@@ -328,21 +342,8 @@ struct bpf_verifier_state {
u32 branches;
u32 insn_idx;
u32 curframe;
- /* For every reg representing a map value or allocated object pointer,
- * we consider the tuple of (ptr, id) for them to be unique in verifier
- * context and conside them to not alias each other for the purposes of
- * tracking lock state.
- */
- struct {
- /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
- * there's no active lock held, and other fields have no
- * meaning. If non-NULL, it indicates that a lock is held and
- * id member has the reg->id of the register which can be >= 0.
- */
- void *ptr;
- /* This will be reg->id */
- u32 id;
- } active_lock;
+
+ struct bpf_active_lock active_lock;
bool speculative;
bool active_rcu_lock;
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 5f628f323442..49e0fe6d8274 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -73,6 +73,14 @@
#define KF_RCU (1 << 7) /* kfunc only takes rcu pointer arguments */
/*
+ * Tag marking a kernel function as a kfunc. This is meant to minimize the
+ * amount of copy-paste that kfunc authors have to include for correctness so
+ * as to avoid issues such as the compiler inlining or eliding either a static
+ * kfunc, or a global kfunc in an LTO build.
+ */
+#define __bpf_kfunc __used noinline
+
+/*
* Return the name of the passed struct, if exists, or halt the build if for
* example the structure gets renamed. In this way, developers have to revisit
* the code using that structure name, and update it accordingly.
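With the tag, marking a function as a kfunc is a one-word prefix; registration still goes through a BTF ID set. A hedged sketch with hypothetical names:

	__bpf_kfunc int bpf_example_add(int a, int b)
	{
		return a + b;
	}

	BTF_SET8_START(example_kfunc_ids)
	BTF_ID_FLAGS(func, bpf_example_add)
	BTF_SET8_END(example_kfunc_ids)

The __used noinline pair keeps the definition emitted and out-of-line even when the compiler could otherwise inline or elide it.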
@@ -236,6 +244,16 @@ static inline bool btf_type_is_small_int(const struct btf_type *t)
return btf_type_is_int(t) && t->size <= sizeof(u64);
}
+static inline u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_signed_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && (btf_int_encoding(t) & BTF_INT_SIGNED);
+}
+
static inline bool btf_type_is_enum(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
@@ -306,11 +324,6 @@ static inline u8 btf_int_offset(const struct btf_type *t)
return BTF_INT_OFFSET(*(u32 *)(t + 1));
}
-static inline u8 btf_int_encoding(const struct btf_type *t)
-{
- return BTF_INT_ENCODING(*(u32 *)(t + 1));
-}
-
static inline bool btf_type_is_scalar(const struct btf_type *t)
{
return btf_type_is_int(t) || btf_type_is_enum(t);
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index ef0a77173e3c..9b8a9c39614b 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -116,7 +116,7 @@ struct can_tdc_const {
#ifdef CONFIG_CAN_CALC_BITTIMING
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc);
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
@@ -124,7 +124,7 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
#else /* !CONFIG_CAN_CALC_BITTIMING */
static inline int
can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
netdev_err(dev, "bit-timing calculation not available\n");
return -EINVAL;
@@ -138,10 +138,16 @@ can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
+void can_sjw_set_default(struct can_bittiming *bt);
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt);
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack);
/*
* can_bit_time() - Duration of one bit
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d45e5de13721..10c92bd9b807 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -391,6 +391,26 @@ unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
nr_cpumask_bits, cpumask_check(cpu));
}
+/**
+ * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
+ * @cpu: the N'th cpu to find, starting from 0
+ * @srcp1: the first cpumask pointer
+ * @srcp2: the second cpumask pointer
+ * @srcp3: the third cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static __always_inline
+unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
+{
+ return find_nth_and_andnot_bit(cpumask_bits(srcp1),
+ cpumask_bits(srcp2),
+ cpumask_bits(srcp3),
+ nr_cpumask_bits, cpumask_check(cpu));
+}
+
#define CPU_BITS_NONE \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
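A hedged usage sketch for the new cpumask helper; all mask names besides cpu_online_mask are illustrative:

	static unsigned int pick_unused_cpu(const struct cpumask *requested,
					    const struct cpumask *claimed,
					    unsigned int n)
	{
		/* N'th CPU that is requested and online, excluding CPUs
		 * already claimed by earlier callers.
		 */
		unsigned int cpu = cpumask_nth_and_andnot(n, requested,
							  cpu_online_mask,
							  claimed);

		return cpu; /* >= nr_cpu_ids when fewer than n + 1 remain */
	}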
diff --git a/include/linux/dsa/ksz_common.h b/include/linux/dsa/ksz_common.h
new file mode 100644
index 000000000000..576a99ca698d
--- /dev/null
+++ b/include/linux/dsa/ksz_common.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch tag common header
+ *
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_KSZ_COMMON_H_
+#define _NET_DSA_KSZ_COMMON_H_
+
+#include <net/dsa.h>
+
+/* All time stamps from the KSZ consist of 2 bits for seconds and 30 bits for
+ * nanoseconds. This is NOT the same as 32 bits for nanoseconds.
+ */
+#define KSZ_TSTAMP_SEC_MASK GENMASK(31, 30)
+#define KSZ_TSTAMP_NSEC_MASK GENMASK(29, 0)
+
+static inline ktime_t ksz_decode_tstamp(u32 tstamp)
+{
+ u64 ns = FIELD_GET(KSZ_TSTAMP_SEC_MASK, tstamp) * NSEC_PER_SEC +
+ FIELD_GET(KSZ_TSTAMP_NSEC_MASK, tstamp);
+
+ return ns_to_ktime(ns);
+}
+
+struct ksz_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ksz_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*hwtstamp_set_state)(struct dsa_switch *ds, bool on);
+};
+
+struct ksz_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_type;
+ bool update_correction;
+ u32 tstamp;
+};
+
+#define KSZ_SKB_CB(skb) \
+ ((struct ksz_skb_cb *)((skb)->cb))
+
+static inline struct ksz_tagger_data *
+ksz_tagger_data(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
+#endif /* _NET_DSA_KSZ_COMMON_H_ */
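Note that ksz_decode_tstamp() only recovers 2 bits of seconds, so RX paths are expected to extend the result against the PHC before reporting it. A simplified, hedged sketch (function name illustrative):

	static void example_rx_tstamp(struct sk_buff *skb, u32 tag_tstamp)
	{
		KSZ_SKB_CB(skb)->tstamp = tag_tstamp;

		/* Partial timestamp: 2 bits of seconds + 30 bits of ns.
		 * A real driver folds in the full seconds from the PHC.
		 */
		skb_hwtstamps(skb)->hwtstamp = ksz_decode_tstamp(tag_tstamp);
	}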
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 9e0a76fc7de9..2792185dda22 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -15,6 +15,7 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
+#include <linux/if_ether.h>
#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>
@@ -72,12 +73,14 @@ enum {
* @rx_buf_len: Current length of buffers on the rx ring.
* @tcp_data_split: Scatter packet headers and data to separate buffers
* @tx_push: The flag of tx push mode
+ * @rx_push: The flag of rx push mode
* @cqe_size: Size of TX/RX completion queue event
*/
struct kernel_ethtool_ringparam {
u32 rx_buf_len;
u8 tcp_data_split;
u8 tx_push;
+ u8 rx_push;
u32 cqe_size;
};
@@ -86,11 +89,13 @@ struct kernel_ethtool_ringparam {
* @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
* @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
* @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
+ * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
ETHTOOL_RING_USE_TX_PUSH = BIT(2),
+ ETHTOOL_RING_USE_RX_PUSH = BIT(3),
};
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
@@ -106,11 +111,6 @@ enum ethtool_supported_ring_param {
struct net_device;
struct netlink_ext_ack;
-/* Some generic methods drivers may use in their ethtool_ops */
-u32 ethtool_op_get_link(struct net_device *dev);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
-
-
/* Link extended state and substate. */
struct ethtool_link_ext_state_info {
enum ethtool_link_ext_state link_ext_state;
@@ -217,6 +217,9 @@ __ethtool_get_link_ksettings(struct net_device *dev,
struct kernel_ethtool_coalesce {
u8 use_cqe_mode_tx;
u8 use_cqe_mode_rx;
+ u32 tx_aggr_max_bytes;
+ u32 tx_aggr_max_frames;
+ u32 tx_aggr_time_usecs;
};
/**
@@ -260,7 +263,10 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
-#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25)
+#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(26, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -288,6 +294,10 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
#define ETHTOOL_COALESCE_USE_CQE \
(ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
+#define ETHTOOL_COALESCE_TX_AGGR \
+ (ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES | \
+ ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES | \
+ ETHTOOL_COALESCE_TX_AGGR_TIME_USECS)
#define ETHTOOL_STAT_NOT_SET (~0ULL)
@@ -301,48 +311,59 @@ static inline void ethtool_stats_init(u64 *stats, unsigned int n)
* via a more targeted API.
*/
struct ethtool_eth_mac_stats {
- u64 FramesTransmittedOK;
- u64 SingleCollisionFrames;
- u64 MultipleCollisionFrames;
- u64 FramesReceivedOK;
- u64 FrameCheckSequenceErrors;
- u64 AlignmentErrors;
- u64 OctetsTransmittedOK;
- u64 FramesWithDeferredXmissions;
- u64 LateCollisions;
- u64 FramesAbortedDueToXSColls;
- u64 FramesLostDueToIntMACXmitError;
- u64 CarrierSenseErrors;
- u64 OctetsReceivedOK;
- u64 FramesLostDueToIntMACRcvError;
- u64 MulticastFramesXmittedOK;
- u64 BroadcastFramesXmittedOK;
- u64 FramesWithExcessiveDeferral;
- u64 MulticastFramesReceivedOK;
- u64 BroadcastFramesReceivedOK;
- u64 InRangeLengthErrors;
- u64 OutOfRangeLengthField;
- u64 FrameTooLongErrors;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 FramesTransmittedOK;
+ u64 SingleCollisionFrames;
+ u64 MultipleCollisionFrames;
+ u64 FramesReceivedOK;
+ u64 FrameCheckSequenceErrors;
+ u64 AlignmentErrors;
+ u64 OctetsTransmittedOK;
+ u64 FramesWithDeferredXmissions;
+ u64 LateCollisions;
+ u64 FramesAbortedDueToXSColls;
+ u64 FramesLostDueToIntMACXmitError;
+ u64 CarrierSenseErrors;
+ u64 OctetsReceivedOK;
+ u64 FramesLostDueToIntMACRcvError;
+ u64 MulticastFramesXmittedOK;
+ u64 BroadcastFramesXmittedOK;
+ u64 FramesWithExcessiveDeferral;
+ u64 MulticastFramesReceivedOK;
+ u64 BroadcastFramesReceivedOK;
+ u64 InRangeLengthErrors;
+ u64 OutOfRangeLengthField;
+ u64 FrameTooLongErrors;
+ );
};
/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed
* via a more targeted API.
*/
struct ethtool_eth_phy_stats {
- u64 SymbolErrorDuringCarrier;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 SymbolErrorDuringCarrier;
+ );
};
/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
* via a more targeted API.
*/
struct ethtool_eth_ctrl_stats {
- u64 MACControlFramesTransmitted;
- u64 MACControlFramesReceived;
- u64 UnsupportedOpcodesReceived;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 MACControlFramesTransmitted;
+ u64 MACControlFramesReceived;
+ u64 UnsupportedOpcodesReceived;
+ );
};
/**
* struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
* @tx_pause_frames: transmitted pause frame count. Reported to user space
* as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
*
@@ -356,8 +377,11 @@ struct ethtool_eth_ctrl_stats {
* from the standard.
*/
struct ethtool_pause_stats {
- u64 tx_pause_frames;
- u64 rx_pause_frames;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+ );
};
#define ETHTOOL_MAX_LANES 8
@@ -407,6 +431,8 @@ struct ethtool_rmon_hist_range {
/**
* struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
* @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC.
* @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC.
* @fragments: Equivalent to `etherStatsFragments` from the RFC.
@@ -422,13 +448,16 @@ struct ethtool_rmon_hist_range {
* ranges is left to the driver.
*/
struct ethtool_rmon_stats {
- u64 undersize_pkts;
- u64 oversize_pkts;
- u64 fragments;
- u64 jabbers;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 undersize_pkts;
+ u64 oversize_pkts;
+ u64 fragments;
+ u64 jabbers;
- u64 hist[ETHTOOL_RMON_HIST_MAX];
- u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+ u64 hist[ETHTOOL_RMON_HIST_MAX];
+ u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+ );
};
#define ETH_MODULE_EEPROM_PAGE_LEN 128
@@ -468,6 +497,98 @@ struct ethtool_module_power_mode_params {
};
/**
+ * struct ethtool_mm_state - 802.3 MAC merge layer state
+ * @verify_time:
+ * wait time between verification attempts in ms (according to clause
+ * 30.14.1.6 aMACMergeVerifyTime)
+ * @max_verify_time:
+ * maximum accepted value for the @verify_time variable in set requests
+ * @verify_status:
+ * state of the verification state machine of the MM layer (according to
+ * clause 30.14.1.2 aMACMergeStatusVerify)
+ * @tx_enabled:
+ * set if the MM layer is administratively enabled in the TX direction
+ * (according to clause 30.14.1.3 aMACMergeEnableTx)
+ * @tx_active:
+ * set if the MM layer is enabled in the TX direction, which makes FP
+ * possible (according to 30.14.1.5 aMACMergeStatusTx). This should be
+ * true if MM is enabled, and the verification status is either verified,
+ * or disabled.
+ * @pmac_enabled:
+ * set if the preemptible MAC is powered on and is able to receive
+ * preemptible packets and respond to verification frames.
+ * @verify_enabled:
+ * set if the Verify function of the MM layer (which sends SMD-V
+ * verification requests) is administratively enabled (regardless of
+ * whether it is currently in the ETHTOOL_MM_VERIFY_STATUS_DISABLED state
+ * or not), according to clause 30.14.1.4 aMACMergeVerifyDisableTx (but
+ * using positive rather than negative logic). The device should always
+ * respond to received SMD-V requests as long as @pmac_enabled is set.
+ * @tx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that the link partner
+ * supports receiving, expressed in octets. Compared to the definition
+ * from clause 30.14.1.7 aMACMergeAddFragSize which is expressed in the
+ * range 0 to 3 (requiring a translation to the size in octets according
+ * to the formula 64 * (1 + addFragSize) - 4), a value in a continuous and
+ * unbounded range can be specified here.
+ * @rx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that this device
+ * supports receiving, expressed in octets.
+ */
+struct ethtool_mm_state {
+ u32 verify_time;
+ u32 max_verify_time;
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_enabled;
+ bool tx_active;
+ bool pmac_enabled;
+ bool verify_enabled;
+ u32 tx_min_frag_size;
+ u32 rx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_cfg - 802.3 MAC merge layer configuration
+ * @verify_time: see struct ethtool_mm_state
+ * @verify_enabled: see struct ethtool_mm_state
+ * @tx_enabled: see struct ethtool_mm_state
+ * @pmac_enabled: see struct ethtool_mm_state
+ * @tx_min_frag_size: see struct ethtool_mm_state
+ */
+struct ethtool_mm_cfg {
+ u32 verify_time;
+ bool verify_enabled;
+ bool tx_enabled;
+ bool pmac_enabled;
+ u32 tx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_stats - 802.3 MAC merge layer statistics
+ * @MACMergeFrameAssErrorCount:
+ * received MAC frames with reassembly errors
+ * @MACMergeFrameSmdErrorCount:
+ * received MAC frames/fragments rejected due to unknown or incorrect SMD
+ * @MACMergeFrameAssOkCount:
+ * received MAC frames that were successfully reassembled and passed up
+ * @MACMergeFragCountRx:
+ * number of additional correct SMD-C mPackets received due to preemption
+ * @MACMergeFragCountTx:
+ * number of additional mPackets sent due to preemption
+ * @MACMergeHoldCount:
+ * number of times the MM layer entered the HOLD state, which blocks
+ * transmission of preemptible traffic
+ */
+struct ethtool_mm_stats {
+ u64 MACMergeFrameAssErrorCount;
+ u64 MACMergeFrameSmdErrorCount;
+ u64 MACMergeFrameAssOkCount;
+ u64 MACMergeFragCountRx;
+ u64 MACMergeFragCountTx;
+ u64 MACMergeHoldCount;
+};
+
+/**
* struct ethtool_ops - optional netdev operations
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
@@ -639,6 +760,9 @@ struct ethtool_module_power_mode_params {
* plugged-in.
* @set_module_power_mode: Set the power mode policy for the plug-in module
* used by the network device.
+ * @get_mm: Query the 802.3 MAC Merge layer state.
+ * @set_mm: Set the 802.3 MAC Merge layer parameters.
+ * @get_mm_stats: Query the 802.3 MAC Merge layer statistics.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -777,6 +901,10 @@ struct ethtool_ops {
int (*set_module_power_mode)(struct net_device *dev,
const struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack);
+ int (*get_mm)(struct net_device *dev, struct ethtool_mm_state *state);
+ int (*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats);
};
int ethtool_check_ops(const struct ethtool_ops *ops);
@@ -802,12 +930,17 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
struct phy_device;
struct phy_tdr_config;
+struct phy_plca_cfg;
+struct phy_plca_status;
/**
* struct ethtool_phy_ops - Optional PHY device options
* @get_sset_count: Get number of strings that @get_strings will write.
* @get_strings: Return a set of strings that describe the requested objects
* @get_stats: Return extended statistics about the PHY device.
+ * @get_plca_cfg: Return PLCA configuration.
+ * @set_plca_cfg: Set PLCA configuration.
+ * @get_plca_status: Get PLCA status.
* @start_cable_test: Start a cable test
* @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
*
@@ -819,6 +952,13 @@ struct ethtool_phy_ops {
int (*get_strings)(struct phy_device *dev, u8 *data);
int (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
int (*start_cable_test)(struct phy_device *phydev,
struct netlink_ext_ack *extack);
int (*start_cable_test_tdr)(struct phy_device *phydev,
@@ -851,6 +991,51 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
*/
int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+/* Some generic methods drivers may use in their ethtool_ops */
+u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+
+/**
+ * ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment
+ * size expressed as multiplier into (absolute) minimum fragment size
+ * value expressed in octets
+ * @val_add: Value of addFragSize multiplier
+ */
+static inline u32 ethtool_mm_frag_size_add_to_min(u32 val_add)
+{
+ return (ETH_ZLEN + ETH_FCS_LEN) * (1 + val_add) - ETH_FCS_LEN;
+}
+
+/**
+ * ethtool_mm_frag_size_min_to_add - Translate (absolute) minimum fragment size
+ * expressed in octets into (standard) additional fragment size expressed
+ * as multiplier
+ * @val_min: Minimum fragment size in octets
+ * @val_add: Pointer where the standard addFragSize value is to be returned
+ * @extack: Netlink extended ack
+ *
+ * Translate a value in octets to one of 0, 1, 2, 3 according to the reverse
+ * application of the 802.3 formula 64 * (1 + addFragSize) - 4. To be called
+ * by drivers which do not support programming the minimum fragment size to a
+ * continuous range. Returns an error for any other fragment size value.
+ */
+static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+ struct netlink_ext_ack *extack)
+{
+ u32 add_frag_size;
+
+ for (add_frag_size = 0; add_frag_size < 4; add_frag_size++) {
+ if (ethtool_mm_frag_size_add_to_min(add_frag_size) == val_min) {
+ *val_add = add_frag_size;
+ return 0;
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "minFragSize required to be one of 60, 124, 188 or 252");
+ return -EINVAL;
+}
+
/**
* ethtool_sprintf - Write formatted string to ethtool string data
* @data: Pointer to start of string to update
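The two frag-size helpers above are inverses over the four legal values: 64 * (1 + n) - 4 for n in 0..3 gives 60, 124, 188 and 252 octets, matching the extack message. A hedged sketch of a driver's set_mm path that only supports the discrete register encoding:

	static int example_set_mm(struct net_device *dev,
				  struct ethtool_mm_cfg *cfg,
				  struct netlink_ext_ack *extack)
	{
		u32 add_frag_size;
		int err;

		err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
						      &add_frag_size, extack);
		if (err)
			return err;

		/* program add_frag_size (0..3) into the hardware */
		return 0;
	}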
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index aba348d58ff6..17003b385756 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -29,6 +29,17 @@ int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
u32 step);
+void ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+void ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
+void ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats);
+
#else
static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
{
@@ -70,5 +81,36 @@ static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
{
return -EOPNOTSUPP;
}
+
+static inline void
+ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ccc4a4a58c72..1727898f1641 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -14,6 +14,7 @@
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/capability.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
diff --git a/include/linux/find.h b/include/linux/find.h
index ccaf61a0f5fd..4647864a5ffd 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -22,6 +22,9 @@ unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long
unsigned long size, unsigned long n);
unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n);
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size,
+ unsigned long n);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
@@ -255,6 +258,36 @@ unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned lon
return __find_nth_andnot_bit(addr1, addr2, size, n);
}
+/**
+ * find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
+ * excluding those set in 3rd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @addr3: The 3rd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit whose position is needed, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static __always_inline
+unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & (~*addr3) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_andnot_bit(addr1, addr2, addr3, size, n);
+}
+
#ifndef find_first_and_bit
/**
* find_first_and_bit - find the first set bit in both memory regions
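A small, hedged bitmap sketch of the new 3-operand primitive (bitmap names illustrative):

	DECLARE_BITMAP(want, 64);
	DECLARE_BITMAP(ready, 64);
	DECLARE_BITMAP(busy, 64);
	unsigned long bit;

	/* second (n == 1) bit set in both 'want' and 'ready' but not 'busy' */
	bit = find_nth_and_andnot_bit(want, ready, busy, 64, 1);
	if (bit < 64)
		set_bit(bit, busy);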
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
index 2d9203314865..df25fffdc0ae 100644
--- a/include/linux/fsl/enetc_mdio.h
+++ b/include/linux/fsl/enetc_mdio.h
@@ -37,16 +37,27 @@ struct enetc_mdio_priv {
#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value);
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, int regnum);
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, int regnum,
+ u16 value);
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
#else
-static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static inline int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id,
+ int regnum)
{ return -EINVAL; }
-static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
+static inline int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id,
+ int regnum, u16 value)
+{ return -EINVAL; }
+static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum, u16 value)
{ return -EINVAL; }
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index 01acebe37fab..b301bf7199d3 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -149,6 +149,7 @@ struct ptp_qoriq {
struct device *dev;
bool extts_fifo_support;
bool fiper3_support;
+ bool etsec;
int irq;
int phc_index;
u32 tclk_period; /* nanoseconds */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index d84e0e99f084..500404d85141 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -965,15 +965,33 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#endif /* I2C */
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return i2c_find_device_by_fwnode(of_fwnode_handle(node));
+}
/* must call put_device() when done with returned i2c_adapter device */
-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_find_adapter_by_fwnode(of_fwnode_handle(node));
+}
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
+}
const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 80d6308dea06..2463bdd2a382 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1356,6 +1356,7 @@ struct ieee80211_mgmt {
} __packed wnm_timing_msr;
} u;
} __packed action;
+ DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
} u;
} __packed __aligned(2);
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 0303eb84d596..140f61ec0f5f 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -44,6 +44,13 @@
#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_PAN_ID_LEN 2
+/* Duration in superframe order */
+#define IEEE802154_MAX_SCAN_DURATION 14
+#define IEEE802154_ACTIVE_SCAN_DURATION 15
+/* Superframe duration in slots */
+#define IEEE802154_SUPERFRAME_PERIOD 16
+/* Various periods expressed in symbols */
+#define IEEE802154_SLOT_PERIOD 60
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 78890143f079..b19d3284551f 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -15,6 +15,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/sockptr.h>
#include <uapi/linux/igmp.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 3d9c6750af62..d11c25f5030a 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -35,4 +35,25 @@ static inline unsigned int ip_transport_len(const struct sk_buff *skb)
{
return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
}
+
+static inline unsigned int iph_totlen(const struct sk_buff *skb, const struct iphdr *iph)
+{
+ u32 len = ntohs(iph->tot_len);
+
+ return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
+ len : skb->len - skb_network_offset(skb);
+}
+
+static inline unsigned int skb_ip_totlen(const struct sk_buff *skb)
+{
+ return iph_totlen(skb, ip_hdr(skb));
+}
+
+/* IPv4 datagram length is stored into 16bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFFU
+
+static inline void iph_set_totlen(struct iphdr *iph, unsigned int len)
+{
+ iph->tot_len = len <= IP_MAX_MTU ? htons(len) : 0;
+}
#endif /* _LINUX_IP_H */
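With IPv4 BIG TCP, a GSO TCP skb whose payload exceeds the 16-bit tot_len field carries tot_len == 0, so readers must use the new accessors instead of ntohs(ip_hdr(skb)->tot_len). A short sketch:

	/* reading: correct for both regular and big-TCP skbs */
	unsigned int len = skb_ip_totlen(skb);

	/* writing: stores 0 when the length overflows 16 bits */
	iph_set_totlen(ip_hdr(skb), skb->len - skb_network_offset(skb));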
diff --git a/include/linux/ism.h b/include/linux/ism.h
new file mode 100644
index 000000000000..ea2bcdae7401
--- /dev/null
+++ b/include/linux/ism.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal Shared Memory
+ *
+ * Definitions for the ISM module
+ *
+ * Copyright IBM Corp. 2022
+ */
+#ifndef _ISM_H
+#define _ISM_H
+
+#include <linux/workqueue.h>
+
+struct ism_dmb {
+ u64 dmb_tok;
+ u64 rgid;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+/* Unless we gain unexpected popularity, this limit should hold for a while */
+#define MAX_CLIENTS 8
+#define ISM_NR_DMBS 1920
+
+struct ism_dev {
+ spinlock_t lock; /* protects the ism device */
+ struct list_head list;
+ struct pci_dev *pdev;
+
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+ u8 *sba_client_arr; /* entries are indices into 'clients' array */
+ void *priv[MAX_CLIENTS];
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+
+ struct device dev;
+ u64 local_gid;
+ int ieq_idx;
+
+ atomic_t free_clients_cnt;
+ atomic_t add_dev_cnt;
+ wait_queue_head_t waitq;
+};
+
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct ism_client {
+ const char *name;
+ void (*add)(struct ism_dev *dev);
+ void (*remove)(struct ism_dev *dev);
+ void (*handle_event)(struct ism_dev *dev, struct ism_event *event);
+ /* Parameter dmbemask contains a bit vector with updated DMBEs, if sent
+ * via ism_move_data(). Callback function must handle all active bits
+ * indicated by dmbemask.
+ */
+ void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
+ /* Private area - don't touch! */
+ struct work_struct remove_work;
+ struct work_struct add_work;
+ struct ism_dev *tgt_ism;
+ u8 id;
+};
+
+int ism_register_client(struct ism_client *client);
+int ism_unregister_client(struct ism_client *client);
+static inline void *ism_get_priv(struct ism_dev *dev,
+ struct ism_client *client)
+{
+ return dev->priv[client->id];
+}
+
+static inline void ism_set_priv(struct ism_dev *dev, struct ism_client *client,
+ void *priv)
+{
+ dev->priv[client->id] = priv;
+}
+
+int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb,
+ struct ism_client *client);
+int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb);
+int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf,
+ unsigned int offset, void *data, unsigned int size);
+u8 *ism_get_seid(void);
+
+const struct smcd_ops *ism_get_smcd_ops(void);
+
+#endif /* _ISM_H */
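A hedged sketch of a minimal client of the new ISM interface; all names are illustrative and the callbacks are left empty:

	static void example_add(struct ism_dev *dev) { }
	static void example_remove(struct ism_dev *dev) { }
	static void example_event(struct ism_dev *dev, struct ism_event *event) { }
	static void example_irq(struct ism_dev *dev, unsigned int bit,
				u16 dmbemask) { }

	static struct ism_client example_client = {
		.name		= "example",
		.add		= example_add,
		.remove		= example_remove,
		.handle_event	= example_event,
		.handle_irq	= example_irq,
	};

	/* ism_register_client(&example_client) from module init,
	 * ism_unregister_client(&example_client) on exit.
	 */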
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 373630fe5c28..cffabdbce075 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -38,8 +38,10 @@ struct mdiobb_ctrl {
u8 op_c22_write;
};
-int mdiobb_read(struct mii_bus *bus, int phy, int reg);
-int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c45(struct mii_bus *bus, int devad, int phy, int reg);
+int mdiobb_write_c45(struct mii_bus *bus, int devad, int phy, int reg, u16 val);
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index f7fbbf3069e7..27013d6bf24a 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -10,14 +10,6 @@
#include <linux/bitfield.h>
#include <linux/mod_devicetable.h>
-/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
- * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
- */
-#define MII_ADDR_C45 (1<<30)
-#define MII_DEVADDR_C45_SHIFT 16
-#define MII_DEVADDR_C45_MASK GENMASK(20, 16)
-#define MII_REGADDR_C45_MASK GENMASK(15, 0)
-
struct gpio_desc;
struct mii_bus;
struct reset_control;
@@ -410,6 +402,90 @@ static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
return result;
}
+/**
+ * mii_eee_cap1_mod_linkmode_t()
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the value of the following registers to the
+ * linkmode:
+ * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20)
+ * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60)
+ * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61)
+ */
+static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv, val & MDIO_EEE_100TX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ adv, val & MDIO_EEE_1000T);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ adv, val & MDIO_EEE_10GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, val & MDIO_EEE_1000KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, val & MDIO_EEE_10GKX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, val & MDIO_EEE_10GKR);
+}
+
+/**
+ * linkmode_to_mii_eee_cap1_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates linkmode to value for IEEE 802.3-2018 45.2.7.13
+ * "EEE advertisement 1" register (7.60)
+ */
+static inline u32 linkmode_to_mii_eee_cap1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, adv))
+ result |= MDIO_EEE_100TX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_1000T;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_10GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, adv))
+ result |= MDIO_EEE_1000KX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, adv))
+ result |= MDIO_EEE_10GKX4;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, adv))
+ result |= MDIO_EEE_10GKR;
+
+ return result;
+}
+
+/**
+ * mii_10base_t1_adv_mod_linkmode_t()
+ * @adv: linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates IEEE 802.3cg-2019 45.2.7.26 "10BASE-T1 AN status"
+ * register (7.527) value to the linkmode.
+ */
+static inline void mii_10base_t1_adv_mod_linkmode_t(unsigned long *adv, u16 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ adv, val & MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L);
+}
+
+/**
+ * linkmode_adv_to_mii_10base_t1_t()
+ * @adv: linkmode advertisement settings
+ *
+ * A function that translates the linkmode to IEEE 802.3cg-2019 45.2.7.25
+ * "10BASE-T1 AN control" register (7.526) value.
+ */
+static inline u32 linkmode_adv_to_mii_10base_t1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, adv))
+ result |= MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L;
+
+ return result;
+}
+
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
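Each mod_linkmode/to_mii pair round-trips; a hedged sketch for the EEE capability 1 registers (the register value is illustrative):

	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
	u32 reg;

	/* decode a raw 7.60/7.61 register value into linkmode bits ... */
	mii_eee_cap1_mod_linkmode_t(adv, MDIO_EEE_1000T | MDIO_EEE_100TX);

	/* ... and encode linkmode bits back into register 7.60 format */
	reg = linkmode_to_mii_eee_cap1_t(adv);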
@@ -423,6 +499,21 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
u16 set);
int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
u16 mask, u16 set);
+int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum);
+int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 val);
+int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 mask, u16 set);
+
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 mask, u16 set);
static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
{
@@ -448,44 +539,19 @@ static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
mask, set);
}
-static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
-{
- return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum;
-}
-
-static inline u16 mdiobus_c45_regad(u32 regnum)
-{
- return FIELD_GET(MII_REGADDR_C45_MASK, regnum);
-}
-
-static inline u16 mdiobus_c45_devad(u32 regnum)
+static inline int mdiodev_c45_modify(struct mdio_device *mdiodev, int devad,
+ u32 regnum, u16 mask, u16 set)
{
- return FIELD_GET(MII_DEVADDR_C45_MASK, regnum);
+ return mdiobus_c45_modify(mdiodev->bus, mdiodev->addr, devad, regnum,
+ mask, set);
}
-static inline int __mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
- u16 regnum)
-{
- return __mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
-}
-
-static inline int __mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
- u16 regnum, u16 val)
-{
- return __mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum),
- val);
-}
-
-static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
- u16 regnum)
-{
- return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
-}
-
-static inline int mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
- u16 regnum, u16 val)
+static inline int mdiodev_c45_modify_changed(struct mdio_device *mdiodev,
+ int devad, u32 regnum, u16 mask,
+ u16 set)
{
- return mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), val);
+ return mdiobus_c45_modify_changed(mdiodev->bus, mdiodev->addr, devad,
+ regnum, mask, set);
}
static inline int mdiodev_c45_read(struct mdio_device *mdiodev, int devad,
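With the MII_ADDR_C45 encoding gone, C45 callers pass the MMD device address as an explicit argument. A hedged sketch using standard register constants from uapi/linux/mdio.h:

	int val;

	/* PMA/PMD status 1: MMD 1, register 1 */
	val = mdiobus_c45_read(bus, addr, MDIO_MMD_PMAPMD, MDIO_STAT1);
	if (val < 0)
		return val;

	/* clear the low-power bit in PMA/PMD control 1 */
	return mdiobus_c45_modify(bus, addr, MDIO_MMD_PMAPMD, MDIO_CTRL1,
				  MDIO_CTRL1_LPOWER, 0);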
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 85dc9b88ea37..1e38e99998c7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1754,6 +1754,12 @@ struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
+extern struct static_key_false memcg_bpf_enabled_key;
+static inline bool memcg_bpf_enabled(void)
+{
+ return static_branch_likely(&memcg_bpf_enabled_key);
+}
+
extern struct static_key_false memcg_kmem_enabled_key;
static inline bool memcg_kmem_enabled(void)
@@ -1832,6 +1838,11 @@ static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
return NULL;
}
+static inline bool memcg_bpf_enabled(void)
+{
+ return false;
+}
+
static inline bool memcg_kmem_enabled(void)
{
return false;
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 1f7c33b2f5a3..8bef1ab62bba 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -8,6 +8,8 @@
#ifndef _MICREL_PHY_H
#define _MICREL_PHY_H
+#define MICREL_OUI 0x0885
+
#define MICREL_PHY_ID_MASK 0x00fffff0
#define PHY_ID_KSZ8873MLL 0x000e7237
@@ -29,6 +31,7 @@
#define PHY_ID_KSZ9131 0x00221640
#define PHY_ID_LAN8814 0x00221660
#define PHY_ID_LAN8804 0x00221670
+#define PHY_ID_LAN8841 0x00221650
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9db93e487496..b6b626157b03 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -446,6 +446,7 @@ enum {
struct mlx4_wqe_inline_seg {
__be32 byte_count;
+ __u8 data[];
};
enum mlx4_update_qp_attr {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 29d4b201c7b2..71b06ebad402 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -362,11 +362,13 @@ enum mlx5_event {
enum mlx5_driver_event {
MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+ MLX5_DRIVER_EVENT_UPLINK_NETDEV,
};
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
+ MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,
};
enum {
@@ -1204,6 +1206,7 @@ enum mlx5_cap_type {
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
+ MLX5_CAP_CRYPTO = 0x1a,
MLX5_CAP_DEV_SHAMPO = 0x1d,
MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
@@ -1460,6 +1463,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_IPSEC(mdev, cap)\
MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+#define MLX5_CAP_CRYPTO(mdev, cap)\
+ MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
+
#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 333c1fec72f8..a170c8565779 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -49,6 +49,7 @@
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>
+#include <linux/mutex.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -100,6 +101,8 @@ enum {
};
enum {
+ MLX5_REG_SBPR = 0xb001,
+ MLX5_REG_SBCM = 0xb002,
MLX5_REG_QPTS = 0x4002,
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
@@ -214,6 +217,7 @@ struct mlx5_rsc_debug {
enum mlx5_dev_event {
MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
MLX5_DEV_EVENT_PORT_AFFINITY = 129,
+ MLX5_DEV_EVENT_MULTIPORT_ESW = 130,
};
enum mlx5_port_status {
@@ -308,6 +312,7 @@ struct mlx5_cmd {
struct workqueue_struct *wq;
struct semaphore sem;
struct semaphore pages_sem;
+ struct semaphore throttle_sem;
int mode;
u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
@@ -427,8 +432,6 @@ struct mlx5_core_health {
u8 synd;
u32 fatal_error;
u32 crdump_size;
- /* wq spinlock to synchronize draining */
- spinlock_t wq_lock;
struct workqueue_struct *wq;
unsigned long flags;
struct work_struct fatal_report_work;
@@ -513,6 +516,7 @@ struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;
+struct mlx5_crypto_dek_priv;
struct mlx5_rate_limit {
u32 rate;
@@ -551,10 +555,6 @@ enum {
* creation/deletion on drivers rescan. Unset during device attach.
*/
MLX5_PRIV_FLAGS_DETACH = 1 << 2,
- /* Distinguish between mlx5e_probe/remove called by module init/cleanup
- * and called by other flows which can already hold devlink lock
- */
- MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW = 1 << 3,
};
struct mlx5_adev {
@@ -679,8 +679,9 @@ struct mlx5e_resources {
u32 mkey;
struct mlx5_sq_bfreg bfreg;
} hw_objs;
- struct devlink_port dl_port;
struct net_device *uplink_netdev;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
};
enum mlx5_sw_icm_type {
@@ -1018,6 +1019,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
+
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
@@ -1157,6 +1161,7 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
@@ -1207,6 +1212,11 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF;
}
+static inline bool mlx5_core_is_management_pf(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_ports) == 1 && !MLX5_CAP_GEN(dev, native_port_num);
+}
+
static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index ba6958b49a8e..2cb404c7ea13 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -51,6 +51,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_COUNTER,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
MLX5_FLOW_DESTINATION_TYPE_RANGE,
+ MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
};
enum {
@@ -102,6 +103,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_PORT_SEL,
MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
};
enum {
@@ -296,6 +299,8 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a9ee7bc59c90..5ad5126615a1 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -306,6 +306,7 @@ enum {
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
+ MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_MAX
};
@@ -315,6 +316,11 @@ enum {
MLX5_CMD_OP_GENERAL_END = 0xd00,
};
+enum {
+ MLX5_FT_NIC_RX_2_NIC_RX_RDMA = BIT(0),
+ MLX5_FT_NIC_TX_RDMA_2_NIC_TX = BIT(1),
+};
+
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
@@ -1112,6 +1118,30 @@ struct mlx5_ifc_sync_steering_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_sync_crypto_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 crypto_type[0x10];
+
+ u8 reserved_at_80[0x80];
+};
+
+struct mlx5_ifc_sync_crypto_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_device_mem_cap_bits {
u8 memic[0x1];
u8 reserved_at_1[0x1f];
@@ -1496,7 +1526,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 null_mkey[0x1];
u8 log_max_klm_list_size[0x6];
- u8 reserved_at_120[0xa];
+ u8 reserved_at_120[0x2];
+ u8 qpc_extension[0x1];
+ u8 reserved_at_123[0x7];
u8 log_max_ra_req_dc[0x6];
u8 reserved_at_130[0x2];
u8 eth_wqe_too_small[0x1];
@@ -1662,7 +1694,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x6];
+ u8 reserved_at_270[0x3];
+ u8 qp_error_syndrome[0x1];
+ u8 reserved_at_274[0x2];
u8 lag_dct[0x2];
u8 lag_tx_port_affinity[0x1];
u8 lag_native_fdb_selection[0x1];
@@ -1768,7 +1802,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ats[0x1];
u8 reserved_at_462[0x1];
u8 log_max_uctx[0x5];
- u8 reserved_at_468[0x2];
+ u8 reserved_at_468[0x1];
+ u8 crypto[0x1];
u8 ipsec_offload[0x1];
u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
@@ -1899,7 +1934,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_e0[0xc0];
- u8 reserved_at_1a0[0xb];
+ u8 flow_table_type_2_type[0x8];
+ u8 reserved_at_1a8[0x3];
u8 log_min_mkey_entity_size[0x5];
u8 reserved_at_1b0[0x10];
@@ -1923,6 +1959,7 @@ enum mlx5_ifc_flow_destination_type {
MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
};
enum mlx5_flow_table_miss_action {
@@ -1937,7 +1974,8 @@ struct mlx5_ifc_dest_format_struct_bits {
u8 destination_eswitch_owner_vhca_id_valid[0x1];
u8 packet_reformat[0x1];
- u8 reserved_at_22[0xe];
+ u8 reserved_at_22[0x6];
+ u8 destination_table_type[0x8];
u8 destination_eswitch_owner_vhca_id[0x10];
};
@@ -3351,6 +3389,30 @@ struct mlx5_ifc_shampo_cap_bits {
u8 reserved_at_40[0x7c0];
};
+struct mlx5_ifc_crypto_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 synchronize_dek[0x1];
+ u8 int_kek_manual[0x1];
+ u8 int_kek_auto[0x1];
+ u8 reserved_at_6[0x1a];
+
+ u8 reserved_at_20[0x3];
+ u8 log_dek_max_alloc[0x5];
+ u8 reserved_at_28[0x3];
+ u8 log_max_num_deks[0x5];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x3];
+ u8 log_dek_granularity[0x5];
+ u8 reserved_at_68[0x3];
+ u8 log_max_num_int_kek[0x5];
+ u8 sw_wrapped_dek[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
@@ -3371,6 +3433,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
struct mlx5_ifc_shampo_cap_bits shampo_cap;
struct mlx5_ifc_macsec_cap_bits macsec_cap;
+ struct mlx5_ifc_crypto_cap_bits crypto_cap;
u8 reserved_at_0[0x8000];
};
@@ -5342,6 +5405,37 @@ struct mlx5_ifc_query_rmp_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_cqe_error_syndrome_bits {
+ u8 hw_error_syndrome[0x8];
+ u8 hw_syndrome_type[0x4];
+ u8 reserved_at_c[0x4];
+ u8 vendor_error_syndrome[0x8];
+ u8 syndrome[0x8];
+};
+
+struct mlx5_ifc_qp_context_extension_bits {
+ u8 reserved_at_0[0x60];
+
+ struct mlx5_ifc_cqe_error_syndrome_bits error_syndrome;
+
+ u8 reserved_at_80[0x580];
+};
+
+struct mlx5_ifc_qpc_extension_and_pas_list_in_bits {
+ struct mlx5_ifc_qp_context_extension_bits qpc_data_extension;
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_qp_pas_list_in_bits {
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits {
+ struct mlx5_ifc_qp_pas_list_in_bits qp_pas_list;
+ struct mlx5_ifc_qpc_extension_and_pas_list_in_bits qpc_ext_and_pas_list;
+};
+
struct mlx5_ifc_query_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5358,7 +5452,7 @@ struct mlx5_ifc_query_qp_out_bits {
u8 reserved_at_800[0x80];
- u8 pas[][0x40];
+ union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits qp_pas_or_qpc_ext_and_pas;
};
struct mlx5_ifc_query_qp_in_bits {
@@ -5368,7 +5462,8 @@ struct mlx5_ifc_query_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 qpn[0x18];
u8 reserved_at_60[0x20];
@@ -6196,6 +6291,18 @@ struct mlx5_ifc_match_definer_bits {
};
};
+struct mlx5_ifc_general_obj_create_param_bits {
+ u8 alias_object[0x1];
+ u8 reserved_at_1[0x2];
+ u8 log_obj_range[0x5];
+ u8 reserved_at_8[0x18];
+};
+
+struct mlx5_ifc_general_obj_query_param_bits {
+ u8 alias_object[0x1];
+ u8 obj_offset[0x1f];
+};
+
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 opcode[0x10];
u8 uid[0x10];
@@ -6205,9 +6312,10 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 obj_id[0x20];
- u8 reserved_at_60[0x3];
- u8 log_obj_range[0x5];
- u8 reserved_at_68[0x18];
+ union {
+ struct mlx5_ifc_general_obj_create_param_bits create;
+ struct mlx5_ifc_general_obj_query_param_bits query;
+ } op_param;
};
struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
@@ -8571,7 +8679,8 @@ struct mlx5_ifc_create_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 input_qpn[0x18];
u8 reserved_at_60[0x20];
@@ -9862,13 +9971,20 @@ struct mlx5_ifc_mpegc_reg_bits {
};
enum {
+ MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1,
+};
+
+enum {
MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
};
struct mlx5_ifc_mtutc_reg_bits {
- u8 reserved_at_0[0x1c];
+ u8 reserved_at_0[0x5];
+ u8 freq_adj_units[0x3];
+ u8 reserved_at_8[0x14];
u8 operation[0x4];
u8 freq_adjustment[0x20];
@@ -9941,7 +10057,10 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x5d];
+ u8 reserved_at_0[0x50];
+ u8 mtutc_freq_adj_units[0x1];
+ u8 mtutc_time_adjustment_extended_range[0x1];
+ u8 reserved_at_52[0xb];
u8 mcia_32dwords[0x1];
u8 out_pulse_duration_ns[0x1];
u8 npps_period[0x1];
@@ -11000,6 +11119,67 @@ struct mlx5_ifc_pbmc_reg_bits {
u8 reserved_at_2e0[0x80];
};
+struct mlx5_ifc_sbpr_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x4];
+ u8 dir[0x2];
+ u8 reserved_at_8[0x14];
+ u8 pool[0x4];
+
+ u8 infi_size[0x1];
+ u8 reserved_at_21[0x7];
+ u8 size[0x18];
+
+ u8 reserved_at_40[0x1c];
+ u8 mode[0x4];
+
+ u8 reserved_at_60[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_81[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 ext_buff_occupancy[0x18];
+};
+
+struct mlx5_ifc_sbcm_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x6];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 pg_buff[0x6];
+ u8 reserved_at_18[0x6];
+ u8 dir[0x2];
+
+ u8 reserved_at_20[0x1f];
+ u8 exc[0x1];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_a1[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_c0[0x8];
+ u8 min_buff[0x18];
+
+ u8 infi_max[0x1];
+ u8 reserved_at_e1[0x7];
+ u8 max_buff[0x18];
+
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x1c];
+ u8 pool[0x4];
+};
+
struct mlx5_ifc_qtct_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
@@ -11639,6 +11819,7 @@ enum {
MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
+ MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
};
enum {
@@ -11818,21 +11999,62 @@ struct mlx5_ifc_query_macsec_obj_out_bits {
struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
+struct mlx5_ifc_wrapped_dek_bits {
+ u8 gcm_iv[0x60];
+
+ u8 reserved_at_60[0x20];
+
+ u8 const0[0x1];
+ u8 key_size[0x1];
+ u8 reserved_at_82[0x2];
+ u8 key2_invalid[0x1];
+ u8 reserved_at_85[0x3];
+ u8 pd[0x18];
+
+ u8 key_purpose[0x5];
+ u8 reserved_at_a5[0x13];
+ u8 kek_id[0x8];
+
+ u8 reserved_at_c0[0x40];
+
+ u8 key1[0x8][0x20];
+
+ u8 key2[0x8][0x20];
+
+ u8 reserved_at_300[0x40];
+
+ u8 const1[0x1];
+ u8 reserved_at_341[0x1f];
+
+ u8 reserved_at_360[0x20];
+
+ u8 auth_tag[0x80];
+};
+
struct mlx5_ifc_encryption_key_obj_bits {
u8 modify_field_select[0x40];
- u8 reserved_at_40[0x14];
+ u8 state[0x8];
+ u8 sw_wrapped[0x1];
+ u8 reserved_at_49[0xb];
u8 key_size[0x4];
u8 reserved_at_58[0x4];
- u8 key_type[0x4];
+ u8 key_purpose[0x4];
u8 reserved_at_60[0x8];
u8 pd[0x18];
- u8 reserved_at_80[0x180];
- u8 key[8][0x20];
+ u8 reserved_at_80[0x100];
+
+ u8 opaque[0x40];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 key[8][0x80];
- u8 reserved_at_300[0x500];
+ u8 sw_wrapped_dek[8][0x80];
+
+ u8 reserved_at_a00[0x600];
};
struct mlx5_ifc_create_encryption_key_in_bits {
@@ -11840,6 +12062,11 @@ struct mlx5_ifc_create_encryption_key_in_bits {
struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
};
+struct mlx5_ifc_modify_encryption_key_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+};
+
enum {
MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0,
MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1,
@@ -11895,6 +12122,34 @@ struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
};
+struct mlx5_ifc_int_kek_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 state[0x8];
+ u8 auto_gen[0x1];
+ u8 reserved_at_49[0xb];
+ u8 key_size[0x4];
+ u8 reserved_at_58[0x8];
+
+ u8 reserved_at_60[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_80[0x180];
+ u8 key[8][0x80];
+
+ u8 reserved_at_600[0x200];
+};
+
+struct mlx5_ifc_create_int_kek_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_create_int_kek_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
struct mlx5_ifc_sampler_obj_bits {
u8 modify_field_select[0x40];
@@ -11933,9 +12188,9 @@ enum {
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC = 0x4,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
};
struct mlx5_ifc_tls_static_params_bits {
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 74f9d9a6d330..0e4ef9c5127a 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -102,6 +102,7 @@
#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146
#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149
#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a
+#define SDIO_DEVICE_ID_MARVELL_8978_WLAN 0x9159
#define SDIO_VENDOR_ID_MEDIATEK 0x037a
#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
diff --git a/include/linux/module.h b/include/linux/module.h
index 8c5909c0076c..514bc81568c5 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -879,11 +879,13 @@ static inline bool module_sig_ok(struct module *module)
#endif /* CONFIG_MODULE_SIG */
#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS)
-int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data);
#else
-static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+static inline int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data)
{
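A minimal usage sketch for the reworked iterator: passing a module name restricts the symbol walk to that module, and the callback stops the walk by returning non-zero (both the callback and the caller below are illustrative):

/* Illustrative only: count the symbols of a single module. */
static int count_symbol(void *data, const char *name,
			struct module *mod, unsigned long addr)
{
	(*(unsigned long *)data)++;
	return 0;	/* non-zero would stop the walk */
}

static unsigned long example_count_module_symbols(const char *modname)
{
	unsigned long n = 0;

	module_kallsyms_on_each_symbol(modname, count_symbol, &n);
	return n;
}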
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e6e02184c25a..6a14b7b11766 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -47,6 +47,7 @@
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
@@ -74,6 +75,7 @@ struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
+struct xdp_md;
void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
@@ -1035,14 +1037,14 @@ struct netdev_bpf {
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
- int (*xdo_dev_state_add) (struct xfrm_state *x);
+ int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
void (*xdo_dev_state_delete) (struct xfrm_state *x);
void (*xdo_dev_state_free) (struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
- int (*xdo_dev_policy_add) (struct xfrm_policy *x);
+ int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
@@ -1618,6 +1620,11 @@ struct net_device_ops {
bool cycles);
};
+struct xdp_metadata_ops {
+ int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
+ int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash);
+};
+
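A driver publishes these hooks by pointing net_device::xdp_metadata_ops at its ops table. A minimal sketch, assuming a hypothetical driver whose RX context embeds the xdp_buff (the mydrv_* names and descriptor layout are illustrative):

/* The kfunc ctx is the driver's xdp_buff cast to xdp_md, so the
 * xdp_buff must be the first member of the wrapper.
 */
struct mydrv_xdp_buff {
	struct xdp_buff xdp;
	u64 rx_timestamp;
	u32 rx_hash;
};

static int mydrv_xmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	const struct mydrv_xdp_buff *b = (const void *)ctx;

	if (!b->rx_timestamp)
		return -ENODATA;	/* hint unavailable for this frame */
	*timestamp = b->rx_timestamp;
	return 0;
}

static int mydrv_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
	const struct mydrv_xdp_buff *b = (const void *)ctx;

	*hash = b->rx_hash;
	return 0;
}

static const struct xdp_metadata_ops mydrv_xdp_metadata_ops = {
	.xmo_rx_timestamp	= mydrv_xmo_rx_timestamp,
	.xmo_rx_hash		= mydrv_xmo_rx_hash,
};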
/**
* enum netdev_priv_flags - &struct net_device priv_flags
*
@@ -1801,6 +1808,7 @@ enum netdev_ml_priv_type {
*
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
+ * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
* @ethtool_ops: Management operations
* @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
@@ -1811,6 +1819,7 @@ enum netdev_ml_priv_type {
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
+ * @xdp_features: XDP capabilities supported by the device
* @priv_flags: Like 'flags' but invisible to userspace,
* see if.h for the definitions
* @gflags: Global flags ( kept as legacy )
@@ -1957,6 +1966,8 @@ enum netdev_ml_priv_type {
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
* @tso_max_segs: Device (as in HW) limit on the max TSO segment count
+ * @gso_ipv4_max_size: Maximum size of generic segmentation offload,
+ * for IPv4.
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1997,6 +2008,8 @@ enum netdev_ml_priv_type {
* keep a list of interfaces to be deleted.
* @gro_max_size: Maximum size of aggregated packet in generic
* receive offload (GRO)
+ * @gro_ipv4_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO), for IPv4.
*
* @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
* @linkwatch_dev_tracker: refcount tracker used by linkwatch.
@@ -2048,8 +2061,10 @@ struct net_device {
/* Read-mostly cache-line for fast-path access */
unsigned int flags;
+ xdp_features_t xdp_features;
unsigned long long priv_flags;
const struct net_device_ops *netdev_ops;
+ const struct xdp_metadata_ops *xdp_metadata_ops;
int ifindex;
unsigned short gflags;
unsigned short hard_header_len;
@@ -2199,6 +2214,7 @@ struct net_device {
*/
#define GRO_MAX_SIZE (8 * 65535u)
unsigned int gro_max_size;
+ unsigned int gro_ipv4_max_size;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
@@ -2322,6 +2338,7 @@ struct net_device {
u16 gso_max_segs;
#define TSO_MAX_SEGS U16_MAX
u16 tso_max_segs;
+ unsigned int gso_ipv4_max_size;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
@@ -2831,6 +2848,7 @@ enum netdev_cmd {
NETDEV_OFFLOAD_XSTATS_DISABLE,
NETDEV_OFFLOAD_XSTATS_REPORT_USED,
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
+ NETDEV_XDP_FEAT_CHANGE,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index d8817d381c14..6863e271a9de 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -437,11 +437,13 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#include <linux/netfilter/nf_conntrack_zones_common.h>
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_set_closing(struct nf_conntrack *nfct);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
struct nf_conntrack_tuple;
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
@@ -459,6 +461,7 @@ struct nf_ct_hook {
bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
const struct sk_buff *);
void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
+ void (*set_closing)(struct nf_conntrack *nfct);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index d81bde5a5844..c43ac7690eca 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -130,6 +130,16 @@ struct netlink_ext_ack {
#define NL_SET_ERR_MSG_FMT_MOD(extack, fmt, args...) \
NL_SET_ERR_MSG_FMT((extack), KBUILD_MODNAME ": " fmt, ##args)
+#define NL_SET_ERR_MSG_WEAK(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG((extack), msg); \
+} while (0)
+
+#define NL_SET_ERR_MSG_WEAK_MOD(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG_MOD((extack), msg); \
+} while (0)
+
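The _WEAK variants set a message only if nothing more specific was recorded further down the call chain, letting core code provide a generic fallback without clobbering a driver's extack. A usage sketch (function and message are illustrative):

static int example_enable_offload(struct net_device *dev,
				  struct netlink_ext_ack *extack)
{
	int err = -EOPNOTSUPP;	/* pretend the driver call failed */

	/* only lands if the driver did not set its own message */
	if (err)
		NL_SET_ERR_MSG_WEAK(extack, "offload not supported");
	return err;
}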
#define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \
if ((extack)) { \
(extack)->bad_attr = (attr); \
@@ -263,6 +273,10 @@ struct netlink_callback {
};
};
+#define NL_ASSERT_DUMP_CTX_FITS(type_name) \
+ BUILD_BUG_ON(sizeof(type_name) > \
+ sizeof_field(struct netlink_callback, ctx))
+
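NL_ASSERT_DUMP_CTX_FITS gives a compile-time guarantee that a family's private dump state fits into netlink_callback::ctx. A sketch with an illustrative context structure:

struct example_dump_ctx {
	unsigned long bucket;
	unsigned long idx;
};

static int example_dump_start(struct netlink_callback *cb)
{
	NL_ASSERT_DUMP_CTX_FITS(struct example_dump_ctx);
	return 0;
}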
struct netlink_notify {
struct net *net;
u32 portid;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6378c997ded5..727bff531a14 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -45,27 +45,32 @@
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
+#define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features)
#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
+#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
extern const int phy_basic_ports_array[3];
extern const int phy_fibre_port_array[1];
extern const int phy_all_ports_features_array[7];
extern const int phy_10_100_features_array[4];
extern const int phy_basic_t1_features_array[3];
+extern const int phy_basic_t1s_p2mp_features_array[2];
extern const int phy_gbit_features_array[2];
extern const int phy_10gbit_features_array[1];
@@ -364,6 +369,11 @@ struct mii_bus {
int (*read)(struct mii_bus *bus, int addr, int regnum);
/** @write: Perform a write transfer on the bus */
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ /** @read_c45: Perform a C45 read transfer on the bus */
+ int (*read_c45)(struct mii_bus *bus, int addr, int devnum, int regnum);
+ /** @write_c45: Perform a C45 write transfer on the bus */
+ int (*write_c45)(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val);
/** @reset: Perform a reset of the bus */
int (*reset)(struct mii_bus *bus);
@@ -411,14 +421,6 @@ struct mii_bus {
/** @reset_gpiod: Reset GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
- /** @probe_capabilities: bus capabilities, used for probing */
- enum {
- MDIOBUS_NO_CAP = 0,
- MDIOBUS_C22,
- MDIOBUS_C45,
- MDIOBUS_C22_C45,
- } probe_capabilities;
-
/** @shared_lock: protect access to the shared element */
struct mutex shared_lock;
@@ -456,7 +458,7 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
}
struct mii_bus *mdio_find_bus(const char *mdio_name);
-struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr);
#define PHY_INTERRUPT_DISABLED false
#define PHY_INTERRUPT_ENABLED true
@@ -572,6 +574,7 @@ struct macsec_ops;
* @supported: Combined MAC/PHY supported linkmodes
* @advertising: Currently advertised linkmodes
* @adv_old: Saved advertised while power saving for WoL
+ * @supported_eee: supported PHY EEE linkmodes
* @lp_advertising: Current link partner advertised linkmodes
* @host_interfaces: PHY interface modes supported by host
* @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
@@ -676,6 +679,8 @@ struct phy_device {
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* used with phy_speed_down */
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ /* used for eee validation */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
/* Host supported PHY interface types. Should be ignored if empty. */
DECLARE_PHY_INTERFACE_MASK(host_interfaces);
@@ -739,6 +744,9 @@ struct phy_device {
#endif
};
+/* Generic phy_device::dev_flags */
+#define PHY_F_NO_IRQ 0x80000000
+
static inline struct phy_device *to_phy_device(const struct device *dev)
{
return container_of(to_mdio_device(dev), struct phy_device, mdio);
@@ -766,6 +774,63 @@ struct phy_tdr_config {
#define PHY_PAIR_ALL -1
/**
+ * struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @version: read-only PLCA register map version. -1 = not available. Ignored
+ * when setting the configuration. Format is the same as reported by the PLCA
+ * IDVER register (31.CA00).
+ * @enabled: PLCA configured mode (enabled/disabled). -1 = not available / don't
+ * set. 0 = disabled, anything else = enabled.
+ * @node_id: the PLCA local node identifier. -1 = not available / don't set.
+ * Allowed values [0 .. 254]. 255 = node disabled.
+ * @node_cnt: the PLCA node count (maximum number of nodes having a TO). Only
+ * meaningful for the coordinator (node_id = 0). -1 = not available / don't
+ * set. Allowed values [1 .. 255].
+ * @to_tmr: The value of the PLCA to_timer in bit-times, which determines the
+ * PLCA transmit opportunity window opening. See IEEE802.3 Clause 148 for
+ * more details. The to_timer shall be set to the same value across all nodes.
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ * @burst_cnt: controls how many additional frames a node is allowed to send in
+ * single transmit opportunity (TO). The default value of 0 means that the
+ * node is allowed exactly one frame per TO. A value of 1 allows two frames
+ * per TO, and so on. -1 = not available / don't set.
+ * Allowed values [0 .. 255].
+ * @burst_tmr: controls how many bit times to wait for the MAC to send a new
+ * frame before interrupting the burst. This value should be set to a value
+ * greater than the MAC inter-packet gap (which is typically 96 bits).
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ *
+ * A structure containing configuration parameters for setting/getting the PLCA
+ * RS configuration. The driver does not need to implement all the parameters,
+ * but should report what is actually used.
+ */
+struct phy_plca_cfg {
+ int version;
+ int enabled;
+ int node_id;
+ int node_cnt;
+ int to_tmr;
+ int burst_cnt;
+ int burst_tmr;
+};
+
+/**
+ * struct phy_plca_status - Status of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @pst: The PLCA status as reported by the PST bit in the PLCA STATUS
+ * register(31.CA03), indicating BEACON activity.
+ *
+ * A structure containing status information of the PLCA RS configuration.
+ * The driver does not need to implement all the parameters, but should report
+ * what is actually used.
+ */
+struct phy_plca_status {
+ bool pst;
+};
+
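Per the kernel-doc above, a driver fills only the fields it implements and leaves the rest at -1. A minimal sketch of a PHY driver's get_plca_cfg callback; the mydrv_read_plca_*() register accessors are hypothetical. Drivers on hardware with a compliant register map can instead use genphy_c45_plca_get_cfg(), declared later in this header:

static int mydrv_get_plca_cfg(struct phy_device *dev,
			      struct phy_plca_cfg *plca_cfg)
{
	plca_cfg->version = -1;		/* not readable on this PHY */
	plca_cfg->enabled = mydrv_read_plca_enabled(dev);
	plca_cfg->node_id = mydrv_read_plca_node_id(dev);
	plca_cfg->node_cnt = -1;
	plca_cfg->to_tmr = -1;
	plca_cfg->burst_cnt = -1;
	plca_cfg->burst_tmr = -1;
	return 0;
}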
+/**
* struct phy_driver - Driver structure for a particular PHY type
*
* @mdiodrv: Data common to all MDIO devices
@@ -976,6 +1041,17 @@ struct phy_driver {
int (*get_sqi)(struct phy_device *dev);
/** @get_sqi_max: Get the maximum signal quality indication */
int (*get_sqi_max)(struct phy_device *dev);
+
+ /* PLCA RS interface */
+ /** @get_plca_cfg: Return the current PLCA configuration */
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ /** @set_plca_cfg: Set the PLCA configuration */
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg);
+ /** @get_plca_status: Return the current PLCA status info */
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -1543,6 +1619,7 @@ int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
int phy_speed_up(struct phy_device *phydev);
+bool phy_check_valid(int speed, int duplex, unsigned long *features);
int phy_restart_aneg(struct phy_device *phydev);
int phy_reset_after_clk_enable(struct phy_device *phydev);
@@ -1666,6 +1743,7 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_read_eee_abilities(struct phy_device *phydev);
int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
int genphy_c45_baset1_read_status(struct phy_device *phydev);
@@ -1674,6 +1752,19 @@ int genphy_c45_loopback(struct phy_device *phydev, bool enable);
int genphy_c45_pma_resume(struct phy_device *phydev);
int genphy_c45_pma_suspend(struct phy_device *phydev);
int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable);
+int genphy_c45_plca_get_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_set_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_get_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
+ unsigned long *lp, bool *is_enabled);
+int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
+ struct ethtool_eee *data);
+int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
+ struct ethtool_eee *data);
+int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv);
/* Generic C45 PHY driver */
extern struct phy_driver genphy_c45_driver;
@@ -1772,6 +1863,13 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
int phy_ethtool_get_sset_count(struct phy_device *phydev);
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data);
+int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+int phy_ethtool_get_plca_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
{
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 0e8a1f2ceb2f..851a855d3868 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -81,6 +81,9 @@
/********** net/core/page_pool.c **********/
#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
+/********** net/core/skbuff.c **********/
+#define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA))
+
/********** kernel/bpf/ **********/
#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index 2b6ea36ad162..1b5a953c6bbc 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -10,8 +10,12 @@
#ifndef _PTP_CLASSIFY_H_
#define _PTP_CLASSIFY_H_
+#include <asm/unaligned.h>
#include <linux/ip.h>
+#include <linux/ktime.h>
#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
#define PTP_CLASS_NONE 0x00 /* not a PTP event message */
#define PTP_CLASS_V1 0x01 /* protocol version 1 */
@@ -130,6 +134,69 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
}
/**
+ * ptp_check_diff8 - Computes new checksum (when altering a 64-bit field)
+ * @old: old field value
+ * @new: new field value
+ * @oldsum: previous checksum
+ *
+ * This function can be used to calculate a new checksum when only a single
+ * field is changed. Similar to ip_vs_check_diff*() in ip_vs.h.
+ *
+ * Return: Updated checksum
+ */
+static inline __wsum ptp_check_diff8(__be64 old, __be64 new, __wsum oldsum)
+{
+ __be64 diff[2] = { ~old, new };
+
+ return csum_partial(diff, sizeof(diff), oldsum);
+}
+
+/**
+ * ptp_header_update_correction - Update PTP header's correction field
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ * @hdr: ptp header
+ * @correction: new correction value
+ *
+ * This updates the correction field of a PTP header and updates the UDP
+ * checksum (if UDP is used as transport). It is needed for hardware capable of
+ * one-step P2P that does not already modify the correction field of Pdelay_Req
+ * event messages on ingress.
+ */
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+ __be64 correction_old;
+ struct udphdr *uhdr;
+
+ /* previous correction value is required for checksum update. */
+ memcpy(&correction_old, &hdr->correction, sizeof(correction_old));
+
+ /* write new correction value */
+ put_unaligned_be64((u64)correction, &hdr->correction);
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ case PTP_CLASS_IPV6:
+ /* locate udp header */
+ uhdr = (struct udphdr *)((char *)hdr - sizeof(struct udphdr));
+ break;
+ default:
+ return;
+ }
+
+ /* update checksum */
+ uhdr->check = csum_fold(ptp_check_diff8(correction_old,
+ hdr->correction,
+ ~csum_unfold(uhdr->check)));
+ if (!uhdr->check)
+ uhdr->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
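A hedged sketch of the intended use: on ingress of a Pdelay_Req, fold a link delay into the correction field, which PTP carries as nanoseconds scaled by 2^16 (the delay value below is illustrative):

static void example_rx_correction(struct sk_buff *skb, s64 ingress_delay_ns)
{
	unsigned int type = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	s64 correction;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return;

	correction = get_unaligned_be64(&hdr->correction);
	correction += ingress_delay_ns << 16;
	ptp_header_update_correction(skb, type, hdr, correction);
}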
+/**
* ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message
* @skb: packet buffer
* @type: type of the packet (see ptp_classify_raw())
@@ -166,5 +233,11 @@ static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
{
return false;
}
+
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a3bc695bcca0..029b9e09d3ca 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -38,6 +38,14 @@ struct regmap_field;
struct snd_ac97;
struct sdw_slave;
+/*
+ * regmap_mdio address encoding. IEEE 802.3ae clause 45 addresses consist of a
+ * device address and a register address.
+ */
+#define REGMAP_MDIO_C45_DEVAD_SHIFT 16
+#define REGMAP_MDIO_C45_DEVAD_MASK GENMASK(20, 16)
+#define REGMAP_MDIO_C45_REGNUM_MASK GENMASK(15, 0)
+
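With this encoding a clause-45 register is addressed as (devad << 16) | regnum. A worked example for device address 1 (PMA/PMD), register 0:

unsigned int reg = (1 << REGMAP_MDIO_C45_DEVAD_SHIFT) | 0x0000;
/* (reg & REGMAP_MDIO_C45_DEVAD_MASK) >> 16 == 1
 * reg & REGMAP_MDIO_C45_REGNUM_MASK == 0x0000
 */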
/* An enum of all the supported cache types */
enum regcache_type {
REGCACHE_NONE,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4c8492401a10..ff7ad331fb82 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -23,17 +23,11 @@
#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
-#include <linux/net.h>
-#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
-#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
-#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <linux/llist.h>
@@ -261,6 +255,14 @@
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* For X bytes available in skb->head, what is the minimal
+ * allocation needed, knowing struct skb_shared_info needs
+ * to be aligned.
+ */
+#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
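A worked example of the arithmetic, assuming SMP_CACHE_BYTES is 64:

/* SKB_HEAD_ALIGN(1500) = SKB_DATA_ALIGN(1500)
 *                      + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *                      = 1536 + sizeof(struct skb_shared_info) rounded
 *                        up to a multiple of 64
 */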
#define SKB_MAX_ORDER(X, ORDER) \
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
@@ -280,6 +282,7 @@ struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;
+struct ts_config;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
@@ -316,12 +319,16 @@ struct nf_bridge_info {
* and read by ovs to recirc_id.
*/
struct tc_skb_ext {
- __u32 chain;
+ union {
+ u64 act_miss_cookie;
+ __u32 chain;
+ };
__u16 mru;
__u16 zone;
u8 post_ct:1;
u8 post_ct_snat:1;
u8 post_ct_dnat:1;
+ u8 act_miss:1; /* Set if act_miss_cookie is used */
};
#endif
@@ -1240,7 +1247,7 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_head_cache;
+extern struct kmem_cache *skbuff_cache;
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
@@ -1743,6 +1750,13 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
skb->next = NULL;
}
+static inline void skb_poison_list(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+ skb->next = SKB_LIST_POISON_NEXT;
+#endif
+}
+
/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb) \
for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
@@ -2621,13 +2635,24 @@ void *skb_pull_data(struct sk_buff *skb, size_t len);
void *__pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline enum skb_drop_reason
+pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
if (likely(len <= skb_headlen(skb)))
- return true;
+ return SKB_NOT_DROPPED_YET;
+
if (unlikely(len > skb->len))
- return false;
- return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
+
+ if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb))))
+ return SKB_DROP_REASON_NOMEM;
+
+ return SKB_NOT_DROPPED_YET;
+}
+
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
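The reason-returning variant lets callers feed a precise drop reason to kfree_skb_reason() instead of collapsing every failure into a boolean. A usage sketch (the parse function is illustrative):

static int example_parse(struct sk_buff *skb, unsigned int hdrlen)
{
	enum skb_drop_reason reason;

	reason = pskb_may_pull_reason(skb, hdrlen);
	if (reason != SKB_NOT_DROPPED_YET) {
		kfree_skb_reason(skb, reason);
		return -EINVAL;
	}
	/* the first hdrlen bytes are now linear and safe to read */
	return 0;
}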
static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index a0746d4aec20..fd0b0605cf90 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -103,7 +103,6 @@ struct mtk_wed_device {
struct {
int size;
- struct page_frag_cache rx_page;
struct mtk_rxbm_desc *desc;
dma_addr_t desc_phys;
} rx_buf_ring;
@@ -151,6 +150,8 @@ struct mtk_wed_device {
void (*release_rx_buf)(struct mtk_wed_device *wed);
void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
struct mtk_wed_wo_rx_stats *stats);
+ int (*reset)(struct mtk_wed_device *wed);
+ void (*reset_complete)(struct mtk_wed_device *wed);
} wlan;
#endif
};
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
deleted file mode 100644
index d278576ab692..000000000000
--- a/include/linux/spi/at86rf230.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AT86RF230/RF231 driver
- *
- * Copyright (C) 2009-2012 Siemens AG
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
- */
-#ifndef AT86RF230_H
-#define AT86RF230_H
-
-struct at86rf230_platform_data {
- int rstn;
- int slp_tr;
- int dig2;
- u8 xtal_trim;
-};
-
-#endif
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
deleted file mode 100644
index 449bacf10700..000000000000
--- a/include/linux/spi/cc2520.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Header file for cc2520 radio driver
- *
- * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
- * Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
- * P Sowjanya <sowjanyap@cdac.in>
- */
-
-#ifndef __CC2520_H
-#define __CC2520_H
-
-struct cc2520_platform_data {
- int fifo;
- int fifop;
- int cca;
- int sfd;
- int reset;
- int vreg;
-};
-
-#endif
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 8530c7328269..fae6beaaa217 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -11,6 +11,11 @@ struct device;
struct file;
struct task_struct;
+static inline bool string_is_terminated(const char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
/* Descriptions of the types of units to
* print in */
enum string_size_units {
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 4564faafd0e1..fea32377f7c7 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -245,5 +245,38 @@ static inline const struct cpumask *cpu_cpu_mask(int cpu)
return cpumask_of_node(cpu_to_node(cpu));
}
+#ifdef CONFIG_NUMA
+int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
+extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
+#else
+static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+{
+ return cpumask_nth(cpu, cpus);
+}
+
+static inline const struct cpumask *
+sched_numa_hop_mask(unsigned int node, unsigned int hops)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_NUMA */
+
+/**
+ * for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
+ * from a given node.
+ * @mask: the iteration variable.
+ * @node: the NUMA node to start the search from.
+ *
+ * Requires rcu_lock to be held.
+ *
+ * Yields cpu_online_mask for @node == NUMA_NO_NODE.
+ */
+#define for_each_numa_hop_mask(mask, node) \
+ for (unsigned int __hops = 0; \
+ mask = (node != NUMA_NO_NODE || __hops) ? \
+ sched_numa_hop_mask(node, __hops) : \
+ cpu_online_mask, \
+ !IS_ERR_OR_NULL(mask); \
+ __hops++)
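A usage sketch, under RCU as the kernel-doc requires. This assumes each successive mask includes the CPUs of the previous hops, so already-visited CPUs are masked off with the previous iteration's mask:

static void example_visit_cpus(int node)
{
	const struct cpumask *mask, *prev = cpu_none_mask;
	unsigned int cpu;

	rcu_read_lock();
	for_each_numa_hop_mask(mask, node) {
		for_each_cpu_andnot(cpu, mask, prev)
			pr_debug("visiting cpu %u\n", cpu);
		prev = mask;
	}
	rcu_read_unlock();
}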
#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 46040d66334a..ffe48e69b3f3 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -213,16 +213,4 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
return __u64_stats_fetch_retry(syncp, start);
}
-/* Obsolete interfaces */
-static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
-{
- return u64_stats_fetch_begin(syncp);
-}
-
-static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- unsigned int start)
-{
- return u64_stats_fetch_retry(syncp, start);
-}
-
#endif /* _LINUX_U64_STATS_SYNC_H */
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 35d7eedb5e8e..3f9c16611306 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -7,6 +7,109 @@
#include <net/sock.h>
#include <net/af_vsock.h>
+#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))
+
+struct virtio_vsock_skb_cb {
+ bool reply;
+ bool tap_delivered;
+};
+
+#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
+
+static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
+{
+ return (struct virtio_vsock_hdr *)skb->head;
+}
+
+static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
+{
+ return VIRTIO_VSOCK_SKB_CB(skb)->reply;
+}
+
+static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
+}
+
+static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
+{
+ return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
+}
+
+static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
+}
+
+static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
+}
+
+static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
+{
+ u32 len;
+
+ len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
+
+ if (len > 0)
+ skb_put(skb, len);
+}
+
+static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+{
+ struct sk_buff *skb;
+
+ if (size < VIRTIO_VSOCK_SKB_HEADROOM)
+ return NULL;
+
+ skb = alloc_skb(size, mask);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
+ return skb;
+}
+
+static inline void
+virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_head(list, skb);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline void
+virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_tail(list, skb);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ spin_lock_bh(&list->lock);
+ skb = __skb_dequeue(list);
+ spin_unlock_bh(&list->lock);
+
+ return skb;
+}
+
+static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_purge(list);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
+{
+ return (size_t)(skb_end_pointer(skb) - skb->head);
+}
+
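Taken together, these helpers replace the old virtio_vsock_pkt flow: allocate an skb with headroom for the virtio header, let the device fill the header at skb->head, then expose the payload. A hedged RX-side sketch:

static struct sk_buff *example_rx_alloc(u32 payload_len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM +
				     payload_len, gfp);
	if (!skb)
		return NULL;

	/* once the device has written the header and hdr->len is
	 * valid, make the payload visible:
	 */
	virtio_vsock_skb_rx_put(skb);
	return skb;
}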
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
@@ -35,23 +138,10 @@ struct virtio_vsock_sock {
u32 last_fwd_cnt;
u32 rx_bytes;
u32 buf_alloc;
- struct list_head rx_queue;
+ struct sk_buff_head rx_queue;
u32 msg_count;
};
-struct virtio_vsock_pkt {
- struct virtio_vsock_hdr hdr;
- struct list_head list;
- /* socket refcnt not held, only use for cancellation */
- struct vsock_sock *vsk;
- void *buf;
- u32 buf_len;
- u32 len;
- u32 off;
- bool reply;
- bool tap_delivered;
-};
-
struct virtio_vsock_pkt_info {
u32 remote_cid, remote_port;
struct vsock_sock *vsk;
@@ -68,7 +158,7 @@ struct virtio_transport {
struct vsock_transport transport;
/* Takes ownership of the packet */
- int (*send_pkt)(struct virtio_vsock_pkt *pkt);
+ int (*send_pkt)(struct sk_buff *skb);
};
ssize_t
@@ -149,11 +239,10 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
void virtio_transport_recv_pkt(struct virtio_transport *t,
- struct virtio_vsock_pkt *pkt);
-void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
+ struct sk_buff *skb);
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
-void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
-
+void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
+int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
#endif /* _LINUX_VIRTIO_VSOCK_H */
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 2a6f443f0ef6..4ae0580b63ca 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -39,7 +39,7 @@ struct tc_action {
struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct tc_cookie __rcu *act_cookie;
+ struct tc_cookie __rcu *user_cookie;
struct tcf_chain __rcu *goto_chain;
u32 tcfa_flags;
u8 hw_stats;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 8d773b042c85..400f8a7d0c3f 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -2156,7 +2156,7 @@ struct hci_cp_le_big_create_sync {
__u8 mse;
__le16 timeout;
__u8 num_bis;
- __u8 bis[0];
+ __u8 bis[];
} __packed;
#define HCI_OP_LE_BIG_TERM_SYNC 0x206c
@@ -2174,7 +2174,7 @@ struct hci_cp_le_setup_iso_path {
__le16 codec_vid;
__u8 delay[3];
__u8 codec_cfg_len;
- __u8 codec_cfg[0];
+ __u8 codec_cfg[];
} __packed;
struct hci_rp_le_setup_iso_path {
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 743f6f59dff8..e18a927669c0 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -109,6 +109,8 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
#define MGMT_SETTING_WIDEBAND_SPEECH 0x00020000
+#define MGMT_SETTING_CIS_CENTRAL 0x00040000
+#define MGMT_SETTING_CIS_PERIPHERAL 0x00080000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 03d4f4deadae..f115b2550309 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1075,7 +1075,6 @@ struct survey_info {
s8 noise;
};
-#define CFG80211_MAX_WEP_KEYS 4
#define CFG80211_MAX_NUM_AKM_SUITES 10
/**
@@ -1099,9 +1098,6 @@ struct survey_info {
* port frames over NL80211 instead of the network interface.
* @control_port_no_preauth: disables pre-auth rx over the nl80211 control
* port for mac80211
- * @wep_keys: static WEP keys, if not NULL points to an array of
- * CFG80211_MAX_WEP_KEYS WEP keys
- * @wep_tx_key: key index (0..3) of the default TX static WEP key
* @psk: PSK (for devices supporting 4-way-handshake offload)
* @sae_pwd: password for SAE authentication (for devices supporting SAE
* offload)
@@ -1134,8 +1130,6 @@ struct cfg80211_crypto_settings {
bool control_port_no_encrypt;
bool control_port_over_nl80211;
bool control_port_no_preauth;
- struct key_params *wep_keys;
- int wep_tx_key;
const u8 *psk;
const u8 *sae_pwd;
u8 sae_pwd_len;
@@ -1322,6 +1316,9 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
+ * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
+ * a 20 MHz channel, lowest bit corresponding to the lowest channel.
+ * Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1356,6 +1353,7 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
+ u16 punct_bitmap;
};
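A worked example of the bitmap layout documented above: on an 80 MHz channel, bit 0 is the lowest 20 MHz subchannel, so puncturing only the second-lowest subchannel means setting bit 1:

u16 punct_bitmap = BIT(1);	/* 0x0002: second 20 MHz channel punctured */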
/**
@@ -1373,6 +1371,9 @@ struct cfg80211_ap_settings {
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
+ * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
+ * a 20 MHz channel, lowest bit corresponding to the lowest channel.
+ * Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_csa_settings {
struct cfg80211_chan_def chandef;
@@ -1385,6 +1386,7 @@ struct cfg80211_csa_settings {
bool radar_required;
bool block_tx;
u8 count;
+ u16 punct_bitmap;
};
/**
@@ -1882,6 +1884,24 @@ struct cfg80211_tid_stats {
* received packet with an FCS error matches the peer MAC address.
* @airtime_link_metric: mesh airtime link metric.
* @connected_to_as: true if mesh STA has a path to authentication server
+ * @mlo_params_valid: Indicates @assoc_link_id and @mld_addr fields are filled
+ * by the driver. Drivers use this only in cfg80211_new_sta() calls when the
+ * AP MLD's MLME/SME is offloaded to the driver. Drivers won't fill this
+ * information in cfg80211_del_sta_sinfo(), get_station() and
+ * dump_station() callbacks.
+ * @assoc_link_id: Indicates MLO link ID of the AP, with which the station
+ * completed (re)association. This information is filled for both MLO
+ * and non-MLO STA connections when the AP is affiliated with an MLD.
+ * @mld_addr: For MLO STA connection, filled with MLD address of the station.
+ * For non-MLO STA connection, filled with all zeros.
+ * @assoc_resp_ies: IEs from (Re)Association Response.
+ * This is used only when in AP mode with drivers that do not use user
+ * space MLME/SME implementation. The information is provided only for the
+ * cfg80211_new_sta() calls to notify user space of the IEs. Drivers won't
+ * fill this information in cfg80211_del_sta_sinfo(), get_station() and
+ * dump_station() callbacks. User space needs this information to determine
+ * the accepted and rejected affiliated links of the connected station.
+ * @assoc_resp_ies_len: Length of @assoc_resp_ies buffer in octets.
*/
struct station_info {
u64 filled;
@@ -1941,6 +1961,12 @@ struct station_info {
u32 airtime_link_metric;
u8 connected_to_as;
+
+ bool mlo_params_valid;
+ u8 assoc_link_id;
+ u8 mld_addr[ETH_ALEN] __aligned(2);
+ const u8 *assoc_resp_ies;
+ size_t assoc_resp_ies_len;
};
/**
@@ -3606,6 +3632,17 @@ struct cfg80211_pmk_conf {
* the real status code for failures. Used only for the authentication
* response command interface (user space to driver).
* @pmkid: The identifier to refer a PMKSA.
+ * @mld_addr: MLD address of the peer. Used by the authentication request event
+ * interface. Driver indicates this to enable MLO during the authentication
+ * offload to user space. Driver shall look at %NL80211_ATTR_MLO_SUPPORT
+ * flag capability in NL80211_CMD_CONNECT to know whether the user space
+ * supports enabling MLO during the authentication offload.
+ * User space should use the address of the interface (on which the
+ * authentication request event is reported) as the self MLD address. User space
+ * and driver should use MLD addresses in RA, TA and BSSID fields of
+ * authentication frames sent or received via cfg80211. The driver
+ * translates the MLD addresses to/from link addresses based on the link
+ * chosen for the authentication.
*/
struct cfg80211_external_auth_params {
enum nl80211_external_auth_action action;
@@ -3614,6 +3651,7 @@ struct cfg80211_external_auth_params {
unsigned int key_mgmt_suite;
u16 status;
const u8 *pmkid;
+ u8 mld_addr[ETH_ALEN] __aligned(2);
};
/**
@@ -3868,12 +3906,22 @@ struct cfg80211_pmsr_request {
* the IEs of the remote peer in the event from the host driver and
* the constructed IEs by the user space in the request interface.
* @ie_len: Length of IEs in octets.
+ * @assoc_link_id: MLO link ID of the AP with which (re)association was
+ * requested by the peer. This will be filled by the driver for both MLO and
+ * non-MLO station connections when the AP is affiliated with an MLD. For
+ * non-MLD AP mode, it
+ * will be -1. Used only with OWE update event (driver to user space).
+ * @peer_mld_addr: For MLO connection, MLD address of the peer. For non-MLO
+ * connection, it will be all zeros. This is applicable only when
+ * @assoc_link_id is not -1, i.e., the AP is affiliated with an MLD. Used only
+ * with OWE update event (driver to user space).
*/
struct cfg80211_update_owe_info {
u8 peer[ETH_ALEN] __aligned(2);
u16 status;
const u8 *ie;
size_t ie_len;
+ int assoc_link_id;
+ u8 peer_mld_addr[ETH_ALEN] __aligned(2);
};
/**
@@ -4683,13 +4731,14 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
- * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
- * before connection.
* @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports bigger kek and kck keys
* @WIPHY_FLAG_SUPPORTS_MLO: This is a temporary flag gating the MLO APIs,
* in order to not have them reachable in normal drivers, until we have
* complete feature/interface combinations/etc. advertisement. No driver
* should set this flag for now.
+ * @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys.
+ * @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for
+ * NL80211_REGDOM_SET_BY_DRIVER.
*/
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
@@ -4702,7 +4751,7 @@ enum wiphy_flags {
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
WIPHY_FLAG_MESH_AUTH = BIT(10),
- /* use hole at 11 */
+ WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
/* use hole at 12 */
WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
WIPHY_FLAG_AP_UAPSD = BIT(14),
@@ -4715,7 +4764,7 @@ enum wiphy_flags {
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
- WIPHY_FLAG_HAS_STATIC_WEP = BIT(24),
+ WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = BIT(24),
};
/**
@@ -6191,6 +6240,19 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
}
/**
+ * ieee80211_is_valid_amsdu - check if subframe lengths of an A-MSDU are valid
+ *
+ * This is used to detect non-standard A-MSDU frames, e.g. the ones generated
+ * by ath10k and ath11k, where the subframe length includes the length of the
+ * mesh control field.
+ *
+ * @skb: The input A-MSDU frame without any headers.
+ * @mesh_hdr: use standard compliant mesh A-MSDU subframe header
+ * Returns: true if subframe header lengths are valid for the @mesh_hdr mode
+ */
+bool ieee80211_is_valid_amsdu(struct sk_buff *skb, bool mesh_hdr);
+
+/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
*
* Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
@@ -6205,11 +6267,36 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
* @check_da: DA to check in the inner ethernet header, or NULL
* @check_sa: SA to check in the inner ethernet header, or NULL
+ * @mesh_control: A-MSDU subframe header includes the mesh control field
*/
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- const u8 *check_da, const u8 *check_sa);
+ const u8 *check_da, const u8 *check_sa,
+ bool mesh_control);
+
+/**
+ * ieee80211_get_8023_tunnel_proto - get RFC1042 or bridge tunnel encap protocol
+ *
+ * Check for RFC1042 or bridge tunnel header and fetch the encapsulated
+ * protocol.
+ *
+ * @hdr: pointer to the MSDU payload
+ * @proto: destination pointer to store the protocol
+ * Return: true if encapsulation was found
+ */
+bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto);
+
+/**
+ * ieee80211_strip_8023_mesh_hdr - strip mesh header from converted 802.3 frames
+ *
+ * Strip the mesh header, which was left in by ieee80211_data_to_8023 as part
+ * of the MSDU data. Also move any source/destination addresses from the mesh
+ * header to the ethernet header (if present).
+ *
+ * @skb: The 802.3 frame with embedded mesh header
+ */
+int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb);
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
@@ -7783,7 +7870,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
/**
* cfg80211_del_sta_sinfo - notify userspace about deletion of a station
* @dev: the netdev
- * @mac_addr: the station's address
+ * @mac_addr: the station's address. For MLD station, MLD address is used.
* @sinfo: the station information/statistics
* @gfp: allocation flags
*/
@@ -7794,7 +7881,7 @@ void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
* cfg80211_del_sta - notify userspace about deletion of a station
*
* @dev: the netdev
- * @mac_addr: the station's address
+ * @mac_addr: the station's address. For MLD station, MLD address is used.
* @gfp: allocation flags
*/
static inline void cfg80211_del_sta(struct net_device *dev,
@@ -8280,13 +8367,14 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* @dev: the device which switched channels
* @chandef: the new channel definition
* @link_id: the link ID for MLO, must be 0 for non-MLO
+ * @punct_bitmap: the new puncturing bitmap
*
* Caller must acquire wdev_lock, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id);
+ unsigned int link_id, u16 punct_bitmap);
/*
* cfg80211_ch_switch_started_notify - notify channel switch start
@@ -8295,6 +8383,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
* @quiet: whether or not immediate quiet was requested by the AP
+ * @punct_bitmap: the future puncturing bitmap
*
* Inform the userspace about the channel switch that has just
* started, so that it can take appropriate actions (eg. starting
@@ -8303,7 +8392,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet);
+ bool quiet, u16 punct_bitmap);
/**
* ieee80211_operating_class_to_band - convert operating class to band
@@ -8850,12 +8939,11 @@ void cfg80211_bss_flush(struct wiphy *wiphy);
/**
* cfg80211_bss_color_notify - notify about bss color event
* @dev: network device
- * @gfp: allocation flags
* @cmd: the actual event we want to notify
* @count: the number of TBTTs until the color change happens
* @color_bitmap: representations of the colors that the local BSS is aware of
*/
-int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
+int cfg80211_bss_color_notify(struct net_device *dev,
enum nl80211_commands cmd, u8 count,
u64 color_bitmap);
@@ -8863,13 +8951,11 @@ int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
* cfg80211_obss_color_collision_notify - notify about bss color collision
* @dev: network device
* @color_bitmap: representations of the colors that the local BSS is aware of
- * @gfp: allocation flags
*/
static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
- u64 color_bitmap, gfp_t gfp)
+ u64 color_bitmap)
{
- return cfg80211_bss_color_notify(dev, gfp,
- NL80211_CMD_OBSS_COLOR_COLLISION,
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_OBSS_COLOR_COLLISION,
0, color_bitmap);
}
@@ -8883,8 +8969,7 @@ static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
static inline int cfg80211_color_change_started_notify(struct net_device *dev,
u8 count)
{
- return cfg80211_bss_color_notify(dev, GFP_KERNEL,
- NL80211_CMD_COLOR_CHANGE_STARTED,
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_STARTED,
count, 0);
}
@@ -8896,8 +8981,7 @@ static inline int cfg80211_color_change_started_notify(struct net_device *dev,
*/
static inline int cfg80211_color_change_aborted_notify(struct net_device *dev)
{
- return cfg80211_bss_color_notify(dev, GFP_KERNEL,
- NL80211_CMD_COLOR_CHANGE_ABORTED,
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_ABORTED,
0, 0);
}
@@ -8909,9 +8993,21 @@ static inline int cfg80211_color_change_aborted_notify(struct net_device *dev)
*/
static inline int cfg80211_color_change_notify(struct net_device *dev)
{
- return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+ return cfg80211_bss_color_notify(dev,
NL80211_CMD_COLOR_CHANGE_COMPLETED,
0, 0);
}
+/**
+ * cfg80211_valid_disable_subchannel_bitmap - validate puncturing bitmap
+ * @bitmap: bitmap to be validated
+ * @chandef: channel definition
+ *
+ * Validate the puncturing bitmap.
+ *
+ * Return: %true if the bitmap is valid. %false otherwise.
+ */
+bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
+ const struct cfg80211_chan_def *chandef);
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index d09c393d229f..0c2778a836db 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -18,6 +18,8 @@
struct wpan_phy;
struct wpan_phy_cca;
+struct cfg802154_scan_request;
+struct cfg802154_beacon_request;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
struct ieee802154_llsec_device_key;
@@ -67,6 +69,14 @@ struct cfg802154_ops {
struct wpan_dev *wpan_dev, bool mode);
int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, bool ackreq);
+ int (*trigger_scan)(struct wpan_phy *wpan_phy,
+ struct cfg802154_scan_request *request);
+ int (*abort_scan)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ int (*send_beacons)(struct wpan_phy *wpan_phy,
+ struct cfg802154_beacon_request *request);
+ int (*stop_beacons)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
void (*get_llsec_table)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
@@ -241,6 +251,17 @@ static inline void wpan_phy_net_set(struct wpan_phy *wpan_phy, struct net *net)
write_pnet(&wpan_phy->_net, net);
}
+static inline bool ieee802154_chan_is_valid(struct wpan_phy *phy,
+ u8 page, u8 channel)
+{
+ if (page > IEEE802154_MAX_PAGE ||
+ channel > IEEE802154_MAX_CHANNEL ||
+ !(phy->supported.channels[page] & BIT(channel)))
+ return false;
+
+ return true;
+}
+
/**
* struct ieee802154_addr - IEEE802.15.4 device address
* @mode: Address mode from frame header. Can be one of:
@@ -278,6 +299,60 @@ struct ieee802154_coord_desc {
bool gts_permit;
};
+/**
+ * struct cfg802154_scan_request - Scan request
+ *
+ * @type: type of scan to be performed
+ * @page: page on which to perform the scan
+ * @channels: channels in the %page to be scanned
+ * @duration: time spent on each channel, calculated with:
+ * aBaseSuperframeDuration * (2^duration + 1)
+ * @wpan_dev: the wpan device on which to perform the scan
+ * @wpan_phy: the wpan phy on which to perform the scan
+ */
+struct cfg802154_scan_request {
+ enum nl802154_scan_types type;
+ u8 page;
+ u32 channels;
+ u8 duration;
+ struct wpan_dev *wpan_dev;
+ struct wpan_phy *wpan_phy;
+};
+
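A worked example of the duration formula above, for the 2.4 GHz O-QPSK PHY where aBaseSuperframeDuration is 960 symbols (15.36 ms at 16 us per symbol): duration = 5 gives 960 * (2^5 + 1) = 31680 symbols, roughly 507 ms spent on each scanned channel.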
+/**
+ * struct cfg802154_beacon_request - Beacon request descriptor
+ *
+ * @interval: interval order n between beacon transmissions, expressed as a
+ * multiple of the superframe duration: aBaseSuperframeDuration * (2^n).
+ * If the interval order is greater than or equal to 15, beacons are not
+ * passively sent out at a fixed rate; instead the device is informed
+ * that it should answer beacon requests as part of active scan
+ * procedures
+ * @wpan_dev: the concerned wpan device
+ * @wpan_phy: the wpan phy this was for
+ */
+struct cfg802154_beacon_request {
+ u8 interval;
+ struct wpan_dev *wpan_dev;
+ struct wpan_phy *wpan_phy;
+};
+
+/**
+ * struct cfg802154_mac_pkt - MAC packet descriptor (beacon/command)
+ * @node: MAC packets to process list member
+ * @skb: the received sk_buff
+ * @sdata: the interface on which @skb was received
+ * @page: page configuration when @skb was received
+ * @channel: channel configuration when @skb was received
+ */
+struct cfg802154_mac_pkt {
+ struct list_head node;
+ struct sk_buff *skb;
+ struct ieee802154_sub_if_data *sdata;
+ u8 page;
+ u8 channel;
+};
+
struct ieee802154_llsec_key_id {
u8 mode;
u8 id;
@@ -447,6 +522,7 @@ static inline const char *wpan_phy_name(struct wpan_phy *phy)
return dev_name(&phy->dev);
}
-void ieee802154_configure_durations(struct wpan_phy *phy);
+void ieee802154_configure_durations(struct wpan_phy *phy,
+ unsigned int page, unsigned int channel);
#endif /* __NET_CFG802154_H */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 6bc783b7a06c..1338cb92c8e7 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -18,8 +18,10 @@
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
-#include <linux/uaccess.h>
#include <asm/checksum.h>
+#if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER)
+#include <linux/uaccess.h>
+#endif
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static __always_inline
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 8841ab6c2de7..42207fc44660 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -19,18 +19,32 @@ struct dcb_app_type {
u8 dcbx;
};
+u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app);
+int dcb_setrewr(struct net_device *dev, struct dcb_app *app);
+int dcb_delrewr(struct net_device *dev, struct dcb_app *app);
+
int dcb_setapp(struct net_device *, struct dcb_app *);
u8 dcb_getapp(struct net_device *, struct dcb_app *);
int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
+struct dcb_rewr_prio_pcp_map {
+ u16 map[IEEE_8021QAZ_MAX_TCS];
+};
+
+void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev,
+ struct dcb_rewr_prio_pcp_map *p_map);
+
struct dcb_ieee_app_prio_map {
u64 map[IEEE_8021QAZ_MAX_TCS];
};
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
struct dcb_ieee_app_prio_map *p_map);
+void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map);
+
struct dcb_ieee_app_dscp_map {
u8 map[64];
};
@@ -113,6 +127,10 @@ struct dcbnl_rtnl_ops {
/* apptrust */
int (*dcbnl_setapptrust)(struct net_device *, u8 *, int);
int (*dcbnl_getapptrust)(struct net_device *, u8 *, int *);
+
+ /* rewrite */
+ int (*dcbnl_setrewr)(struct net_device *dev, struct dcb_app *app);
+ int (*dcbnl_delrewr)(struct net_device *dev, struct dcb_app *app);
};
#endif /* __NET_DCBNL_H__ */
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 6a2e4f21779f..6a942e70e451 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -146,7 +146,6 @@ struct devlink_port {
initialized:1;
struct delayed_work type_warn_dw;
struct list_head reporter_list;
- struct mutex reporters_lock; /* Protects reporter_list */
struct devlink_rate *devlink_rate;
struct devlink_linecard *linecard;
@@ -490,6 +489,10 @@ struct devlink_param_item {
const struct devlink_param *param;
union devlink_param_value driverinit_value;
bool driverinit_value_valid;
+ union devlink_param_value driverinit_value_new; /* Not reachable
+ * until reload.
+ */
+ bool driverinit_value_new_valid;
};
enum devlink_param_generic_id {
@@ -1646,7 +1649,9 @@ static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
{
return devlink_alloc_ns(ops, priv_size, &init_net, dev);
}
-void devlink_set_features(struct devlink *devlink, u64 features);
+
+int devl_register(struct devlink *devlink);
+void devl_unregister(struct devlink *devlink);
void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
@@ -1685,9 +1690,9 @@ void devl_rate_nodes_destroy(struct devlink *devlink);
void devlink_port_linecard_set(struct devlink_port *devlink_port,
struct devlink_linecard *linecard);
struct devlink_linecard *
-devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
- const struct devlink_linecard_ops *ops, void *priv);
-void devlink_linecard_destroy(struct devlink_linecard *linecard);
+devl_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv);
+void devl_linecard_destroy(struct devlink_linecard *linecard);
void devlink_linecard_provision_set(struct devlink_linecard *linecard,
const char *type);
void devlink_linecard_provision_clear(struct devlink_linecard *linecard);
@@ -1766,21 +1771,23 @@ void devl_resource_occ_get_unregister(struct devlink *devlink,
void devlink_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
+int devl_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
int devlink_params_register(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
+void devl_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
void devlink_params_unregister(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
-int devlink_param_register(struct devlink *devlink,
- const struct devlink_param *param);
-void devlink_param_unregister(struct devlink *devlink,
- const struct devlink_param *param);
-int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *init_val);
-int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val);
-void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+int devl_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *val);
+void devl_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val);
+void devl_param_value_changed(struct devlink *devlink, u32 param_id);
struct devlink_region *devl_region_create(struct devlink *devlink,
const struct devlink_region_ops *ops,
u32 region_max_snapshots,
@@ -1863,20 +1870,30 @@ int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
const void *value, u32 value_len);
struct devlink_health_reporter *
-devlink_health_reporter_create(struct devlink *devlink,
- const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+devl_port_health_reporter_create(struct devlink_port *port,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
struct devlink_health_reporter *
devlink_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
u64 graceful_period, void *priv);
+struct devlink_health_reporter *
+devl_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
+struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
void
-devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
+devl_health_reporter_destroy(struct devlink_health_reporter *reporter);
void
-devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter);
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
void *
devlink_health_reporter_priv(struct devlink_health_reporter *reporter);
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
index 70539288f995..c0a3ea806cd5 100644
--- a/include/net/dropreason.h
+++ b/include/net/dropreason.h
@@ -71,6 +71,13 @@
FN(DUP_FRAG) \
FN(FRAG_REASM_TIMEOUT) \
FN(FRAG_TOO_FAR) \
+ FN(TCP_MINTTL) \
+ FN(IPV6_BAD_EXTHDR) \
+ FN(IPV6_NDISC_FRAG) \
+ FN(IPV6_NDISC_HOP_LIMIT) \
+ FN(IPV6_NDISC_BAD_CODE) \
+ FN(IPV6_NDISC_BAD_OPTIONS) \
+ FN(IPV6_NDISC_NS_OTHERHOST) \
FNe(MAX)
/**
@@ -313,6 +320,25 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_FRAG_TOO_FAR,
/**
+ * @SKB_DROP_REASON_TCP_MINTTL: IPv4 TTL or IPv6 hop limit below
+ * the threshold (IP_MINTTL or IPV6_MINHOPCOUNT).
+ */
+ SKB_DROP_REASON_TCP_MINTTL,
+ /** @SKB_DROP_REASON_IPV6_BAD_EXTHDR: Bad IPv6 extension header. */
+ SKB_DROP_REASON_IPV6_BAD_EXTHDR,
+ /** @SKB_DROP_REASON_IPV6_NDISC_FRAG: invalid frag (suppress_frag_ndisc). */
+ SKB_DROP_REASON_IPV6_NDISC_FRAG,
+ /** @SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT: invalid hop limit. */
+ SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT,
+ /** @SKB_DROP_REASON_IPV6_NDISC_BAD_CODE: invalid NDISC icmp6 code. */
+ SKB_DROP_REASON_IPV6_NDISC_BAD_CODE,
+ /** @SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS: invalid NDISC options. */
+ SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS,
+ /** @SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST: neighbour solicitation
+ * for another host.
+ */
+ SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST,
+ /**
* @SKB_DROP_REASON_MAX: the maximum of drop reason, which shouldn't be
* used as a real 'reason'
*/
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 96086289aa9b..a15f17a38eca 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -938,6 +938,17 @@ struct dsa_switch_ops {
struct ethtool_ts_info *ts);
/*
+ * ethtool MAC merge layer
+ */
+ int (*get_mm)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_state *state);
+ int (*set_mm)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_stats *stats);
+
+ /*
* DCB ops
*/
int (*port_get_default_prio)(struct dsa_switch *ds, int port);
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 88ff7bb2bb9b..632086b2f644 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -16,7 +16,7 @@ struct dst_ops {
unsigned short family;
unsigned int gc_thresh;
- int (*gc)(struct dst_ops *ops);
+ void (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
unsigned int (*mtu)(const struct dst_entry *);
diff --git a/include/net/flow.h b/include/net/flow.h
index 2f0da4f0318b..bb8651a6eaa7 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -8,12 +8,13 @@
#ifndef _NET_FLOW_H
#define _NET_FLOW_H
-#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/atomic.h>
-#include <net/flow_dissector.h>
+#include <linux/container_of.h>
#include <linux/uidgid.h>
+struct flow_keys;
+
/*
* ifindex generation is per-net namespace, and loopback is
* always the 1st device in ns (see net_dev_init), thus any
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 0400a0ac8a29..118082eae48c 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -228,6 +228,8 @@ void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
struct flow_action_entry {
enum flow_action_id id;
u32 hw_index;
+ unsigned long cookie;
+ u64 miss_cookie;
enum flow_action_hw_stats hw_stats;
action_destr destructor;
void *destructor_priv;
@@ -320,7 +322,7 @@ struct flow_action_entry {
u16 sid;
} pppoe;
};
- struct flow_action_cookie *cookie; /* user defined action cookie */
+ struct flow_action_cookie *user_cookie; /* user defined action cookie */
};
struct flow_action {
@@ -593,6 +595,7 @@ struct flow_cls_common_offload {
struct flow_cls_offload {
struct flow_cls_common_offload common;
enum flow_cls_command command;
+ bool use_act_stats;
unsigned long cookie;
struct flow_rule *rule;
struct flow_stats stats;
@@ -610,6 +613,7 @@ struct flow_offload_action {
enum offload_act_command command;
enum flow_action_id id;
u32 index;
+ unsigned long cookie;
struct flow_stats stats;
struct flow_action action;
};
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 4c33a20ea57f..da8a3e648c7a 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -38,6 +38,42 @@
#include <net/cfg802154.h>
+struct ieee802154_beacon_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 beacon_order:4,
+ superframe_order:4,
+ final_cap_slot:4,
+ battery_life_ext:1,
+ reserved0:1,
+ pan_coordinator:1,
+ assoc_permit:1;
+ u8 gts_count:3,
+ gts_reserved:4,
+ gts_permit:1;
+ u8 pend_short_addr_count:3,
+ reserved1:1,
+ pend_ext_addr_count:3,
+ reserved2:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 assoc_permit:1,
+ pan_coordinator:1,
+ reserved0:1,
+ battery_life_ext:1,
+ final_cap_slot:4,
+ superframe_order:4,
+ beacon_order:4;
+ u8 gts_permit:1,
+ gts_reserved:4,
+ gts_count:3;
+ u8 reserved2:1,
+ pend_ext_addr_count:3,
+ reserved1:1,
+ pend_short_addr_count:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 level:3,
@@ -93,6 +129,13 @@ enum ieee802154_frame_version {
IEEE802154_MULTIPURPOSE_STD = IEEE802154_2003_STD,
};
+enum ieee802154_addressing_mode {
+ IEEE802154_NO_ADDRESSING,
+ IEEE802154_RESERVED,
+ IEEE802154_SHORT_ADDRESSING,
+ IEEE802154_EXTENDED_ADDRESSING,
+};
+
struct ieee802154_hdr {
struct ieee802154_hdr_fc fc;
u8 seq;
@@ -101,6 +144,11 @@ struct ieee802154_hdr {
struct ieee802154_sechdr sec;
};
+struct ieee802154_beacon_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_beacon_hdr mac_pl;
+};
+
/* pushes hdr onto the skb. fields of hdr->fc that can be calculated from
* the contents of hdr will be, and the actual value of those bits in
* hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
@@ -126,6 +174,10 @@ int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
*/
int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
+/* pushes a beacon frame into an skb */
+int ieee802154_beacon_push(struct sk_buff *skb,
+ struct ieee802154_beacon_frame *beacon);
+
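A hedged construction sketch for the helper above: building an answer-only beacon (the interval-order-15 semantics documented earlier) and pushing it into a freshly allocated skb. IEEE802154_MTU and IEEE802154_FC_TYPE_BEACON come from linux/ieee802154.h; error handling is abbreviated.

	struct ieee802154_beacon_frame frame = {};
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(IEEE802154_MTU, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	frame.mhr.fc.type = IEEE802154_FC_TYPE_BEACON;
	frame.mac_pl.beacon_order = 15;		/* no periodic beacons */
	frame.mac_pl.superframe_order = 15;
	frame.mac_pl.final_cap_slot = 15;

	ret = ieee802154_beacon_push(skb, &frame);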
int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
static inline int
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index bf5654ce711e..51857117ac09 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -249,6 +249,10 @@ struct inet_sock {
__be32 mc_addr;
struct ip_mc_socklist __rcu *mc_list;
struct inet_cork_full cork;
+ struct {
+ __u16 lo;
+ __u16 hi;
+ } local_port_range;
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
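Userspace view of the new field (editor's sketch, not from the patch): the IP_LOCAL_PORT_RANGE socket option mentioned in the pull request packs both bounds into one u32, low port in bits 0-15, high port in bits 16-31. The fallback define assumes the 6.3 uapi value.

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdint.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51	/* uapi value in 6.3 */
#endif

static int set_local_port_range(int fd, uint16_t lo, uint16_t hi)
{
	uint32_t range = lo | ((uint32_t)hi << 16);	/* 0 clears the override */

	return setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
			  &range, sizeof(range));
}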
diff --git a/include/net/ip.h b/include/net/ip.h
index 144bdfbb25af..c3fffaa92d6e 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -340,7 +340,8 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
-void inet_get_local_port_range(struct net *net, int *low, int *high);
+void inet_get_local_port_range(const struct net *net, int *low, int *high);
+void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 035d61d50a98..81ee387a1fc4 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -84,10 +84,6 @@ struct dst_entry *ip6_route_input_lookup(struct net *net,
struct flowi6 *fl6,
const struct sk_buff *skb, int flags);
-struct dst_entry *ip6_route_output_flags_noref(struct net *net,
- const struct sock *sk,
- struct flowi6 *fl6, int flags);
-
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index c6c61100d244..6d71a5ff52df 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -461,6 +461,7 @@ void ip_vs_stats_free(struct ip_vs_stats *stats);
/* Multiple chains processed in same tick */
struct ip_vs_est_tick_data {
+ struct rcu_head rcu_head;
struct hlist_head chains[IPVS_EST_TICK_CHAINS];
DECLARE_BITMAP(present, IPVS_EST_TICK_CHAINS);
DECLARE_BITMAP(full, IPVS_EST_TICK_CHAINS);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 03f3af02a9a6..7332296eca44 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -436,7 +436,8 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
atomic_dec(&fl->users);
}
-void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
+ u8 code, __be32 info);
void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct icmp6hdr *thdr, int len);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index e3235b9c02c2..219fd15893b0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -340,7 +340,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
* @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
* status changed.
- *
+ * @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed.
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -375,6 +375,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
+ BSS_CHANGED_EHT_PUNCTURING = BIT_ULL(32),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -640,9 +641,11 @@ struct ieee80211_fils_discovery {
* @tx_pwr_env_num: number of @tx_pwr_env.
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
+ * @eht_puncturing: bitmap to indicate which channels are punctured in this BSS
* @csa_active: marks whether a channel switch is going on. Internally it is
* write-protected by sdata_lock and local->mtx so holding either is fine
* for read access.
+ * @csa_punct_bitmap: new puncturing bitmap for channel switch
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
* when it is not assigned. This pointer is RCU-protected due to the TX
@@ -653,6 +656,23 @@ struct ieee80211_fils_discovery {
* write-protected by sdata_lock and local->mtx so holding either is fine
* for read access.
* @color_change_color: the bss color that will be used after the change.
+ * @vht_su_beamformer: in AP mode, does this BSS support operation as a VHT SU
+ * beamformer
+ * @vht_su_beamformee: in AP mode, does this BSS support operation as a VHT SU
+ * beamformee
+ * @vht_mu_beamformer: in AP mode, does this BSS support operation as a VHT MU
+ * beamformer
+ * @vht_mu_beamformee: in AP mode, does this BSS support operation as a VHT MU
+ * beamformee
+ * @he_su_beamformer: in AP mode, does this BSS support operation as an HE SU
+ * beamformer
+ * @he_su_beamformee: in AP mode, does this BSS support operation as an HE SU
+ * beamformee
+ * @he_mu_beamformer: in AP mode, does this BSS support operation as an HE MU
+ * beamformer
+ * @he_full_ul_mumimo: does this BSS support the reception (AP) or transmission
+ * (non-AP STA) of an HE TB PPDU on an RU that spans the entire PPDU
+ * bandwidth
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -719,13 +739,25 @@ struct ieee80211_bss_conf {
u8 tx_pwr_env_num;
u8 pwr_reduction;
bool eht_support;
+ u16 eht_puncturing;
bool csa_active;
+ u16 csa_punct_bitmap;
+
bool mu_mimo_owner;
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
bool color_change_active;
u8 color_change_color;
+
+ bool vht_su_beamformer;
+ bool vht_su_beamformee;
+ bool vht_mu_beamformer;
+ bool vht_mu_beamformee;
+ bool he_su_beamformer;
+ bool he_su_beamformee;
+ bool he_mu_beamformer;
+ bool he_full_ul_mumimo;
};
/**
@@ -1436,6 +1468,7 @@ enum mac80211_rx_encoding {
RX_ENC_HT,
RX_ENC_VHT,
RX_ENC_HE,
+ RX_ENC_EHT,
};
/**
@@ -1469,7 +1502,7 @@ enum mac80211_rx_encoding {
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
- * @nss: number of streams (VHT and HE only)
+ * @nss: number of streams (VHT, HE and EHT only)
* @flag: %RX_FLAG_\*
* @encoding: &enum mac80211_rx_encoding
* @bw: &enum rate_info_bw
@@ -1477,6 +1510,9 @@ enum mac80211_rx_encoding {
* @he_ru: HE RU, from &enum nl80211_he_ru_alloc
* @he_gi: HE GI, from &enum nl80211_he_gi
* @he_dcm: HE DCM value
+ * @eht: EHT specific rate information
+ * @eht.ru: EHT RU, from &enum nl80211_eht_ru_alloc
+ * @eht.gi: EHT GI, from &enum nl80211_eht_gi
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1498,8 +1534,18 @@ struct ieee80211_rx_status {
u32 flag;
u16 freq: 13, freq_offset: 1;
u8 enc_flags;
- u8 encoding:2, bw:3, he_ru:3;
- u8 he_gi:2, he_dcm:1;
+ u8 encoding:3, bw:4;
+ union {
+ struct {
+ u8 he_ru:3;
+ u8 he_gi:2;
+ u8 he_dcm:1;
+ };
+ struct {
+ u8 ru:4;
+ u8 gi:2;
+ } eht;
+ };
u8 rate_idx;
u8 nss;
u8 rx_flags;
@@ -5884,9 +5930,6 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
* This function iterates over the interfaces associated with a given
* hardware that are currently active and calls the callback for them.
* This version can only be used while holding the wiphy mutex.
- * The driver must not call this with a lock held that it can also take in
- * response to callbacks from mac80211, and it must not call this within
- * callbacks made by mac80211 - both would result in deadlocks.
*
* @hw: the hardware struct of which the interfaces should be iterated over
* @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
@@ -5901,24 +5944,6 @@ void ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw,
void *data);
/**
- * ieee80211_iterate_stations - iterate stations
- *
- * This function iterates over all stations associated with a given
- * hardware that are currently uploaded to the driver and calls the callback
- * function for them.
- * This function allows the iterator function to sleep, when the iterator
- * function is atomic @ieee80211_iterate_stations_atomic can be used.
- *
- * @hw: the hardware struct of which the interfaces should be iterated over
- * @iterator: the iterator function to call, cannot sleep
- * @data: first argument of the iterator function
- */
-void ieee80211_iterate_stations(struct ieee80211_hw *hw,
- void (*iterator)(void *data,
- struct ieee80211_sta *sta),
- void *data);
-
-/**
* ieee80211_iterate_stations_atomic - iterate stations
*
* This function iterates over all stations associated with a given
@@ -7191,7 +7216,7 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
/**
- * ieeee80211_obss_color_collision_notify - notify userland about a BSS color
+ * ieee80211_obss_color_collision_notify - notify userland about a BSS color
* collision.
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -7200,8 +7225,8 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
* @gfp: allocation flags
*/
void
-ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp);
+ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ u64 color_bitmap, gfp_t gfp);
/**
* ieee80211_is_tx_data - check if frame is a data frame
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index da7eec8669ec..07e5168cdaf9 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -445,7 +445,7 @@ int ndisc_late_init(void);
void ndisc_late_cleanup(void);
void ndisc_cleanup(void);
-int ndisc_rcv(struct sk_buff *skb);
+enum skb_drop_reason ndisc_rcv(struct sk_buff *skb);
struct sk_buff *ndisc_ns_create(struct net_device *dev, const struct in6_addr *solicit,
const struct in6_addr *saddr, u64 nonce);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 6a2019aaa464..a72028dbef0c 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -126,6 +126,12 @@ struct nf_conn {
};
static inline struct nf_conn *
+nf_ct_to_nf_conn(const struct nf_conntrack *nfct)
+{
+ return container_of(nfct, struct nf_conn, ct_general);
+}
+
+static inline struct nf_conn *
nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
{
return container_of(hash, struct nf_conn,
@@ -175,6 +181,8 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
void nf_ct_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_tcp_set_closing(struct nf_conn *ct);
+
/* decrement reference count on a conntrack */
static inline void nf_ct_put(struct nf_conn *ct)
{
@@ -362,6 +370,10 @@ static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
return net_generic(net, nf_conntrack_net_id);
}
+int nf_ct_skb_network_trim(struct sk_buff *skb, int family);
+int nf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ u16 zone, u8 family, u8 *proto, u16 *mru);
+
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index cd982f4a0f50..ebb28ec5b6fa 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -57,7 +57,7 @@ struct nf_flowtable_type {
struct net_device *dev,
enum flow_block_command cmd);
int (*action)(struct net *net,
- const struct flow_offload *flow,
+ struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
@@ -164,6 +164,8 @@ enum nf_flow_flags {
NF_FLOW_HW_DYING,
NF_FLOW_HW_DEAD,
NF_FLOW_HW_PENDING,
+ NF_FLOW_HW_BIDIRECTIONAL,
+ NF_FLOW_HW_ESTABLISHED,
};
enum flow_offload_type {
@@ -312,10 +314,10 @@ void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd);
-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
-int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 3e825381ac5c..780a5f6ad4a6 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -61,6 +61,16 @@ struct nft_immediate_expr {
extern const struct nft_expr_ops nft_cmp_fast_ops;
extern const struct nft_expr_ops nft_cmp16_fast_ops;
+struct nft_ct {
+ enum nft_ct_keys key:8;
+ enum ip_conntrack_dir dir:8;
+ u8 len;
+ union {
+ u8 dreg;
+ u8 sreg;
+ };
+};
+
struct nft_payload {
enum nft_payload_bases base:8;
u8 offset;
@@ -140,6 +150,8 @@ void nft_rt_get_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
+void nft_ct_get_fast_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
enum {
NFT_PAYLOAD_CTX_INNER_TUN = (1 << 0),
@@ -164,4 +176,8 @@ void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *ctx);
+void nft_objref_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+void nft_objref_map_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 112708f7a6b4..947973623dc7 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -29,7 +29,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
if (iph->ihl < 5 || iph->version != 4)
return -1;
- len = ntohs(iph->tot_len);
+ len = iph_totlen(pkt->skb, iph);
thoff = iph->ihl * 4;
if (pkt->skb->len < len)
return -1;
@@ -64,7 +64,7 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
- len = ntohs(iph->tot_len);
+ len = iph_totlen(pkt->skb, iph);
thoff = iph->ihl * 4;
if (pkt->skb->len < len) {
__IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 6e1e670e06bc..b12cd957abb4 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -276,7 +276,8 @@ enum nla_policy_validation {
* Note that in the interest of code simplicity and
* struct size both limits are s16, so you cannot
* enforce a range that doesn't fall within the range
- * of s16 - do that as usual in the code instead.
+ * of s16 - do that using the NLA_POLICY_FULL_RANGE()
+ * or NLA_POLICY_FULL_RANGE_SIGNED() macros instead.
* Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and
* NLA_POLICY_RANGE() macros.
* NLA_U8,
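A hedged illustration of the macro the note now points to (the attribute index is hypothetical): a u32 attribute validated against a range wider than s16, expressed via struct netlink_range_validation.

static const struct netlink_range_validation example_rate_range = {
	.min = 0,
	.max = 1000000,
};

static const struct nla_policy example_policy[] = {
	[1] = NLA_POLICY_FULL_RANGE(NLA_U32, &example_rate_range),
};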
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 8249060cf5d0..a91ef9f8de60 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -6,6 +6,7 @@
struct ctl_table_header;
struct prot_inuse;
+struct cpumask;
struct netns_core {
/* core sysctls */
@@ -17,6 +18,10 @@ struct netns_core {
#ifdef CONFIG_PROC_FS
struct prot_inuse __percpu *prot_inuse;
#endif
+
+#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
+ struct cpumask *rps_default_mask;
+#endif
};
#endif
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b79a89d5207c..8cd9d141f5af 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -73,6 +73,11 @@ enum nl802154_commands {
NL802154_CMD_DEL_SEC_LEVEL,
NL802154_CMD_SCAN_EVENT,
+ NL802154_CMD_TRIGGER_SCAN,
+ NL802154_CMD_ABORT_SCAN,
+ NL802154_CMD_SCAN_DONE,
+ NL802154_CMD_SEND_BEACONS,
+ NL802154_CMD_STOP_BEACONS,
/* add new commands above here */
@@ -134,6 +139,14 @@ enum nl802154_attrs {
NL802154_ATTR_NETNS_FD,
NL802154_ATTR_COORDINATOR,
+ NL802154_ATTR_SCAN_TYPE,
+ NL802154_ATTR_SCAN_FLAGS,
+ NL802154_ATTR_SCAN_CHANNELS,
+ NL802154_ATTR_SCAN_PREAMBLE_CODES,
+ NL802154_ATTR_SCAN_MEAN_PRF,
+ NL802154_ATTR_SCAN_DURATION,
+ NL802154_ATTR_SCAN_DONE_REASON,
+ NL802154_ATTR_BEACON_INTERVAL,
/* add attributes here, update the policy in nl802154.c */
@@ -260,6 +273,54 @@ enum nl802154_coord {
};
/**
+ * enum nl802154_scan_types - Scan types
+ *
+ * @__NL802154_SCAN_INVALID: scan type number 0 is reserved
+ * @NL802154_SCAN_ED: An ED scan allows a device to obtain a measure of the peak
+ * energy in each requested channel
+ * @NL802154_SCAN_ACTIVE: Locate any coordinator transmitting Beacon frames using
+ * a Beacon Request command
+ * @NL802154_SCAN_PASSIVE: Locate any coordinator transmitting Beacon frames
+ * @NL802154_SCAN_ORPHAN: Relocate coordinator following a loss of synchronisation
+ * @NL802154_SCAN_ENHANCED_ACTIVE: Same as %NL802154_SCAN_ACTIVE, but using an
+ * Enhanced Beacon Request command instead of a Beacon Request command
+ * @NL802154_SCAN_RIT_PASSIVE: Passive scan for RIT Data Request command frames
+ * instead of Beacon frames
+ * @NL802154_SCAN_ATTR_MAX: Maximum SCAN attribute number
+ */
+enum nl802154_scan_types {
+ __NL802154_SCAN_INVALID,
+ NL802154_SCAN_ED,
+ NL802154_SCAN_ACTIVE,
+ NL802154_SCAN_PASSIVE,
+ NL802154_SCAN_ORPHAN,
+ NL802154_SCAN_ENHANCED_ACTIVE,
+ NL802154_SCAN_RIT_PASSIVE,
+
+ /* keep last */
+ NL802154_SCAN_ATTR_MAX,
+};
+
+/**
+ * enum nl802154_scan_done_reasons - End of scan reasons
+ *
+ * @__NL802154_SCAN_DONE_REASON_INVALID: scan done reason number 0 is reserved.
+ * @NL802154_SCAN_DONE_REASON_FINISHED: The scan just finished naturally after
+ * going through all the requested and possible (complex) channels.
+ * @NL802154_SCAN_DONE_REASON_ABORTED: The scan was aborted upon user request.
+ * @NL802154_SCAN_DONE_REASON_MAX: Maximum scan done reason attribute number.
+ */
+enum nl802154_scan_done_reasons {
+ __NL802154_SCAN_DONE_REASON_INVALID,
+ NL802154_SCAN_DONE_REASON_FINISHED,
+ NL802154_SCAN_DONE_REASON_ABORTED,
+
+ /* keep last */
+ NL802154_SCAN_DONE_REASON_MAX,
+};
+
+/**
* enum nl802154_cca_modes - cca modes
*
* @__NL802154_CCA_INVALID: cca mode number 0 is reserved
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 813c93499f20..ddfa0b328677 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -277,6 +277,16 @@ void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size,
bool allow_direct);
+/* pp_frag_count represents the number of writers who can update the page
+ * either by updating skb->data or via DMA mappings for the device.
+ * We can't rely on the page refcnt for that as we don't know who might be
+ * holding page references and we can't reliably destroy or sync DMA mappings
+ * of the fragments.
+ *
+ * When pp_frag_count reaches 0 we can either recycle the page if the page
+ * refcnt is 1 or return it back to the memory allocator and destroy any
+ * mappings we have.
+ */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
atomic_long_set(&page->pp_frag_count, nr);
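A hedged driver-side sketch of the accounting described above: pre-charging pp_frag_count for two consumers of one page. page_pool_dev_alloc_pages() is the existing allocation helper; surrounding error handling is elided.

	struct page *page;

	page = page_pool_dev_alloc_pages(pool);
	if (!page)
		return -ENOMEM;

	/* Two writers will hand slices of this page to the stack/device;
	 * each later drops its share via the page_pool put/defrag path.
	 */
	page_pool_fragment_page(page, 2);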
@@ -386,7 +396,7 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
static inline void page_pool_ring_lock(struct page_pool *pool)
__acquires(&pool->ring.producer_lock)
{
- if (in_serving_softirq())
+ if (in_softirq())
spin_lock(&pool->ring.producer_lock);
else
spin_lock_bh(&pool->ring.producer_lock);
@@ -395,7 +405,7 @@ static inline void page_pool_ring_lock(struct page_pool *pool)
static inline void page_pool_ring_unlock(struct page_pool *pool)
__releases(&pool->ring.producer_lock)
{
- if (in_serving_softirq())
+ if (in_softirq())
spin_unlock(&pool->ring.producer_lock);
else
spin_unlock_bh(&pool->ring.producer_lock);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 4cabb32a2ad9..b3b5b0b62f16 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -59,6 +59,8 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei);
+int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
+ int police, struct tcf_proto *tp, u32 handle, bool used_action_miss);
static inline bool tcf_block_shared(struct tcf_block *block)
{
@@ -229,6 +231,7 @@ struct tcf_exts {
struct tc_action **actions;
struct net *net;
netns_tracker ns_tracker;
+ struct tcf_exts_miss_cookie_node *miss_cookie_node;
#endif
/* Map to export classifier specific extension TLV types to the
* generic extensions API. Unsupported extensions must be set to 0.
@@ -240,21 +243,11 @@ struct tcf_exts {
static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
int action, int police)
{
-#ifdef CONFIG_NET_CLS_ACT
- exts->type = 0;
- exts->nr_actions = 0;
- /* Note: we do not own yet a reference on net.
- * This reference might be taken later from tcf_exts_get_net().
- */
- exts->net = net;
- exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
- GFP_KERNEL);
- if (!exts->actions)
- return -ENOMEM;
+#ifdef CONFIG_NET_CLS
+ return tcf_exts_init_ex(exts, net, action, police, NULL, 0, false);
+#else
+ return -EOPNOTSUPP;
#endif
- exts->action = action;
- exts->police = police;
- return 0;
}
/* Return false if the netns is being destroyed in cleanup_net(). Callers
@@ -292,10 +285,15 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
#define tcf_act_for_each_action(i, a, actions) \
for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
+static inline bool tc_act_in_hw(struct tc_action *act)
+{
+ return !!act->in_hw_count;
+}
+
static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
- u64 bytes, u64 packets, u64 drops, u64 lastuse,
- u8 used_hw_stats, bool used_hw_stats_valid)
+ struct flow_stats *stats,
+ bool use_act_stats)
{
#ifdef CONFIG_NET_CLS_ACT
int i;
@@ -303,16 +301,18 @@ tcf_exts_hw_stats_update(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- /* if stats from hw, just skip */
- if (tcf_action_update_hw_stats(a)) {
- preempt_disable();
- tcf_action_stats_update(a, bytes, packets, drops,
- lastuse, true);
- preempt_enable();
-
- a->used_hw_stats = used_hw_stats;
- a->used_hw_stats_valid = used_hw_stats_valid;
+ if (use_act_stats || tc_act_in_hw(a)) {
+ if (!tcf_action_update_hw_stats(a))
+ continue;
}
+
+ preempt_disable();
+ tcf_action_stats_update(a, stats->bytes, stats->pkts, stats->drops,
+ stats->lastused, true);
+ preempt_enable();
+
+ a->used_hw_stats = stats->used_hw_stats;
+ a->used_hw_stats_valid = stats->used_hw_stats_valid;
}
#endif
}
@@ -353,6 +353,18 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
return TC_ACT_OK;
}
+static inline int
+tcf_exts_exec_ex(struct sk_buff *skb, struct tcf_exts *exts, int act_index,
+ struct tcf_result *res)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return tcf_action_exec(skb, exts->actions + act_index,
+ exts->nr_actions - act_index, res);
+#else
+ return TC_ACT_OK;
+#endif
+}
+
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
struct tcf_exts *exts, u32 flags,
@@ -577,6 +589,7 @@ int tc_setup_offload_action(struct flow_action *flow_action,
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
struct tc_action *actions[],
+ u32 miss_cookie_base,
struct netlink_ext_ack *extack);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
@@ -770,6 +783,7 @@ struct tc_cls_matchall_offload {
enum tc_matchall_command command;
struct flow_rule *rule;
struct flow_stats stats;
+ bool use_act_stats;
unsigned long cookie;
};
@@ -788,16 +802,6 @@ struct tc_cls_bpf_offload {
bool exts_integrated;
};
-struct tc_mqprio_qopt_offload {
- /* struct tc_mqprio_qopt must always be the first element */
- struct tc_mqprio_qopt qopt;
- u16 mode;
- u16 shaper;
- u32 flags;
- u64 min_rate[TC_QOPT_MAX_QUEUE];
- u64 max_rate[TC_QOPT_MAX_QUEUE];
-};
-
/* This structure holds cookie structure that is passed from user
* to the kernel for actions and classifiers
*/
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 38207873eda6..2016839991a4 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -160,8 +160,28 @@ struct tc_etf_qopt_offload {
s32 queue;
};
+struct tc_mqprio_caps {
+ bool validate_queue_counts:1;
+};
+
+struct tc_mqprio_qopt_offload {
+ /* struct tc_mqprio_qopt must always be the first element */
+ struct tc_mqprio_qopt qopt;
+ u16 mode;
+ u16 shaper;
+ u32 flags;
+ u64 min_rate[TC_QOPT_MAX_QUEUE];
+ u64 max_rate[TC_QOPT_MAX_QUEUE];
+};
+
struct tc_taprio_caps {
bool supports_queue_max_sdu:1;
+ bool gate_mask_per_txq:1;
+ /* Device expects lower TXQ numbers to have higher priority over higher
+ * TXQs, regardless of their TC mapping. DO NOT USE FOR NEW DRIVERS,
+ * INSTEAD ENFORCE A PROPER TC:TXQ MAPPING COMING FROM USER SPACE.
+ */
+ bool broken_mqprio:1;
};
struct tc_taprio_sched_entry {
@@ -173,6 +193,7 @@ struct tc_taprio_sched_entry {
};
struct tc_taprio_qopt_offload {
+ struct tc_mqprio_qopt_offload mqprio;
u8 enable;
ktime_t base_time;
u64 cycle_time;
diff --git a/include/net/raw.h b/include/net/raw.h
index 5e665934ebc7..2c004c20ed99 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -15,6 +15,8 @@
#include <net/inet_sock.h>
#include <net/protocol.h>
+#include <net/netns/hash.h>
+#include <linux/hash.h>
#include <linux/icmp.h>
extern struct proto raw_prot;
@@ -29,13 +31,20 @@ int raw_local_deliver(struct sk_buff *, int);
int raw_rcv(struct sock *, struct sk_buff *);
-#define RAW_HTABLE_SIZE MAX_INET_PROTOS
+#define RAW_HTABLE_LOG 8
+#define RAW_HTABLE_SIZE (1U << RAW_HTABLE_LOG)
struct raw_hashinfo {
spinlock_t lock;
- struct hlist_nulls_head ht[RAW_HTABLE_SIZE];
+
+ struct hlist_nulls_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
};
+static inline u32 raw_hashfunc(const struct net *net, u32 proto)
+{
+ return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
+}
+
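A short sketch of the hash in use (assumes raw_v4_hashinfo, declared elsewhere in this header, with net and protocol in scope): pick the chain, then walk it under RCU.

	struct hlist_nulls_head *hlist;

	hlist = &raw_v4_hashinfo.ht[raw_hashfunc(net, protocol)];
	/* iterate hlist under rcu_read_lock() and match candidate sockets */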
static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
{
int i;
diff --git a/include/net/route.h b/include/net/route.h
index 6e92dd5bcd61..fe00b0a2e475 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -35,9 +35,6 @@
#include <linux/cache.h>
#include <linux/security.h>
-/* IPv4 datagram length is stored into 16bit field (tot_len) */
-#define IP_MAX_MTU 0xFFFFU
-
#define RTO_ONLINK 0x01
#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index af4aa66aaa4e..fab5ba3e61b7 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -369,6 +369,8 @@ struct tcf_proto_ops {
struct nlattr **tca,
struct netlink_ext_ack *extack);
void (*tmplt_destroy)(void *tmplt_priv);
+ struct tcf_exts * (*get_exts)(const struct tcf_proto *tp,
+ u32 handle);
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
diff --git a/include/net/smc.h b/include/net/smc.h
index c926d3313e05..597cb9381182 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include "linux/ism.h"
struct sock;
@@ -48,20 +49,14 @@ struct smcd_dmb {
#define ISM_ERROR 0xFFFF
-struct smcd_event {
- u32 type;
- u32 code;
- u64 tok;
- u64 time;
- u64 info;
-};
-
struct smcd_dev;
+struct ism_client;
struct smcd_ops {
int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
u32 vid);
- int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb,
+ struct ism_client *client);
int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
@@ -73,14 +68,14 @@ struct smcd_ops {
bool sf, unsigned int offset, void *data,
unsigned int size);
u8* (*get_system_eid)(void);
+ u64 (*get_local_gid)(struct smcd_dev *dev);
u16 (*get_chid)(struct smcd_dev *dev);
+ struct device* (*get_dev)(struct smcd_dev *dev);
};
struct smcd_dev {
const struct smcd_ops *ops;
- struct device dev;
void *priv;
- u64 local_gid;
struct list_head list;
spinlock_t lock;
struct smc_connection **conn;
@@ -95,11 +90,4 @@ struct smcd_dev {
u8 going_away : 1;
};
-struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
- const struct smcd_ops *ops, int max_dmbs);
-int smcd_register_dev(struct smcd_dev *smcd);
-void smcd_unregister_dev(struct smcd_dev *smcd);
-void smcd_free_dev(struct smcd_dev *smcd);
-void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
-void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit, u16 dmbemask);
#endif /* _SMC_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index 556209727633..573f2bf7e0de 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1349,9 +1349,6 @@ struct proto {
char name[32];
struct list_head node;
-#ifdef SOCK_REFCNT_DEBUG
- atomic_t socks;
-#endif
int (*diag_destroy)(struct sock *sk, int err);
} __randomize_layout;
@@ -1359,31 +1356,6 @@ int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
int sock_load_diag_module(int family, int protocol);
-#ifdef SOCK_REFCNT_DEBUG
-static inline void sk_refcnt_debug_inc(struct sock *sk)
-{
- atomic_inc(&sk->sk_prot->socks);
-}
-
-static inline void sk_refcnt_debug_dec(struct sock *sk)
-{
- atomic_dec(&sk->sk_prot->socks);
- printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
- sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
-}
-
-static inline void sk_refcnt_debug_release(const struct sock *sk)
-{
- if (refcount_read(&sk->sk_refcnt) != 1)
- printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
- sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
-}
-#else /* SOCK_REFCNT_DEBUG */
-#define sk_refcnt_debug_inc(sk) do { } while (0)
-#define sk_refcnt_debug_dec(sk) do { } while (0)
-#define sk_refcnt_debug_release(sk) do { } while (0)
-#endif /* SOCK_REFCNT_DEBUG */
-
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
static inline int sk_forward_alloc_get(const struct sock *sk)
@@ -1956,7 +1928,12 @@ void sk_common_release(struct sock *sk);
* Default socket callbacks and setup code
*/
-/* Initialise core socket variables */
+/* Initialise core socket variables using an explicit uid. */
+void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
+
+/* Initialise core socket variables.
+ * Assumes struct socket *sock is embedded in a struct socket_alloc.
+ */
void sock_init_data(struct socket *sock, struct sock *sk);
/*
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
index 1f4cb477bb5d..e8dd77a96748 100644
--- a/include/net/tc_act/tc_connmark.h
+++ b/include/net/tc_act/tc_connmark.h
@@ -4,10 +4,15 @@
#include <net/act_api.h>
-struct tcf_connmark_info {
- struct tc_action common;
+struct tcf_connmark_parms {
struct net *net;
u16 zone;
+ struct rcu_head rcu;
+};
+
+struct tcf_connmark_info {
+ struct tc_action common;
+ struct tcf_connmark_parms __rcu *parms;
};
#define to_connmark(a) ((struct tcf_connmark_info *)a)
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
index c14407160812..c869274ac529 100644
--- a/include/net/tc_act/tc_nat.h
+++ b/include/net/tc_act/tc_nat.h
@@ -5,13 +5,17 @@
#include <linux/types.h>
#include <net/act_api.h>
-struct tcf_nat {
- struct tc_action common;
-
+struct tcf_nat_parms {
__be32 old_addr;
__be32 new_addr;
__be32 mask;
u32 flags;
+ struct rcu_head rcu;
+};
+
+struct tcf_nat {
+ struct tc_action common;
+ struct tcf_nat_parms __rcu *parms;
};
#define to_tcf_nat(a) ((struct tcf_nat *)a)
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 3e02709a1df6..83fe39931781 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -4,22 +4,29 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_pedit.h>
+#include <linux/types.h>
struct tcf_pedit_key_ex {
enum pedit_header_type htype;
enum pedit_cmd cmd;
};
-struct tcf_pedit {
- struct tc_action common;
- unsigned char tcfp_nkeys;
- unsigned char tcfp_flags;
- u32 tcfp_off_max_hint;
+struct tcf_pedit_parms {
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
+ u32 tcfp_off_max_hint;
+ unsigned char tcfp_nkeys;
+ unsigned char tcfp_flags;
+ struct rcu_head rcu;
+};
+
+struct tcf_pedit {
+ struct tc_action common;
+ struct tcf_pedit_parms __rcu *parms;
};
#define to_pedit(a) ((struct tcf_pedit *)a)
+#define to_pedit_parms(a) (rcu_dereference(to_pedit(a)->parms))
static inline bool is_tcf_pedit(const struct tc_action *a)
{
@@ -32,37 +39,81 @@ static inline bool is_tcf_pedit(const struct tc_action *a)
static inline int tcf_pedit_nkeys(const struct tc_action *a)
{
- return to_pedit(a)->tcfp_nkeys;
+ struct tcf_pedit_parms *parms;
+ int nkeys;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ nkeys = parms->tcfp_nkeys;
+ rcu_read_unlock();
+
+ return nkeys;
}
static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
{
- if (to_pedit(a)->tcfp_keys_ex)
- return to_pedit(a)->tcfp_keys_ex[index].htype;
+ u32 htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ struct tcf_pedit_parms *parms;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ if (parms->tcfp_keys_ex)
+ htype = parms->tcfp_keys_ex[index].htype;
+ rcu_read_unlock();
- return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ return htype;
}
static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
{
- if (to_pedit(a)->tcfp_keys_ex)
- return to_pedit(a)->tcfp_keys_ex[index].cmd;
+ struct tcf_pedit_parms *parms;
+ u32 cmd = __PEDIT_CMD_MAX;
- return __PEDIT_CMD_MAX;
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ if (parms->tcfp_keys_ex)
+ cmd = parms->tcfp_keys_ex[index].cmd;
+ rcu_read_unlock();
+
+ return cmd;
}
static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].mask;
+ struct tcf_pedit_parms *parms;
+ u32 mask;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ mask = parms->tcfp_keys[index].mask;
+ rcu_read_unlock();
+
+ return mask;
}
static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].val;
+ struct tcf_pedit_parms *parms;
+ u32 val;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ val = parms->tcfp_keys[index].val;
+ rcu_read_unlock();
+
+ return val;
}
static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].off;
+ struct tcf_pedit_parms *parms;
+ u32 off;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ off = parms->tcfp_keys[index].off;
+ rcu_read_unlock();
+
+ return off;
}
#endif /* __NET_TC_PED_H */
diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
index d323fffb839a..a6d481b5bcbc 100644
--- a/include/net/tc_wrapper.h
+++ b/include/net/tc_wrapper.h
@@ -152,9 +152,6 @@ TC_INDIRECT_FILTER_DECLARE(flow_classify);
TC_INDIRECT_FILTER_DECLARE(fw_classify);
TC_INDIRECT_FILTER_DECLARE(mall_classify);
TC_INDIRECT_FILTER_DECLARE(route4_classify);
-TC_INDIRECT_FILTER_DECLARE(rsvp_classify);
-TC_INDIRECT_FILTER_DECLARE(rsvp6_classify);
-TC_INDIRECT_FILTER_DECLARE(tcindex_classify);
TC_INDIRECT_FILTER_DECLARE(u32_classify);
static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -199,18 +196,6 @@ static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (tp->classify == route4_classify)
return route4_classify(skb, tp, res);
#endif
-#if IS_BUILTIN(CONFIG_NET_CLS_RSVP)
- if (tp->classify == rsvp_classify)
- return rsvp_classify(skb, tp, res);
-#endif
-#if IS_BUILTIN(CONFIG_NET_CLS_RSVP6)
- if (tp->classify == rsvp6_classify)
- return rsvp6_classify(skb, tp, res);
-#endif
-#if IS_BUILTIN(CONFIG_NET_CLS_TCINDEX)
- if (tp->classify == tcindex_classify)
- return tcindex_classify(skb, tp, res);
-#endif
skip:
return tp->classify(skb, tp, res);
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 55dbc68bfffc..d517bfac937b 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -7,6 +7,7 @@
#define __LINUX_NET_XDP_H__
#include <linux/skbuff.h> /* skb_shared_info */
+#include <uapi/linux/netdev.h>
/**
* DOC: XDP RX-queue information
@@ -43,6 +44,8 @@ enum xdp_mem_type {
MEM_TYPE_MAX,
};
+typedef u32 xdp_features_t;
+
/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH
@@ -409,4 +412,37 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,
#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
+#define XDP_METADATA_KFUNC_xxx \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
+ bpf_xdp_metadata_rx_timestamp) \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
+ bpf_xdp_metadata_rx_hash) \
+
+enum {
+#define XDP_METADATA_KFUNC(name, _) name,
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+MAX_XDP_METADATA_KFUNC,
+};
+
+#ifdef CONFIG_NET
+u32 bpf_xdp_metadata_kfunc_id(int id);
+bool bpf_dev_bound_kfunc_id(u32 btf_id);
+void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
+void xdp_features_clear_redirect_target(struct net_device *dev);
+#else
+static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
+static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
+
+static inline void
+xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
+{
+}
+
+static inline void
+xdp_features_clear_redirect_target(struct net_device *dev)
+{
+}
+#endif
+
#endif /* __LINUX_NET_XDP_H__ */
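A hedged BPF-side sketch of consuming one of the metadata kfuncs listed above (assumes the 6.3 signature of bpf_xdp_metadata_rx_timestamp, where a zero return means the hint was filled in):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

SEC("xdp")
int rx_timestamp_prog(struct xdp_md *ctx)
{
	__u64 ts;

	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx ts %llu", ts);	/* device RX timestamp */

	return XDP_PASS;
}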
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index f787c3f524b0..3e952e569418 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -19,8 +19,11 @@ struct xdp_sock;
struct device;
struct page;
+#define XSK_PRIV_MAX 24
+
struct xdp_buff_xsk {
struct xdp_buff xdp;
+ u8 cb[XSK_PRIV_MAX];
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
@@ -28,6 +31,8 @@ struct xdp_buff_xsk {
struct list_head free_list_node;
};
+#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+
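A hedged sketch of the cb[] contract above: a hypothetical driver struct overlaid on the private area, with XSK_CHECK_PRIV_TYPE() turning an oversized struct into a build error.

struct example_xsk_priv {
	u32 ring_idx;
	u32 flags;
};

static void example_init_priv(struct xdp_buff_xsk *xskb)
{
	struct example_xsk_priv *priv = (void *)xskb->cb;

	XSK_CHECK_PRIV_TYPE(struct example_xsk_priv);	/* <= XSK_PRIV_MAX */
	priv->ring_idx = 0;
	priv->flags = 0;
}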
struct xsk_dma_map {
dma_addr_t *dma_pages;
struct device *dev;
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index df62be80a193..2080879e4134 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -362,6 +362,29 @@ enum ocelot_reg {
SYS_COUNT_RX_GREEN_PRIO_5,
SYS_COUNT_RX_GREEN_PRIO_6,
SYS_COUNT_RX_GREEN_PRIO_7,
+ SYS_COUNT_RX_ASSEMBLY_ERRS,
+ SYS_COUNT_RX_SMD_ERRS,
+ SYS_COUNT_RX_ASSEMBLY_OK,
+ SYS_COUNT_RX_MERGE_FRAGMENTS,
+ SYS_COUNT_RX_PMAC_OCTETS,
+ SYS_COUNT_RX_PMAC_UNICAST,
+ SYS_COUNT_RX_PMAC_MULTICAST,
+ SYS_COUNT_RX_PMAC_BROADCAST,
+ SYS_COUNT_RX_PMAC_SHORTS,
+ SYS_COUNT_RX_PMAC_FRAGMENTS,
+ SYS_COUNT_RX_PMAC_JABBERS,
+ SYS_COUNT_RX_PMAC_CRC_ALIGN_ERRS,
+ SYS_COUNT_RX_PMAC_SYM_ERRS,
+ SYS_COUNT_RX_PMAC_64,
+ SYS_COUNT_RX_PMAC_65_127,
+ SYS_COUNT_RX_PMAC_128_255,
+ SYS_COUNT_RX_PMAC_256_511,
+ SYS_COUNT_RX_PMAC_512_1023,
+ SYS_COUNT_RX_PMAC_1024_1526,
+ SYS_COUNT_RX_PMAC_1527_MAX,
+ SYS_COUNT_RX_PMAC_PAUSE,
+ SYS_COUNT_RX_PMAC_CONTROL,
+ SYS_COUNT_RX_PMAC_LONGS,
SYS_COUNT_TX_OCTETS,
SYS_COUNT_TX_UNICAST,
SYS_COUNT_TX_MULTICAST,
@@ -393,6 +416,20 @@ enum ocelot_reg {
SYS_COUNT_TX_GREEN_PRIO_6,
SYS_COUNT_TX_GREEN_PRIO_7,
SYS_COUNT_TX_AGED,
+ SYS_COUNT_TX_MM_HOLD,
+ SYS_COUNT_TX_MERGE_FRAGMENTS,
+ SYS_COUNT_TX_PMAC_OCTETS,
+ SYS_COUNT_TX_PMAC_UNICAST,
+ SYS_COUNT_TX_PMAC_MULTICAST,
+ SYS_COUNT_TX_PMAC_BROADCAST,
+ SYS_COUNT_TX_PMAC_PAUSE,
+ SYS_COUNT_TX_PMAC_64,
+ SYS_COUNT_TX_PMAC_65_127,
+ SYS_COUNT_TX_PMAC_128_255,
+ SYS_COUNT_TX_PMAC_256_511,
+ SYS_COUNT_TX_PMAC_512_1023,
+ SYS_COUNT_TX_PMAC_1024_1526,
+ SYS_COUNT_TX_PMAC_1527_MAX,
SYS_COUNT_DROP_LOCAL,
SYS_COUNT_DROP_TAIL,
SYS_COUNT_DROP_YELLOW_PRIO_0,
@@ -478,6 +515,9 @@ enum ocelot_reg {
DEV_MAC_FC_MAC_LOW_CFG,
DEV_MAC_FC_MAC_HIGH_CFG,
DEV_MAC_STICKY,
+ DEV_MM_ENABLE_CONFIG,
+ DEV_MM_VERIF_CONFIG,
+ DEV_MM_STATUS,
PCS1G_CFG,
PCS1G_MODE_CFG,
PCS1G_SD_CFG,
@@ -702,6 +742,12 @@ struct ocelot_mirror {
int to;
};
+struct ocelot_mm_state {
+ struct mutex lock;
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_active;
+};
+
struct ocelot_port;
struct ocelot_port {
@@ -814,6 +860,7 @@ struct ocelot {
struct workqueue_struct *owq;
u8 ptp:1;
+ u8 mm_supported:1;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
struct hwtstamp_config hwtstamp_config;
@@ -826,6 +873,8 @@ struct ocelot {
spinlock_t ptp_clock_lock;
struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM];
+ struct ocelot_mm_state *mm;
+
struct ocelot_fdma *fdma;
};
@@ -918,6 +967,7 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
+int ocelot_reset(struct ocelot *ocelot);
int ocelot_init(struct ocelot *ocelot);
void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
@@ -929,6 +979,11 @@ void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu);
void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port);
u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port);
+/* Watermark interface */
+u16 ocelot_wm_enc(u16 value);
+u16 ocelot_wm_dec(u16 wm);
+void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse);
+
/* DSA callbacks */
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data);
@@ -937,6 +992,8 @@ void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
struct rtnl_link_stats64 *stats);
void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
struct ethtool_pause_stats *pause_stats);
+void ocelot_port_get_mm_stats(struct ocelot *ocelot, int port,
+ struct ethtool_mm_stats *stats);
void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges);
@@ -1082,6 +1139,13 @@ int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
struct ocelot_policer *pol);
int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix);
+void ocelot_port_mm_irq(struct ocelot *ocelot, int port);
+int ocelot_port_set_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+int ocelot_port_get_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_state *state);
+
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
int ocelot_mrp_add(struct ocelot *ocelot, int port,
const struct switchdev_obj_mrp *mrp);
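
The ocelot.h hunks above export the library's new MAC Merge entry points
(ocelot_port_set_mm(), ocelot_port_get_mm(), ocelot_port_mm_irq()) so that
switch drivers layered on top can back the new ethtool MM ops. A minimal
sketch of the forwarding glue, assuming the usual ds->priv == struct ocelot *
convention of the felix DSA driver:

/* Hedged sketch: DSA-level MM ops forwarding into the ocelot library.
 * Assumes ds->priv points at struct ocelot, as in the felix driver.
 */
static int felix_port_set_mm(struct dsa_switch *ds, int port,
			     struct ethtool_mm_cfg *cfg,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_set_mm(ocelot, port, cfg, extack);
}

static int felix_port_get_mm(struct dsa_switch *ds, int port,
			     struct ethtool_mm_state *state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_get_mm(ocelot, port, state);
}
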
diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h
index 0c6021f02fee..fcf02baa76b2 100644
--- a/include/soc/mscc/ocelot_dev.h
+++ b/include/soc/mscc/ocelot_dev.h
@@ -93,6 +93,29 @@
#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8)
+
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0)
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4)
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12))
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12)
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+
+#define DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8))
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12)
+#define DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16)
+#define DEV_MM_STAT_MM_STATUS_MM_RX_FRAME_STATUS BIT(20)
+#define DEV_MM_STAT_MM_STATUS_MM_TX_FRAME_STATUS BIT(24)
+#define DEV_MM_STAT_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28)
+
#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
#define PCS1G_CFG_PCS_ENA BIT(0)
diff --git a/include/soc/mscc/vsc7514_regs.h b/include/soc/mscc/vsc7514_regs.h
index ceee26c96959..ffe343a9c04b 100644
--- a/include/soc/mscc/vsc7514_regs.h
+++ b/include/soc/mscc/vsc7514_regs.h
@@ -10,20 +10,10 @@
#include <soc/mscc/ocelot_vcap.h>
-extern const u32 vsc7514_ana_regmap[];
-extern const u32 vsc7514_qs_regmap[];
-extern const u32 vsc7514_qsys_regmap[];
-extern const u32 vsc7514_rew_regmap[];
-extern const u32 vsc7514_sys_regmap[];
-extern const u32 vsc7514_vcap_regmap[];
-extern const u32 vsc7514_ptp_regmap[];
-extern const u32 vsc7514_dev_gmii_regmap[];
+extern struct vcap_props vsc7514_vcap_props[];
-extern const struct vcap_field vsc7514_vcap_es0_keys[];
-extern const struct vcap_field vsc7514_vcap_es0_actions[];
-extern const struct vcap_field vsc7514_vcap_is1_keys[];
-extern const struct vcap_field vsc7514_vcap_is1_actions[];
-extern const struct vcap_field vsc7514_vcap_is2_keys[];
-extern const struct vcap_field vsc7514_vcap_is2_actions[];
+extern const struct reg_field vsc7514_regfields[REGFIELD_MAX];
+
+extern const u32 *vsc7514_regmap[TARGET_MAX];
#endif
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 6b200059c2c5..a6b3a4e409f0 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -122,6 +122,64 @@ TRACE_EVENT(br_fdb_update,
__entry->flags)
);
+TRACE_EVENT(br_mdb_full,
+
+ TP_PROTO(const struct net_device *dev,
+ const struct br_ip *group),
+
+ TP_ARGS(dev, group),
+
+ TP_STRUCT__entry(
+ __string(dev, dev->name)
+ __field(int, af)
+ __field(u16, vid)
+ __array(__u8, src, 16)
+ __array(__u8, grp, 16)
+ __array(__u8, grpmac, ETH_ALEN) /* For af == 0. */
+ ),
+
+ TP_fast_assign(
+ struct in6_addr *in6;
+
+ __assign_str(dev, dev->name);
+ __entry->vid = group->vid;
+
+ if (!group->proto) {
+ __entry->af = 0;
+
+ memset(__entry->src, 0, sizeof(__entry->src));
+ memset(__entry->grp, 0, sizeof(__entry->grp));
+ memcpy(__entry->grpmac, group->dst.mac_addr, ETH_ALEN);
+ } else if (group->proto == htons(ETH_P_IP)) {
+ __entry->af = AF_INET;
+
+ in6 = (struct in6_addr *)__entry->src;
+ ipv6_addr_set_v4mapped(group->src.ip4, in6);
+
+ in6 = (struct in6_addr *)__entry->grp;
+ ipv6_addr_set_v4mapped(group->dst.ip4, in6);
+
+ memset(__entry->grpmac, 0, ETH_ALEN);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ __entry->af = AF_INET6;
+
+ in6 = (struct in6_addr *)__entry->src;
+ *in6 = group->src.ip6;
+
+ in6 = (struct in6_addr *)__entry->grp;
+ *in6 = group->dst.ip6;
+
+ memset(__entry->grpmac, 0, ETH_ALEN);
+#endif
+ }
+ ),
+
+ TP_printk("dev %s af %u src %pI6c grp %pI6c/%pM vid %u",
+ __get_str(dev), __entry->af, __entry->src, __entry->grp,
+ __entry->grpmac, __entry->vid)
+);
#endif /* _TRACE_BRIDGE_H */
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 24969184c534..77ff7cfc6049 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -88,7 +88,7 @@ TRACE_EVENT(devlink_health_report,
__string(bus_name, devlink_to_dev(devlink)->bus->name)
__string(dev_name, dev_name(devlink_to_dev(devlink)))
__string(driver_name, devlink_to_dev(devlink)->driver->name)
- __string(reporter_name, msg)
+ __string(reporter_name, reporter_name)
__string(msg, msg)
),
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 283db0ea3db4..4c53a5ef6257 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -163,7 +163,7 @@
EM(rxrpc_local_put_for_use, "PUT for-use ") \
EM(rxrpc_local_put_kill_conn, "PUT conn-kil") \
EM(rxrpc_local_put_peer, "PUT peer ") \
- EM(rxrpc_local_put_prealloc_conn, "PUT conn-pre") \
+ EM(rxrpc_local_put_prealloc_peer, "PUT peer-pre") \
EM(rxrpc_local_put_release_sock, "PUT rel-sock") \
EM(rxrpc_local_stop, "STOP ") \
EM(rxrpc_local_stopped, "STOPPED ") \
@@ -318,6 +318,7 @@
EM(rxrpc_recvmsg_return, "RETN") \
EM(rxrpc_recvmsg_terminal, "TERM") \
EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \
+ EM(rxrpc_recvmsg_unqueue, "UNQU") \
E_(rxrpc_recvmsg_wait, "WAIT")
#define rxrpc_rtt_tx_traces \
@@ -360,11 +361,12 @@
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
EM(rxrpc_propose_ack_input_data_hole, "DataInH") \
- EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+ EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
+ EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \
EM(rxrpc_propose_ack_processing_op, "ProcOp ") \
EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \
EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \
@@ -421,6 +423,13 @@
EM(RXRPC_ACK_IDLE, "IDL") \
E_(RXRPC_ACK__INVALID, "-?-")
+#define rxrpc_sack_traces \
+ EM(rxrpc_sack_advance, "ADV") \
+ EM(rxrpc_sack_fill, "FIL") \
+ EM(rxrpc_sack_nack, "NAK") \
+ EM(rxrpc_sack_none, "---") \
+ E_(rxrpc_sack_oos, "OOS")
+
#define rxrpc_completions \
EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \
EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \
@@ -496,6 +505,7 @@ enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte);
enum rxrpc_req_ack_trace { rxrpc_req_ack_traces } __mode(byte);
enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte);
enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte);
+enum rxrpc_sack_trace { rxrpc_sack_traces } __mode(byte);
enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte);
enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte);
enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte);
@@ -530,6 +540,7 @@ rxrpc_recvmsg_traces;
rxrpc_req_ack_traces;
rxrpc_rtt_rx_traces;
rxrpc_rtt_tx_traces;
+rxrpc_sack_traces;
rxrpc_skb_traces;
rxrpc_timer_traces;
rxrpc_tx_points;
@@ -552,10 +563,10 @@ TRACE_EVENT(rxrpc_local,
TP_ARGS(local_debug_id, op, ref, usage),
TP_STRUCT__entry(
- __field(unsigned int, local )
- __field(int, op )
- __field(int, ref )
- __field(int, usage )
+ __field(unsigned int, local)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, usage)
),
TP_fast_assign(
@@ -578,9 +589,9 @@ TRACE_EVENT(rxrpc_peer,
TP_ARGS(peer_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, peer )
- __field(int, ref )
- __field(enum rxrpc_peer_trace, why )
+ __field(unsigned int, peer)
+ __field(int, ref)
+ __field(enum rxrpc_peer_trace, why)
),
TP_fast_assign(
@@ -601,9 +612,9 @@ TRACE_EVENT(rxrpc_bundle,
TP_ARGS(bundle_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, bundle )
- __field(int, ref )
- __field(int, why )
+ __field(unsigned int, bundle)
+ __field(int, ref)
+ __field(int, why)
),
TP_fast_assign(
@@ -624,9 +635,9 @@ TRACE_EVENT(rxrpc_conn,
TP_ARGS(conn_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(int, ref )
- __field(int, why )
+ __field(unsigned int, conn)
+ __field(int, ref)
+ __field(int, why)
),
TP_fast_assign(
@@ -648,11 +659,11 @@ TRACE_EVENT(rxrpc_client,
TP_ARGS(conn, channel, op),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(u32, cid )
- __field(int, channel )
- __field(int, usage )
- __field(enum rxrpc_client_trace, op )
+ __field(unsigned int, conn)
+ __field(u32, cid)
+ __field(int, channel)
+ __field(int, usage)
+ __field(enum rxrpc_client_trace, op)
),
TP_fast_assign(
@@ -678,10 +689,10 @@ TRACE_EVENT(rxrpc_call,
TP_ARGS(call_debug_id, ref, aux, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ref )
- __field(int, why )
- __field(unsigned long, aux )
+ __field(unsigned int, call)
+ __field(int, ref)
+ __field(int, why)
+ __field(unsigned long, aux)
),
TP_fast_assign(
@@ -705,10 +716,10 @@ TRACE_EVENT(rxrpc_skb,
TP_ARGS(skb, usage, mod_count, why),
TP_STRUCT__entry(
- __field(struct sk_buff *, skb )
- __field(int, usage )
- __field(int, mod_count )
- __field(enum rxrpc_skb_trace, why )
+ __field(struct sk_buff *, skb)
+ __field(int, usage)
+ __field(int, mod_count)
+ __field(enum rxrpc_skb_trace, why)
),
TP_fast_assign(
@@ -731,7 +742,7 @@ TRACE_EVENT(rxrpc_rx_packet,
TP_ARGS(sp),
TP_STRUCT__entry(
- __field_struct(struct rxrpc_host_header, hdr )
+ __field_struct(struct rxrpc_host_header, hdr)
),
TP_fast_assign(
@@ -742,9 +753,8 @@ TRACE_EVENT(rxrpc_rx_packet,
__entry->hdr.epoch, __entry->hdr.cid,
__entry->hdr.callNumber, __entry->hdr.serviceId,
__entry->hdr.serial, __entry->hdr.seq,
- __entry->hdr.type, __entry->hdr.flags,
- __entry->hdr.type <= 15 ?
- __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
+ __entry->hdr.securityIndex, __entry->hdr.flags,
+ __print_symbolic(__entry->hdr.type, rxrpc_pkts))
);
TRACE_EVENT(rxrpc_rx_done,
@@ -753,8 +763,8 @@ TRACE_EVENT(rxrpc_rx_done,
TP_ARGS(result, abort_code),
TP_STRUCT__entry(
- __field(int, result )
- __field(int, abort_code )
+ __field(int, result)
+ __field(int, abort_code)
),
TP_fast_assign(
@@ -772,13 +782,13 @@ TRACE_EVENT(rxrpc_abort,
TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),
TP_STRUCT__entry(
- __field(unsigned int, call_nr )
- __field(enum rxrpc_abort_reason, why )
- __field(u32, cid )
- __field(u32, call_id )
- __field(rxrpc_seq_t, seq )
- __field(int, abort_code )
- __field(int, error )
+ __field(unsigned int, call_nr)
+ __field(enum rxrpc_abort_reason, why)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(rxrpc_seq_t, seq)
+ __field(int, abort_code)
+ __field(int, error)
),
TP_fast_assign(
@@ -804,10 +814,10 @@ TRACE_EVENT(rxrpc_call_complete,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_call_completion, compl )
- __field(int, error )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(enum rxrpc_call_completion, compl)
+ __field(int, error)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -830,13 +840,13 @@ TRACE_EVENT(rxrpc_txqueue,
TP_ARGS(call, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_txqueue_trace, why )
- __field(rxrpc_seq_t, acks_hard_ack )
- __field(rxrpc_seq_t, tx_bottom )
- __field(rxrpc_seq_t, tx_top )
- __field(rxrpc_seq_t, tx_prepared )
- __field(int, tx_winsize )
+ __field(unsigned int, call)
+ __field(enum rxrpc_txqueue_trace, why)
+ __field(rxrpc_seq_t, acks_hard_ack)
+ __field(rxrpc_seq_t, tx_bottom)
+ __field(rxrpc_seq_t, tx_top)
+ __field(rxrpc_seq_t, tx_prepared)
+ __field(int, tx_winsize)
),
TP_fast_assign(
@@ -867,10 +877,10 @@ TRACE_EVENT(rxrpc_rx_data,
TP_ARGS(call, seq, serial, flags),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_serial_t, serial )
- __field(u8, flags )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, flags)
),
TP_fast_assign(
@@ -895,13 +905,13 @@ TRACE_EVENT(rxrpc_rx_ack,
TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_serial_t, ack_serial )
- __field(rxrpc_seq_t, first )
- __field(rxrpc_seq_t, prev )
- __field(u8, reason )
- __field(u8, n_acks )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, first)
+ __field(rxrpc_seq_t, prev)
+ __field(u8, reason)
+ __field(u8, n_acks)
),
TP_fast_assign(
@@ -931,9 +941,9 @@ TRACE_EVENT(rxrpc_rx_abort,
TP_ARGS(call, serial, abort_code),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -955,11 +965,11 @@ TRACE_EVENT(rxrpc_rx_challenge,
TP_ARGS(conn, serial, version, nonce, min_level),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(rxrpc_serial_t, serial )
- __field(u32, version )
- __field(u32, nonce )
- __field(u32, min_level )
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, nonce)
+ __field(u32, min_level)
),
TP_fast_assign(
@@ -985,11 +995,11 @@ TRACE_EVENT(rxrpc_rx_response,
TP_ARGS(conn, serial, version, kvno, ticket_len),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(rxrpc_serial_t, serial )
- __field(u32, version )
- __field(u32, kvno )
- __field(u32, ticket_len )
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, kvno)
+ __field(u32, ticket_len)
),
TP_fast_assign(
@@ -1015,10 +1025,10 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
TP_ARGS(call, serial, rwind, wake),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(u32, rwind )
- __field(bool, wake )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, rwind)
+ __field(bool, wake)
),
TP_fast_assign(
@@ -1042,9 +1052,9 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_ARGS(call_id, whdr, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_tx_point, where )
- __field_struct(struct rxrpc_wire_header, whdr )
+ __field(unsigned int, call)
+ __field(enum rxrpc_tx_point, where)
+ __field_struct(struct rxrpc_wire_header, whdr)
),
TP_fast_assign(
@@ -1074,14 +1084,14 @@ TRACE_EVENT(rxrpc_tx_data,
TP_ARGS(call, seq, serial, flags, retrans, lose),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_serial_t, serial )
- __field(u32, cid )
- __field(u32, call_id )
- __field(u8, flags )
- __field(bool, retrans )
- __field(bool, lose )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(u8, flags)
+ __field(bool, retrans)
+ __field(bool, lose)
),
TP_fast_assign(
@@ -1109,17 +1119,18 @@ TRACE_EVENT(rxrpc_tx_data,
TRACE_EVENT(rxrpc_tx_ack,
TP_PROTO(unsigned int call, rxrpc_serial_t serial,
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
- u8 reason, u8 n_acks),
+ u8 reason, u8 n_acks, u16 rwind),
- TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, ack_first )
- __field(rxrpc_serial_t, ack_serial )
- __field(u8, reason )
- __field(u8, n_acks )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, ack_first)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(u8, reason)
+ __field(u8, n_acks)
+ __field(u16, rwind)
),
TP_fast_assign(
@@ -1129,15 +1140,17 @@ TRACE_EVENT(rxrpc_tx_ack,
__entry->ack_serial = ack_serial;
__entry->reason = reason;
__entry->n_acks = n_acks;
+ __entry->rwind = rwind;
),
- TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u",
+ TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
__entry->ack_first,
__entry->ack_serial,
- __entry->n_acks)
+ __entry->n_acks,
+ __entry->rwind)
);
TRACE_EVENT(rxrpc_receive,
@@ -1147,11 +1160,12 @@ TRACE_EVENT(rxrpc_receive,
TP_ARGS(call, why, serial, seq),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_receive_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, seq )
- __field(u64, window )
+ __field(unsigned int, call)
+ __field(enum rxrpc_receive_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, window)
+ __field(rxrpc_seq_t, wtop)
),
TP_fast_assign(
@@ -1159,7 +1173,8 @@ TRACE_EVENT(rxrpc_receive,
__entry->why = why;
__entry->serial = serial;
__entry->seq = seq;
- __entry->window = atomic64_read(&call->ackr_window);
+ __entry->window = call->ackr_window;
+ __entry->wtop = call->ackr_wtop;
),
TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x",
@@ -1167,8 +1182,8 @@ TRACE_EVENT(rxrpc_receive,
__print_symbolic(__entry->why, rxrpc_receive_traces),
__entry->serial,
__entry->seq,
- lower_32_bits(__entry->window),
- upper_32_bits(__entry->window))
+ __entry->window,
+ __entry->wtop)
);
TRACE_EVENT(rxrpc_recvmsg,
@@ -1178,9 +1193,9 @@ TRACE_EVENT(rxrpc_recvmsg,
TP_ARGS(call_debug_id, why, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_recvmsg_trace, why )
- __field(int, ret )
+ __field(unsigned int, call)
+ __field(enum rxrpc_recvmsg_trace, why)
+ __field(int, ret)
),
TP_fast_assign(
@@ -1203,12 +1218,12 @@ TRACE_EVENT(rxrpc_recvdata,
TP_ARGS(call, why, seq, offset, len, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_recvmsg_trace, why )
- __field(rxrpc_seq_t, seq )
- __field(unsigned int, offset )
- __field(unsigned int, len )
- __field(int, ret )
+ __field(unsigned int, call)
+ __field(enum rxrpc_recvmsg_trace, why)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, offset)
+ __field(unsigned int, len)
+ __field(int, ret)
),
TP_fast_assign(
@@ -1236,10 +1251,10 @@ TRACE_EVENT(rxrpc_rtt_tx,
TP_ARGS(call, why, slot, send_serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_rtt_tx_trace, why )
- __field(int, slot )
- __field(rxrpc_serial_t, send_serial )
+ __field(unsigned int, call)
+ __field(enum rxrpc_rtt_tx_trace, why)
+ __field(int, slot)
+ __field(rxrpc_serial_t, send_serial)
),
TP_fast_assign(
@@ -1265,13 +1280,13 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_rtt_rx_trace, why )
- __field(int, slot )
- __field(rxrpc_serial_t, send_serial )
- __field(rxrpc_serial_t, resp_serial )
- __field(u32, rtt )
- __field(u32, rto )
+ __field(unsigned int, call)
+ __field(enum rxrpc_rtt_rx_trace, why)
+ __field(int, slot)
+ __field(rxrpc_serial_t, send_serial)
+ __field(rxrpc_serial_t, resp_serial)
+ __field(u32, rtt)
+ __field(u32, rto)
),
TP_fast_assign(
@@ -1301,17 +1316,17 @@ TRACE_EVENT(rxrpc_timer,
TP_ARGS(call, why, now),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_timer_trace, why )
- __field(long, now )
- __field(long, ack_at )
- __field(long, ack_lost_at )
- __field(long, resend_at )
- __field(long, ping_at )
- __field(long, expect_rx_by )
- __field(long, expect_req_by )
- __field(long, expect_term_by )
- __field(long, timer )
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ __field(long, now)
+ __field(long, ack_at)
+ __field(long, ack_lost_at)
+ __field(long, resend_at)
+ __field(long, ping_at)
+ __field(long, expect_rx_by)
+ __field(long, expect_req_by)
+ __field(long, expect_term_by)
+ __field(long, timer)
),
TP_fast_assign(
@@ -1345,16 +1360,16 @@ TRACE_EVENT(rxrpc_timer_expired,
TP_ARGS(call, now),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(long, now )
- __field(long, ack_at )
- __field(long, ack_lost_at )
- __field(long, resend_at )
- __field(long, ping_at )
- __field(long, expect_rx_by )
- __field(long, expect_req_by )
- __field(long, expect_term_by )
- __field(long, timer )
+ __field(unsigned int, call)
+ __field(long, now)
+ __field(long, ack_at)
+ __field(long, ack_lost_at)
+ __field(long, resend_at)
+ __field(long, ping_at)
+ __field(long, expect_rx_by)
+ __field(long, expect_req_by)
+ __field(long, expect_term_by)
+ __field(long, timer)
),
TP_fast_assign(
@@ -1386,7 +1401,7 @@ TRACE_EVENT(rxrpc_rx_lose,
TP_ARGS(sp),
TP_STRUCT__entry(
- __field_struct(struct rxrpc_host_header, hdr )
+ __field_struct(struct rxrpc_host_header, hdr)
),
TP_fast_assign(
@@ -1409,10 +1424,10 @@ TRACE_EVENT(rxrpc_propose_ack,
TP_ARGS(call, why, ack_reason, serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
),
TP_fast_assign(
@@ -1436,10 +1451,10 @@ TRACE_EVENT(rxrpc_send_ack,
TP_ARGS(call, why, ack_reason, serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
),
TP_fast_assign(
@@ -1463,11 +1478,11 @@ TRACE_EVENT(rxrpc_drop_ack,
TP_ARGS(call, why, ack_reason, serial, nobuf),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
- __field(bool, nobuf )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
+ __field(bool, nobuf)
),
TP_fast_assign(
@@ -1491,9 +1506,9 @@ TRACE_EVENT(rxrpc_retransmit,
TP_ARGS(call, seq, expiry),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(s64, expiry )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(s64, expiry)
),
TP_fast_assign(
@@ -1515,13 +1530,13 @@ TRACE_EVENT(rxrpc_congest,
TP_ARGS(call, summary, ack_serial, change),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_congest_change, change )
- __field(rxrpc_seq_t, hard_ack )
- __field(rxrpc_seq_t, top )
- __field(rxrpc_seq_t, lowest_nak )
- __field(rxrpc_serial_t, ack_serial )
- __field_struct(struct rxrpc_ack_summary, sum )
+ __field(unsigned int, call)
+ __field(enum rxrpc_congest_change, change)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(rxrpc_seq_t, top)
+ __field(rxrpc_seq_t, lowest_nak)
+ __field(rxrpc_serial_t, ack_serial)
+ __field_struct(struct rxrpc_ack_summary, sum)
),
TP_fast_assign(
@@ -1559,14 +1574,14 @@ TRACE_EVENT(rxrpc_reset_cwnd,
TP_ARGS(call, now),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_congest_mode, mode )
- __field(unsigned short, cwnd )
- __field(unsigned short, extra )
- __field(rxrpc_seq_t, hard_ack )
- __field(rxrpc_seq_t, prepared )
- __field(ktime_t, since_last_tx )
- __field(bool, has_data )
+ __field(unsigned int, call)
+ __field(enum rxrpc_congest_mode, mode)
+ __field(unsigned short, cwnd)
+ __field(unsigned short, extra)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(rxrpc_seq_t, prepared)
+ __field(ktime_t, since_last_tx)
+ __field(bool, has_data)
),
TP_fast_assign(
@@ -1597,8 +1612,8 @@ TRACE_EVENT(rxrpc_disconnect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -1617,8 +1632,8 @@ TRACE_EVENT(rxrpc_improper_term,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -1637,11 +1652,11 @@ TRACE_EVENT(rxrpc_connect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned long, user_call_ID )
- __field(u32, cid )
- __field(u32, call_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned long, user_call_ID)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1666,10 +1681,10 @@ TRACE_EVENT(rxrpc_resend,
TP_ARGS(call, ack),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_seq_t, transmitted )
- __field(rxrpc_serial_t, ack_serial )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, transmitted)
+ __field(rxrpc_serial_t, ack_serial)
),
TP_fast_assign(
@@ -1694,9 +1709,9 @@ TRACE_EVENT(rxrpc_rx_icmp,
TP_ARGS(peer, ee, srx),
TP_STRUCT__entry(
- __field(unsigned int, peer )
- __field_struct(struct sock_extended_err, ee )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, peer)
+ __field_struct(struct sock_extended_err, ee)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1723,10 +1738,10 @@ TRACE_EVENT(rxrpc_tx_fail,
TP_ARGS(debug_id, serial, ret, where),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
- __field(int, ret )
- __field(enum rxrpc_tx_point, where )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
+ __field(int, ret)
+ __field(enum rxrpc_tx_point, where)
),
TP_fast_assign(
@@ -1749,13 +1764,13 @@ TRACE_EVENT(rxrpc_call_reset,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(u32, cid )
- __field(u32, call_id )
- __field(rxrpc_serial_t, call_serial )
- __field(rxrpc_serial_t, conn_serial )
- __field(rxrpc_seq_t, tx_seq )
- __field(rxrpc_seq_t, rx_seq )
+ __field(unsigned int, debug_id)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(rxrpc_serial_t, call_serial)
+ __field(rxrpc_serial_t, conn_serial)
+ __field(rxrpc_seq_t, tx_seq)
+ __field(rxrpc_seq_t, rx_seq)
),
TP_fast_assign(
@@ -1781,8 +1796,8 @@ TRACE_EVENT(rxrpc_notify_socket,
TP_ARGS(debug_id, serial),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
),
TP_fast_assign(
@@ -1804,8 +1819,8 @@ TRACE_EVENT(rxrpc_rx_discard_ack,
prev_pkt, call_ackr_prev),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
__field(rxrpc_seq_t, first_soft_ack)
__field(rxrpc_seq_t, call_ackr_first)
__field(rxrpc_seq_t, prev_pkt)
@@ -1837,9 +1852,9 @@ TRACE_EVENT(rxrpc_req_ack,
TP_ARGS(call_debug_id, seq, why),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
- __field(rxrpc_seq_t, seq )
- __field(enum rxrpc_req_ack_trace, why )
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_req_ack_trace, why)
),
TP_fast_assign(
@@ -1862,11 +1877,11 @@ TRACE_EVENT(rxrpc_txbuf,
TP_ARGS(debug_id, call_debug_id, seq, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(unsigned int, call_debug_id )
- __field(rxrpc_seq_t, seq )
- __field(int, ref )
- __field(enum rxrpc_txbuf_trace, what )
+ __field(unsigned int, debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(int, ref)
+ __field(enum rxrpc_txbuf_trace, what)
),
TP_fast_assign(
@@ -1892,9 +1907,9 @@ TRACE_EVENT(rxrpc_poke_call,
TP_ARGS(call, busy, what),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
- __field(bool, busy )
- __field(enum rxrpc_call_poke_trace, what )
+ __field(unsigned int, call_debug_id)
+ __field(bool, busy)
+ __field(enum rxrpc_call_poke_trace, what)
),
TP_fast_assign(
@@ -1915,7 +1930,7 @@ TRACE_EVENT(rxrpc_call_poked,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
+ __field(unsigned int, call_debug_id)
),
TP_fast_assign(
@@ -1926,6 +1941,33 @@ TRACE_EVENT(rxrpc_call_poked,
__entry->call_debug_id)
);
+TRACE_EVENT(rxrpc_sack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ unsigned int sack, enum rxrpc_sack_trace what),
+
+ TP_ARGS(call, seq, sack, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, sack)
+ __field(enum rxrpc_sack_trace, what)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ __entry->seq = seq;
+ __entry->sack = sack;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%08x q=%08x %s k=%x",
+ __entry->call_debug_id,
+ __entry->seq,
+ __print_symbolic(__entry->what, rxrpc_sack_traces),
+ __entry->sack)
+ );
+
#undef EM
#undef E_
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 25ab1ff9423d..07e0715628ec 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -53,19 +53,21 @@ TRACE_EVENT(kfree_skb,
TRACE_EVENT(consume_skb,
- TP_PROTO(struct sk_buff *skb),
+ TP_PROTO(struct sk_buff *skb, void *location),
- TP_ARGS(skb),
+ TP_ARGS(skb, location),
TP_STRUCT__entry(
- __field( void *, skbaddr )
+ __field( void *, skbaddr)
+ __field( void *, location)
),
TP_fast_assign(
__entry->skbaddr = skb;
+ __entry->location = location;
),
- TP_printk("skbaddr=%p", __entry->skbaddr)
+ TP_printk("skbaddr=%p location=%pS", __entry->skbaddr, __entry->location)
);
TRACE_EVENT(skb_copy_datagram_iovec,
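
The new location argument records which function dropped the last reference,
so the trace output can distinguish consumers. A hedged sketch of a call
site, passing the return address of the releasing function:

	/* Sketch: report the caller that consumed this skb. */
	trace_consume_skb(skb, __builtin_return_address(0));
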
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 777ee6cbe933..03d19fc562f8 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -263,6 +263,75 @@ TRACE_EVENT(inet_sk_error_report,
__entry->error)
);
+TRACE_EVENT(sk_data_ready,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __field(unsigned long, ip)
+ ),
+
+ TP_fast_assign(
+ __entry->skaddr = sk;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->ip = _RET_IP_;
+ ),
+
+ TP_printk("family=%u protocol=%u func=%ps",
+ __entry->family, __entry->protocol, (void *)__entry->ip)
+);
+
+/*
+ * sock send/recv msg length
+ */
+DECLARE_EVENT_CLASS(sock_msg_length,
+
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags),
+
+ TP_STRUCT__entry(
+ __field(void *, sk)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __field(int, ret)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->ret = ret;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("sk address = %p, family = %s protocol = %s, length = %d, error = %d, flags = 0x%x",
+ __entry->sk, show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ !(__entry->flags & MSG_PEEK) ?
+ (__entry->ret > 0 ? __entry->ret : 0) : 0,
+ __entry->ret < 0 ? __entry->ret : 0,
+ __entry->flags)
+);
+
+DEFINE_EVENT(sock_msg_length, sock_send_length,
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags)
+);
+
+DEFINE_EVENT(sock_msg_length, sock_recv_length,
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags)
+);
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
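
The sock_msg_length class folds a single sendmsg/recvmsg return value into
the printout: a positive ret without MSG_PEEK is counted as bytes moved, and
a negative ret is shown as the error. A hedged sketch of how a send path
would emit the event (invocation simplified, not a verbatim excerpt of
net/socket.c):

	ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
	trace_sock_send_length(sock->sk, ret, 0);
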
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index ea4692c339ce..9204e4494b25 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -26,6 +26,7 @@
* @BATADV_CODED: network coded packets
* @BATADV_ELP: echo location packets for B.A.T.M.A.N. V
* @BATADV_OGM2: originator messages for B.A.T.M.A.N. V
+ * @BATADV_MCAST: multicast packet with multiple destination addresses
*
* @BATADV_UNICAST: unicast packets carrying unicast payload traffic
* @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
@@ -42,6 +43,7 @@ enum batadv_packettype {
BATADV_CODED = 0x02,
BATADV_ELP = 0x03,
BATADV_OGM2 = 0x04,
+ BATADV_MCAST = 0x05,
/* 0x40 - 0x7f: unicast */
#define BATADV_UNICAST_MIN 0x40
BATADV_UNICAST = 0x40,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 464ca3f01fe7..62ce1f5d1b1d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1156,6 +1156,11 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
+ * program becomes device-bound but can access XDP metadata.
+ */
+#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
@@ -2001,6 +2006,9 @@ union bpf_attr {
* sending the packet. This flag was added for GRE
* encapsulation, but might be used with other protocols
* as well in the future.
+ * **BPF_F_NO_TUNNEL_KEY**
+ * Add a flag to tunnel metadata indicating that no tunnel
+ * key should be set in the resulting tunnel header.
*
* Here is a typical usage on the transmit path:
*
@@ -2644,6 +2652,11 @@ union bpf_attr {
* Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
* L2 type as Ethernet.
*
+ * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
+ * Indicate the new IP header version after decapsulating the outer
+ * IP header. Used when the inner and outer IP versions are different.
+ *
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
@@ -2788,7 +2801,7 @@ union bpf_attr {
*
* long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
- * For en eBPF program attached to a perf event, retrieve the
+ * For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
* the structure pointed by *buf* and of size *buf_size*. Enabled
* and running times are also stored in the structure (see
@@ -3121,6 +3134,11 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
+ * **BPF_FIB_LOOKUP_SKIP_NEIGH**
+ * Skip the neighbour table lookup. *params*->dmac
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@@ -5764,6 +5782,7 @@ enum {
BPF_F_ZERO_CSUM_TX = (1ULL << 1),
BPF_F_DONT_FRAGMENT = (1ULL << 2),
BPF_F_SEQ_NUMBER = (1ULL << 3),
+ BPF_F_NO_TUNNEL_KEY = (1ULL << 4),
};
/* BPF_FUNC_skb_get_tunnel_key flags. */
@@ -5803,6 +5822,8 @@ enum {
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8),
};
enum {
@@ -6734,6 +6755,7 @@ struct bpf_raw_tracepoint_args {
enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
};
enum {
@@ -6901,6 +6923,17 @@ struct bpf_list_node {
__u64 :64;
} __attribute__((aligned(8)));
+struct bpf_rb_root {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_rb_node {
+ __u64 :64;
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
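
BPF_F_NO_TUNNEL_KEY pairs with bpf_skb_set_tunnel_key() for tunnel devices
configured without a key, such as keyless GRE in collect-metadata mode. A
hedged tc-BPF sketch; the peer address is an example value:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int set_keyless_tunnel(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	key.remote_ipv4 = 0xac100164;	/* 172.16.1.100, example peer */
	key.tunnel_ttl = 64;

	/* No tunnel key is written into the encap header. */
	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX | BPF_F_NO_TUNNEL_KEY))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}
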
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 99047223ab26..7e15214aa5dd 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -411,6 +411,7 @@ enum dcbnl_attrs {
* @DCB_ATTR_IEEE_PEER_PFC: peer PFC configuration - get only
* @DCB_ATTR_IEEE_PEER_APP: peer APP tlv - get only
* @DCB_ATTR_DCB_APP_TRUST_TABLE: selector trust table
+ * @DCB_ATTR_DCB_REWR_TABLE: rewrite configuration
*/
enum ieee_attrs {
DCB_ATTR_IEEE_UNSPEC,
@@ -425,6 +426,7 @@ enum ieee_attrs {
DCB_ATTR_IEEE_QCN_STATS,
DCB_ATTR_DCB_BUFFER,
DCB_ATTR_DCB_APP_TRUST_TABLE,
+ DCB_ATTR_DCB_REWR_TABLE,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 58e587ba0450..f7fba0dc87e5 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -712,6 +712,24 @@ enum ethtool_stringset {
};
/**
+ * enum ethtool_mac_stats_src - source of ethtool MAC statistics
+ * @ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ * if device supports a MAC merge layer, this retrieves the aggregate
+ * statistics of the eMAC and pMAC. Otherwise, it retrieves just the
+ * statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_EMAC:
+ * if device supports a MM layer, this retrieves the eMAC statistics.
+ * Otherwise, it retrieves the statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_PMAC:
+ * if device supports a MM layer, this retrieves the pMAC statistics.
+ */
+enum ethtool_mac_stats_src {
+ ETHTOOL_MAC_STATS_SRC_AGGREGATE,
+ ETHTOOL_MAC_STATS_SRC_EMAC,
+ ETHTOOL_MAC_STATS_SRC_PMAC,
+};
+
+/**
* enum ethtool_module_power_mode_policy - plug-in module power mode policy
* @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode.
* @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host
@@ -780,6 +798,31 @@ enum ethtool_podl_pse_pw_d_status {
};
/**
+ * enum ethtool_mm_verify_status - status of MAC Merge Verify function
+ * @ETHTOOL_MM_VERIFY_STATUS_UNKNOWN:
+ * verification status is unknown
+ * @ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ * the 802.3 Verify State diagram is in the state INIT_VERIFICATION
+ * @ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ * the Verify State diagram is in the state VERIFICATION_IDLE,
+ * SEND_VERIFY or WAIT_FOR_RESPONSE
+ * @ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ * indicates that the Verify State diagram is in the state VERIFIED
+ * @ETHTOOL_MM_VERIFY_STATUS_FAILED:
+ * the Verify State diagram is in the state VERIFY_FAIL
+ * @ETHTOOL_MM_VERIFY_STATUS_DISABLED:
+ * verification of preemption operation is disabled
+ */
+enum ethtool_mm_verify_status {
+ ETHTOOL_MM_VERIFY_STATUS_UNKNOWN,
+ ETHTOOL_MM_VERIFY_STATUS_INITIAL,
+ ETHTOOL_MM_VERIFY_STATUS_VERIFYING,
+ ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED,
+ ETHTOOL_MM_VERIFY_STATUS_FAILED,
+ ETHTOOL_MM_VERIFY_STATUS_DISABLED,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
@@ -1183,7 +1226,7 @@ struct ethtool_rxnfc {
__u32 rule_cnt;
__u32 rss_context;
};
- __u32 rule_locs[0];
+ __u32 rule_locs[];
};
@@ -1741,6 +1784,9 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96,
ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97,
ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98,
+ ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99,
+ ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100,
+ ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
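
Drivers report the state of the 802.3 Verify function through
enum ethtool_mm_verify_status from the new get_mm ethtool operation. A
hedged sketch of such an implementation; struct my_priv and the my_hw_*()
accessors are hypothetical stand-ins for real device access:

static int my_get_mm(struct net_device *dev, struct ethtool_mm_state *state)
{
	struct my_priv *priv = netdev_priv(dev);

	state->pmac_enabled = my_hw_pmac_enabled(priv);
	state->tx_enabled = my_hw_tx_preemption_enabled(priv);
	state->verify_enabled = my_hw_verify_enabled(priv);
	state->verify_status = my_hw_verify_ok(priv) ?
			       ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED :
			       ETHTOOL_MM_VERIFY_STATUS_FAILED;
	/* TX preemption only becomes active once verification succeeds. */
	state->tx_active = state->tx_enabled &&
			   state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
	state->tx_min_frag_size = 64;
	state->rx_min_frag_size = 64;
	state->max_verify_time = 128;

	return 0;
}
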
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 5799a9db034e..d39ce21381c5 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -52,6 +52,11 @@ enum {
ETHTOOL_MSG_PSE_GET,
ETHTOOL_MSG_PSE_SET,
ETHTOOL_MSG_RSS_GET,
+ ETHTOOL_MSG_PLCA_GET_CFG,
+ ETHTOOL_MSG_PLCA_SET_CFG,
+ ETHTOOL_MSG_PLCA_GET_STATUS,
+ ETHTOOL_MSG_MM_GET,
+ ETHTOOL_MSG_MM_SET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -99,6 +104,11 @@ enum {
ETHTOOL_MSG_MODULE_NTF,
ETHTOOL_MSG_PSE_GET_REPLY,
ETHTOOL_MSG_RSS_GET_REPLY,
+ ETHTOOL_MSG_PLCA_GET_CFG_REPLY,
+ ETHTOOL_MSG_PLCA_GET_STATUS_REPLY,
+ ETHTOOL_MSG_PLCA_NTF,
+ ETHTOOL_MSG_MM_GET_REPLY,
+ ETHTOOL_MSG_MM_NTF,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -346,6 +356,7 @@ enum {
ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */
ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
+ ETHTOOL_A_RINGS_RX_PUSH, /* u8 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
@@ -400,6 +411,9 @@ enum {
ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, /* u32 */
ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, /* u8 */
ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, /* u8 */
+ ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, /* u32 */
+ ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, /* u32 */
+ ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, /* u32 */
/* add new constants above here */
__ETHTOOL_A_COALESCE_CNT,
@@ -415,6 +429,7 @@ enum {
ETHTOOL_A_PAUSE_RX, /* u8 */
ETHTOOL_A_PAUSE_TX, /* u8 */
ETHTOOL_A_PAUSE_STATS, /* nest - _PAUSE_STAT_* */
+ ETHTOOL_A_PAUSE_STATS_SRC, /* u32 */
/* add new constants above here */
__ETHTOOL_A_PAUSE_CNT,
@@ -731,6 +746,8 @@ enum {
ETHTOOL_A_STATS_GRP, /* nest - _A_STATS_GRP_* */
+ ETHTOOL_A_STATS_SRC, /* u32 */
+
/* add new constants above here */
__ETHTOOL_A_STATS_CNT,
ETHTOOL_A_STATS_MAX = (__ETHTOOL_A_STATS_CNT - 1)
@@ -894,6 +911,68 @@ enum {
ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),
};
+/* PLCA */
+
+enum {
+ ETHTOOL_A_PLCA_UNSPEC,
+ ETHTOOL_A_PLCA_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PLCA_VERSION, /* u16 */
+ ETHTOOL_A_PLCA_ENABLED, /* u8 */
+ ETHTOOL_A_PLCA_STATUS, /* u8 */
+ ETHTOOL_A_PLCA_NODE_CNT, /* u32 */
+ ETHTOOL_A_PLCA_NODE_ID, /* u32 */
+ ETHTOOL_A_PLCA_TO_TMR, /* u32 */
+ ETHTOOL_A_PLCA_BURST_CNT, /* u32 */
+ ETHTOOL_A_PLCA_BURST_TMR, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PLCA_CNT,
+ ETHTOOL_A_PLCA_MAX = (__ETHTOOL_A_PLCA_CNT - 1)
+};
+
+/* MAC Merge (802.3) */
+
+enum {
+ ETHTOOL_A_MM_STAT_UNSPEC,
+ ETHTOOL_A_MM_STAT_PAD,
+
+ /* aMACMergeFrameAssErrorCount */
+ ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS, /* u64 */
+ /* aMACMergeFrameSmdErrorCount */
+ ETHTOOL_A_MM_STAT_SMD_ERRORS, /* u64 */
+ /* aMACMergeFrameAssOkCount */
+ ETHTOOL_A_MM_STAT_REASSEMBLY_OK, /* u64 */
+ /* aMACMergeFragCountRx */
+ ETHTOOL_A_MM_STAT_RX_FRAG_COUNT, /* u64 */
+ /* aMACMergeFragCountTx */
+ ETHTOOL_A_MM_STAT_TX_FRAG_COUNT, /* u64 */
+ /* aMACMergeHoldCount */
+ ETHTOOL_A_MM_STAT_HOLD_COUNT, /* u64 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MM_STAT_CNT,
+ ETHTOOL_A_MM_STAT_MAX = (__ETHTOOL_A_MM_STAT_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_MM_UNSPEC,
+ ETHTOOL_A_MM_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_MM_PMAC_ENABLED, /* u8 */
+ ETHTOOL_A_MM_TX_ENABLED, /* u8 */
+ ETHTOOL_A_MM_TX_ACTIVE, /* u8 */
+ ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, /* u32 */
+ ETHTOOL_A_MM_RX_MIN_FRAG_SIZE, /* u32 */
+ ETHTOOL_A_MM_VERIFY_ENABLED, /* u8 */
+ ETHTOOL_A_MM_VERIFY_STATUS, /* u8 */
+ ETHTOOL_A_MM_VERIFY_TIME, /* u32 */
+ ETHTOOL_A_MM_MAX_VERIFY_TIME, /* u32 */
+ ETHTOOL_A_MM_STATS, /* nest - _A_MM_STAT_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MM_CNT,
+ ETHTOOL_A_MM_MAX = (__ETHTOOL_A_MM_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
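
The ETHTOOL_A_MM_STAT_* attributes mirror the 802.3 aMACMerge* counters
one-to-one; a driver only fills struct ethtool_mm_stats and the ethtool core
emits the nest. A hedged sketch, with my_read_counter() and the MY_*
register IDs as hypothetical device accessors:

static void my_get_mm_stats(struct net_device *dev,
			    struct ethtool_mm_stats *stats)
{
	struct my_priv *priv = netdev_priv(dev);

	stats->MACMergeFrameAssErrorCount = my_read_counter(priv, MY_ASS_ERR);
	stats->MACMergeFrameSmdErrorCount = my_read_counter(priv, MY_SMD_ERR);
	stats->MACMergeFrameAssOkCount = my_read_counter(priv, MY_ASS_OK);
	stats->MACMergeFragCountRx = my_read_counter(priv, MY_RX_FRAG);
	stats->MACMergeFragCountTx = my_read_counter(priv, MY_TX_FRAG);
	stats->MACMergeHoldCount = my_read_counter(priv, MY_HOLD);
}
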
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index 87c2c9f08803..19ebbef41a63 100644
--- a/include/uapi/linux/fou.h
+++ b/include/uapi/linux/fou.h
@@ -1,32 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* fou.h - FOU Interface */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/fou.yaml */
+/* YNL-GEN uapi header */
#ifndef _UAPI_LINUX_FOU_H
#define _UAPI_LINUX_FOU_H
-/* NETLINK_GENERIC related info
- */
#define FOU_GENL_NAME "fou"
-#define FOU_GENL_VERSION 0x1
+#define FOU_GENL_VERSION 1
enum {
- FOU_ATTR_UNSPEC,
- FOU_ATTR_PORT, /* u16 */
- FOU_ATTR_AF, /* u8 */
- FOU_ATTR_IPPROTO, /* u8 */
- FOU_ATTR_TYPE, /* u8 */
- FOU_ATTR_REMCSUM_NOPARTIAL, /* flag */
- FOU_ATTR_LOCAL_V4, /* u32 */
- FOU_ATTR_LOCAL_V6, /* in6_addr */
- FOU_ATTR_PEER_V4, /* u32 */
- FOU_ATTR_PEER_V6, /* in6_addr */
- FOU_ATTR_PEER_PORT, /* u16 */
- FOU_ATTR_IFINDEX, /* s32 */
-
- __FOU_ATTR_MAX,
+ FOU_ENCAP_UNSPEC,
+ FOU_ENCAP_DIRECT,
+ FOU_ENCAP_GUE,
};
-#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
+enum {
+ FOU_ATTR_UNSPEC,
+ FOU_ATTR_PORT,
+ FOU_ATTR_AF,
+ FOU_ATTR_IPPROTO,
+ FOU_ATTR_TYPE,
+ FOU_ATTR_REMCSUM_NOPARTIAL,
+ FOU_ATTR_LOCAL_V4,
+ FOU_ATTR_LOCAL_V6,
+ FOU_ATTR_PEER_V4,
+ FOU_ATTR_PEER_V6,
+ FOU_ATTR_PEER_PORT,
+ FOU_ATTR_IFINDEX,
+
+ __FOU_ATTR_MAX
+};
+#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
enum {
FOU_CMD_UNSPEC,
@@ -34,15 +39,8 @@ enum {
FOU_CMD_DEL,
FOU_CMD_GET,
- __FOU_CMD_MAX,
+ __FOU_CMD_MAX
};
-
-enum {
- FOU_ENCAP_UNSPEC,
- FOU_ENCAP_DIRECT,
- FOU_ENCAP_GUE,
-};
-
-#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
+#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
#endif /* _UAPI_LINUX_FOU_H */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index d9de241d90f9..d60c456710b3 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -523,6 +523,8 @@ enum {
BRIDGE_VLANDB_ENTRY_TUNNEL_INFO,
BRIDGE_VLANDB_ENTRY_STATS,
BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
+ BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS,
+ BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS,
__BRIDGE_VLANDB_ENTRY_MAX,
};
#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 1021a7e47a86..57ceb788250f 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -374,6 +374,9 @@ enum {
IFLA_DEVLINK_PORT,
+ IFLA_GSO_IPV4_MAX_SIZE,
+ IFLA_GRO_IPV4_MAX_SIZE,
+
__IFLA_MAX
};
@@ -564,6 +567,8 @@ enum {
IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
IFLA_BRPORT_LOCKED,
IFLA_BRPORT_MAB,
+ IFLA_BRPORT_MCAST_N_GROUPS,
+ IFLA_BRPORT_MCAST_MAX_GROUPS,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index a8516b3594a4..78c981d6a9d4 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -115,6 +115,7 @@ struct tpacket_auxdata {
#define TP_STATUS_BLK_TMO (1 << 5)
#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
#define TP_STATUS_CSUM_VALID (1 << 7)
+#define TP_STATUS_GSO_TCP (1 << 8)
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0
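
TP_STATUS_GSO_TCP marks rx ring frames that carry a TCP GSO aggregate, so a
PACKET_RX_RING consumer can no longer assume one frame equals one wire
packet. A hedged sketch of the check on a TPACKET_V3 frame header:

#include <linux/if_packet.h>

/* Returns nonzero when tp_len covers an aggregated super-packet rather
 * than a single MTU-sized frame; segment in user space if wire-shaped
 * packets are needed.
 */
static int frame_is_gso_tcp(const struct tpacket3_hdr *hdr)
{
	return hdr->tp_status & TP_STATUS_GSO_TCP;
}
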
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 07a4cb149305..4b7f2df66b99 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -162,6 +162,7 @@ struct in_addr {
#define MCAST_MSFILTER 48
#define IP_MULTICAST_ALL 49
#define IP_UNICAST_IF 50
+#define IP_LOCAL_PORT_RANGE 51
#define MCAST_EXCLUDE 0
#define MCAST_INCLUDE 1
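
IP_LOCAL_PORT_RANGE narrows the ephemeral port range for a single socket.
The option value is one u32; per the series description, the upper bound
sits in the high 16 bits and the lower bound in the low 16 bits (a hedged
reading of the encoding). A sketch:

#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51	/* from the uapi addition above */
#endif

/* Hedged sketch: restrict this socket's local ports to 40000..40511. */
static int restrict_local_ports(int fd)
{
	uint32_t range = (40511u << 16) | 40000u;

	return setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
			  &range, sizeof(range));
}
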
diff --git a/include/uapi/linux/ioam6.h b/include/uapi/linux/ioam6.h
index ac4de376f0ce..8f72b24fefb3 100644
--- a/include/uapi/linux/ioam6.h
+++ b/include/uapi/linux/ioam6.h
@@ -127,7 +127,7 @@ struct ioam6_trace_hdr {
#endif
#define IOAM6_TRACE_DATA_SIZE_MAX 244
- __u8 data[0];
+ __u8 data[];
} __attribute__((packed));
#endif /* _UAPI_LINUX_IOAM6_H */
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index 75b7257a51e1..256b463e47a6 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -79,6 +79,8 @@
#define MDIO_AN_T1_LP_L 517 /* BASE-T1 AN LP Base Page ability register [15:0] */
#define MDIO_AN_T1_LP_M 518 /* BASE-T1 AN LP Base Page ability register [31:16] */
#define MDIO_AN_T1_LP_H 519 /* BASE-T1 AN LP Base Page ability register [47:32] */
+#define MDIO_AN_10BT1_AN_CTRL 526 /* 10BASE-T1 AN control register */
+#define MDIO_AN_10BT1_AN_STAT 527 /* 10BASE-T1 AN status register */
#define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */
/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
@@ -340,6 +342,12 @@
#define MDIO_AN_T1_LP_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level LP Transmit Request */
#define MDIO_AN_T1_LP_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level LP Transmit Ability */
+/* 10BASE-T1 AN control register */
+#define MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L 0x4000 /* 10BASE-T1L EEE ability advertisement */
+
+/* 10BASE-T1 AN status register */
+#define MDIO_AN_10BT1_AN_STAT_LPA_EEE_T1L 0x4000 /* 10BASE-T1L LP EEE ability advertisement */
+
/* BASE-T1 PMA/PMD control register */
#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */
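
The new Clause 45 registers let phylib drivers read 10BASE-T1L EEE
advertisement state. A hedged sketch using the standard phy_read_mmd()
accessor on the AN MMD:

/* Hedged sketch: does the link partner advertise 10BASE-T1L EEE? */
static int lp_advertises_eee_t1l(struct phy_device *phydev)
{
	int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10BT1_AN_STAT);

	if (val < 0)
		return val;

	return !!(val & MDIO_AN_10BT1_AN_STAT_LPA_EEE_T1L);
}
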
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
new file mode 100644
index 000000000000..9ee459872600
--- /dev/null
+++ b/include/uapi/linux/netdev.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NETDEV_H
+#define _UAPI_LINUX_NETDEV_H
+
+#define NETDEV_FAMILY_NAME "netdev"
+#define NETDEV_FAMILY_VERSION 1
+
+/**
+ * enum netdev_xdp_act
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
+ * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
+ * ndo_xdp_xmit callback.
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
+ * in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
+ * offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
+ * XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
+ * non-linear XDP buffer support in ndo_xdp_xmit callback.
+ */
+enum netdev_xdp_act {
+ NETDEV_XDP_ACT_BASIC = 1,
+ NETDEV_XDP_ACT_REDIRECT = 2,
+ NETDEV_XDP_ACT_NDO_XMIT = 4,
+ NETDEV_XDP_ACT_XSK_ZEROCOPY = 8,
+ NETDEV_XDP_ACT_HW_OFFLOAD = 16,
+ NETDEV_XDP_ACT_RX_SG = 32,
+ NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+};
+
+enum {
+ NETDEV_A_DEV_IFINDEX = 1,
+ NETDEV_A_DEV_PAD,
+ NETDEV_A_DEV_XDP_FEATURES,
+
+ __NETDEV_A_DEV_MAX,
+ NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
+};
+
+enum {
+ NETDEV_CMD_DEV_GET = 1,
+ NETDEV_CMD_DEV_ADD_NTF,
+ NETDEV_CMD_DEV_DEL_NTF,
+ NETDEV_CMD_DEV_CHANGE_NTF,
+
+ __NETDEV_CMD_MAX,
+ NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
+};
+
+#define NETDEV_MCGRP_MGMT "mgmt"
+
+#endif /* _UAPI_LINUX_NETDEV_H */
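
NETDEV_A_DEV_XDP_FEATURES carries a bitmask of enum netdev_xdp_act over the
new "netdev" generic netlink family, letting user space probe capabilities
before loading a program that depends on them. A hedged sketch of the check
once the attribute has been parsed (netlink plumbing omitted):

#include <linux/netdev.h>
#include <stdbool.h>

/* xdp_features is the value read from NETDEV_A_DEV_XDP_FEATURES. */
static bool can_use_redirect(unsigned long long xdp_features)
{
	return (xdp_features & NETDEV_XDP_ACT_REDIRECT) &&
	       (xdp_features & NETDEV_XDP_ACT_NDO_XMIT);
}
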
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index cfa844da1ce6..ff677f3a6cad 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -98,6 +98,13 @@ enum nft_verdicts {
* @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
* @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
* @NFT_MSG_GETRULE_RESET: get rules and reset stateful expressions (enum nft_obj_attributes)
+ * @NFT_MSG_DESTROYTABLE: destroy a table (enum nft_table_attributes)
+ * @NFT_MSG_DESTROYCHAIN: destroy a chain (enum nft_chain_attributes)
+ * @NFT_MSG_DESTROYRULE: destroy a rule (enum nft_rule_attributes)
+ * @NFT_MSG_DESTROYSET: destroy a set (enum nft_set_attributes)
+ * @NFT_MSG_DESTROYSETELEM: destroy a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_DESTROYOBJ: destroy a stateful object (enum nft_object_attributes)
+ * @NFT_MSG_DESTROYFLOWTABLE: destroy flow table (enum nft_flowtable_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -126,6 +133,13 @@ enum nf_tables_msg_types {
NFT_MSG_GETFLOWTABLE,
NFT_MSG_DELFLOWTABLE,
NFT_MSG_GETRULE_RESET,
+ NFT_MSG_DESTROYTABLE,
+ NFT_MSG_DESTROYCHAIN,
+ NFT_MSG_DESTROYRULE,
+ NFT_MSG_DESTROYSET,
+ NFT_MSG_DESTROYSETELEM,
+ NFT_MSG_DESTROYOBJ,
+ NFT_MSG_DESTROYFLOWTABLE,
NFT_MSG_MAX,
};
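
Per the kernel-doc above, each DESTROY message behaves like its DELETE
counterpart except that a missing object is not an error. A hedged sketch of
that semantic only; struct nft_op_ctx and the callback are hypothetical, not
the actual kernel wiring:

static int nft_destroy_like(int (*del)(struct nft_op_ctx *ctx),
			    struct nft_op_ctx *ctx)
{
	int err = del(ctx);

	/* destroy swallows "object does not exist" */
	return err == -ENOENT ? 0 : err;
}
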
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index c14a91bbca7c..f14621a954e1 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -424,7 +424,8 @@
* interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all stations, on the interface identified
- * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and
+ * by %NL80211_ATTR_IFINDEX. For an MLD station, the MLD address is used in
+ * %NL80211_ATTR_MAC. %NL80211_ATTR_MGMT_SUBTYPE and
* %NL80211_ATTR_REASON_CODE can optionally be used to specify which type
* of disconnection indication should be sent to the station
* (Deauthentication or Disassociation frame and reason code for that
@@ -1166,6 +1167,23 @@
* %NL80211_ATTR_STATUS_CODE attribute in %NL80211_CMD_EXTERNAL_AUTH
* command interface.
*
+ * The host driver sends the MLD address of the AP with %NL80211_ATTR_MLD_ADDR
+ * in the %NL80211_CMD_EXTERNAL_AUTH event to tell user space to enable MLO
+ * during the authentication offload in STA mode while connecting to MLD
+ * APs. The host driver should check the %NL80211_ATTR_MLO_SUPPORT flag
+ * capability in %NL80211_CMD_CONNECT to know whether user space supports
+ * enabling MLO during the authentication offload or not.
+ * User space should enable MLO during the authentication only when it
+ * receives the AP MLD address in the authentication offload request. User
+ * space shouldn't enable MLO when the authentication offload request
+ * doesn't indicate the AP MLD address, even if the AP is MLO capable.
+ * User space should use %NL80211_ATTR_MLD_ADDR as the peer's MLD address
+ * and the interface address identified by %NL80211_ATTR_IFINDEX as its own
+ * MLD address. User space and the host driver use MLD addresses in the RA,
+ * TA and BSSID fields of the frames between them, and the host driver
+ * translates the MLD addresses to/from link addresses based on the link
+ * chosen for the authentication.
+ *
* Host driver reports this status on an authentication failure to the
* user space through the connect result as the user space would have
* initiated the connection through the connect request.
@@ -2751,6 +2769,12 @@ enum nl80211_commands {
* the incoming frame RX timestamp.
* @NL80211_ATTR_TD_BITMAP: Transition Disable bitmap, for subsequent
* (re)associations.
+ *
+ * @NL80211_ATTR_PUNCT_BITMAP: (u32) Preamble puncturing bitmap, lowest
+ * bit corresponds to the lowest 20 MHz channel. Each bit set to 1
+ * indicates that the sub-channel is punctured. Higher 16 bits are
+ * reserved.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3280,6 +3304,8 @@ enum nl80211_attrs {
NL80211_ATTR_RX_HW_TIMESTAMP,
NL80211_ATTR_TD_BITMAP,
+ NL80211_ATTR_PUNCT_BITMAP,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -5869,6 +5895,7 @@ enum plink_actions {
#define NL80211_KEK_LEN 16
#define NL80211_KCK_EXT_LEN 24
#define NL80211_KEK_EXT_LEN 32
+#define NL80211_KCK_EXT_LEN_32 32
#define NL80211_REPLAY_CTR_LEN 8
/**
@@ -6294,6 +6321,11 @@ enum nl80211_feature_flags {
* might apply, e.g. no scans in progress, no offchannel operations
* in progress, and no active connections.
*
+ * @NL80211_EXT_FEATURE_PUNCT: Driver supports preamble puncturing in AP mode.
+ *
+ * @NL80211_EXT_FEATURE_SECURE_NAN: Device supports NAN Pairing which enables
+ * authentication, data encryption and message integrity.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6362,6 +6394,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD,
NL80211_EXT_FEATURE_RADAR_BACKGROUND,
NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE,
+ NL80211_EXT_FEATURE_PUNCT,
+ NL80211_EXT_FEATURE_SECURE_NAN,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/rpl.h b/include/uapi/linux/rpl.h
index 708adddf9f13..7c8970e5b84b 100644
--- a/include/uapi/linux/rpl.h
+++ b/include/uapi/linux/rpl.h
@@ -37,8 +37,8 @@ struct ipv6_rpl_sr_hdr {
#endif
union {
- struct in6_addr addr[0];
- __u8 data[0];
+ __DECLARE_FLEX_ARRAY(struct in6_addr, addr);
+ __DECLARE_FLEX_ARRAY(__u8, data);
} segments;
} __attribute__((packed));
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index eb2747d58a81..25a0af57dd5e 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -635,6 +635,7 @@ enum {
TCA_INGRESS_BLOCK,
TCA_EGRESS_BLOCK,
TCA_DUMP_FLAGS,
+ TCA_EXT_WARN_MSG,
__TCA_MAX
};
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 6600cb0164c2..26f33a4c253d 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -95,6 +95,8 @@ enum
ICMP_MIB_OUTADDRMASKS, /* OutAddrMasks */
ICMP_MIB_OUTADDRMASKREPS, /* OutAddrMaskReps */
ICMP_MIB_CSUMERRORS, /* InCsumErrors */
+ ICMP_MIB_RATELIMITGLOBAL, /* OutRateLimitGlobal */
+ ICMP_MIB_RATELIMITHOST, /* OutRateLimitHost */
__ICMP_MIB_MAX
};
@@ -112,6 +114,7 @@ enum
ICMP6_MIB_OUTMSGS, /* OutMsgs */
ICMP6_MIB_OUTERRORS, /* OutErrors */
ICMP6_MIB_CSUMERRORS, /* InCsumErrors */
+ ICMP6_MIB_RATELIMITHOST, /* OutRateLimitHost */
__ICMP6_MIB_MAX
};
diff --git a/init/Kconfig b/init/Kconfig
index 18f0bf50c468..77a4318a6043 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1942,7 +1942,7 @@ config RUST
depends on !MODVERSIONS
depends on !GCC_PLUGINS
depends on !RANDSTRUCT
- depends on !DEBUG_INFO_BTF
+ depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
select CONSTRUCTORS
help
Enables Rust support in the kernel.
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 3a12e6b400a2..02242614dcc7 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o
endif
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
+obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
obj-$(CONFIG_BPF_PRELOAD) += preload/
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index b39a46e8fb08..35f4138a54dc 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -568,8 +568,8 @@ static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_att
nbuckets = max_t(u32, 2, nbuckets);
smap->bucket_log = ilog2(nbuckets);
- smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
- GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
+ smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
+ nbuckets, GFP_USER | __GFP_NOWARN);
if (!smap->buckets) {
bpf_map_area_free(smap);
return ERR_PTR(-ENOMEM);
@@ -580,8 +580,8 @@ static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_att
raw_spin_lock_init(&smap->buckets[i].lock);
}
- smap->elem_size =
- sizeof(struct bpf_local_storage_elem) + attr->value_size;
+ smap->elem_size = offsetof(struct bpf_local_storage_elem,
+ sdata.data[attr->value_size]);
return smap;
}
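
The offsetof() form above sizes a header plus its trailing flexible array in a single expression. A self-contained sketch of the same idiom, with illustrative names:

    #include <stddef.h>

    struct elem {
            unsigned long hdr[2];   /* fixed-size bookkeeping fields */
            char data[];            /* flexible array sized per map value */
    };

    /* Equivalent to sizeof(header) + value_size, computed in one step. */
    static size_t elem_size(size_t value_size)
    {
            return offsetof(struct elem, data[value_size]);
    }
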
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index b7017cae6fd1..fa22ec79ac0e 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -337,6 +337,12 @@ const char *btf_type_str(const struct btf_type *t)
#define BTF_SHOW_NAME_SIZE 80
/*
+ * The suffix of a type that indicates it cannot alias another type when
+ * comparing BTF IDs for kfunc invocations.
+ */
+#define NOCAST_ALIAS_SUFFIX "___init"
+
+/*
* Common data to all BTF show operations. Private show functions can add
* their own data to a structure containing a struct btf_show and consult it
* in the show callback. See btf_type_show() below.
@@ -1397,12 +1403,18 @@ __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
if (!bpf_verifier_log_needed(log))
return;
- /* btf verifier prints all types it is processing via
- * btf_verifier_log_type(..., fmt = NULL).
- * Skip those prints for in-kernel BTF verification.
- */
- if (log->level == BPF_LOG_KERNEL && !fmt)
- return;
+ if (log->level == BPF_LOG_KERNEL) {
+ /* btf verifier prints all types it is processing via
+ * btf_verifier_log_type(..., fmt = NULL).
+ * Skip those prints for in-kernel BTF verification.
+ */
+ if (!fmt)
+ return;
+
+ /* Skip logging when loading module BTF with mismatches permitted */
+ if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
+ return;
+ }
__btf_verifier_log(log, "[%u] %s %s%s",
env->log_type_id,
@@ -1441,8 +1453,15 @@ static void btf_verifier_log_member(struct btf_verifier_env *env,
if (!bpf_verifier_log_needed(log))
return;
- if (log->level == BPF_LOG_KERNEL && !fmt)
- return;
+ if (log->level == BPF_LOG_KERNEL) {
+ if (!fmt)
+ return;
+
+ /* Skip logging when loading module BTF with mismatches permitted */
+ if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
+ return;
+ }
+
/* The CHECK_META phase already did a btf dump.
*
* If member is logged again, it must hit an error in
@@ -3228,7 +3247,7 @@ struct btf_field_info {
struct {
const char *node_name;
u32 value_btf_id;
- } list_head;
+ } graph_root;
};
};
@@ -3305,12 +3324,14 @@ static const char *btf_find_decl_tag_value(const struct btf *btf,
return NULL;
}
-static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
- const struct btf_type *t, int comp_idx,
- u32 off, int sz, struct btf_field_info *info)
+static int
+btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
+ const struct btf_type *t, int comp_idx, u32 off,
+ int sz, struct btf_field_info *info,
+ enum btf_field_type head_type)
{
+ const char *node_field_name;
const char *value_type;
- const char *list_node;
s32 id;
if (!__btf_type_is_struct(t))
@@ -3320,26 +3341,32 @@ static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
if (!value_type)
return -EINVAL;
- list_node = strstr(value_type, ":");
- if (!list_node)
+ node_field_name = strstr(value_type, ":");
+ if (!node_field_name)
return -EINVAL;
- value_type = kstrndup(value_type, list_node - value_type, GFP_KERNEL | __GFP_NOWARN);
+ value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
if (!value_type)
return -ENOMEM;
id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
kfree(value_type);
if (id < 0)
return id;
- list_node++;
- if (str_is_empty(list_node))
+ node_field_name++;
+ if (str_is_empty(node_field_name))
return -EINVAL;
- info->type = BPF_LIST_HEAD;
+ info->type = head_type;
info->off = off;
- info->list_head.value_btf_id = id;
- info->list_head.node_name = list_node;
+ info->graph_root.value_btf_id = id;
+ info->graph_root.node_name = node_field_name;
return BTF_FIELD_FOUND;
}
+#define field_mask_test_name(field_type, field_type_str) \
+ if (field_mask & field_type && !strcmp(name, field_type_str)) { \
+ type = field_type; \
+ goto end; \
+ }
+
static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
int *align, int *sz)
{
@@ -3363,18 +3390,11 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
goto end;
}
}
- if (field_mask & BPF_LIST_HEAD) {
- if (!strcmp(name, "bpf_list_head")) {
- type = BPF_LIST_HEAD;
- goto end;
- }
- }
- if (field_mask & BPF_LIST_NODE) {
- if (!strcmp(name, "bpf_list_node")) {
- type = BPF_LIST_NODE;
- goto end;
- }
- }
+ field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
+ field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
+ field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
+ field_mask_test_name(BPF_RB_NODE, "bpf_rb_node");
+
/* Only return BPF_KPTR when all other types with matchable names fail */
if (field_mask & BPF_KPTR) {
type = BPF_KPTR_REF;
@@ -3387,6 +3407,8 @@ end:
return type;
}
+#undef field_mask_test_name
+
static int btf_find_struct_field(const struct btf *btf,
const struct btf_type *t, u32 field_mask,
struct btf_field_info *info, int info_cnt)
@@ -3419,6 +3441,7 @@ static int btf_find_struct_field(const struct btf *btf,
case BPF_SPIN_LOCK:
case BPF_TIMER:
case BPF_LIST_NODE:
+ case BPF_RB_NODE:
ret = btf_find_struct(btf, member_type, off, sz, field_type,
idx < info_cnt ? &info[idx] : &tmp);
if (ret < 0)
@@ -3432,8 +3455,11 @@ static int btf_find_struct_field(const struct btf *btf,
return ret;
break;
case BPF_LIST_HEAD:
- ret = btf_find_list_head(btf, t, member_type, i, off, sz,
- idx < info_cnt ? &info[idx] : &tmp);
+ case BPF_RB_ROOT:
+ ret = btf_find_graph_root(btf, t, member_type,
+ i, off, sz,
+ idx < info_cnt ? &info[idx] : &tmp,
+ field_type);
if (ret < 0)
return ret;
break;
@@ -3480,6 +3506,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
case BPF_SPIN_LOCK:
case BPF_TIMER:
case BPF_LIST_NODE:
+ case BPF_RB_NODE:
ret = btf_find_struct(btf, var_type, off, sz, field_type,
idx < info_cnt ? &info[idx] : &tmp);
if (ret < 0)
@@ -3493,8 +3520,11 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
return ret;
break;
case BPF_LIST_HEAD:
- ret = btf_find_list_head(btf, var, var_type, -1, off, sz,
- idx < info_cnt ? &info[idx] : &tmp);
+ case BPF_RB_ROOT:
+ ret = btf_find_graph_root(btf, var, var_type,
+ -1, off, sz,
+ idx < info_cnt ? &info[idx] : &tmp,
+ field_type);
if (ret < 0)
return ret;
break;
@@ -3596,21 +3626,25 @@ end_btf:
return ret;
}
-static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
- struct btf_field_info *info)
+static int btf_parse_graph_root(const struct btf *btf,
+ struct btf_field *field,
+ struct btf_field_info *info,
+ const char *node_type_name,
+ size_t node_type_align)
{
const struct btf_type *t, *n = NULL;
const struct btf_member *member;
u32 offset;
int i;
- t = btf_type_by_id(btf, info->list_head.value_btf_id);
+ t = btf_type_by_id(btf, info->graph_root.value_btf_id);
/* We've already checked that value_btf_id is a struct type. We
* just need to figure out the offset of the list_node, and
* verify its type.
*/
for_each_member(i, t, member) {
- if (strcmp(info->list_head.node_name, __btf_name_by_offset(btf, member->name_off)))
+ if (strcmp(info->graph_root.node_name,
+ __btf_name_by_offset(btf, member->name_off)))
continue;
/* Invalid BTF, two members with same name */
if (n)
@@ -3618,24 +3652,38 @@ static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
n = btf_type_by_id(btf, member->type);
if (!__btf_type_is_struct(n))
return -EINVAL;
- if (strcmp("bpf_list_node", __btf_name_by_offset(btf, n->name_off)))
+ if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
return -EINVAL;
offset = __btf_member_bit_offset(n, member);
if (offset % 8)
return -EINVAL;
offset /= 8;
- if (offset % __alignof__(struct bpf_list_node))
+ if (offset % node_type_align)
return -EINVAL;
- field->list_head.btf = (struct btf *)btf;
- field->list_head.value_btf_id = info->list_head.value_btf_id;
- field->list_head.node_offset = offset;
+ field->graph_root.btf = (struct btf *)btf;
+ field->graph_root.value_btf_id = info->graph_root.value_btf_id;
+ field->graph_root.node_offset = offset;
}
if (!n)
return -ENOENT;
return 0;
}
+static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
+ struct btf_field_info *info)
+{
+ return btf_parse_graph_root(btf, field, info, "bpf_list_node",
+ __alignof__(struct bpf_list_node));
+}
+
+static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
+ struct btf_field_info *info)
+{
+ return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
+ __alignof__(struct bpf_rb_node));
+}
+
struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
u32 field_mask, u32 value_size)
{
@@ -3698,7 +3746,13 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
if (ret < 0)
goto end;
break;
+ case BPF_RB_ROOT:
+ ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
+ if (ret < 0)
+ goto end;
+ break;
case BPF_LIST_NODE:
+ case BPF_RB_NODE:
break;
default:
ret = -EFAULT;
@@ -3707,8 +3761,33 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
rec->cnt++;
}
- /* bpf_list_head requires bpf_spin_lock */
- if (btf_record_has_field(rec, BPF_LIST_HEAD) && rec->spin_lock_off < 0) {
+ /* bpf_{list_head, rb_root} require bpf_spin_lock */
+ if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
+ btf_record_has_field(rec, BPF_RB_ROOT)) && rec->spin_lock_off < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* need collection identity for non-owning refs before allowing this
+ *
+ * Consider a node type w/ both list and rb_node fields:
+ * struct node {
+ * struct bpf_list_node l;
+ * struct bpf_rb_node r;
+ * }
+ *
+ * Used like so:
+ * struct node *n = bpf_obj_new(....);
+ * bpf_list_push_front(&list_head, &n->l);
+ * bpf_rbtree_remove(&rb_root, &n->r);
+ *
+ * It should not be possible to rbtree_remove the node since it hasn't
+ * been added to a tree. But push_front converts n to a non-owning
+ * reference, and rbtree_remove accepts the non-owning reference to
+ * a type w/ bpf_rb_node field.
+ */
+ if (btf_record_has_field(rec, BPF_LIST_NODE) &&
+ btf_record_has_field(rec, BPF_RB_NODE)) {
ret = -EINVAL;
goto end;
}
@@ -3719,62 +3798,76 @@ end:
return ERR_PTR(ret);
}
+#define GRAPH_ROOT_MASK (BPF_LIST_HEAD | BPF_RB_ROOT)
+#define GRAPH_NODE_MASK (BPF_LIST_NODE | BPF_RB_NODE)
+
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
{
int i;
- /* There are two owning types, kptr_ref and bpf_list_head. The former
- * only supports storing kernel types, which can never store references
- * to program allocated local types, atleast not yet. Hence we only need
- * to ensure that bpf_list_head ownership does not form cycles.
+ /* There are three types that signify ownership of some other type:
+ * kptr_ref, bpf_list_head, bpf_rb_root.
+ * kptr_ref only supports storing kernel types, which can't store
+ * references to program allocated local types.
+ *
+ * Hence we only need to ensure that bpf_{list_head,rb_root} ownership
+ * does not form cycles.
*/
- if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_LIST_HEAD))
+ if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & GRAPH_ROOT_MASK))
return 0;
for (i = 0; i < rec->cnt; i++) {
struct btf_struct_meta *meta;
u32 btf_id;
- if (!(rec->fields[i].type & BPF_LIST_HEAD))
+ if (!(rec->fields[i].type & GRAPH_ROOT_MASK))
continue;
- btf_id = rec->fields[i].list_head.value_btf_id;
+ btf_id = rec->fields[i].graph_root.value_btf_id;
meta = btf_find_struct_meta(btf, btf_id);
if (!meta)
return -EFAULT;
- rec->fields[i].list_head.value_rec = meta->record;
+ rec->fields[i].graph_root.value_rec = meta->record;
- if (!(rec->field_mask & BPF_LIST_NODE))
+ /* We need to set value_rec for all root types, but no need
+ * to check ownership cycle for a type unless it's also a
+ * node type.
+ */
+ if (!(rec->field_mask & GRAPH_NODE_MASK))
continue;
/* We need to ensure ownership acyclicity among all types. The
* proper way to do it would be to topologically sort all BTF
* IDs based on the ownership edges, since there can be multiple
- * bpf_list_head in a type. Instead, we use the following
- * reasoning:
+ * bpf_{list_head,rb_node} in a type. Instead, we use the
+ * following reasoning:
*
* - A type can only be owned by another type in user BTF if it
- * has a bpf_list_node.
+ * has a bpf_{list,rb}_node. Let's call these node types.
* - A type can only _own_ another type in user BTF if it has a
- * bpf_list_head.
+ * bpf_{list_head,rb_root}. Let's call these root types.
*
- * We ensure that if a type has both bpf_list_head and
- * bpf_list_node, its element types cannot be owning types.
+ * We ensure that if a type is both a root and node, its
+ * element types cannot be root types.
*
* To ensure acyclicity:
*
- * When A only has bpf_list_head, ownership chain can be:
+ * When A is a root type but not a node, its ownership
+ * chain can be:
* A -> B -> C
* Where:
- * - B has both bpf_list_head and bpf_list_node.
- * - C only has bpf_list_node.
+ * - A is a root, e.g. has bpf_rb_root.
+ * - B is both a root and node, e.g. has bpf_rb_node and
+ * bpf_list_head.
+ * - C is only a node, e.g. has bpf_list_node.
*
- * When A has both bpf_list_head and bpf_list_node, some other
- * type already owns it in the BTF domain, hence it can not own
- * another owning type through any of the bpf_list_head edges.
+ * When A is both a root and node, some other type already
+ * owns it in the BTF domain, hence it can not own
+ * another root type through any of the ownership edges.
* A -> B
* Where:
- * - B only has bpf_list_node.
+ * - A is both a root and a node.
+ * - B is only a node.
*/
- if (meta->record->field_mask & BPF_LIST_HEAD)
+ if (meta->record->field_mask & GRAPH_ROOT_MASK)
return -ELOOP;
}
return 0;
@@ -5236,6 +5329,8 @@ static const char *alloc_obj_fields[] = {
"bpf_spin_lock",
"bpf_list_head",
"bpf_list_node",
+ "bpf_rb_root",
+ "bpf_rb_node",
};
static struct btf_struct_metas *
@@ -5309,7 +5404,8 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
type = &tab->types[tab->cnt];
type->btf_id = i;
- record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE, t->size);
+ record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
+ BPF_RB_ROOT | BPF_RB_NODE, t->size);
/* The record cannot be unset, treat it as an error if so */
if (IS_ERR_OR_NULL(record)) {
ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
@@ -5573,6 +5669,7 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
if (!ctx_struct)
/* should not happen */
return NULL;
+again:
ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
if (!ctx_tname) {
/* should not happen */
@@ -5586,8 +5683,16 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
* int socket_filter_bpf_prog(struct __sk_buff *skb)
* { // no fields of skb are ever used }
*/
- if (strcmp(ctx_tname, tname))
- return NULL;
+ if (strcmp(ctx_tname, tname)) {
+ /* bpf_user_pt_regs_t is a typedef, so resolve it to
+ * underlying struct and check name again
+ */
+ if (!btf_type_is_modifier(ctx_struct))
+ return NULL;
+ while (btf_type_is_modifier(ctx_struct))
+ ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type);
+ goto again;
+ }
return ctx_type;
}
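
Concretely, the retry handles program context types declared through a typedef rather than a struct tag. On arm64, for example, the tracing context is defined along these lines (quoted from memory; the arch uAPI header is authoritative):

    /* arch/arm64 uapi: the context name is a typedef over a struct. */
    typedef struct user_pt_regs bpf_user_pt_regs_t;

Resolving the modifier chain lets the name comparison succeed against the underlying struct.
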
@@ -6433,6 +6538,18 @@ static int __get_type_size(struct btf *btf, u32 btf_id,
return -EINVAL;
}
+static u8 __get_type_fmodel_flags(const struct btf_type *t)
+{
+ u8 flags = 0;
+
+ if (__btf_type_is_struct(t))
+ flags |= BTF_FMODEL_STRUCT_ARG;
+ if (btf_type_is_signed_int(t))
+ flags |= BTF_FMODEL_SIGNED_ARG;
+
+ return flags;
+}
+
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
const struct btf_type *func,
@@ -6453,6 +6570,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
m->arg_flags[i] = 0;
}
m->ret_size = 8;
+ m->ret_flags = 0;
m->nr_args = MAX_BPF_FUNC_REG_ARGS;
return 0;
}
@@ -6472,6 +6590,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
return -EINVAL;
}
m->ret_size = ret;
+ m->ret_flags = __get_type_fmodel_flags(t);
for (i = 0; i < nargs; i++) {
if (i == nargs - 1 && args[i].type == 0) {
@@ -6496,7 +6615,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
return -EINVAL;
}
m->arg_size[i] = ret;
- m->arg_flags[i] = __btf_type_is_struct(t) ? BTF_FMODEL_STRUCT_ARG : 0;
+ m->arg_flags[i] = __get_type_fmodel_flags(t);
}
m->nr_args = nargs;
return 0;
@@ -7260,11 +7379,14 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op,
}
btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
if (IS_ERR(btf)) {
- pr_warn("failed to validate module [%s] BTF: %ld\n",
- mod->name, PTR_ERR(btf));
kfree(btf_mod);
- if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
+ if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
+ pr_warn("failed to validate module [%s] BTF: %ld\n",
+ mod->name, PTR_ERR(btf));
err = PTR_ERR(btf);
+ } else {
+ pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
+ }
goto out;
}
err = btf_alloc_id(btf);
@@ -8210,3 +8332,119 @@ out:
}
return err;
}
+
+bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off)
+{
+ struct btf *btf = reg->btf;
+ const struct btf_type *walk_type, *safe_type;
+ const char *tname;
+ char safe_tname[64];
+ long ret, safe_id;
+ const struct btf_member *member, *m_walk = NULL;
+ u32 i;
+ const char *walk_name;
+
+ walk_type = btf_type_by_id(btf, reg->btf_id);
+ if (!walk_type)
+ return false;
+
+ tname = btf_name_by_offset(btf, walk_type->name_off);
+
+ ret = snprintf(safe_tname, sizeof(safe_tname), "%s__safe_fields", tname);
+ if (ret < 0)
+ return false;
+
+ safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
+ if (safe_id < 0)
+ return false;
+
+ safe_type = btf_type_by_id(btf, safe_id);
+ if (!safe_type)
+ return false;
+
+ for_each_member(i, walk_type, member) {
+ u32 moff;
+
+ /* We're looking for the PTR_TO_BTF_ID member in the struct
+ * type we're walking which matches the specified offset.
+ * Below, we'll iterate over the fields in the safe variant of
+ * the struct and see if any of them has a matching type /
+ * name.
+ */
+ moff = __btf_member_bit_offset(walk_type, member) / 8;
+ if (off == moff) {
+ m_walk = member;
+ break;
+ }
+ }
+ if (m_walk == NULL)
+ return false;
+
+ walk_name = __btf_name_by_offset(btf, m_walk->name_off);
+ for_each_member(i, safe_type, member) {
+ const char *m_name = __btf_name_by_offset(btf, member->name_off);
+
+ /* If we match on both type and name, the field is considered trusted. */
+ if (m_walk->type == member->type && !strcmp(walk_name, m_name))
+ return true;
+ }
+
+ return false;
+}
+
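
The convention this implements: a parallel type named <type>__safe_fields lists the members that may be trusted when walked. A hedged illustration of what such a declaration looks like (the task_struct example mirrors the verifier's usage but is shown here only as a sketch):

    /* Members listed here are treated as trusted when walking task_struct. */
    struct task_struct__safe_fields {
            const cpumask_t *cpus_ptr;
    };
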
+bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
+ const struct btf *reg_btf, u32 reg_id,
+ const struct btf *arg_btf, u32 arg_id)
+{
+ const char *reg_name, *arg_name, *search_needle;
+ const struct btf_type *reg_type, *arg_type;
+ int reg_len, arg_len, cmp_len;
+ size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);
+
+ reg_type = btf_type_by_id(reg_btf, reg_id);
+ if (!reg_type)
+ return false;
+
+ arg_type = btf_type_by_id(arg_btf, arg_id);
+ if (!arg_type)
+ return false;
+
+ reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
+ arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
+
+ reg_len = strlen(reg_name);
+ arg_len = strlen(arg_name);
+
+ /* Exactly one of the two type names may be suffixed with ___init, so
+ * if the strings are the same size, they can't possibly be no-cast
+ * aliases of one another. If you have two of the same type names, e.g.
+ * they're both nf_conn___init, it would be improper to return true
+ * because they are _not_ no-cast aliases, they are the same type.
+ */
+ if (reg_len == arg_len)
+ return false;
+
+ /* Either of the two names must be the other name, suffixed with ___init. */
+ if ((reg_len != arg_len + pattern_len) &&
+ (arg_len != reg_len + pattern_len))
+ return false;
+
+ if (reg_len < arg_len) {
+ search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX);
+ cmp_len = reg_len;
+ } else {
+ search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX);
+ cmp_len = arg_len;
+ }
+
+ if (!search_needle)
+ return false;
+
+ /* ___init suffix must come at the end of the name */
+ if (*(search_needle + pattern_len) != '\0')
+ return false;
+
+ return !strncmp(reg_name, arg_name, cmp_len);
+}
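
In practice this lets a kfunc parameter typed as, say, struct nf_conn___init also accept a struct nf_conn argument (and vice versa), while two occurrences of the same name never alias. An illustrative sketch, where id_of() is a hypothetical BTF-ID lookup helper:

    btf_type_ids_nocast_alias(log, btf, id_of("nf_conn"),
                              btf, id_of("nf_conn___init"));       /* true */
    btf_type_ids_nocast_alias(log, btf, id_of("nf_conn___init"),
                              btf, id_of("nf_conn___init"));       /* false: same type */
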
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 430c66d59ec7..933869983e2a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -35,6 +35,7 @@
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/bpf_mem_alloc.h>
+#include <linux/memcontrol.h>
#include <asm/barrier.h>
#include <asm/unaligned.h>
@@ -87,7 +88,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+ gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
struct bpf_prog_aux *aux;
struct bpf_prog *fp;
@@ -96,12 +97,12 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
if (fp == NULL)
return NULL;
- aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+ aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
if (aux == NULL) {
vfree(fp);
return NULL;
}
- fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+ fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
if (!fp->active) {
vfree(fp);
kfree(aux);
@@ -126,7 +127,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+ gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
struct bpf_prog *prog;
int cpu;
@@ -159,7 +160,7 @@ int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
sizeof(*prog->aux->jited_linfo),
- GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+ bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
if (!prog->aux->jited_linfo)
return -ENOMEM;
@@ -234,7 +235,7 @@ void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+ gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
struct bpf_prog *fp;
u32 pages;
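
The bpf_memcg_flags() helper from <linux/memcontrol.h> is what makes this accounting switchable at boot: booting with cgroup.memory=nobpf leaves the flags untouched, otherwise __GFP_ACCOUNT is added. Approximately (a sketch of the behavior, not the verbatim definition):

    static inline gfp_t bpf_memcg_flags(gfp_t flags)
    {
            /* memcg_bpf_enabled() is false when booted with cgroup.memory=nobpf */
            if (memcg_bpf_enabled())
                    return flags | __GFP_ACCOUNT;
            return flags;
    }
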
@@ -2094,6 +2095,14 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
if (fp->kprobe_override)
return false;
+ /* XDP programs inserted into maps are not guaranteed to run on
+ * a particular netdev (and can run outside driver context entirely
+ * in the case of devmap and cpumap). Until device checks
+ * are implemented, prohibit adding dev-bound programs to program maps.
+ */
+ if (bpf_prog_is_dev_bound(fp->aux))
+ return false;
+
spin_lock(&map->owner.lock);
if (!map->owner.type) {
/* There's no owner yet where we could check for
@@ -2180,7 +2189,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
* valid program, which in this case would simply not
* be JITed, but falls back to the interpreter.
*/
- if (!bpf_prog_is_dev_bound(fp->aux)) {
+ if (!bpf_prog_is_offloaded(fp->aux)) {
*err = bpf_prog_alloc_jited_linfo(fp);
if (*err)
return fp;
@@ -2552,7 +2561,7 @@ static void bpf_prog_free_deferred(struct work_struct *work)
bpf_free_used_maps(aux);
bpf_free_used_btfs(aux);
if (bpf_prog_is_dev_bound(aux))
- bpf_prog_offload_destroy(aux->prog);
+ bpf_prog_dev_bound_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
if (aux->prog->has_callchain_buf)
put_callchain_buffers();
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e0b2d016f0bf..d2110c1f6fa6 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -361,7 +361,7 @@ static int cpu_map_kthread_run(void *data)
/* Support running another XDP prog on this CPU */
nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
if (nframes) {
- m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
+ m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
if (unlikely(m == 0)) {
for (i = 0; i < nframes; i++)
skbs[i] = NULL; /* effect: xdp_return_frame */
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
new file mode 100644
index 000000000000..52b981512a35
--- /dev/null
+++ b/kernel/bpf/cpumask.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Meta, Inc */
+#include <linux/bpf.h>
+#include <linux/bpf_mem_alloc.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/cpumask.h>
+
+/**
+ * struct bpf_cpumask - refcounted BPF cpumask wrapper structure
+ * @cpumask: The actual cpumask embedded in the struct.
+ * @usage: Object reference counter. When the refcount goes to 0, the
+ * memory is released back to the BPF allocator, which provides
+ * RCU safety.
+ *
+ * Note that we explicitly embed a cpumask_t rather than a cpumask_var_t. This
+ * is done to avoid confusing the verifier due to the typedef of cpumask_var_t
+ * changing depending on whether CONFIG_CPUMASK_OFFSTACK is defined or not. See
+ * the details in <linux/cpumask.h>. The consequence is that this structure is
+ * likely a bit larger than it needs to be when CONFIG_CPUMASK_OFFSTACK is
+ * defined due to embedding the whole NR_CPUS-size bitmap, but the extra memory
+ * overhead is minimal. For the more typical case of CONFIG_CPUMASK_OFFSTACK
+ * not being defined, the structure is the same size regardless.
+ */
+struct bpf_cpumask {
+ cpumask_t cpumask;
+ refcount_t usage;
+};
+
+static struct bpf_mem_alloc bpf_cpumask_ma;
+
+static bool cpu_valid(u32 cpu)
+{
+ return cpu < nr_cpu_ids;
+}
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+ "Global kfuncs as their definitions will be in BTF");
+
+/**
+ * bpf_cpumask_create() - Create a mutable BPF cpumask.
+ *
+ * Allocates a cpumask that can be queried, mutated, acquired, and released by
+ * a BPF program. The cpumask returned by this function must either be embedded
+ * in a map as a kptr, or freed with bpf_cpumask_release().
+ *
+ * bpf_cpumask_create() allocates memory using the BPF memory allocator, and
+ * will not block. It may return NULL if no memory is available.
+ */
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
+{
+ struct bpf_cpumask *cpumask;
+
+ /* cpumask must be the first element so struct bpf_cpumask can be cast to struct cpumask. */
+ BUILD_BUG_ON(offsetof(struct bpf_cpumask, cpumask) != 0);
+
+ cpumask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*cpumask));
+ if (!cpumask)
+ return NULL;
+
+ memset(cpumask, 0, sizeof(*cpumask));
+ refcount_set(&cpumask->usage, 1);
+
+ return cpumask;
+}
+
+/**
+ * bpf_cpumask_acquire() - Acquire a reference to a BPF cpumask.
+ * @cpumask: The BPF cpumask being acquired. The cpumask must be a trusted
+ * pointer.
+ *
+ * Acquires a reference to a BPF cpumask. The cpumask returned by this function
+ * must either be embedded in a map as a kptr, or freed with
+ * bpf_cpumask_release().
+ */
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
+{
+ refcount_inc(&cpumask->usage);
+ return cpumask;
+}
+
+/**
+ * bpf_cpumask_kptr_get() - Attempt to acquire a reference to a BPF cpumask
+ * stored in a map.
+ * @cpumaskp: A pointer to a BPF cpumask map value.
+ *
+ * Attempts to acquire a reference to a BPF cpumask stored in a map value. The
+ * cpumask returned by this function must either be embedded in a map as a
+ * kptr, or freed with bpf_cpumask_release(). This function may return NULL if
+ * no BPF cpumask was found in the specified map value.
+ */
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
+{
+ struct bpf_cpumask *cpumask;
+
+ /* The BPF memory allocator frees memory backing its caches in an RCU
+ * callback. Thus, we can safely use RCU to ensure that the cpumask is
+ * safe to read.
+ */
+ rcu_read_lock();
+
+ cpumask = READ_ONCE(*cpumaskp);
+ if (cpumask && !refcount_inc_not_zero(&cpumask->usage))
+ cpumask = NULL;
+
+ rcu_read_unlock();
+ return cpumask;
+}
+
+/**
+ * bpf_cpumask_release() - Release a previously acquired BPF cpumask.
+ * @cpumask: The cpumask being released.
+ *
+ * Releases a previously acquired reference to a BPF cpumask. When the final
+ * reference of the BPF cpumask has been released, it is subsequently freed in
+ * an RCU callback in the BPF memory allocator.
+ */
+__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
+{
+ if (!cpumask)
+ return;
+
+ if (refcount_dec_and_test(&cpumask->usage)) {
+ migrate_disable();
+ bpf_mem_free(&bpf_cpumask_ma, cpumask);
+ migrate_enable();
+ }
+}
+
+/**
+ * bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask.
+ * @cpumask: The cpumask being queried.
+ *
+ * Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
+ * pointer may be safely passed to this function.
+ */
+__bpf_kfunc u32 bpf_cpumask_first(const struct cpumask *cpumask)
+{
+ return cpumask_first(cpumask);
+}
+
+/**
+ * bpf_cpumask_first_zero() - Get the index of the first unset bit in the
+ * cpumask.
+ * @cpumask: The cpumask being queried.
+ *
+ * Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
+ * pointer may be safely passed to this function.
+ */
+__bpf_kfunc u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
+{
+ return cpumask_first_zero(cpumask);
+}
+
+/**
+ * bpf_cpumask_set_cpu() - Set a bit for a CPU in a BPF cpumask.
+ * @cpu: The CPU to be set in the cpumask.
+ * @cpumask: The BPF cpumask in which a bit is being set.
+ */
+__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+{
+ if (!cpu_valid(cpu))
+ return;
+
+ cpumask_set_cpu(cpu, (struct cpumask *)cpumask);
+}
+
+/**
+ * bpf_cpumask_clear_cpu() - Clear a bit for a CPU in a BPF cpumask.
+ * @cpu: The CPU to be cleared from the cpumask.
+ * @cpumask: The BPF cpumask in which a bit is being cleared.
+ */
+__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+{
+ if (!cpu_valid(cpu))
+ return;
+
+ cpumask_clear_cpu(cpu, (struct cpumask *)cpumask);
+}
+
+/**
+ * bpf_cpumask_test_cpu() - Test whether a CPU is set in a cpumask.
+ * @cpu: The CPU being queried for.
+ * @cpumask: The cpumask being queried for containing a CPU.
+ *
+ * Return:
+ * * true - @cpu is set in the cpumask
+ * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
+ */
+__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
+{
+ if (!cpu_valid(cpu))
+ return false;
+
+ return cpumask_test_cpu(cpu, (struct cpumask *)cpumask);
+}
+
+/**
+ * bpf_cpumask_test_and_set_cpu() - Atomically test and set a CPU in a BPF cpumask.
+ * @cpu: The CPU being set and queried for.
+ * @cpumask: The BPF cpumask being set and queried for containing a CPU.
+ *
+ * Return:
+ * * true - @cpu is set in the cpumask
+ * * false - @cpu was not set in the cpumask, or @cpu is invalid.
+ */
+__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+{
+ if (!cpu_valid(cpu))
+ return false;
+
+ return cpumask_test_and_set_cpu(cpu, (struct cpumask *)cpumask);
+}
+
+/**
+ * bpf_cpumask_test_and_clear_cpu() - Atomically test and clear a CPU in a BPF
+ * cpumask.
+ * @cpu: The CPU being cleared and queried for.
+ * @cpumask: The BPF cpumask being cleared and queried for containing a CPU.
+ *
+ * Return:
+ * * true - @cpu is set in the cpumask
+ * * false - @cpu was not set in the cpumask, or @cpu is invalid.
+ */
+__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+{
+ if (!cpu_valid(cpu))
+ return false;
+
+ return cpumask_test_and_clear_cpu(cpu, (struct cpumask *)cpumask);
+}
+
+/**
+ * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
+ * @cpumask: The BPF cpumask having all of its bits set.
+ */
+__bpf_kfunc void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
+{
+ cpumask_setall((struct cpumask *)cpumask);
+}
+
+/**
+ * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
+ * @cpumask: The BPF cpumask being cleared.
+ */
+__bpf_kfunc void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
+{
+ cpumask_clear((struct cpumask *)cpumask);
+}
+
+/**
+ * bpf_cpumask_and() - AND two cpumasks and store the result.
+ * @dst: The BPF cpumask where the result is being stored.
+ * @src1: The first input.
+ * @src2: The second input.
+ *
+ * Return:
+ * * true - @dst has at least one bit set following the operation
+ * * false - @dst is empty following the operation
+ *
+ * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
+ */
+__bpf_kfunc bool bpf_cpumask_and(struct bpf_cpumask *dst,
+ const struct cpumask *src1,
+ const struct cpumask *src2)
+{
+ return cpumask_and((struct cpumask *)dst, src1, src2);
+}
+
+/**
+ * bpf_cpumask_or() - OR two cpumasks and store the result.
+ * @dst: The BPF cpumask where the result is being stored.
+ * @src1: The first input.
+ * @src2: The second input.
+ *
+ * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
+ */
+__bpf_kfunc void bpf_cpumask_or(struct bpf_cpumask *dst,
+ const struct cpumask *src1,
+ const struct cpumask *src2)
+{
+ cpumask_or((struct cpumask *)dst, src1, src2);
+}
+
+/**
+ * bpf_cpumask_xor() - XOR two cpumasks and store the result.
+ * @dst: The BPF cpumask where the result is being stored.
+ * @src1: The first input.
+ * @src2: The second input.
+ *
+ * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
+ */
+__bpf_kfunc void bpf_cpumask_xor(struct bpf_cpumask *dst,
+ const struct cpumask *src1,
+ const struct cpumask *src2)
+{
+ cpumask_xor((struct cpumask *)dst, src1, src2);
+}
+
+/**
+ * bpf_cpumask_equal() - Check two cpumasks for equality.
+ * @src1: The first input.
+ * @src2: The second input.
+ *
+ * Return:
+ * * true - @src1 and @src2 have the same bits set.
+ * * false - @src1 and @src2 differ in at least one bit.
+ *
+ * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
+ */
+__bpf_kfunc bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
+{
+ return cpumask_equal(src1, src2);
+}
+
+/**
+ * bpf_cpumask_intersects() - Check two cpumasks for overlap.
+ * @src1: The first input.
+ * @src2: The second input.
+ *
+ * Return:
+ * * true - @src1 and @src2 have at least one of the same bits set.
+ * * false - @src1 and @src2 don't have any of the same bits set.
+ *
+ * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
+ */
+__bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
+{
+ return cpumask_intersects(src1, src2);
+}
+
+/**
+ * bpf_cpumask_subset() - Check if a cpumask is a subset of another.
+ * @src1: The first cpumask being checked as a subset.
+ * @src2: The second cpumask being checked as a superset.
+ *
+ * Return:
+ * * true - All of the bits of @src1 are set in @src2.
+ * * false - At least one bit in @src1 is not set in @src2.
+ *
+ * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
+ */
+__bpf_kfunc bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
+{
+ return cpumask_subset(src1, src2);
+}
+
+/**
+ * bpf_cpumask_empty() - Check if a cpumask is empty.
+ * @cpumask: The cpumask being checked.
+ *
+ * Return:
+ * * true - None of the bits in @cpumask are set.
+ * * false - At least one bit in @cpumask is set.
+ *
+ * A struct bpf_cpumask pointer may be safely passed to @cpumask.
+ */
+__bpf_kfunc bool bpf_cpumask_empty(const struct cpumask *cpumask)
+{
+ return cpumask_empty(cpumask);
+}
+
+/**
+ * bpf_cpumask_full() - Check if a cpumask has all bits set.
+ * @cpumask: The cpumask being checked.
+ *
+ * Return:
+ * * true - All of the bits in @cpumask are set.
+ * * false - At least one bit in @cpumask is cleared.
+ *
+ * A struct bpf_cpumask pointer may be safely passed to @cpumask.
+ */
+__bpf_kfunc bool bpf_cpumask_full(const struct cpumask *cpumask)
+{
+ return cpumask_full(cpumask);
+}
+
+/**
+ * bpf_cpumask_copy() - Copy the contents of a cpumask into a BPF cpumask.
+ * @dst: The BPF cpumask being copied into.
+ * @src: The cpumask being copied.
+ *
+ * A struct bpf_cpumask pointer may be safely passed to @src.
+ */
+__bpf_kfunc void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
+{
+ cpumask_copy((struct cpumask *)dst, src);
+}
+
+/**
+ * bpf_cpumask_any() - Return a random set CPU from a cpumask.
+ * @cpumask: The cpumask being queried.
+ *
+ * Return:
+ * * A random set bit within [0, num_cpus) if at least one bit is set.
+ * * >= num_cpus if no bit is set.
+ *
+ * A struct bpf_cpumask pointer may be safely passed to @cpumask.
+ */
+__bpf_kfunc u32 bpf_cpumask_any(const struct cpumask *cpumask)
+{
+ return cpumask_any(cpumask);
+}
+
+/**
+ * bpf_cpumask_any_and() - Return a random set CPU from the AND of two
+ * cpumasks.
+ * @src1: The first cpumask.
+ * @src2: The second cpumask.
+ *
+ * Return:
+ * * A random set bit within [0, num_cpus) if at least one bit is set.
+ * * >= num_cpus if no bit is set.
+ *
+ * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
+ */
+__bpf_kfunc u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
+{
+ return cpumask_any_and(src1, src2);
+}
+
+__diag_pop();
+
+BTF_SET8_START(cpumask_kfunc_btf_ids)
+BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_cpumask_first, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_test_and_clear_cpu, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_and, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_or, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_intersects, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_subset, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_full, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_any, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_any_and, KF_TRUSTED_ARGS)
+BTF_SET8_END(cpumask_kfunc_btf_ids)
+
+static const struct btf_kfunc_id_set cpumask_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &cpumask_kfunc_btf_ids,
+};
+
+BTF_ID_LIST(cpumask_dtor_ids)
+BTF_ID(struct, bpf_cpumask)
+BTF_ID(func, bpf_cpumask_release)
+
+static int __init cpumask_kfunc_init(void)
+{
+ int ret;
+ const struct btf_id_dtor_kfunc cpumask_dtors[] = {
+ {
+ .btf_id = cpumask_dtor_ids[0],
+ .kfunc_btf_id = cpumask_dtor_ids[1]
+ },
+ };
+
+ ret = bpf_mem_alloc_init(&bpf_cpumask_ma, 0, false);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
+ return ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors,
+ ARRAY_SIZE(cpumask_dtors),
+ THIS_MODULE);
+}
+
+late_initcall(cpumask_kfunc_init);
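
A hedged sketch of how a tracing BPF program might drive these kfuncs end to end; the flow and the cast are illustrative, and the selftests under tools/testing/selftests/bpf are the authoritative reference:

    struct bpf_cpumask *mask;

    mask = bpf_cpumask_create();
    if (!mask)
            return 0;       /* allocation can fail; bail out */

    bpf_cpumask_set_cpu(0, mask);
    bpf_cpumask_set_cpu(1, mask);

    /* Query kfuncs take const struct cpumask *, so cast the wrapper. */
    if (bpf_cpumask_test_cpu(0, (const struct cpumask *)mask))
            /* ... act on the result ... */;

    /* Must be released (or stashed in a map as a kptr) before exit. */
    bpf_cpumask_release(mask);
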
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d01e4c55b376..2675fefc6cb6 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -474,7 +474,11 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
{
int err;
- if (!dev->netdev_ops->ndo_xdp_xmit)
+ if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
+ return -EOPNOTSUPP;
+
+ if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
+ xdp_frame_has_frags(xdpf)))
return -EOPNOTSUPP;
err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
@@ -532,8 +536,14 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
- if (!obj ||
- !obj->dev->netdev_ops->ndo_xdp_xmit)
+ if (!obj)
+ return false;
+
+ if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
+ return false;
+
+ if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
+ xdp_frame_has_frags(xdpf)))
return false;
if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 66bded144377..5dfcb5ad0d06 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1004,8 +1004,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
l_new = ERR_PTR(-ENOMEM);
goto dec_count;
}
- check_and_init_map_value(&htab->map,
- l_new->key + round_up(key_size, 8));
}
memcpy(l_new->key, key, key_size);
@@ -1592,6 +1590,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
else
copy_map_value(map, value, l->key +
roundup_key_size);
+ /* Zeroing special fields in the temp buffer */
check_and_init_map_value(map, value);
}
@@ -1792,6 +1791,7 @@ again_nocopy:
true);
else
copy_map_value(map, dst_val, value);
+ /* Zeroing special fields in the temp buffer */
check_and_init_map_value(map, dst_val);
}
if (do_delete) {
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index af30c6cbd65d..5b278a38ae58 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -756,19 +756,20 @@ static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
* arguments representation.
*/
-#define MAX_BPRINTF_BUF_LEN 512
+#define MAX_BPRINTF_BIN_ARGS 512
/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL 3
struct bpf_bprintf_buffers {
- char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
+ char bin_args[MAX_BPRINTF_BIN_ARGS];
+ char buf[MAX_BPRINTF_BUF];
};
-static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
+
+static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
-static int try_get_fmt_tmp_buf(char **tmp_buf)
+static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
- struct bpf_bprintf_buffers *bufs;
int nest_level;
preempt_disable();
@@ -778,18 +779,19 @@ static int try_get_fmt_tmp_buf(char **tmp_buf)
preempt_enable();
return -EBUSY;
}
- bufs = this_cpu_ptr(&bpf_bprintf_bufs);
- *tmp_buf = bufs->tmp_bufs[nest_level - 1];
+ *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
return 0;
}
-void bpf_bprintf_cleanup(void)
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
- if (this_cpu_read(bpf_bprintf_nest_level)) {
- this_cpu_dec(bpf_bprintf_nest_level);
- preempt_enable();
- }
+ if (!data->bin_args && !data->buf)
+ return;
+ if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
+ return;
+ this_cpu_dec(bpf_bprintf_nest_level);
+ preempt_enable();
}
/*
@@ -798,18 +800,20 @@ void bpf_bprintf_cleanup(void)
* Returns a negative value if fmt is an invalid format string or 0 otherwise.
*
* This can be used in two ways:
- * - Format string verification only: when bin_args is NULL
+ * - Format string verification only: when data->get_bin_args is false
* - Arguments preparation: in addition to the above verification, it writes in
- * bin_args a binary representation of arguments usable by bstr_printf where
- * pointers from BPF have been sanitized.
+ * data->bin_args a binary representation of arguments usable by bstr_printf
+ * where pointers from BPF have been sanitized.
*
* In argument preparation mode, if 0 is returned, safe temporary buffers are
* allocated and bpf_bprintf_cleanup should be called to free them after use.
*/
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
- u32 **bin_args, u32 num_args)
+ u32 num_args, struct bpf_bprintf_data *data)
{
+ bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
+ struct bpf_bprintf_buffers *buffers = NULL;
size_t sizeof_cur_arg, sizeof_cur_ip;
int err, i, num_spec = 0;
u64 cur_arg;
@@ -820,14 +824,19 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
return -EINVAL;
fmt_size = fmt_end - fmt;
- if (bin_args) {
- if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
- return -EBUSY;
+ if (get_buffers && try_get_buffers(&buffers))
+ return -EBUSY;
- tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
- *bin_args = (u32 *)tmp_buf;
+ if (data->get_bin_args) {
+ if (num_args)
+ tmp_buf = buffers->bin_args;
+ tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
+ data->bin_args = (u32 *)tmp_buf;
}
+ if (data->get_buf)
+ data->buf = buffers->buf;
+
for (i = 0; i < fmt_size; i++) {
if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
err = -EINVAL;
@@ -1021,31 +1030,33 @@ nocopy_fmt:
err = 0;
out:
if (err)
- bpf_bprintf_cleanup();
+ bpf_bprintf_cleanup(data);
return err;
}
BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
- const void *, data, u32, data_len)
+ const void *, args, u32, data_len)
{
+ struct bpf_bprintf_data data = {
+ .get_bin_args = true,
+ };
int err, num_args;
- u32 *bin_args;
if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
- (data_len && !data))
+ (data_len && !args))
return -EINVAL;
num_args = data_len / 8;
/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
* can safely give an unbounded size.
*/
- err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
+ err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
if (err < 0)
return err;
- err = bstr_printf(str, str_size, fmt, bin_args);
+ err = bstr_printf(str, str_size, fmt, data.bin_args);
- bpf_bprintf_cleanup();
+ bpf_bprintf_cleanup(&data);
return err + 1;
}
@@ -1745,12 +1756,12 @@ unlock:
while (head != orig_head) {
void *obj = head;
- obj -= field->list_head.node_offset;
+ obj -= field->graph_root.node_offset;
head = head->next;
/* The contained type can also have resources, including a
* bpf_list_head which needs to be freed.
*/
- bpf_obj_free_fields(field->list_head.value_rec, obj);
+ bpf_obj_free_fields(field->graph_root.value_rec, obj);
/* bpf_mem_free requires migrate_disable(), since we can be
* called from map free path as well apart from BPF program (as
* part of map ops doing bpf_obj_free_fields).
@@ -1761,11 +1772,51 @@ unlock:
}
}
+/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
+ * 'rb_node *', so the field name of the rb_node within the containing
+ * struct is not needed.
+ *
+ * Since bpf_rb_root's node type has a corresponding struct btf_field with
+ * graph_root.node_offset, it's not necessary to know the field name or
+ * type of the node struct.
+ */
+#define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
+ for (pos = rb_first_postorder(root); \
+ pos && ({ n = rb_next_postorder(pos); 1; }); \
+ pos = n)
+
+void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
+ struct bpf_spin_lock *spin_lock)
+{
+ struct rb_root_cached orig_root, *root = rb_root;
+ struct rb_node *pos, *n;
+ void *obj;
+
+ BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
+ BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
+
+ __bpf_spin_lock_irqsave(spin_lock);
+ orig_root = *root;
+ *root = RB_ROOT_CACHED;
+ __bpf_spin_unlock_irqrestore(spin_lock);
+
+ bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
+ obj = pos;
+ obj -= field->graph_root.node_offset;
+
+ bpf_obj_free_fields(field->graph_root.value_rec, obj);
+
+ migrate_disable();
+ bpf_mem_free(&bpf_global_ma, obj);
+ migrate_enable();
+ }
+}
+
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in vmlinux BTF");
-void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
struct btf_struct_meta *meta = meta__ign;
u64 size = local_type_id__k;
@@ -1779,7 +1830,7 @@ void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
return p;
}
-void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
+__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
{
struct btf_struct_meta *meta = meta__ign;
void *p = p__alloc;
@@ -1800,12 +1851,12 @@ static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *hea
tail ? list_add_tail(n, h) : list_add(n, h);
}
-void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
+__bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
{
return __bpf_list_add(node, head, false);
}
-void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
+__bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
{
return __bpf_list_add(node, head, true);
}
@@ -1823,23 +1874,73 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tai
return (struct bpf_list_node *)n;
}
-struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
+__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{
return __bpf_list_del(head, false);
}
-struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
+__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{
return __bpf_list_del(head, true);
}
+__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
+ struct bpf_rb_node *node)
+{
+ struct rb_root_cached *r = (struct rb_root_cached *)root;
+ struct rb_node *n = (struct rb_node *)node;
+
+ rb_erase_cached(n, r);
+ RB_CLEAR_NODE(n);
+ return (struct bpf_rb_node *)n;
+}
+
+/* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
+ * program
+ */
+static void __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
+ void *less)
+{
+ struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
+ bpf_callback_t cb = (bpf_callback_t)less;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node((struct rb_node *)node, parent, link);
+ rb_insert_color_cached((struct rb_node *)node,
+ (struct rb_root_cached *)root, leftmost);
+}
+
+__bpf_kfunc void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
+ bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
+{
+ __bpf_rbtree_add(root, node, (void *)less);
+}
+
+__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
+{
+ struct rb_root_cached *r = (struct rb_root_cached *)root;
+
+ return (struct bpf_rb_node *)rb_first_cached(r);
+}
+
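
The 'less' parameter is a comparator supplied by the BPF program itself. A hedged BPF-side sketch of a node type and comparator suitable for bpf_rbtree_add (struct and field names are illustrative, container_of is the usual pointer-arithmetic macro, and calls must be made under the associated bpf_spin_lock):

    struct node_data {
            long key;
            struct bpf_rb_node node;
    };

    static bool node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
    {
            struct node_data *node_a, *node_b;

            node_a = container_of(a, struct node_data, node);
            node_b = container_of(b, struct node_data, node);
            return node_a->key < node_b->key;
    }

    /* Under the lock: bpf_rbtree_add(&groot, &n->node, node_less); */
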
/**
* bpf_task_acquire - Acquire a reference to a task. A task acquired by this
* kfunc which is not stored in a map as a kptr, must be released by calling
* bpf_task_release().
* @p: The task on which a reference is being acquired.
*/
-struct task_struct *bpf_task_acquire(struct task_struct *p)
+__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{
return get_task_struct(p);
}
@@ -1850,7 +1951,7 @@ struct task_struct *bpf_task_acquire(struct task_struct *p)
* released by calling bpf_task_release().
* @p: The task on which a reference is being acquired.
*/
-struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
+__bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
{
/* For the time being this function returns NULL, as it's not currently
* possible to safely acquire a reference to a task with RCU protection
@@ -1902,7 +2003,7 @@ struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
* be released by calling bpf_task_release().
* @pp: A pointer to a task kptr on which a reference is being acquired.
*/
-struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
+__bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
{
/* We must return NULL here until we have clarity on how to properly
* leverage RCU for ensuring a task's lifetime. See the comment above
@@ -1915,7 +2016,7 @@ struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
* bpf_task_release - Release the reference acquired on a task.
* @p: The task on which a reference is being released.
*/
-void bpf_task_release(struct task_struct *p)
+__bpf_kfunc void bpf_task_release(struct task_struct *p)
{
if (!p)
return;
@@ -1930,7 +2031,7 @@ void bpf_task_release(struct task_struct *p)
* calling bpf_cgroup_release().
* @cgrp: The cgroup on which a reference is being acquired.
*/
-struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
+__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{
cgroup_get(cgrp);
return cgrp;
@@ -1942,7 +2043,7 @@ struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
* be released by calling bpf_cgroup_release().
* @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
*/
-struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
+__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
{
struct cgroup *cgrp;
@@ -1974,7 +2075,7 @@ struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
* drops to 0.
* @cgrp: The cgroup on which a reference is being released.
*/
-void bpf_cgroup_release(struct cgroup *cgrp)
+__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{
if (!cgrp)
return;
@@ -1989,7 +2090,7 @@ void bpf_cgroup_release(struct cgroup *cgrp)
* @cgrp: The cgroup for which we're performing a lookup.
* @level: The level of ancestor to look up.
*/
-struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
+__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
struct cgroup *ancestor;
@@ -2008,7 +2109,7 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
* stored in a map, or released with bpf_task_release().
* @pid: The pid of the task being looked up.
*/
-struct task_struct *bpf_task_from_pid(s32 pid)
+__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{
struct task_struct *p;
@@ -2021,22 +2122,22 @@ struct task_struct *bpf_task_from_pid(s32 pid)
return p;
}
-void *bpf_cast_to_kern_ctx(void *obj)
+__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{
return obj;
}
-void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
+__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
{
return obj__ign;
}
-void bpf_rcu_read_lock(void)
+__bpf_kfunc void bpf_rcu_read_lock(void)
{
rcu_read_lock();
}
-void bpf_rcu_read_unlock(void)
+__bpf_kfunc void bpf_rcu_read_unlock(void)
{
rcu_read_unlock();
}
@@ -2057,6 +2158,10 @@ BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_acquire_not_zero, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE)
+BTF_ID_FLAGS(func, bpf_rbtree_add)
+BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
+
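The flags encode the ownership rules the verifier enforces: bpf_rbtree_remove() is KF_ACQUIRE because it hands back an owning reference, and bpf_rbtree_first() is KF_RET_NULL because the tree may be empty. A hedged illustration, reusing the node_data/glock/groot sketch above:

SEC("tc")
long rbtree_pop_example(void *ctx)
{
	struct bpf_rb_node *res;

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);		/* non-owning, may be NULL */
	if (res)
		res = bpf_rbtree_remove(&groot, res);	/* owning reference */
	bpf_spin_unlock(&glock);
	if (res)	/* owning references must be freed (or re-added) */
		bpf_obj_drop(container_of(res, struct node_data, node));
	return 0;
}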
#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 1db156405b68..5fcdacbb8439 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -143,7 +143,7 @@ static void *__alloc(struct bpf_mem_cache *c, int node)
return obj;
}
- return kmalloc_node(c->unit_size, flags, node);
+ return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}
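With __GFP_ZERO OR'ed in, the slab fallback now returns zeroed memory, matching what the per-CPU caches hand out; this is what lets bpf_obj_new() assume embedded fields such as rb/list nodes start out zeroed. In effect (an illustration only), the fallback becomes:

	/* equivalent spelling of the new fallback path */
	return kzalloc_node(c->unit_size, flags, node);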
static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
@@ -395,7 +395,8 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
unit_size = size;
#ifdef CONFIG_MEMCG_KMEM
- objcg = get_obj_cgroup_from_current();
+ if (memcg_bpf_enabled())
+ objcg = get_obj_cgroup_from_current();
#endif
for_each_possible_cpu(cpu) {
c = per_cpu_ptr(pc, cpu);
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 190d9f9dc987..0c85e06f7ea7 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -41,7 +41,7 @@ struct bpf_offload_dev {
struct bpf_offload_netdev {
struct rhash_head l;
struct net_device *netdev;
- struct bpf_offload_dev *offdev;
+ struct bpf_offload_dev *offdev; /* NULL when bound-only */
struct list_head progs;
struct list_head maps;
struct list_head offdev_netdevs;
@@ -56,7 +56,6 @@ static const struct rhashtable_params offdevs_params = {
};
static struct rhashtable offdevs;
-static bool offdevs_inited;
static int bpf_dev_offload_check(struct net_device *netdev)
{
@@ -72,58 +71,218 @@ bpf_offload_find_netdev(struct net_device *netdev)
{
lockdep_assert_held(&bpf_devs_lock);
- if (!offdevs_inited)
- return NULL;
return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}
-int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
+static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
struct bpf_offload_netdev *ondev;
- struct bpf_prog_offload *offload;
int err;
- if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
- attr->prog_type != BPF_PROG_TYPE_XDP)
- return -EINVAL;
+ ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
+ if (!ondev)
+ return -ENOMEM;
- if (attr->prog_flags)
- return -EINVAL;
+ ondev->netdev = netdev;
+ ondev->offdev = offdev;
+ INIT_LIST_HEAD(&ondev->progs);
+ INIT_LIST_HEAD(&ondev->maps);
+
+ err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
+ if (err) {
+ netdev_warn(netdev, "failed to register for BPF offload\n");
+ goto err_free;
+ }
+
+ if (offdev)
+ list_add(&ondev->offdev_netdevs, &offdev->netdevs);
+ return 0;
+
+err_free:
+ kfree(ondev);
+ return err;
+}
+
+static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
+{
+ struct bpf_prog_offload *offload = prog->aux->offload;
+
+ if (offload->dev_state)
+ offload->offdev->ops->destroy(prog);
+
+ list_del_init(&offload->offloads);
+ kfree(offload);
+ prog->aux->offload = NULL;
+}
+
+static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
+ enum bpf_netdev_command cmd)
+{
+ struct netdev_bpf data = {};
+ struct net_device *netdev;
+
+ ASSERT_RTNL();
+
+ data.command = cmd;
+ data.offmap = offmap;
+ /* Caller must make sure netdev is valid */
+ netdev = offmap->netdev;
+
+ return netdev->netdev_ops->ndo_bpf(netdev, &data);
+}
+
+static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
+{
+ WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
+ /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
+ bpf_map_free_id(&offmap->map);
+ list_del_init(&offmap->offloads);
+ offmap->netdev = NULL;
+}
+
+static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
+{
+ struct bpf_offload_netdev *ondev, *altdev = NULL;
+ struct bpf_offloaded_map *offmap, *mtmp;
+ struct bpf_prog_offload *offload, *ptmp;
+
+ ASSERT_RTNL();
+
+ ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
+ if (WARN_ON(!ondev))
+ return;
+
+ WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
+
+ /* Try to move the objects to another netdev of the device */
+ if (offdev) {
+ list_del(&ondev->offdev_netdevs);
+ altdev = list_first_entry_or_null(&offdev->netdevs,
+ struct bpf_offload_netdev,
+ offdev_netdevs);
+ }
+
+ if (altdev) {
+ list_for_each_entry(offload, &ondev->progs, offloads)
+ offload->netdev = altdev->netdev;
+ list_splice_init(&ondev->progs, &altdev->progs);
+
+ list_for_each_entry(offmap, &ondev->maps, offloads)
+ offmap->netdev = altdev->netdev;
+ list_splice_init(&ondev->maps, &altdev->maps);
+ } else {
+ list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
+ __bpf_prog_offload_destroy(offload->prog);
+ list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
+ __bpf_map_offload_destroy(offmap);
+ }
+
+ WARN_ON(!list_empty(&ondev->progs));
+ WARN_ON(!list_empty(&ondev->maps));
+ kfree(ondev);
+}
+
+static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
+{
+ struct bpf_offload_netdev *ondev;
+ struct bpf_prog_offload *offload;
+ int err;
offload = kzalloc(sizeof(*offload), GFP_USER);
if (!offload)
return -ENOMEM;
offload->prog = prog;
+ offload->netdev = netdev;
- offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
- attr->prog_ifindex);
- err = bpf_dev_offload_check(offload->netdev);
- if (err)
- goto err_maybe_put;
-
- down_write(&bpf_devs_lock);
ondev = bpf_offload_find_netdev(offload->netdev);
if (!ondev) {
- err = -EINVAL;
- goto err_unlock;
+ if (bpf_prog_is_offloaded(prog->aux)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ /* When only binding to the device, explicitly
+ * create an entry in the hashtable.
+ */
+ err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
+ if (err)
+ goto err_free;
+ ondev = bpf_offload_find_netdev(offload->netdev);
}
offload->offdev = ondev->offdev;
prog->aux->offload = offload;
list_add_tail(&offload->offloads, &ondev->progs);
- dev_put(offload->netdev);
- up_write(&bpf_devs_lock);
return 0;
-err_unlock:
- up_write(&bpf_devs_lock);
-err_maybe_put:
- if (offload->netdev)
- dev_put(offload->netdev);
+err_free:
kfree(offload);
return err;
}
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
+{
+ struct net_device *netdev;
+ int err;
+
+ if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
+ attr->prog_type != BPF_PROG_TYPE_XDP)
+ return -EINVAL;
+
+ if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
+ return -EINVAL;
+
+ if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
+ attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
+ return -EINVAL;
+
+ netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
+ if (!netdev)
+ return -EINVAL;
+
+ err = bpf_dev_offload_check(netdev);
+ if (err)
+ goto out;
+
+ prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);
+
+ down_write(&bpf_devs_lock);
+ err = __bpf_prog_dev_bound_init(prog, netdev);
+ up_write(&bpf_devs_lock);
+
+out:
+ dev_put(netdev);
+ return err;
+}
+
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
+{
+ int err;
+
+ if (!bpf_prog_is_dev_bound(old_prog->aux))
+ return 0;
+
+ if (bpf_prog_is_offloaded(old_prog->aux))
+ return -EINVAL;
+
+ new_prog->aux->dev_bound = old_prog->aux->dev_bound;
+ new_prog->aux->offload_requested = old_prog->aux->offload_requested;
+
+ down_write(&bpf_devs_lock);
+ if (!old_prog->aux->offload) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);
+
+out:
+ up_write(&bpf_devs_lock);
+ return err;
+}
+
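From userspace, a program is made dev-bound without being offloaded by passing the target ifindex together with the new flag at load time. A hedged libbpf sketch; insns, insn_cnt, and ifindex are placeholders:

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int load_dev_bound_xdp(const struct bpf_insn *insns, size_t insn_cnt,
			      int ifindex)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.prog_ifindex = ifindex,
		/* bound to the device for metadata kfuncs, not offloaded */
		.prog_flags = BPF_F_XDP_DEV_BOUND_ONLY,
	);

	return bpf_prog_load(BPF_PROG_TYPE_XDP, "xdp_meta", "GPL",
			     insns, insn_cnt, &opts);
}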
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
struct bpf_prog_offload *offload;
@@ -209,24 +368,25 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
up_read(&bpf_devs_lock);
}
-static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
- struct bpf_prog_offload *offload = prog->aux->offload;
-
- if (offload->dev_state)
- offload->offdev->ops->destroy(prog);
-
- list_del_init(&offload->offloads);
- kfree(offload);
- prog->aux->offload = NULL;
-}
+ struct bpf_offload_netdev *ondev;
+ struct net_device *netdev;
-void bpf_prog_offload_destroy(struct bpf_prog *prog)
-{
+ rtnl_lock();
down_write(&bpf_devs_lock);
- if (prog->aux->offload)
+ if (prog->aux->offload) {
+ list_del_init(&prog->aux->offload->offloads);
+
+ netdev = prog->aux->offload->netdev;
__bpf_prog_offload_destroy(prog);
+
+ ondev = bpf_offload_find_netdev(netdev);
+ if (!ondev->offdev && list_empty(&ondev->progs))
+ __bpf_offload_dev_netdev_unregister(NULL, netdev);
+ }
up_write(&bpf_devs_lock);
+ rtnl_unlock();
}
static int bpf_prog_offload_translate(struct bpf_prog *prog)
@@ -340,22 +500,6 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
-static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
- enum bpf_netdev_command cmd)
-{
- struct netdev_bpf data = {};
- struct net_device *netdev;
-
- ASSERT_RTNL();
-
- data.command = cmd;
- data.offmap = offmap;
- /* Caller must make sure netdev is valid */
- netdev = offmap->netdev;
-
- return netdev->netdev_ops->ndo_bpf(netdev, &data);
-}
-
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
struct net *net = current->nsproxy->net_ns;
@@ -405,15 +549,6 @@ err_unlock:
return ERR_PTR(err);
}
-static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
-{
- WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
- /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
- bpf_map_free_id(&offmap->map, true);
- list_del_init(&offmap->offloads);
- offmap->netdev = NULL;
-}
-
void bpf_map_offload_map_free(struct bpf_map *map)
{
struct bpf_offloaded_map *offmap = map_to_offmap(map);
@@ -573,12 +708,28 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);
+bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
+{
+ bool ret;
+
+ if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
+ return false;
+
+ down_read(&bpf_devs_lock);
+ ret = lhs->aux->offload && rhs->aux->offload &&
+ lhs->aux->offload->netdev &&
+ lhs->aux->offload->netdev == rhs->aux->offload->netdev;
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
struct bpf_offloaded_map *offmap;
bool ret;
- if (!bpf_map_is_dev_bound(map))
+ if (!bpf_map_is_offloaded(map))
return bpf_map_offload_neutral(map);
offmap = map_to_offmap(map);
@@ -592,32 +743,11 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev)
{
- struct bpf_offload_netdev *ondev;
int err;
- ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
- if (!ondev)
- return -ENOMEM;
-
- ondev->netdev = netdev;
- ondev->offdev = offdev;
- INIT_LIST_HEAD(&ondev->progs);
- INIT_LIST_HEAD(&ondev->maps);
-
down_write(&bpf_devs_lock);
- err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
- if (err) {
- netdev_warn(netdev, "failed to register for BPF offload\n");
- goto err_unlock_free;
- }
-
- list_add(&ondev->offdev_netdevs, &offdev->netdevs);
- up_write(&bpf_devs_lock);
- return 0;
-
-err_unlock_free:
+ err = __bpf_offload_dev_netdev_register(offdev, netdev);
up_write(&bpf_devs_lock);
- kfree(ondev);
return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
@@ -625,43 +755,8 @@ EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
struct net_device *netdev)
{
- struct bpf_offload_netdev *ondev, *altdev;
- struct bpf_offloaded_map *offmap, *mtmp;
- struct bpf_prog_offload *offload, *ptmp;
-
- ASSERT_RTNL();
-
down_write(&bpf_devs_lock);
- ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
- if (WARN_ON(!ondev))
- goto unlock;
-
- WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
- list_del(&ondev->offdev_netdevs);
-
- /* Try to move the objects to another netdev of the device */
- altdev = list_first_entry_or_null(&offdev->netdevs,
- struct bpf_offload_netdev,
- offdev_netdevs);
- if (altdev) {
- list_for_each_entry(offload, &ondev->progs, offloads)
- offload->netdev = altdev->netdev;
- list_splice_init(&ondev->progs, &altdev->progs);
-
- list_for_each_entry(offmap, &ondev->maps, offloads)
- offmap->netdev = altdev->netdev;
- list_splice_init(&ondev->maps, &altdev->maps);
- } else {
- list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
- __bpf_prog_offload_destroy(offload->prog);
- list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
- __bpf_map_offload_destroy(offmap);
- }
-
- WARN_ON(!list_empty(&ondev->progs));
- WARN_ON(!list_empty(&ondev->maps));
- kfree(ondev);
-unlock:
+ __bpf_offload_dev_netdev_unregister(offdev, netdev);
up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
@@ -670,18 +765,6 @@ struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
struct bpf_offload_dev *offdev;
- int err;
-
- down_write(&bpf_devs_lock);
- if (!offdevs_inited) {
- err = rhashtable_init(&offdevs, &offdevs_params);
- if (err) {
- up_write(&bpf_devs_lock);
- return ERR_PTR(err);
- }
- offdevs_inited = true;
- }
- up_write(&bpf_devs_lock);
offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
if (!offdev)
@@ -707,3 +790,67 @@ void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);
+
+void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+ struct bpf_offload_netdev *ondev;
+
+ ASSERT_RTNL();
+
+ down_write(&bpf_devs_lock);
+ ondev = bpf_offload_find_netdev(dev);
+ if (ondev && !ondev->offdev)
+ __bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
+ up_write(&bpf_devs_lock);
+}
+
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux)
+{
+ if (!bpf_prog_is_dev_bound(prog_aux)) {
+ bpf_log(log, "metadata kfuncs require device-bound program\n");
+ return -EINVAL;
+ }
+
+ if (bpf_prog_is_offloaded(prog_aux)) {
+ bpf_log(log, "metadata kfuncs can't be offloaded\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
+{
+ const struct xdp_metadata_ops *ops;
+ void *p = NULL;
+
+ /* We don't hold bpf_devs_lock while resolving several
+	 * kfuncs and can race with unregister_netdevice().
+	 * We rely on the bpf_dev_bound_match() check at attach
+	 * time to render this program unusable.
+ */
+ down_read(&bpf_devs_lock);
+ if (!prog->aux->offload)
+ goto out;
+
+ ops = prog->aux->offload->netdev->xdp_metadata_ops;
+ if (!ops)
+ goto out;
+
+ if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
+ p = ops->xmo_rx_timestamp;
+ else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
+ p = ops->xmo_rx_hash;
+out:
+ up_read(&bpf_devs_lock);
+
+ return p;
+}
+
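On the BPF side, the resolved pointers back the XDP metadata kfuncs; each call is JITed to the netdev's xmo_* op, or returns an error if the device lacks it. A sketch of a consuming dev-bound XDP program — the extern prototypes follow the selftests in this series, so treat the exact signatures as assumptions:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
				    __u32 *hash) __ksym;

SEC("xdp")
int rx_meta(struct xdp_md *ctx)
{
	__u64 ts = 0;
	__u32 hash = 0;

	if (bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		ts = 0;		/* device has no xmo_rx_timestamp */
	if (bpf_xdp_metadata_rx_hash(ctx, &hash))
		hash = 0;	/* device has no xmo_rx_hash */

	return XDP_PASS;
}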
+static int __init bpf_offload_init(void)
+{
+ return rhashtable_init(&offdevs, &offdevs_params);
+}
+
+late_initcall(bpf_offload_init);
diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
index 5106b5372f0c..b56f9f3314fd 100644
--- a/kernel/bpf/preload/bpf_preload_kern.c
+++ b/kernel/bpf/preload/bpf_preload_kern.c
@@ -3,7 +3,11 @@
#include <linux/init.h>
#include <linux/module.h>
#include "bpf_preload.h"
-#include "iterators/iterators.lskel.h"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#include "iterators/iterators.lskel-little-endian.h"
+#else
+#include "iterators/iterators.lskel-big-endian.h"
+#endif
static struct bpf_link *maps_link, *progs_link;
static struct iterators_bpf *skel;
diff --git a/kernel/bpf/preload/iterators/Makefile b/kernel/bpf/preload/iterators/Makefile
index 6762b1260f2f..8937dc6bc8d0 100644
--- a/kernel/bpf/preload/iterators/Makefile
+++ b/kernel/bpf/preload/iterators/Makefile
@@ -35,20 +35,22 @@ endif
.PHONY: all clean
-all: iterators.lskel.h
+all: iterators.lskel-little-endian.h
+
+big: iterators.lskel-big-endian.h
clean:
$(call msg,CLEAN)
$(Q)rm -rf $(OUTPUT) iterators
-iterators.lskel.h: $(OUTPUT)/iterators.bpf.o | $(BPFTOOL)
+iterators.lskel-%.h: $(OUTPUT)/%/iterators.bpf.o | $(BPFTOOL)
$(call msg,GEN-SKEL,$@)
$(Q)$(BPFTOOL) gen skeleton -L $< > $@
-
-$(OUTPUT)/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)
+$(OUTPUT)/%/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)
$(call msg,BPF,$@)
- $(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
+ $(Q)mkdir -p $(@D)
+ $(Q)$(CLANG) -g -O2 -target bpf -m$* $(INCLUDES) \
-c $(filter %.c,$^) -o $@ && \
$(LLVM_STRIP) -g $@
diff --git a/kernel/bpf/preload/iterators/README b/kernel/bpf/preload/iterators/README
index 7fd6d39a9ad2..98e7c90ea012 100644
--- a/kernel/bpf/preload/iterators/README
+++ b/kernel/bpf/preload/iterators/README
@@ -1,4 +1,7 @@
WARNING:
-If you change "iterators.bpf.c" do "make -j" in this directory to rebuild "iterators.skel.h".
+If you change "iterators.bpf.c", do "make -j" in this directory to
+rebuild "iterators.lskel-little-endian.h". Then, on a big-endian
+machine, do "make -j big" in this directory to rebuild
+"iterators.lskel-big-endian.h". Commit both resulting headers.
Make sure to have clang 10 installed.
See Documentation/bpf/bpf_devel_QA.rst
diff --git a/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h b/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h
new file mode 100644
index 000000000000..ebdc6c0cdb70
--- /dev/null
+++ b/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */
+#ifndef __ITERATORS_BPF_SKEL_H__
+#define __ITERATORS_BPF_SKEL_H__
+
+#include <bpf/skel_internal.h>
+
+struct iterators_bpf {
+ struct bpf_loader_ctx ctx;
+ struct {
+ struct bpf_map_desc rodata;
+ } maps;
+ struct {
+ struct bpf_prog_desc dump_bpf_map;
+ struct bpf_prog_desc dump_bpf_prog;
+ } progs;
+ struct {
+ int dump_bpf_map_fd;
+ int dump_bpf_prog_fd;
+ } links;
+};
+
+static inline int
+iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel)
+{
+ int prog_fd = skel->progs.dump_bpf_map.prog_fd;
+ int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);
+
+ if (fd > 0)
+ skel->links.dump_bpf_map_fd = fd;
+ return fd;
+}
+
+static inline int
+iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel)
+{
+ int prog_fd = skel->progs.dump_bpf_prog.prog_fd;
+ int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);
+
+ if (fd > 0)
+ skel->links.dump_bpf_prog_fd = fd;
+ return fd;
+}
+
+static inline int
+iterators_bpf__attach(struct iterators_bpf *skel)
+{
+ int ret = 0;
+
+ ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel);
+ ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel);
+ return ret < 0 ? ret : 0;
+}
+
+static inline void
+iterators_bpf__detach(struct iterators_bpf *skel)
+{
+ skel_closenz(skel->links.dump_bpf_map_fd);
+ skel_closenz(skel->links.dump_bpf_prog_fd);
+}
+static void
+iterators_bpf__destroy(struct iterators_bpf *skel)
+{
+ if (!skel)
+ return;
+ iterators_bpf__detach(skel);
+ skel_closenz(skel->progs.dump_bpf_map.prog_fd);
+ skel_closenz(skel->progs.dump_bpf_prog.prog_fd);
+ skel_closenz(skel->maps.rodata.map_fd);
+ skel_free(skel);
+}
+static inline struct iterators_bpf *
+iterators_bpf__open(void)
+{
+ struct iterators_bpf *skel;
+
+ skel = skel_alloc(sizeof(*skel));
+ if (!skel)
+ goto cleanup;
+ skel->ctx.sz = (void *)&skel->links - (void *)skel;
+ return skel;
+cleanup:
+ iterators_bpf__destroy(skel);
+ return NULL;
+}
+
+static inline int
+iterators_bpf__load(struct iterators_bpf *skel)
+{
+ struct bpf_load_and_run_opts opts = {};
+ int err;
+
+ opts.ctx = (struct bpf_loader_ctx *)skel;
+ opts.data_sz = 6008;
+ opts.data = (void *)"\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xeb\x9f\x01\0\
+\0\0\0\x18\0\0\0\0\0\0\x04\x1c\0\0\x04\x1c\0\0\x05\x18\0\0\0\0\x02\0\0\0\0\0\0\
+\x02\0\0\0\x01\x04\0\0\x02\0\0\0\x10\0\0\0\x13\0\0\0\x03\0\0\0\0\0\0\0\x18\0\0\
+\0\x04\0\0\0\x40\0\0\0\0\x02\0\0\0\0\0\0\x08\0\0\0\0\x02\0\0\0\0\0\0\x0d\0\0\0\
+\0\x0d\0\0\x01\0\0\0\x06\0\0\0\x1c\0\0\0\x01\0\0\0\x20\x01\0\0\0\0\0\0\x04\x01\
+\0\0\x20\0\0\0\x24\x0c\0\0\x01\0\0\0\x05\0\0\0\xc2\x04\0\0\x03\0\0\0\x18\0\0\0\
+\xd0\0\0\0\x09\0\0\0\0\0\0\0\xd4\0\0\0\x0b\0\0\0\x40\0\0\0\xdf\0\0\0\x0b\0\0\0\
+\x80\0\0\0\0\x02\0\0\0\0\0\0\x0a\0\0\0\xe7\x07\0\0\0\0\0\0\0\0\0\0\xf0\x08\0\0\
+\0\0\0\0\x0c\0\0\0\xf6\x01\0\0\0\0\0\0\x08\0\0\0\x40\0\0\x01\xb3\x04\0\0\x03\0\
+\0\0\x18\0\0\x01\xbb\0\0\0\x0e\0\0\0\0\0\0\x01\xbe\0\0\0\x11\0\0\0\x20\0\0\x01\
+\xc3\0\0\0\x0e\0\0\0\xa0\0\0\x01\xcf\x08\0\0\0\0\0\0\x0f\0\0\x01\xd5\x01\0\0\0\
+\0\0\0\x04\0\0\0\x20\0\0\x01\xe2\x01\0\0\0\0\0\0\x01\x01\0\0\x08\0\0\0\0\x03\0\
+\0\0\0\0\0\0\0\0\0\x10\0\0\0\x12\0\0\0\x10\0\0\x01\xe7\x01\0\0\0\0\0\0\x04\0\0\
+\0\x20\0\0\0\0\x02\0\0\0\0\0\0\x14\0\0\x02\x4b\x04\0\0\x02\0\0\0\x10\0\0\0\x13\
+\0\0\0\x03\0\0\0\0\0\0\x02\x5e\0\0\0\x15\0\0\0\x40\0\0\0\0\x02\0\0\0\0\0\0\x18\
+\0\0\0\0\x0d\0\0\x01\0\0\0\x06\0\0\0\x1c\0\0\0\x13\0\0\x02\x63\x0c\0\0\x01\0\0\
+\0\x16\0\0\x02\xaf\x04\0\0\x01\0\0\0\x08\0\0\x02\xb8\0\0\0\x19\0\0\0\0\0\0\0\0\
+\x02\0\0\0\0\0\0\x1a\0\0\x03\x09\x04\0\0\x06\0\0\0\x38\0\0\x01\xbb\0\0\0\x0e\0\
+\0\0\0\0\0\x01\xbe\0\0\0\x11\0\0\0\x20\0\0\x03\x16\0\0\0\x1b\0\0\0\xc0\0\0\x03\
+\x27\0\0\0\x15\0\0\x01\0\0\0\x03\x30\0\0\0\x1d\0\0\x01\x40\0\0\x03\x3a\0\0\0\
+\x1e\0\0\x01\x80\0\0\0\0\x02\0\0\0\0\0\0\x1c\0\0\0\0\x0a\0\0\0\0\0\0\x10\0\0\0\
+\0\x02\0\0\0\0\0\0\x1f\0\0\0\0\x02\0\0\0\0\0\0\x20\0\0\x03\x84\x04\0\0\x02\0\0\
+\0\x08\0\0\x03\x92\0\0\0\x0e\0\0\0\0\0\0\x03\x9b\0\0\0\x0e\0\0\0\x20\0\0\x03\
+\x3a\x04\0\0\x03\0\0\0\x18\0\0\x03\xa5\0\0\0\x1b\0\0\0\0\0\0\x03\xad\0\0\0\x21\
+\0\0\0\x40\0\0\x03\xb3\0\0\0\x23\0\0\0\x80\0\0\0\0\x02\0\0\0\0\0\0\x22\0\0\0\0\
+\x02\0\0\0\0\0\0\x24\0\0\x03\xb7\x04\0\0\x01\0\0\0\x04\0\0\x03\xc2\0\0\0\x0e\0\
+\0\0\0\0\0\x04\x2b\x04\0\0\x01\0\0\0\x04\0\0\x04\x34\0\0\0\x0e\0\0\0\0\0\0\0\0\
+\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\0\0\0\x23\0\0\x04\xaa\x0e\0\0\0\0\0\0\
+\x25\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\0\0\0\x0e\0\0\x04\
+\xbe\x0e\0\0\0\0\0\0\x27\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\
+\0\0\0\x20\0\0\x04\xd4\x0e\0\0\0\0\0\0\x29\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\
+\0\0\x1c\0\0\0\x12\0\0\0\x11\0\0\x04\xe9\x0e\0\0\0\0\0\0\x2b\0\0\0\0\0\0\0\0\
+\x03\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\x12\0\0\0\x04\0\0\x05\0\x0e\0\0\0\0\0\0\x2d\
+\0\0\0\x01\0\0\x05\x08\x0f\0\0\x04\0\0\0\x62\0\0\0\x26\0\0\0\0\0\0\0\x23\0\0\0\
+\x28\0\0\0\x23\0\0\0\x0e\0\0\0\x2a\0\0\0\x31\0\0\0\x20\0\0\0\x2c\0\0\0\x51\0\0\
+\0\x11\0\0\x05\x10\x0f\0\0\x01\0\0\0\x04\0\0\0\x2e\0\0\0\0\0\0\0\x04\0\x62\x70\
+\x66\x5f\x69\x74\x65\x72\x5f\x5f\x62\x70\x66\x5f\x6d\x61\x70\0\x6d\x65\x74\x61\
+\0\x6d\x61\x70\0\x63\x74\x78\0\x69\x6e\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\
+\x5f\x6d\x61\x70\0\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x6d\x61\x70\0\x30\x3a\
+\x30\0\x2f\x68\x6f\x6d\x65\x2f\x69\x69\x69\x2f\x6c\x69\x6e\x75\x78\x2d\x6b\x65\
+\x72\x6e\x65\x6c\x2d\x74\x6f\x6f\x6c\x63\x68\x61\x69\x6e\x2f\x73\x72\x63\x2f\
+\x6c\x69\x6e\x75\x78\x2f\x6b\x65\x72\x6e\x65\x6c\x2f\x62\x70\x66\x2f\x70\x72\
+\x65\x6c\x6f\x61\x64\x2f\x69\x74\x65\x72\x61\x74\x6f\x72\x73\x2f\x69\x74\x65\
+\x72\x61\x74\x6f\x72\x73\x2e\x62\x70\x66\x2e\x63\0\x09\x73\x74\x72\x75\x63\x74\
+\x20\x73\x65\x71\x5f\x66\x69\x6c\x65\x20\x2a\x73\x65\x71\x20\x3d\x20\x63\x74\
+\x78\x2d\x3e\x6d\x65\x74\x61\x2d\x3e\x73\x65\x71\x3b\0\x62\x70\x66\x5f\x69\x74\
+\x65\x72\x5f\x6d\x65\x74\x61\0\x73\x65\x71\0\x73\x65\x73\x73\x69\x6f\x6e\x5f\
+\x69\x64\0\x73\x65\x71\x5f\x6e\x75\x6d\0\x73\x65\x71\x5f\x66\x69\x6c\x65\0\x5f\
+\x5f\x75\x36\x34\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\x6e\x67\x20\x6c\
+\x6f\x6e\x67\0\x30\x3a\x31\0\x09\x73\x74\x72\x75\x63\x74\x20\x62\x70\x66\x5f\
+\x6d\x61\x70\x20\x2a\x6d\x61\x70\x20\x3d\x20\x63\x74\x78\x2d\x3e\x6d\x61\x70\
+\x3b\0\x09\x69\x66\x20\x28\x21\x6d\x61\x70\x29\0\x30\x3a\x32\0\x09\x5f\x5f\x75\
+\x36\x34\x20\x73\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x20\x63\x74\x78\x2d\x3e\x6d\
+\x65\x74\x61\x2d\x3e\x73\x65\x71\x5f\x6e\x75\x6d\x3b\0\x09\x69\x66\x20\x28\x73\
+\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x3d\x20\x30\x29\0\x09\x09\x42\x50\x46\x5f\x53\
+\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x20\x20\x69\
+\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x5c\x6e\x22\x29\x3b\0\x62\x70\x66\
+\x5f\x6d\x61\x70\0\x69\x64\0\x6e\x61\x6d\x65\0\x6d\x61\x78\x5f\x65\x6e\x74\x72\
+\x69\x65\x73\0\x5f\x5f\x75\x33\x32\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\
+\x6e\x74\0\x63\x68\x61\x72\0\x5f\x5f\x41\x52\x52\x41\x59\x5f\x53\x49\x5a\x45\
+\x5f\x54\x59\x50\x45\x5f\x5f\0\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\
+\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x25\x34\x75\x20\x25\x2d\x31\x36\x73\
+\x25\x36\x64\x5c\x6e\x22\x2c\x20\x6d\x61\x70\x2d\x3e\x69\x64\x2c\x20\x6d\x61\
+\x70\x2d\x3e\x6e\x61\x6d\x65\x2c\x20\x6d\x61\x70\x2d\x3e\x6d\x61\x78\x5f\x65\
+\x6e\x74\x72\x69\x65\x73\x29\x3b\0\x7d\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\
+\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x70\x72\x6f\x67\0\x64\x75\x6d\x70\x5f\
+\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x70\x72\
+\x6f\x67\0\x09\x73\x74\x72\x75\x63\x74\x20\x62\x70\x66\x5f\x70\x72\x6f\x67\x20\
+\x2a\x70\x72\x6f\x67\x20\x3d\x20\x63\x74\x78\x2d\x3e\x70\x72\x6f\x67\x3b\0\x09\
+\x69\x66\x20\x28\x21\x70\x72\x6f\x67\x29\0\x62\x70\x66\x5f\x70\x72\x6f\x67\0\
+\x61\x75\x78\0\x09\x61\x75\x78\x20\x3d\x20\x70\x72\x6f\x67\x2d\x3e\x61\x75\x78\
+\x3b\0\x09\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\
+\x65\x71\x2c\x20\x22\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\x74\x61\x63\x68\x65\x64\x5c\x6e\x22\
+\x29\x3b\0\x62\x70\x66\x5f\x70\x72\x6f\x67\x5f\x61\x75\x78\0\x61\x74\x74\x61\
+\x63\x68\x5f\x66\x75\x6e\x63\x5f\x6e\x61\x6d\x65\0\x64\x73\x74\x5f\x70\x72\x6f\
+\x67\0\x66\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\x62\x74\x66\0\x09\x42\x50\x46\x5f\
+\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x25\x34\
+\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\x25\x73\x5c\x6e\x22\x2c\x20\x61\
+\x75\x78\x2d\x3e\x69\x64\x2c\0\x30\x3a\x34\0\x30\x3a\x35\0\x09\x69\x66\x20\x28\
+\x21\x62\x74\x66\x29\0\x62\x70\x66\x5f\x66\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\
+\x69\x6e\x73\x6e\x5f\x6f\x66\x66\0\x74\x79\x70\x65\x5f\x69\x64\0\x30\0\x73\x74\
+\x72\x69\x6e\x67\x73\0\x74\x79\x70\x65\x73\0\x68\x64\x72\0\x62\x74\x66\x5f\x68\
+\x65\x61\x64\x65\x72\0\x73\x74\x72\x5f\x6c\x65\x6e\0\x09\x74\x79\x70\x65\x73\
+\x20\x3d\x20\x62\x74\x66\x2d\x3e\x74\x79\x70\x65\x73\x3b\0\x09\x62\x70\x66\x5f\
+\x70\x72\x6f\x62\x65\x5f\x72\x65\x61\x64\x5f\x6b\x65\x72\x6e\x65\x6c\x28\x26\
+\x74\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x74\x29\x2c\x20\x74\x79\x70\x65\x73\
+\x20\x2b\x20\x62\x74\x66\x5f\x69\x64\x29\x3b\0\x09\x73\x74\x72\x20\x3d\x20\x62\
+\x74\x66\x2d\x3e\x73\x74\x72\x69\x6e\x67\x73\x3b\0\x62\x74\x66\x5f\x74\x79\x70\
+\x65\0\x6e\x61\x6d\x65\x5f\x6f\x66\x66\0\x09\x6e\x61\x6d\x65\x5f\x6f\x66\x66\
+\x20\x3d\x20\x42\x50\x46\x5f\x43\x4f\x52\x45\x5f\x52\x45\x41\x44\x28\x74\x2c\
+\x20\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x29\x3b\0\x30\x3a\x32\x3a\x30\0\x09\x69\
+\x66\x20\x28\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x20\x3e\x3d\x20\x62\x74\x66\x2d\
+\x3e\x68\x64\x72\x2e\x73\x74\x72\x5f\x6c\x65\x6e\x29\0\x09\x72\x65\x74\x75\x72\
+\x6e\x20\x73\x74\x72\x20\x2b\x20\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x3b\0\x30\x3a\
+\x33\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\
+\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\
+\x74\x2e\x31\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\
+\x5f\x66\x6d\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\x2e\x5f\
+\x5f\x5f\x66\x6d\x74\x2e\x32\0\x4c\x49\x43\x45\x4e\x53\x45\0\x2e\x72\x6f\x64\
+\x61\x74\x61\0\x6c\x69\x63\x65\x6e\x73\x65\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\x09\x4c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\0\0\0\x04\0\0\0\x62\0\0\0\
+\x01\0\0\0\x80\0\0\0\0\0\0\0\0\x69\x74\x65\x72\x61\x74\x6f\x72\x2e\x72\x6f\x64\
+\x61\x74\x61\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x2f\0\0\0\0\0\0\0\0\0\0\0\0\x20\
+\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x0a\0\x25\x34\x75\x20\x25\
+\x2d\x31\x36\x73\x25\x36\x64\x0a\0\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\x74\x61\x63\x68\x65\x64\
+\x0a\0\x25\x34\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\x25\x73\x0a\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\x47\x50\x4c\0\0\0\0\0\x79\x21\0\0\0\0\0\0\x79\x62\0\0\
+\0\0\0\0\x79\x71\0\x08\0\0\0\0\x15\x70\0\x1a\0\0\0\0\x79\x12\0\x10\0\0\0\0\x55\
+\x10\0\x08\0\0\0\0\xbf\x4a\0\0\0\0\0\0\x07\x40\0\0\xff\xff\xff\xe8\xbf\x16\0\0\
+\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb7\x30\0\0\0\0\0\x23\xb7\x50\0\0\
+\0\0\0\0\x85\0\0\0\0\0\0\x7e\x61\x17\0\0\0\0\0\0\x7b\xa1\xff\xe8\0\0\0\0\xb7\
+\x10\0\0\0\0\0\x04\xbf\x27\0\0\0\0\0\0\x0f\x21\0\0\0\0\0\0\x7b\xa2\xff\xf0\0\0\
+\0\0\x61\x17\0\x14\0\0\0\0\x7b\xa1\xff\xf8\0\0\0\0\xbf\x4a\0\0\0\0\0\0\x07\x40\
+\0\0\xff\xff\xff\xe8\xbf\x16\0\0\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\0\x23\
+\xb7\x30\0\0\0\0\0\x0e\xb7\x50\0\0\0\0\0\x18\x85\0\0\0\0\0\0\x7e\xb7\0\0\0\0\0\
+\0\0\x95\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x07\0\0\0\0\0\0\0\x42\0\0\0\x9a\0\x01\x3c\
+\x1e\0\0\0\x01\0\0\0\x42\0\0\0\x9a\0\x01\x3c\x24\0\0\0\x02\0\0\0\x42\0\0\x01\
+\x0d\0\x01\x44\x1d\0\0\0\x03\0\0\0\x42\0\0\x01\x2e\0\x01\x4c\x06\0\0\0\x04\0\0\
+\0\x42\0\0\x01\x3d\0\x01\x40\x1d\0\0\0\x05\0\0\0\x42\0\0\x01\x62\0\x01\x58\x06\
+\0\0\0\x07\0\0\0\x42\0\0\x01\x75\0\x01\x5c\x03\0\0\0\x0e\0\0\0\x42\0\0\x01\xfb\
+\0\x01\x64\x02\0\0\0\x1e\0\0\0\x42\0\0\x02\x49\0\x01\x6c\x01\0\0\0\0\0\0\0\x02\
+\0\0\0\x3e\0\0\0\0\0\0\0\x08\0\0\0\x08\0\0\0\x3e\0\0\0\0\0\0\0\x10\0\0\0\x02\0\
+\0\x01\x09\0\0\0\0\0\0\0\x20\0\0\0\x08\0\0\x01\x39\0\0\0\0\0\0\0\x70\0\0\0\x0d\
+\0\0\0\x3e\0\0\0\0\0\0\0\x80\0\0\0\x0d\0\0\x01\x09\0\0\0\0\0\0\0\xa0\0\0\0\x0d\
+\0\0\x01\x39\0\0\0\0\0\0\0\x1a\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\
+\x6d\x61\x70\0\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\
+\x01\0\0\0\x10\0\0\0\0\0\0\0\0\0\0\0\x09\0\0\0\x01\0\0\0\0\0\0\0\x07\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\
+\x62\x70\x66\x5f\x6d\x61\x70\0\0\0\0\0\0\0\0\x47\x50\x4c\0\0\0\0\0\x79\x21\0\0\
+\0\0\0\0\x79\x62\0\0\0\0\0\0\x79\x11\0\x08\0\0\0\0\x15\x10\0\x3b\0\0\0\0\x79\
+\x71\0\0\0\0\0\0\x79\x12\0\x10\0\0\0\0\x55\x10\0\x08\0\0\0\0\xbf\x4a\0\0\0\0\0\
+\0\x07\x40\0\0\xff\xff\xff\xd0\xbf\x16\0\0\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\x31\xb7\x30\0\0\0\0\0\x20\xb7\x50\0\0\0\0\0\0\x85\0\0\0\0\0\0\x7e\x7b\
+\xa6\xff\xc8\0\0\0\0\x61\x17\0\0\0\0\0\0\x7b\xa1\xff\xd0\0\0\0\0\xb7\x30\0\0\0\
+\0\0\x04\xbf\x97\0\0\0\0\0\0\x0f\x93\0\0\0\0\0\0\x79\x17\0\x28\0\0\0\0\x79\x87\
+\0\x30\0\0\0\0\x15\x80\0\x18\0\0\0\0\xb7\x20\0\0\0\0\0\0\x0f\x12\0\0\0\0\0\0\
+\x61\x11\0\x04\0\0\0\0\x79\x38\0\x08\0\0\0\0\x67\x10\0\0\0\0\0\x03\x0f\x31\0\0\
+\0\0\0\0\x79\x68\0\0\0\0\0\0\xbf\x1a\0\0\0\0\0\0\x07\x10\0\0\xff\xff\xff\xf8\
+\xb7\x20\0\0\0\0\0\x08\x85\0\0\0\0\0\0\x71\xb7\x10\0\0\0\0\0\0\x79\x3a\xff\xf8\
+\0\0\0\0\x0f\x31\0\0\0\0\0\0\xbf\x1a\0\0\0\0\0\0\x07\x10\0\0\xff\xff\xff\xf4\
+\xb7\x20\0\0\0\0\0\x04\x85\0\0\0\0\0\0\x71\xb7\x30\0\0\0\0\0\x04\x61\x1a\xff\
+\xf4\0\0\0\0\x61\x28\0\x10\0\0\0\0\x3d\x12\0\x02\0\0\0\0\x0f\x61\0\0\0\0\0\0\
+\xbf\x96\0\0\0\0\0\0\x7b\xa9\xff\xd8\0\0\0\0\x79\x17\0\x18\0\0\0\0\x7b\xa1\xff\
+\xe0\0\0\0\0\x79\x17\0\x20\0\0\0\0\x79\x11\0\0\0\0\0\0\x0f\x13\0\0\0\0\0\0\x7b\
+\xa1\xff\xe8\0\0\0\0\xbf\x4a\0\0\0\0\0\0\x07\x40\0\0\xff\xff\xff\xd0\x79\x1a\
+\xff\xc8\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\0\x51\xb7\x30\0\0\0\0\0\x11\
+\xb7\x50\0\0\0\0\0\x20\x85\0\0\0\0\0\0\x7e\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\x17\0\0\0\0\0\0\0\x42\0\0\0\x9a\0\x01\x80\x1e\0\0\0\x01\0\0\0\
+\x42\0\0\0\x9a\0\x01\x80\x24\0\0\0\x02\0\0\0\x42\0\0\x02\x7f\0\x01\x88\x1f\0\0\
+\0\x03\0\0\0\x42\0\0\x02\xa3\0\x01\x94\x06\0\0\0\x04\0\0\0\x42\0\0\x02\xbc\0\
+\x01\xa0\x0e\0\0\0\x05\0\0\0\x42\0\0\x01\x3d\0\x01\x84\x1d\0\0\0\x06\0\0\0\x42\
+\0\0\x01\x62\0\x01\xa4\x06\0\0\0\x08\0\0\0\x42\0\0\x02\xce\0\x01\xa8\x03\0\0\0\
+\x10\0\0\0\x42\0\0\x03\x3e\0\x01\xb0\x02\0\0\0\x17\0\0\0\x42\0\0\x03\x79\0\x01\
+\x04\x06\0\0\0\x1a\0\0\0\x42\0\0\x03\x3e\0\x01\xb0\x02\0\0\0\x1b\0\0\0\x42\0\0\
+\x03\xca\0\x01\x10\x0f\0\0\0\x1c\0\0\0\x42\0\0\x03\xdf\0\x01\x14\x2d\0\0\0\x1e\
+\0\0\0\x42\0\0\x04\x16\0\x01\x0c\x0d\0\0\0\x20\0\0\0\x42\0\0\x03\x3e\0\x01\xb0\
+\x02\0\0\0\x21\0\0\0\x42\0\0\x03\xdf\0\x01\x14\x02\0\0\0\x24\0\0\0\x42\0\0\x04\
+\x3d\0\x01\x18\x0d\0\0\0\x27\0\0\0\x42\0\0\x03\x3e\0\x01\xb0\x02\0\0\0\x28\0\0\
+\0\x42\0\0\x04\x3d\0\x01\x18\x0d\0\0\0\x2b\0\0\0\x42\0\0\x04\x3d\0\x01\x18\x0d\
+\0\0\0\x2c\0\0\0\x42\0\0\x04\x6b\0\x01\x1c\x1b\0\0\0\x2d\0\0\0\x42\0\0\x04\x6b\
+\0\x01\x1c\x06\0\0\0\x2e\0\0\0\x42\0\0\x04\x8e\0\x01\x24\x0d\0\0\0\x30\0\0\0\
+\x42\0\0\x03\x3e\0\x01\xb0\x02\0\0\0\x3f\0\0\0\x42\0\0\x02\x49\0\x01\xc0\x01\0\
+\0\0\0\0\0\0\x14\0\0\0\x3e\0\0\0\0\0\0\0\x08\0\0\0\x08\0\0\0\x3e\0\0\0\0\0\0\0\
+\x10\0\0\0\x14\0\0\x01\x09\0\0\0\0\0\0\0\x20\0\0\0\x18\0\0\0\x3e\0\0\0\0\0\0\0\
+\x28\0\0\0\x08\0\0\x01\x39\0\0\0\0\0\0\0\x80\0\0\0\x1a\0\0\0\x3e\0\0\0\0\0\0\0\
+\x90\0\0\0\x1a\0\0\x01\x09\0\0\0\0\0\0\0\xa8\0\0\0\x1a\0\0\x03\x71\0\0\0\0\0\0\
+\0\xb0\0\0\0\x1a\0\0\x03\x75\0\0\0\0\0\0\0\xc0\0\0\0\x1f\0\0\x03\xa3\0\0\0\0\0\
+\0\0\xd8\0\0\0\x20\0\0\x01\x09\0\0\0\0\0\0\0\xf0\0\0\0\x20\0\0\0\x3e\0\0\0\0\0\
+\0\x01\x18\0\0\0\x24\0\0\0\x3e\0\0\0\0\0\0\x01\x50\0\0\0\x1a\0\0\x01\x09\0\0\0\
+\0\0\0\x01\x60\0\0\0\x20\0\0\x04\x65\0\0\0\0\0\0\x01\x88\0\0\0\x1a\0\0\x01\x39\
+\0\0\0\0\0\0\x01\x98\0\0\0\x1a\0\0\x04\xa6\0\0\0\0\0\0\x01\xa0\0\0\0\x18\0\0\0\
+\x3e\0\0\0\0\0\0\0\x1a\0\0\0\x41\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\
+\x6f\x67\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\x01\0\
+\0\0\x10\0\0\0\0\0\0\0\0\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x12\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\x62\x70\
+\x66\x5f\x70\x72\x6f\x67\0\0\0\0\0\0\0";
+ opts.insns_sz = 2216;
+ opts.insns = (void *)"\
+\xbf\x61\0\0\0\0\0\0\xbf\x1a\0\0\0\0\0\0\x07\x10\0\0\xff\xff\xff\x78\xb7\x20\0\
+\0\0\0\0\x88\xb7\x30\0\0\0\0\0\0\x85\0\0\0\0\0\0\x71\x05\0\0\x14\0\0\0\0\x61\
+\x1a\xff\x78\0\0\0\0\xd5\x10\0\x01\0\0\0\0\x85\0\0\0\0\0\0\xa8\x61\x1a\xff\x7c\
+\0\0\0\0\xd5\x10\0\x01\0\0\0\0\x85\0\0\0\0\0\0\xa8\x61\x1a\xff\x80\0\0\0\0\xd5\
+\x10\0\x01\0\0\0\0\x85\0\0\0\0\0\0\xa8\x61\x1a\xff\x84\0\0\0\0\xd5\x10\0\x01\0\
+\0\0\0\x85\0\0\0\0\0\0\xa8\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\x10\0\0\0\0\
+\0\0\xd5\x10\0\x02\0\0\0\0\xbf\x91\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa8\xbf\x07\0\0\
+\0\0\0\0\x95\0\0\0\0\0\0\0\x61\x06\0\x08\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\
+\0\x0e\x68\x63\x10\0\0\0\0\0\0\x61\x06\0\x0c\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\
+\0\0\0\x0e\x64\x63\x10\0\0\0\0\0\0\x79\x06\0\x10\0\0\0\0\x18\x16\0\0\0\0\0\0\0\
+\0\0\0\0\0\x0e\x58\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x05\0\
+\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0e\x50\x7b\x10\0\0\0\0\0\0\xb7\x10\0\0\0\0\0\
+\x12\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\x0e\x50\xb7\x30\0\0\0\0\0\x1c\x85\0\0\0\0\
+\0\0\xa6\xbf\x70\0\0\0\0\0\0\xc5\x70\xff\xd4\0\0\0\0\x63\xa7\xff\x78\0\0\0\0\
+\x61\x0a\xff\x78\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0e\xa0\x63\x10\0\0\0\
+\0\0\0\x61\x06\0\x1c\0\0\0\0\x15\0\0\x03\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\
+\0\x0e\x7c\x63\x10\0\0\0\0\0\0\xb7\x10\0\0\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\
+\0\0\x0e\x70\xb7\x30\0\0\0\0\0\x48\x85\0\0\0\0\0\0\xa6\xbf\x70\0\0\0\0\0\0\xc5\
+\x70\xff\xc3\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x63\x17\0\0\0\0\0\0\
+\x79\x36\0\x20\0\0\0\0\x15\x30\0\x08\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\
+\x0e\xb8\xb7\x20\0\0\0\0\0\x62\x61\x06\0\x04\0\0\0\0\x45\0\0\x02\0\0\0\x01\x85\
+\0\0\0\0\0\0\x94\x05\0\0\x01\0\0\0\0\x85\0\0\0\0\0\0\x71\x18\x26\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\x61\x02\0\0\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x28\x63\
+\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x20\x18\x16\0\0\0\0\0\0\0\
+\0\0\0\0\0\x0f\x30\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x0e\xb8\
+\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x38\x7b\x10\0\0\0\0\0\0\xb7\x10\0\0\0\0\0\
+\x02\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x28\xb7\x30\0\0\0\0\0\x20\x85\0\0\0\0\
+\0\0\xa6\xbf\x70\0\0\0\0\0\0\xc5\x70\xff\x9f\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\x61\x02\0\0\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x48\x63\x10\
+\0\0\0\0\0\0\xb7\x10\0\0\0\0\0\x16\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x48\xb7\
+\x30\0\0\0\0\0\x04\x85\0\0\0\0\0\0\xa6\xbf\x70\0\0\0\0\0\0\xc5\x70\xff\x92\0\0\
+\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x50\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\
+\x11\x70\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x58\x18\x16\0\
+\0\0\0\0\0\0\0\0\0\0\0\x11\x68\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\
+\0\0\x10\x58\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xb0\x7b\x10\0\0\0\0\0\0\x18\
+\x06\0\0\0\0\0\0\0\0\0\0\0\0\x10\x60\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xc0\
+\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x10\xf0\x18\x16\0\0\0\0\0\
+\0\0\0\0\0\0\0\x11\xe0\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xd8\x7b\x10\0\0\0\0\0\0\x61\x06\0\x08\0\0\
+\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\x78\x63\x10\0\0\0\0\0\0\x61\x06\0\x0c\
+\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\x7c\x63\x10\0\0\0\0\0\0\x79\x06\0\
+\x10\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\x80\x7b\x10\0\0\0\0\0\0\x61\
+\x0a\xff\x78\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xa8\x63\x10\0\0\0\0\0\
+\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xf0\xb7\x20\0\0\0\0\0\x11\xb7\x30\0\0\0\
+\0\0\x0c\xb7\x40\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa7\xbf\x70\0\0\0\0\0\0\xc5\x70\
+\xff\x5c\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x11\x60\x63\x07\0\x6c\0\0\0\0\
+\x77\x70\0\0\0\0\0\x20\x63\x07\0\x70\0\0\0\0\xb7\x10\0\0\0\0\0\x05\x18\x26\0\0\
+\0\0\0\0\0\0\0\0\0\0\x11\x60\xb7\x30\0\0\0\0\0\x8c\x85\0\0\0\0\0\0\xa6\xbf\x70\
+\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x11\xd0\x61\x10\0\0\0\0\0\0\xd5\
+\x10\0\x02\0\0\0\0\xbf\x91\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa8\xc5\x70\xff\x4a\0\0\
+\0\0\x63\xa7\xff\x80\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x12\x08\x18\x16\0\
+\0\0\0\0\0\0\0\0\0\0\0\x16\xe0\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\
+\0\0\x12\x10\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x16\xd8\x7b\x10\0\0\0\0\0\0\x18\
+\x06\0\0\0\0\0\0\0\0\0\0\0\0\x14\x18\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\x20\
+\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x14\x20\x18\x16\0\0\0\0\0\
+\0\0\0\0\0\0\0\x17\x30\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x15\
+\xb0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\x50\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\x48\x7b\x10\0\0\0\0\
+\0\0\x61\x06\0\x08\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x16\xe8\x63\x10\0\0\
+\0\0\0\0\x61\x06\0\x0c\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x16\xec\x63\x10\
+\0\0\0\0\0\0\x79\x06\0\x10\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x16\xf0\x7b\
+\x10\0\0\0\0\0\0\x61\x0a\xff\x78\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\
+\x18\x63\x10\0\0\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\x60\xb7\x20\0\0\0\
+\0\0\x12\xb7\x30\0\0\0\0\0\x0c\xb7\x40\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa7\xbf\x70\
+\0\0\0\0\0\0\xc5\x70\xff\x13\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x16\xd0\
+\x63\x07\0\x6c\0\0\0\0\x77\x70\0\0\0\0\0\x20\x63\x07\0\x70\0\0\0\0\xb7\x10\0\0\
+\0\0\0\x05\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\x16\xd0\xb7\x30\0\0\0\0\0\x8c\x85\0\
+\0\0\0\0\0\xa6\xbf\x70\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x17\x40\x61\
+\x10\0\0\0\0\0\0\xd5\x10\0\x02\0\0\0\0\xbf\x91\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa8\
+\xc5\x70\xff\x01\0\0\0\0\x63\xa7\xff\x84\0\0\0\0\x61\x1a\xff\x78\0\0\0\0\xd5\
+\x10\0\x02\0\0\0\0\xbf\x91\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa8\x61\x0a\xff\x80\0\0\
+\0\0\x63\x60\0\x28\0\0\0\0\x61\x0a\xff\x84\0\0\0\0\x63\x60\0\x2c\0\0\0\0\x18\
+\x16\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\x01\0\0\0\0\0\0\x63\x60\0\x18\0\0\0\0\xb7\
+\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0";
+ err = bpf_load_and_run(&opts);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static inline struct iterators_bpf *
+iterators_bpf__open_and_load(void)
+{
+ struct iterators_bpf *skel;
+
+ skel = iterators_bpf__open();
+ if (!skel)
+ return NULL;
+ if (iterators_bpf__load(skel)) {
+ iterators_bpf__destroy(skel);
+ return NULL;
+ }
+ return skel;
+}
+
+__attribute__((unused)) static void
+iterators_bpf__assert(struct iterators_bpf *s __attribute__((unused)))
+{
+#ifdef __cplusplus
+#define _Static_assert static_assert
+#endif
+#ifdef __cplusplus
+#undef _Static_assert
+#endif
+}
+
+#endif /* __ITERATORS_BPF_SKEL_H__ */
diff --git a/kernel/bpf/preload/iterators/iterators.lskel.h b/kernel/bpf/preload/iterators/iterators.lskel-little-endian.h
index 70f236a82fe1..70f236a82fe1 100644
--- a/kernel/bpf/preload/iterators/iterators.lskel.h
+++ b/kernel/bpf/preload/iterators/iterators.lskel-little-endian.h
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ecca9366c7a6..e3fcdc9836a6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -181,7 +181,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
int err;
/* Need to create a kthread, thus must support schedule */
- if (bpf_map_is_dev_bound(map)) {
+ if (bpf_map_is_offloaded(map)) {
return bpf_map_offload_update_elem(map, key, value, flags);
} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
@@ -238,7 +238,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
void *ptr;
int err;
- if (bpf_map_is_dev_bound(map))
+ if (bpf_map_is_offloaded(map))
return bpf_map_offload_lookup_elem(map, key, value);
bpf_disable_instrumentation();
@@ -309,7 +309,7 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
* __GFP_RETRY_MAYFAIL to avoid such situations.
*/
- const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
+ gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
unsigned int flags = 0;
unsigned long align = 1;
void *area;
@@ -390,7 +390,7 @@ static int bpf_map_alloc_id(struct bpf_map *map)
return id > 0 ? 0 : id;
}
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map)
{
unsigned long flags;
@@ -402,18 +402,12 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
if (!map->id)
return;
- if (do_idr_lock)
- spin_lock_irqsave(&map_idr_lock, flags);
- else
- __acquire(&map_idr_lock);
+ spin_lock_irqsave(&map_idr_lock, flags);
idr_remove(&map_idr, map->id);
map->id = 0;
- if (do_idr_lock)
- spin_unlock_irqrestore(&map_idr_lock, flags);
- else
- __release(&map_idr_lock);
+ spin_unlock_irqrestore(&map_idr_lock, flags);
}
#ifdef CONFIG_MEMCG_KMEM
@@ -424,7 +418,8 @@ static void bpf_map_save_memcg(struct bpf_map *map)
 * So we have to check map->objcg for NULL each time it is
 * used.
*/
- map->objcg = get_obj_cgroup_from_current();
+ if (memcg_bpf_enabled())
+ map->objcg = get_obj_cgroup_from_current();
}
static void bpf_map_release_memcg(struct bpf_map *map)
@@ -470,6 +465,21 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
return ptr;
}
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+ gfp_t flags)
+{
+ struct mem_cgroup *memcg, *old_memcg;
+ void *ptr;
+
+ memcg = bpf_map_get_memcg(map);
+ old_memcg = set_active_memcg(memcg);
+ ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
+ set_active_memcg(old_memcg);
+ mem_cgroup_put(memcg);
+
+ return ptr;
+}
+
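Like bpf_map_kzalloc() above, this charges the allocation to the map's memory cgroup; callers use it exactly as they would kvcalloc(). An illustrative caller, where struct my_elem and n are placeholders:

	struct my_elem *elems;

	elems = bpf_map_kvcalloc(map, n, sizeof(*elems), GFP_KERNEL);
	if (!elems)
		return -ENOMEM;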
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
size_t align, gfp_t flags)
{
@@ -527,9 +537,6 @@ void btf_record_free(struct btf_record *rec)
return;
for (i = 0; i < rec->cnt; i++) {
switch (rec->fields[i].type) {
- case BPF_SPIN_LOCK:
- case BPF_TIMER:
- break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
if (rec->fields[i].kptr.module)
@@ -538,7 +545,11 @@ void btf_record_free(struct btf_record *rec)
break;
case BPF_LIST_HEAD:
case BPF_LIST_NODE:
- /* Nothing to release for bpf_list_head */
+ case BPF_RB_ROOT:
+ case BPF_RB_NODE:
+ case BPF_SPIN_LOCK:
+ case BPF_TIMER:
+ /* Nothing to release */
break;
default:
WARN_ON_ONCE(1);
@@ -571,9 +582,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
new_rec->cnt = 0;
for (i = 0; i < rec->cnt; i++) {
switch (fields[i].type) {
- case BPF_SPIN_LOCK:
- case BPF_TIMER:
- break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
btf_get(fields[i].kptr.btf);
@@ -584,7 +592,11 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
break;
case BPF_LIST_HEAD:
case BPF_LIST_NODE:
- /* Nothing to acquire for bpf_list_head */
+ case BPF_RB_ROOT:
+ case BPF_RB_NODE:
+ case BPF_SPIN_LOCK:
+ case BPF_TIMER:
+ /* Nothing to acquire */
break;
default:
ret = -EFAULT;
@@ -664,7 +676,13 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
continue;
bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
break;
+ case BPF_RB_ROOT:
+ if (WARN_ON_ONCE(rec->spin_lock_off < 0))
+ continue;
+ bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
+ break;
case BPF_LIST_NODE:
+ case BPF_RB_NODE:
break;
default:
WARN_ON_ONCE(1);
@@ -706,13 +724,13 @@ static void bpf_map_put_uref(struct bpf_map *map)
}
/* decrement map refcnt and schedule it for freeing via workqueue
- * (unrelying map implementation ops->map_free() might sleep)
+ * (underlying map implementation ops->map_free() might sleep)
*/
-static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_put(struct bpf_map *map)
{
if (atomic64_dec_and_test(&map->refcnt)) {
/* bpf_map_free_id() must be called first */
- bpf_map_free_id(map, do_idr_lock);
+ bpf_map_free_id(map);
btf_put(map->btf);
INIT_WORK(&map->work, bpf_map_free_deferred);
/* Avoid spawning kworkers, since they all might contend
@@ -721,11 +739,6 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
queue_work(system_unbound_wq, &map->work);
}
}
-
-void bpf_map_put(struct bpf_map *map)
-{
- __bpf_map_put(map, true);
-}
EXPORT_SYMBOL_GPL(bpf_map_put);
void bpf_map_put_with_uref(struct bpf_map *map)
@@ -1005,7 +1018,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
return -EINVAL;
map->record = btf_parse_fields(btf, value_type,
- BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD,
+ BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
+ BPF_RB_ROOT,
map->value_size);
if (!IS_ERR_OR_NULL(map->record)) {
int i;
@@ -1053,6 +1067,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
}
break;
case BPF_LIST_HEAD:
+ case BPF_RB_ROOT:
if (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
map->map_type != BPF_MAP_TYPE_ARRAY) {
@@ -1483,7 +1498,7 @@ static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
goto err_put;
}
- if (bpf_map_is_dev_bound(map)) {
+ if (bpf_map_is_offloaded(map)) {
err = bpf_map_offload_delete_elem(map, key);
goto out;
} else if (IS_FD_PROG_ARRAY(map) ||
@@ -1547,7 +1562,7 @@ static int map_get_next_key(union bpf_attr *attr)
if (!next_key)
goto free_key;
- if (bpf_map_is_dev_bound(map)) {
+ if (bpf_map_is_offloaded(map)) {
err = bpf_map_offload_get_next_key(map, key, next_key);
goto out;
}
@@ -1605,7 +1620,7 @@ int generic_map_delete_batch(struct bpf_map *map,
map->key_size))
break;
- if (bpf_map_is_dev_bound(map)) {
+ if (bpf_map_is_offloaded(map)) {
err = bpf_map_offload_delete_elem(map, key);
break;
}
@@ -1851,7 +1866,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
- if (!bpf_map_is_dev_bound(map)) {
+ if (!bpf_map_is_offloaded(map)) {
bpf_disable_instrumentation();
rcu_read_lock();
err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
@@ -1944,7 +1959,7 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
if (!ops)
return -EINVAL;
- if (!bpf_prog_is_dev_bound(prog->aux))
+ if (!bpf_prog_is_offloaded(prog->aux))
prog->aux->ops = ops;
else
prog->aux->ops = &bpf_offload_prog_ops;
@@ -2245,7 +2260,7 @@ bool bpf_prog_get_ok(struct bpf_prog *prog,
if (prog->type != *attach_type)
return false;
- if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
+ if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
return false;
return true;
@@ -2481,7 +2496,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
BPF_F_TEST_STATE_FREQ |
BPF_F_SLEEPABLE |
BPF_F_TEST_RND_HI32 |
- BPF_F_XDP_HAS_FRAGS))
+ BPF_F_XDP_HAS_FRAGS |
+ BPF_F_XDP_DEV_BOUND_ONLY))
return -EINVAL;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
@@ -2565,7 +2581,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
prog->aux->attach_btf = attach_btf;
prog->aux->attach_btf_id = attr->attach_btf_id;
prog->aux->dst_prog = dst_prog;
- prog->aux->offload_requested = !!attr->prog_ifindex;
+ prog->aux->dev_bound = !!attr->prog_ifindex;
prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
@@ -2589,7 +2605,14 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
prog->gpl_compatible = is_gpl ? 1 : 0;
if (bpf_prog_is_dev_bound(prog->aux)) {
- err = bpf_prog_offload_init(prog, attr);
+ err = bpf_prog_dev_bound_init(prog, attr);
+ if (err)
+ goto free_prog_sec;
+ }
+
+ if (type == BPF_PROG_TYPE_EXT && dst_prog &&
+ bpf_prog_is_dev_bound(dst_prog->aux)) {
+ err = bpf_prog_dev_bound_inherit(prog, dst_prog);
if (err)
goto free_prog_sec;
}
@@ -3987,7 +4010,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
return -EFAULT;
}
- if (bpf_prog_is_dev_bound(prog->aux)) {
+ if (bpf_prog_is_offloaded(prog->aux)) {
err = bpf_prog_offload_info_fill(&info, prog);
if (err)
return err;
@@ -4215,7 +4238,7 @@ static int bpf_map_get_info_by_fd(struct file *file,
}
info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
- if (bpf_map_is_dev_bound(map)) {
+ if (bpf_map_is_offloaded(map)) {
err = bpf_map_offload_info_fill(&info, map);
if (err)
return err;
@@ -5309,7 +5332,6 @@ static struct ctl_table bpf_syscall_table[] = {
{
.procname = "bpf_stats_enabled",
.data = &bpf_stats_enabled_key.key,
- .maxlen = sizeof(bpf_stats_enabled_key),
.mode = 0644,
.proc_handler = bpf_stats_handler,
},
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 7ee218827259..272563a0b770 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -190,6 +190,10 @@ struct bpf_verifier_stack_elem {
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
+static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
+static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
+static int ref_set_non_owning(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg);
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
@@ -255,6 +259,7 @@ struct bpf_call_arg_meta {
int mem_size;
u64 msize_max_value;
int ref_obj_id;
+ int dynptr_id;
int map_uid;
int func_id;
struct btf *btf;
@@ -456,6 +461,11 @@ static bool type_is_ptr_alloc_obj(u32 type)
return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}
+static bool type_is_non_owning_ref(u32 type)
+{
+ return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
+}
+
static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
{
struct btf_record *rec = NULL;
@@ -638,31 +648,57 @@ static void print_liveness(struct bpf_verifier_env *env,
verbose(env, "D");
}
-static int get_spi(s32 off)
+static int __get_spi(s32 off)
{
return (-off - 1) / BPF_REG_SIZE;
}
+static struct bpf_func_state *func(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg)
+{
+ struct bpf_verifier_state *cur = env->cur_state;
+
+ return cur->frame[reg->frameno];
+}
+
static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{
- int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
+ int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
- /* We need to check that slots between [spi - nr_slots + 1, spi] are
- * within [0, allocated_stack).
- *
- * Please note that the spi grows downwards. For example, a dynptr
- * takes the size of two stack slots; the first slot will be at
- * spi and the second slot will be at spi - 1.
- */
- return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
+ /* We need to check that slots between [spi - nr_slots + 1, spi] are
+ * within [0, allocated_stack).
+ *
+ * Please note that the spi grows downwards. For example, a dynptr
+ * takes the size of two stack slots; the first slot will be at
+ * spi and the second slot will be at spi - 1.
+ */
+ return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
}
-static struct bpf_func_state *func(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg)
+static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
- struct bpf_verifier_state *cur = env->cur_state;
+ int off, spi;
- return cur->frame[reg->frameno];
+ if (!tnum_is_const(reg->var_off)) {
+ verbose(env, "dynptr has to be at a constant offset\n");
+ return -EINVAL;
+ }
+
+ off = reg->off + reg->var_off.value;
+ if (off % BPF_REG_SIZE) {
+ verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
+ return -EINVAL;
+ }
+
+ spi = __get_spi(off);
+ if (spi < 1) {
+ verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
+ return -EINVAL;
+ }
+
+ if (!is_spi_bounds_valid(func(env, reg), spi, BPF_DYNPTR_NR_SLOTS))
+ return -ERANGE;
+ return spi;
}
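
[Editor's note: a quick worked example of the slot arithmetic, with BPF_REG_SIZE == 8.]

/* A dynptr at fp-16: off = -16, __get_spi(-16) = (16 - 1) / 8 = 1, so its
 * first slot is spi = 1 and its second slot is spi - 1 = 0. A dynptr at
 * fp-8 would yield spi = 0 and is rejected above (spi < 1), since there
 * would be no room for the second slot.
 */
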
static const char *kernel_type_name(const struct btf* btf, u32 id)
@@ -727,37 +763,58 @@ static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
static void __mark_dynptr_reg(struct bpf_reg_state *reg,
enum bpf_dynptr_type type,
- bool first_slot);
+ bool first_slot, int dynptr_id);
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);
-static void mark_dynptr_stack_regs(struct bpf_reg_state *sreg1,
+static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
+ struct bpf_reg_state *sreg1,
struct bpf_reg_state *sreg2,
enum bpf_dynptr_type type)
{
- __mark_dynptr_reg(sreg1, type, true);
- __mark_dynptr_reg(sreg2, type, false);
+ int id = ++env->id_gen;
+
+ __mark_dynptr_reg(sreg1, type, true, id);
+ __mark_dynptr_reg(sreg2, type, false, id);
}
-static void mark_dynptr_cb_reg(struct bpf_reg_state *reg,
+static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg,
enum bpf_dynptr_type type)
{
- __mark_dynptr_reg(reg, type, true);
+ __mark_dynptr_reg(reg, type, true, ++env->id_gen);
}
+static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
+ struct bpf_func_state *state, int spi);
static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
enum bpf_arg_type arg_type, int insn_idx)
{
struct bpf_func_state *state = func(env, reg);
enum bpf_dynptr_type type;
- int spi, i, id;
-
- spi = get_spi(reg->off);
-
- if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
- return -EINVAL;
+ int spi, i, id, err;
+
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+
+ /* We cannot assume both spi and spi - 1 belong to the same dynptr,
+ * hence we need to call destroy_if_dynptr_stack_slot twice for both
+ * slots. Consider the following layout:
+ * [d1][d1][d2][d2]
+ * spi 3 2 1 0
+ * Marking spi = 2 must lead to destruction of both d1 and d2. In case
+ * both slots do belong to the same dynptr, the second call won't see
+ * slot_type as STACK_DYNPTR and will simply skip destruction.
+ */
+ err = destroy_if_dynptr_stack_slot(env, state, spi);
+ if (err)
+ return err;
+ err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
+ if (err)
+ return err;
for (i = 0; i < BPF_REG_SIZE; i++) {
state->stack[spi].slot_type[i] = STACK_DYNPTR;
@@ -768,7 +825,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
if (type == BPF_DYNPTR_TYPE_INVALID)
return -EINVAL;
- mark_dynptr_stack_regs(&state->stack[spi].spilled_ptr,
+ mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
&state->stack[spi - 1].spilled_ptr, type);
if (dynptr_type_refcounted(type)) {
@@ -781,6 +838,9 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
}
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
return 0;
}
@@ -789,10 +849,9 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
struct bpf_func_state *state = func(env, reg);
int spi, i;
- spi = get_spi(reg->off);
-
- if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
- return -EINVAL;
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
for (i = 0; i < BPF_REG_SIZE; i++) {
state->stack[spi].slot_type[i] = STACK_INVALID;
@@ -805,43 +864,133 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
+
+ /* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
+ *
+ * While we don't allow reading STACK_INVALID, it is still possible to
+ * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
+ * helpers or insns can do partial read of that part without failing,
+ * but check_stack_range_initialized, check_stack_read_var_off, and
+ * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
+ * the slot conservatively. Hence we need to prevent those liveness
+ * marking walks.
+ *
+ * This was not a problem before because STACK_INVALID is only set by
+ * default (where the default reg state has its reg->parent as NULL), or
+ * in clean_live_states after REG_LIVE_DONE (at which point
+ * mark_reg_read won't walk reg->parent chain), but not randomly during
+ * verifier state exploration (like we did above). Hence, in our case
+ * the parentage chain will still be live (i.e. reg->parent may be
+ * non-NULL) where earlier it was NULL, so we need REG_LIVE_WRITTEN to
+ * screen off read marker propagation, whether it happens later on
+ * reads or through mark_dynptr_read, which would otherwise
+ * unnecessarily mark registers in the verifier state.
+ */
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
return 0;
}
-static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg);
+
+static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
+ struct bpf_func_state *state, int spi)
{
- struct bpf_func_state *state = func(env, reg);
- int spi, i;
+ struct bpf_func_state *fstate;
+ struct bpf_reg_state *dreg;
+ int i, dynptr_id;
- if (reg->type == CONST_PTR_TO_DYNPTR)
- return false;
+ /* We always ensure that STACK_DYNPTR is never set partially,
+ * hence just checking for slot_type[0] is enough. This is
+ * different for STACK_SPILL, where it may be only set for
+ * 1 byte, so code has to use is_spilled_reg.
+ */
+ if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
+ return 0;
- spi = get_spi(reg->off);
- if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
- return true;
+ /* Reposition spi to first slot */
+ if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
+ spi = spi + 1;
+ if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
+ verbose(env, "cannot overwrite referenced dynptr\n");
+ return -EINVAL;
+ }
+
+ mark_stack_slot_scratched(env, spi);
+ mark_stack_slot_scratched(env, spi - 1);
+
+ /* Writing partially to one dynptr stack slot destroys both. */
for (i = 0; i < BPF_REG_SIZE; i++) {
- if (state->stack[spi].slot_type[i] == STACK_DYNPTR ||
- state->stack[spi - 1].slot_type[i] == STACK_DYNPTR)
- return false;
+ state->stack[spi].slot_type[i] = STACK_INVALID;
+ state->stack[spi - 1].slot_type[i] = STACK_INVALID;
}
+ dynptr_id = state->stack[spi].spilled_ptr.id;
+ /* Invalidate any slices associated with this dynptr */
+ bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
+ /* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
+ if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
+ continue;
+ if (dreg->dynptr_id == dynptr_id) {
+ if (!env->allow_ptr_leaks)
+ __mark_reg_not_init(env, dreg);
+ else
+ __mark_reg_unknown(env, dreg);
+ }
+ }));
+
+ /* Do not release reference state: we are destroying the dynptr on the
+ * stack, not using some helper to release it. Just reset the register.
+ */
+ __mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
+ __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
+
+ /* Same reason as unmark_stack_slots_dynptr above */
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+ return 0;
+}
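
[Editor's note: on the BPF program side, the pattern this handles looks roughly like the sketch below; bpf_dynptr_from_mem() and bpf_dynptr_data() are existing helpers, the buffer is hypothetical.]

char buf[16];
struct bpf_dynptr ptr;
void *slice;

bpf_dynptr_from_mem(buf, sizeof(buf), 0, &ptr);
slice = bpf_dynptr_data(&ptr, 0, 8); /* slice inherits ptr's dynptr_id */
*(__u64 *)&ptr = 0;                  /* partial overwrite destroys the dynptr */
/* any later use of 'slice' is now rejected: it was marked unknown/not-init */
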
+
+static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ int spi)
+{
+ if (reg->type == CONST_PTR_TO_DYNPTR)
+ return false;
+
+ /* For -ERANGE (i.e. spi not falling into allocated stack slots), we
+ * will do check_mem_access to check and update stack bounds later, so
+ * return true for that case.
+ */
+ if (spi < 0)
+ return spi == -ERANGE;
+ /* We allow overwriting existing unreferenced STACK_DYNPTR slots, see
+ * mark_stack_slots_dynptr which calls destroy_if_dynptr_stack_slot to
+ * ensure dynptr objects at the slots we are touching are completely
+ * destructed before we reinitialize them for a new one. For referenced
+ * ones, destroy_if_dynptr_stack_slot returns an error early instead of
+ * delaying it until the end where the user will get "Unreleased
+ * reference" error.
+ */
return true;
}
-static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ int spi)
{
struct bpf_func_state *state = func(env, reg);
- int spi;
int i;
/* This already represents first slot of initialized bpf_dynptr */
if (reg->type == CONST_PTR_TO_DYNPTR)
return true;
- spi = get_spi(reg->off);
- if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
- !state->stack[spi].spilled_ptr.dynptr.first_slot)
+ if (spi < 0)
+ return false;
+ if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
return false;
for (i = 0; i < BPF_REG_SIZE; i++) {
@@ -868,7 +1017,9 @@ static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg
if (reg->type == CONST_PTR_TO_DYNPTR) {
return reg->dynptr.type == dynptr_type;
} else {
- spi = get_spi(reg->off);
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0)
+ return false;
return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
}
}
@@ -931,6 +1082,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
verbose_a("id=%d", reg->id);
if (reg->ref_obj_id)
verbose_a("ref_obj_id=%d", reg->ref_obj_id);
+ if (type_is_non_owning_ref(reg->type))
+ verbose_a("%s", "non_own_ref");
if (t != SCALAR_VALUE)
verbose_a("off=%d", reg->off);
if (type_is_pkt_pointer(t))
@@ -1404,9 +1557,11 @@ static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
*/
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
- /* Clear id, off, and union(map_ptr, range) */
+ /* Clear off and union(map_ptr, range) */
memset(((u8 *)reg) + sizeof(reg->type), 0,
offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
+ reg->id = 0;
+ reg->ref_obj_id = 0;
___mark_reg_known(reg, imm);
}
@@ -1447,7 +1602,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
}
static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
- bool first_slot)
+ bool first_slot, int dynptr_id)
{
/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
* callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
@@ -1455,6 +1610,8 @@ static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type ty
*/
__mark_reg_known_zero(reg);
reg->type = CONST_PTR_TO_DYNPTR;
+ /* Give each dynptr a unique id so slices can be associated with it. */
+ reg->id = dynptr_id;
reg->dynptr.type = type;
reg->dynptr.first_slot = first_slot;
}
@@ -1486,6 +1643,16 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
reg->type &= ~PTR_MAYBE_NULL;
}
+static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
+ struct btf_field_graph_root *ds_head)
+{
+ __mark_reg_known_zero(&regs[regno]);
+ regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
+ regs[regno].btf = ds_head->btf;
+ regs[regno].btf_id = ds_head->value_btf_id;
+ regs[regno].off = ds_head->node_offset;
+}
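
[Editor's note: the non-zero off matters to the program, since the returned pointer points at the embedded node field; the node type below is illustrative.]

/* struct node_data { long key; struct bpf_rb_node node; };
 *
 * n = bpf_rbtree_first(&groot);  // R0 has off = offsetof(node_data, node)
 * d = container_of(n, struct node_data, node);  // recover the object
 */
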
+
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
return type_is_pkt_pointer(reg->type);
@@ -1752,11 +1919,13 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{
/*
- * Clear type, id, off, and union(map_ptr, range) and
+ * Clear type, off, and union(map_ptr, range) and
* padding between 'type' and union
*/
memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
reg->type = SCALAR_VALUE;
+ reg->id = 0;
+ reg->ref_obj_id = 0;
reg->var_off = tnum_unknown;
reg->frameno = 0;
reg->precise = !env->bpf_capable;
@@ -2185,6 +2354,12 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
return -EINVAL;
}
+ if (bpf_dev_bound_kfunc_id(func_id)) {
+ err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
+ if (err)
+ return err;
+ }
+
desc = &tab->descs[tab->nr_descs++];
desc->func_id = func_id;
desc->imm = call_imm;
@@ -2386,6 +2561,32 @@ static int mark_reg_read(struct bpf_verifier_env *env,
return 0;
}
+static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ struct bpf_func_state *state = func(env, reg);
+ int spi, ret;
+
+ /* For CONST_PTR_TO_DYNPTR, it must have already been done by
+ * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
+ * check_kfunc_call.
+ */
+ if (reg->type == CONST_PTR_TO_DYNPTR)
+ return 0;
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+ /* Caller ensures dynptr is valid and initialized, which means spi is in
+ * bounds and spi is the first dynptr slot. Simply mark stack slot as
+ * read.
+ */
+ ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
+ state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
+ if (ret)
+ return ret;
+ return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
+ state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
+}
+
/* This function is supposed to be used by the following 32-bit optimization
* code only. It returns TRUE if the source or destination register operates
* on 64-bit, otherwise return FALSE.
@@ -3272,6 +3473,11 @@ static void save_register_state(struct bpf_func_state *state,
scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
}
+static bool is_bpf_st_mem(struct bpf_insn *insn)
+{
+ return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
+}
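
[Editor's note: in instruction form, the case now recognized is a store of an immediate to the stack, sketched here with the kernel's insn macros.]

/* *(u64 *)(r10 - 8) = 42
 *
 * BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42)
 *
 * Previously this left the slot as STACK_MISC; below it is tracked like a
 * spill of a known scalar (and imm == 0 marks the slot STACK_ZERO).
 */
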
+
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
@@ -3283,8 +3489,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
{
struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
- u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
+ struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
struct bpf_reg_state *reg = NULL;
+ u32 dst_reg = insn->dst_reg;
err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
if (err)
@@ -3318,6 +3525,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
}
+ err = destroy_if_dynptr_stack_slot(env, state, spi);
+ if (err)
+ return err;
+
mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
@@ -3333,6 +3544,13 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
return err;
}
save_register_state(state, spi, reg, size);
+ } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
+ insn->imm != 0 && env->bpf_capable) {
+ struct bpf_reg_state fake_reg = {};
+
+ __mark_reg_known(&fake_reg, (u32)insn->imm);
+ fake_reg.type = SCALAR_VALUE;
+ save_register_state(state, spi, &fake_reg, size);
} else if (reg && is_spillable_regtype(reg->type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -3367,7 +3585,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
/* when we zero initialize stack slots mark them as such */
- if (reg && register_is_null(reg)) {
+ if ((reg && register_is_null(reg)) ||
+ (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
/* backtracking doesn't work for STACK_ZERO yet. */
err = mark_chain_precision(env, value_regno);
if (err)
@@ -3412,6 +3631,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
int min_off, max_off;
int i, err;
struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
+ struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
bool writing_zero = false;
/* set if the fact that we're writing a zero is used to let any
* stack slots remain STACK_ZERO
@@ -3424,13 +3644,22 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
max_off = ptr_reg->smax_value + off + size;
if (value_regno >= 0)
value_reg = &cur->regs[value_regno];
- if (value_reg && register_is_null(value_reg))
+ if ((value_reg && register_is_null(value_reg)) ||
+ (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
writing_zero = true;
err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
if (err)
return err;
+ for (i = min_off; i < max_off; i++) {
+ int spi;
+
+ spi = __get_spi(i);
+ err = destroy_if_dynptr_stack_slot(env, state, spi);
+ if (err)
+ return err;
+ }
/* Variable offset writes destroy any spilled pointers in range. */
for (i = min_off; i < max_off; i++) {
@@ -4770,6 +4999,25 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
return 0;
}
+#define BTF_TYPE_SAFE_NESTED(__type) __PASTE(__type, __safe_fields)
+
+BTF_TYPE_SAFE_NESTED(struct task_struct) {
+ const cpumask_t *cpus_ptr;
+};
+
+static bool nested_ptr_is_trusted(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg,
+ int off)
+{
+ /* If its parent is not trusted, it can't regain its trusted status. */
+ if (!is_trusted_reg(reg))
+ return false;
+
+ BTF_TYPE_EMIT(BTF_TYPE_SAFE_NESTED(struct task_struct));
+
+ return btf_nested_type_is_trusted(&env->log, reg, off);
+}
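
[Editor's note: a program-side sketch of the safe-fields effect; the tracepoint choice is illustrative, and bpf_cpumask_test_cpu() is assumed from the new cpumask kfunc set in this merge.]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(on_newtask, struct task_struct *task, u64 clone_flags)
{
	/* task is trusted; cpus_ptr is in the safe-fields list, so the
	 * walked pointer keeps PTR_TRUSTED instead of being demoted.
	 */
	const struct cpumask *cpus = task->cpus_ptr;

	return bpf_cpumask_test_cpu(0, cpus);
}

char LICENSE[] SEC("license") = "GPL";
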
+
static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
struct bpf_reg_state *regs,
int regno, int off, int size,
@@ -4841,7 +5089,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
return -EACCES;
}
- if (type_is_alloc(reg->type) && !reg->ref_obj_id) {
+ if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
+ !reg->ref_obj_id) {
verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
return -EFAULT;
}
@@ -4858,10 +5107,17 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
if (type_flag(reg->type) & PTR_UNTRUSTED)
flag |= PTR_UNTRUSTED;
- /* By default any pointer obtained from walking a trusted pointer is
- * no longer trusted except the rcu case below.
+ /* By default any pointer obtained from walking a trusted pointer is no
+ * longer trusted, unless the field being accessed has explicitly been
+ * marked as inheriting its parent's state of trust.
+ *
+ * An RCU-protected pointer can also be deemed trusted if we are in an
+ * RCU read region. This case is handled below.
*/
- flag &= ~PTR_TRUSTED;
+ if (nested_ptr_is_trusted(env, reg, off))
+ flag |= PTR_TRUSTED;
+ else
+ flag &= ~PTR_TRUSTED;
if (flag & MEM_RCU) {
/* Mark value register as MEM_RCU only if it is protected by
@@ -5458,6 +5714,31 @@ static int check_stack_range_initialized(
}
if (meta && meta->raw_mode) {
+ /* Ensure we won't be overwriting dynptrs when simulating byte
+ * by byte access in check_helper_call using meta.access_size.
+ * This would be a problem if we have a helper in the future
+ * which takes:
+ *
+ * helper(uninit_mem, len, dynptr)
+ *
+ * Now, uninit_mem may overlap with the dynptr pointer. Hence, it
+ * may end up writing to the dynptr itself when touching memory
+ * from arg 1. This can be relaxed on a case by case basis for
+ * known safe cases, but reject by default due to the possibility
+ * of aliasing.
+ */
+ for (i = min_off; i < max_off + access_size; i++) {
+ int stack_off = -i - 1;
+
+ spi = __get_spi(i);
+ /* raw_mode may write past allocated_stack */
+ if (state->allocated_stack <= stack_off)
+ continue;
+ if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
+ verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
+ return -EACCES;
+ }
+ }
meta->access_size = access_size;
meta->regno = regno;
return 0;
@@ -5799,9 +6080,7 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
cur->active_lock.ptr = btf;
cur->active_lock.id = reg->id;
} else {
- struct bpf_func_state *fstate = cur_func(env);
void *ptr;
- int i;
if (map)
ptr = map;
@@ -5817,25 +6096,11 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
verbose(env, "bpf_spin_unlock of different lock\n");
return -EINVAL;
}
- cur->active_lock.ptr = NULL;
- cur->active_lock.id = 0;
- for (i = fstate->acquired_refs - 1; i >= 0; i--) {
- int err;
+ invalidate_non_owning_refs(env);
- /* Complain on error because this reference state cannot
- * be freed before this point, as bpf_spin_lock critical
- * section does not allow functions that release the
- * allocated object immediately.
- */
- if (!fstate->refs[i].release_on_unlock)
- continue;
- err = release_reference(env, fstate->refs[i].id);
- if (err) {
- verbose(env, "failed to release release_on_unlock reference");
- return err;
- }
- }
+ cur->active_lock.ptr = NULL;
+ cur->active_lock.id = 0;
}
return 0;
}
@@ -5945,6 +6210,7 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+ int spi = 0;
/* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
* ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
@@ -5955,12 +6221,14 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
}
/* CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
* check_func_arg_reg_off's logic. We only need to check offset
- * alignment for PTR_TO_STACK.
+ * and its alignment for PTR_TO_STACK.
*/
- if (reg->type == PTR_TO_STACK && (reg->off % BPF_REG_SIZE)) {
- verbose(env, "cannot pass in dynptr at an offset=%d\n", reg->off);
- return -EINVAL;
+ if (reg->type == PTR_TO_STACK) {
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0 && spi != -ERANGE)
+ return spi;
}
+
/* MEM_UNINIT - Points to memory that is an appropriate candidate for
* constructing a mutable bpf_dynptr object.
*
@@ -5977,7 +6245,7 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
* to.
*/
if (arg_type & MEM_UNINIT) {
- if (!is_dynptr_reg_valid_uninit(env, reg)) {
+ if (!is_dynptr_reg_valid_uninit(env, reg, spi)) {
verbose(env, "Dynptr has to be an uninitialized dynptr\n");
return -EINVAL;
}
@@ -5992,13 +6260,15 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
meta->uninit_dynptr_regno = regno;
} else /* MEM_RDONLY and None case from above */ {
+ int err;
+
/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
return -EINVAL;
}
- if (!is_dynptr_reg_valid_init(env, reg)) {
+ if (!is_dynptr_reg_valid_init(env, reg, spi)) {
verbose(env,
"Expected an initialized dynptr as arg #%d\n",
regno);
@@ -6025,6 +6295,10 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
err_extra, regno);
return -EINVAL;
}
+
+ err = mark_dynptr_read(env, reg);
+ if (err)
+ return err;
}
return 0;
}
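
[Editor's note: for reference, the MEM_UNINIT vs initialized split above maps onto program code like this sketch; it assumes a BPF_MAP_TYPE_RINGBUF map and a syscall tracepoint, both hypothetical.]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

SEC("tp/syscalls/sys_enter_getpid")
int produce(void *ctx)
{
	struct bpf_dynptr ptr; /* uninitialized: matches arg_type | MEM_UNINIT */
	__u64 val = 42;

	/* constructor initializes the dynptr; it must be released either way */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(val), 0, &ptr)) {
		bpf_ringbuf_discard_dynptr(&ptr, 0);
		return 0;
	}
	bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0); /* needs initialized dynptr */
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
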
@@ -6294,6 +6568,23 @@ found:
return 0;
}
+static struct btf_field *
+reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
+{
+ struct btf_field *field;
+ struct btf_record *rec;
+
+ rec = reg_btf_record(reg);
+ if (!rec)
+ return NULL;
+
+ field = btf_record_find(rec, off, fields);
+ if (!field)
+ return NULL;
+
+ return field;
+}
+
int check_func_arg_reg_off(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno,
enum bpf_arg_type arg_type)
@@ -6315,6 +6606,18 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
*/
if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
return 0;
+
+ if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) {
+ if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
+ return __check_ptr_off_reg(env, reg, regno, true);
+
+ verbose(env, "R%d must have zero offset when passed to release func\n",
+ regno);
+ verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
+ kernel_type_name(reg->btf, reg->btf_id), reg->off);
+ return -EINVAL;
+ }
+
/* Doing check_ptr_off_reg check for the offset will catch this
* because fixed_off_ok is false, but checking here allows us
* to give the user a better error message.
@@ -6349,6 +6652,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
case PTR_TO_BTF_ID | PTR_TRUSTED:
case PTR_TO_BTF_ID | MEM_RCU:
case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
+ case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
/* When referenced PTR_TO_BTF_ID is passed to release function,
* its fixed offset must be 0. In the other cases, fixed offset
* can be non-zero. This was already checked above. So pass
@@ -6362,15 +6666,29 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
}
}
-static u32 dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
struct bpf_func_state *state = func(env, reg);
int spi;
if (reg->type == CONST_PTR_TO_DYNPTR)
- return reg->ref_obj_id;
+ return reg->id;
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+ return state->stack[spi].spilled_ptr.id;
+}
- spi = get_spi(reg->off);
+static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ struct bpf_func_state *state = func(env, reg);
+ int spi;
+
+ if (reg->type == CONST_PTR_TO_DYNPTR)
+ return reg->ref_obj_id;
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
return state->stack[spi].spilled_ptr.ref_obj_id;
}
@@ -6444,9 +6762,8 @@ skip_type_check:
* PTR_TO_STACK.
*/
if (reg->type == PTR_TO_STACK) {
- spi = get_spi(reg->off);
- if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
- !state->stack[spi].spilled_ptr.ref_obj_id) {
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
verbose(env, "arg %d is an unacquired reference\n", regno);
return -EINVAL;
}
@@ -6547,6 +6864,10 @@ skip_type_check:
meta->ret_btf_id = reg->btf_id;
break;
case ARG_PTR_TO_SPIN_LOCK:
+ if (in_rbtree_lock_required_cb(env)) {
+ verbose(env, "can't spin_{lock,unlock} in rbtree cb\n");
+ return -EACCES;
+ }
if (meta->func_id == BPF_FUNC_spin_lock) {
err = process_spin_lock(env, regno, true);
if (err)
@@ -7098,6 +7419,17 @@ static int release_reference(struct bpf_verifier_env *env,
return 0;
}
+static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
+{
+ struct bpf_func_state *unused;
+ struct bpf_reg_state *reg;
+
+ bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
+ if (type_is_non_owning_ref(reg->type))
+ __mark_reg_unknown(env, reg);
+ }));
+}
+
static void clear_caller_saved_regs(struct bpf_verifier_env *env,
struct bpf_reg_state *regs)
{
@@ -7119,6 +7451,8 @@ static int set_callee_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee, int insn_idx);
+static bool is_callback_calling_kfunc(u32 btf_id);
+
static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx, int subprog,
set_callee_state_fn set_callee_state_cb)
@@ -7173,10 +7507,18 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
* interested in validating only BPF helpers that can call subprogs as
* callbacks
*/
- if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) {
- verbose(env, "verifier bug: helper %s#%d is not marked as callback-calling\n",
- func_id_name(insn->imm), insn->imm);
- return -EFAULT;
+ if (set_callee_state_cb != set_callee_state) {
+ if (bpf_pseudo_kfunc_call(insn) &&
+ !is_callback_calling_kfunc(insn->imm)) {
+ verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
+ func_id_name(insn->imm), insn->imm);
+ return -EFAULT;
+ } else if (!bpf_pseudo_kfunc_call(insn) &&
+ !is_callback_calling_function(insn->imm)) { /* helper */
+ verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
+ func_id_name(insn->imm), insn->imm);
+ return -EFAULT;
+ }
}
if (insn->code == (BPF_JMP | BPF_CALL) &&
@@ -7428,7 +7770,7 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
* callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx);
*/
__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
- mark_dynptr_cb_reg(&callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
+ mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
/* unused */
@@ -7441,6 +7783,63 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
return 0;
}
+static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee,
+ int insn_idx)
+{
+ /* void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
+ * bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
+ *
+ * 'struct bpf_rb_node *node' arg to bpf_rbtree_add is the same PTR_TO_BTF_ID w/ offset
+ * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
+ * by this point, so look at 'root' instead.
+ */
+ struct btf_field *field;
+
+ field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
+ BPF_RB_ROOT);
+ if (!field || !field->graph_root.value_btf_id)
+ return -EFAULT;
+
+ mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
+ ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
+ mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
+ ref_set_non_owning(env, &callee->regs[BPF_REG_2]);
+
+ __mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
+ __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
+ __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+ callee->in_callback_fn = true;
+ callee->callback_ret_range = tnum_range(0, 1);
+ return 0;
+}
+
+static bool is_rbtree_lock_required_kfunc(u32 btf_id);
+
+/* Are we currently verifying the callback for an rbtree helper that must
+ * be called with the lock held? If so, there is no need to complain about
+ * an unreleased lock.
+ */
+static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_insn *insn = env->prog->insnsi;
+ struct bpf_func_state *callee;
+ int kfunc_btf_id;
+
+ if (!state->curframe)
+ return false;
+
+ callee = state->frame[state->curframe];
+
+ if (!callee->in_callback_fn)
+ return false;
+
+ kfunc_btf_id = insn[callee->callsite].imm;
+ return is_rbtree_lock_required_kfunc(kfunc_btf_id);
+}
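
[Editor's note: putting the rbtree pieces together, a usage sketch in the style of the BPF selftests; private()/__contains()/bpf_obj_new()/container_of() are selftest macros from bpf_experimental.h and bpf_misc.h, and the node layout is illustrative.]

struct node_data {
	long key;
	struct bpf_rb_node node;
};

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	/* args are non-owning refs; spin_{lock,unlock} and bpf_rbtree_remove()
	 * are rejected inside this callback, per the checks above
	 */
	return container_of(a, struct node_data, node)->key <
	       container_of(b, struct node_data, node)->key;
}

SEC("tc")
int add_one(void *ctx)
{
	struct node_data *n = bpf_obj_new(typeof(*n)); /* owning ref */

	if (!n)
		return 0;
	n->key = 5;
	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less); /* n becomes non-owning */
	bpf_spin_unlock(&glock); /* all non-owning refs invalidated here */
	return 0;
}
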
+
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
@@ -7633,6 +8032,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
struct bpf_map *fmt_map = fmt_reg->map_ptr;
+ struct bpf_bprintf_data data = {};
int err, fmt_map_off, num_args;
u64 fmt_addr;
char *fmt;
@@ -7657,7 +8057,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
* can focus on validating the format specifiers.
*/
- err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
+ err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
if (err < 0)
verbose(env, "Invalid format string\n");
@@ -7933,13 +8333,32 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
if (arg_type_is_dynptr(fn->arg_type[i])) {
struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
+ int id, ref_obj_id;
+
+ if (meta.dynptr_id) {
+ verbose(env, "verifier internal error: meta.dynptr_id already set\n");
+ return -EFAULT;
+ }
if (meta.ref_obj_id) {
verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
return -EFAULT;
}
- meta.ref_obj_id = dynptr_ref_obj_id(env, reg);
+ id = dynptr_id(env, reg);
+ if (id < 0) {
+ verbose(env, "verifier internal error: failed to obtain dynptr id\n");
+ return id;
+ }
+
+ ref_obj_id = dynptr_ref_obj_id(env, reg);
+ if (ref_obj_id < 0) {
+ verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
+ return ref_obj_id;
+ }
+
+ meta.dynptr_id = id;
+ meta.ref_obj_id = ref_obj_id;
break;
}
}
@@ -8095,6 +8514,9 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EFAULT;
}
+ if (is_dynptr_ref_function(func_id))
+ regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
+
if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
@@ -8186,6 +8608,7 @@ struct bpf_kfunc_call_arg_meta {
bool r0_rdonly;
u32 ret_btf_id;
u64 r0_size;
+ u32 subprogno;
struct {
u64 value;
bool found;
@@ -8197,6 +8620,9 @@ struct bpf_kfunc_call_arg_meta {
struct {
struct btf_field *field;
} arg_list_head;
+ struct {
+ struct btf_field *field;
+ } arg_rbtree_root;
};
static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
@@ -8308,12 +8734,16 @@ enum {
KF_ARG_DYNPTR_ID,
KF_ARG_LIST_HEAD_ID,
KF_ARG_LIST_NODE_ID,
+ KF_ARG_RB_ROOT_ID,
+ KF_ARG_RB_NODE_ID,
};
BTF_ID_LIST(kf_arg_btf_ids)
BTF_ID(struct, bpf_dynptr_kern)
BTF_ID(struct, bpf_list_head)
BTF_ID(struct, bpf_list_node)
+BTF_ID(struct, bpf_rb_root)
+BTF_ID(struct, bpf_rb_node)
static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
const struct btf_param *arg, int type)
@@ -8347,6 +8777,28 @@ static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param
return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
}
+static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
+{
+ return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);
+}
+
+static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
+{
+ return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
+}
+
+static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
+ const struct btf_param *arg)
+{
+ const struct btf_type *t;
+
+ t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
+ if (!t)
+ return false;
+
+ return true;
+}
+
/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
const struct btf *btf,
@@ -8406,6 +8858,9 @@ enum kfunc_ptr_arg_type {
KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */
KF_ARG_PTR_TO_MEM,
KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next argument, skip it */
+ KF_ARG_PTR_TO_CALLBACK,
+ KF_ARG_PTR_TO_RB_ROOT,
+ KF_ARG_PTR_TO_RB_NODE,
};
enum special_kfunc_type {
@@ -8419,6 +8874,9 @@ enum special_kfunc_type {
KF_bpf_rdonly_cast,
KF_bpf_rcu_read_lock,
KF_bpf_rcu_read_unlock,
+ KF_bpf_rbtree_remove,
+ KF_bpf_rbtree_add,
+ KF_bpf_rbtree_first,
};
BTF_SET_START(special_kfunc_set)
@@ -8430,6 +8888,9 @@ BTF_ID(func, bpf_list_pop_front)
BTF_ID(func, bpf_list_pop_back)
BTF_ID(func, bpf_cast_to_kern_ctx)
BTF_ID(func, bpf_rdonly_cast)
+BTF_ID(func, bpf_rbtree_remove)
+BTF_ID(func, bpf_rbtree_add)
+BTF_ID(func, bpf_rbtree_first)
BTF_SET_END(special_kfunc_set)
BTF_ID_LIST(special_kfunc_list)
@@ -8443,6 +8904,9 @@ BTF_ID(func, bpf_cast_to_kern_ctx)
BTF_ID(func, bpf_rdonly_cast)
BTF_ID(func, bpf_rcu_read_lock)
BTF_ID(func, bpf_rcu_read_unlock)
+BTF_ID(func, bpf_rbtree_remove)
+BTF_ID(func, bpf_rbtree_add)
+BTF_ID(func, bpf_rbtree_first)
static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
{
@@ -8504,6 +8968,12 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
return KF_ARG_PTR_TO_LIST_NODE;
+ if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
+ return KF_ARG_PTR_TO_RB_ROOT;
+
+ if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
+ return KF_ARG_PTR_TO_RB_NODE;
+
if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
if (!btf_type_is_struct(ref_t)) {
verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
@@ -8513,6 +8983,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
return KF_ARG_PTR_TO_BTF_ID;
}
+ if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
+ return KF_ARG_PTR_TO_CALLBACK;
+
if (argno + 1 < nargs && is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))
arg_mem_size = true;
@@ -8551,9 +9024,37 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
reg_ref_id = *reg2btf_ids[base_type(reg->type)];
}
- if (is_kfunc_trusted_args(meta) || (is_kfunc_release(meta) && reg->ref_obj_id))
+ /* Enforce strict type matching for calls to kfuncs that are acquiring
+ * or releasing a reference, or are no-cast aliases. We do _not_
+ * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
+ * as we want to enable BPF programs to pass types that are bitwise
+ * equivalent without forcing them to explicitly cast with something
+ * like bpf_cast_to_kern_ctx().
+ *
+ * For example, say we had a type like the following:
+ *
+ * struct bpf_cpumask {
+ * cpumask_t cpumask;
+ * refcount_t usage;
+ * };
+ *
+ * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
+ * to a struct cpumask, so it would be safe to pass a struct
+ * bpf_cpumask * to a kfunc expecting a struct cpumask *.
+ *
+ * The philosophy here is similar to how we allow scalars of different
+ * types to be passed to kfuncs as long as the size is the same. The
+ * only difference here is that we're simply allowing
+ * btf_struct_ids_match() to walk the struct at the 0th offset, and
+ * resolve types.
+ */
+ if (is_kfunc_acquire(meta) ||
+ (is_kfunc_release(meta) && reg->ref_obj_id) ||
+ btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
strict_type_match = true;
+ WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
+
reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
@@ -8599,38 +9100,54 @@ static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
return 0;
}
-static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id)
+static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
- struct bpf_func_state *state = cur_func(env);
+ struct bpf_verifier_state *state = env->cur_state;
+
+ if (!state->active_lock.ptr) {
+ verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
+ return -EFAULT;
+ }
+
+ if (type_flag(reg->type) & NON_OWN_REF) {
+ verbose(env, "verifier internal error: NON_OWN_REF already set\n");
+ return -EFAULT;
+ }
+
+ reg->type |= NON_OWN_REF;
+ return 0;
+}
+
+static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
+{
+ struct bpf_func_state *state, *unused;
struct bpf_reg_state *reg;
int i;
- /* bpf_spin_lock only allows calling list_push and list_pop, no BPF
- * subprogs, no global functions. This means that the references would
- * not be released inside the critical section but they may be added to
- * the reference state, and the acquired_refs are never copied out for a
- * different frame as BPF to BPF calls don't work in bpf_spin_lock
- * critical sections.
- */
+ state = cur_func(env);
+
if (!ref_obj_id) {
- verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n");
+ verbose(env, "verifier internal error: ref_obj_id is zero for "
+ "owning -> non-owning conversion\n");
return -EFAULT;
}
+
for (i = 0; i < state->acquired_refs; i++) {
- if (state->refs[i].id == ref_obj_id) {
- if (state->refs[i].release_on_unlock) {
- verbose(env, "verifier internal error: expected false release_on_unlock");
- return -EFAULT;
+ if (state->refs[i].id != ref_obj_id)
+ continue;
+
+ /* Clear ref_obj_id here so release_reference doesn't clobber
+ * the whole reg
+ */
+ bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
+ if (reg->ref_obj_id == ref_obj_id) {
+ reg->ref_obj_id = 0;
+ ref_set_non_owning(env, reg);
}
- state->refs[i].release_on_unlock = true;
- /* Now mark everyone sharing same ref_obj_id as untrusted */
- bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
- if (reg->ref_obj_id == ref_obj_id)
- reg->type |= PTR_UNTRUSTED;
- }));
- return 0;
- }
+ }));
+ return 0;
}
+
verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
return -EFAULT;
}
@@ -8716,101 +9233,226 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
btf_id == special_kfunc_list[KF_bpf_list_pop_back];
}
-static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno,
- struct bpf_kfunc_call_arg_meta *meta)
+static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
+{
+ return btf_id == special_kfunc_list[KF_bpf_rbtree_add] ||
+ btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
+ btf_id == special_kfunc_list[KF_bpf_rbtree_first];
+}
+
+static bool is_bpf_graph_api_kfunc(u32 btf_id)
+{
+ return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id);
+}
+
+static bool is_callback_calling_kfunc(u32 btf_id)
+{
+ return btf_id == special_kfunc_list[KF_bpf_rbtree_add];
+}
+
+static bool is_rbtree_lock_required_kfunc(u32 btf_id)
+{
+ return is_bpf_rbtree_api_kfunc(btf_id);
+}
+
+static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
+ enum btf_field_type head_field_type,
+ u32 kfunc_btf_id)
+{
+ bool ret;
+
+ switch (head_field_type) {
+ case BPF_LIST_HEAD:
+ ret = is_bpf_list_api_kfunc(kfunc_btf_id);
+ break;
+ case BPF_RB_ROOT:
+ ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
+ break;
+ default:
+ verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
+ btf_field_type_name(head_field_type));
+ return false;
+ }
+
+ if (!ret)
+ verbose(env, "verifier internal error: %s head arg for unknown kfunc\n",
+ btf_field_type_name(head_field_type));
+ return ret;
+}
+
+static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
+ enum btf_field_type node_field_type,
+ u32 kfunc_btf_id)
{
+ bool ret;
+
+ switch (node_field_type) {
+ case BPF_LIST_NODE:
+ ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
+ kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back]);
+ break;
+ case BPF_RB_NODE:
+ ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
+ kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add]);
+ break;
+ default:
+ verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
+ btf_field_type_name(node_field_type));
+ return false;
+ }
+
+ if (!ret)
+ verbose(env, "verifier internal error: %s node arg for unknown kfunc\n",
+ btf_field_type_name(node_field_type));
+ return ret;
+}
+
+static int
+__process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, u32 regno,
+ struct bpf_kfunc_call_arg_meta *meta,
+ enum btf_field_type head_field_type,
+ struct btf_field **head_field)
+{
+ const char *head_type_name;
struct btf_field *field;
struct btf_record *rec;
- u32 list_head_off;
+ u32 head_off;
- if (meta->btf != btf_vmlinux || !is_bpf_list_api_kfunc(meta->func_id)) {
- verbose(env, "verifier internal error: bpf_list_head argument for unknown kfunc\n");
+ if (meta->btf != btf_vmlinux) {
+ verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
return -EFAULT;
}
+ if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
+ return -EFAULT;
+
+ head_type_name = btf_field_type_name(head_field_type);
if (!tnum_is_const(reg->var_off)) {
verbose(env,
- "R%d doesn't have constant offset. bpf_list_head has to be at the constant offset\n",
- regno);
+ "R%d doesn't have constant offset. %s has to be at the constant offset\n",
+ regno, head_type_name);
return -EINVAL;
}
rec = reg_btf_record(reg);
- list_head_off = reg->off + reg->var_off.value;
- field = btf_record_find(rec, list_head_off, BPF_LIST_HEAD);
+ head_off = reg->off + reg->var_off.value;
+ field = btf_record_find(rec, head_off, head_field_type);
if (!field) {
- verbose(env, "bpf_list_head not found at offset=%u\n", list_head_off);
+ verbose(env, "%s not found at offset=%u\n", head_type_name, head_off);
return -EINVAL;
}
/* All functions require bpf_list_head to be protected using a bpf_spin_lock */
if (check_reg_allocation_locked(env, reg)) {
- verbose(env, "bpf_spin_lock at off=%d must be held for bpf_list_head\n",
- rec->spin_lock_off);
+ verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
+ rec->spin_lock_off, head_type_name);
return -EINVAL;
}
- if (meta->arg_list_head.field) {
- verbose(env, "verifier internal error: repeating bpf_list_head arg\n");
+ if (*head_field) {
+ verbose(env, "verifier internal error: repeating %s arg\n", head_type_name);
return -EFAULT;
}
- meta->arg_list_head.field = field;
+ *head_field = field;
return 0;
}
-static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
+static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
struct bpf_kfunc_call_arg_meta *meta)
{
+ return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
+ &meta->arg_list_head.field);
+}
+
+static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, u32 regno,
+ struct bpf_kfunc_call_arg_meta *meta)
+{
+ return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
+ &meta->arg_rbtree_root.field);
+}
+
+static int
+__process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, u32 regno,
+ struct bpf_kfunc_call_arg_meta *meta,
+ enum btf_field_type head_field_type,
+ enum btf_field_type node_field_type,
+ struct btf_field **node_field)
+{
+ const char *node_type_name;
const struct btf_type *et, *t;
struct btf_field *field;
- struct btf_record *rec;
- u32 list_node_off;
+ u32 node_off;
- if (meta->btf != btf_vmlinux ||
- (meta->func_id != special_kfunc_list[KF_bpf_list_push_front] &&
- meta->func_id != special_kfunc_list[KF_bpf_list_push_back])) {
- verbose(env, "verifier internal error: bpf_list_node argument for unknown kfunc\n");
+ if (meta->btf != btf_vmlinux) {
+ verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
return -EFAULT;
}
+ if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
+ return -EFAULT;
+
+ node_type_name = btf_field_type_name(node_field_type);
if (!tnum_is_const(reg->var_off)) {
verbose(env,
- "R%d doesn't have constant offset. bpf_list_node has to be at the constant offset\n",
- regno);
+ "R%d doesn't have constant offset. %s has to be at the constant offset\n",
+ regno, node_type_name);
return -EINVAL;
}
- rec = reg_btf_record(reg);
- list_node_off = reg->off + reg->var_off.value;
- field = btf_record_find(rec, list_node_off, BPF_LIST_NODE);
- if (!field || field->offset != list_node_off) {
- verbose(env, "bpf_list_node not found at offset=%u\n", list_node_off);
+ node_off = reg->off + reg->var_off.value;
+ field = reg_find_field_offset(reg, node_off, node_field_type);
+ if (!field || field->offset != node_off) {
+ verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
return -EINVAL;
}
- field = meta->arg_list_head.field;
+ field = *node_field;
- et = btf_type_by_id(field->list_head.btf, field->list_head.value_btf_id);
+ et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
t = btf_type_by_id(reg->btf, reg->btf_id);
- if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->list_head.btf,
- field->list_head.value_btf_id, true)) {
- verbose(env, "operation on bpf_list_head expects arg#1 bpf_list_node at offset=%d "
+ if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
+ field->graph_root.value_btf_id, true)) {
+ verbose(env, "operation on %s expects arg#1 %s at offset=%d "
"in struct %s, but arg is at offset=%d in struct %s\n",
- field->list_head.node_offset, btf_name_by_offset(field->list_head.btf, et->name_off),
- list_node_off, btf_name_by_offset(reg->btf, t->name_off));
+ btf_field_type_name(head_field_type),
+ btf_field_type_name(node_field_type),
+ field->graph_root.node_offset,
+ btf_name_by_offset(field->graph_root.btf, et->name_off),
+ node_off, btf_name_by_offset(reg->btf, t->name_off));
return -EINVAL;
}
- if (list_node_off != field->list_head.node_offset) {
- verbose(env, "arg#1 offset=%d, but expected bpf_list_node at offset=%d in struct %s\n",
- list_node_off, field->list_head.node_offset,
- btf_name_by_offset(field->list_head.btf, et->name_off));
+ if (node_off != field->graph_root.node_offset) {
+ verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
+ node_off, btf_field_type_name(node_field_type),
+ field->graph_root.node_offset,
+ btf_name_by_offset(field->graph_root.btf, et->name_off));
return -EINVAL;
}
- /* Set arg#1 for expiration after unlock */
- return ref_set_release_on_unlock(env, reg->ref_obj_id);
+
+ return 0;
+}
+
+static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, u32 regno,
+ struct bpf_kfunc_call_arg_meta *meta)
+{
+ return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
+ BPF_LIST_HEAD, BPF_LIST_NODE,
+ &meta->arg_list_head.field);
+}
+
+static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, u32 regno,
+ struct bpf_kfunc_call_arg_meta *meta)
+{
+ return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
+ BPF_RB_ROOT, BPF_RB_NODE,
+ &meta->arg_rbtree_root.field);
}
static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
@@ -8896,6 +9538,12 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EINVAL;
}
+ if (is_kfunc_trusted_args(meta) &&
+ (register_is_null(reg) || type_may_be_null(reg->type))) {
+ verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
+ return -EACCES;
+ }
+
if (reg->ref_obj_id) {
if (is_kfunc_release(meta) && meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
@@ -8941,8 +9589,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_DYNPTR:
case KF_ARG_PTR_TO_LIST_HEAD:
case KF_ARG_PTR_TO_LIST_NODE:
+ case KF_ARG_PTR_TO_RB_ROOT:
+ case KF_ARG_PTR_TO_RB_NODE:
case KF_ARG_PTR_TO_MEM:
case KF_ARG_PTR_TO_MEM_SIZE:
+ case KF_ARG_PTR_TO_CALLBACK:
/* Trusted by default */
break;
default:
@@ -9019,6 +9670,20 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (ret < 0)
return ret;
break;
+ case KF_ARG_PTR_TO_RB_ROOT:
+ if (reg->type != PTR_TO_MAP_VALUE &&
+ reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
+ verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
+ return -EINVAL;
+ }
+ if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
+ verbose(env, "allocated object must be referenced\n");
+ return -EINVAL;
+ }
+ ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
+ if (ret < 0)
+ return ret;
+ break;
case KF_ARG_PTR_TO_LIST_NODE:
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
verbose(env, "arg#%d expected pointer to allocated object\n", i);
@@ -9032,6 +9697,31 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (ret < 0)
return ret;
break;
+ case KF_ARG_PTR_TO_RB_NODE:
+ if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
+ if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
+ verbose(env, "rbtree_remove node input must be non-owning ref\n");
+ return -EINVAL;
+ }
+ if (in_rbtree_lock_required_cb(env)) {
+ verbose(env, "rbtree_remove not allowed in rbtree cb\n");
+ return -EINVAL;
+ }
+ } else {
+ if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
+ verbose(env, "arg#%d expected pointer to allocated object\n", i);
+ return -EINVAL;
+ }
+ if (!reg->ref_obj_id) {
+ verbose(env, "allocated object must be referenced\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
+ if (ret < 0)
+ return ret;
+ break;
case KF_ARG_PTR_TO_BTF_ID:
/* Only base_type is checked, further checks are done here */
if ((base_type(reg->type) != PTR_TO_BTF_ID ||
@@ -9067,6 +9757,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
/* Skip next '__sz' argument */
i++;
break;
+ case KF_ARG_PTR_TO_CALLBACK:
+ meta->subprogno = reg->subprogno;
+ break;
}
}
@@ -9083,11 +9776,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{
const struct btf_type *t, *func, *func_proto, *ptr_type;
+ u32 i, nargs, func_id, ptr_type_id, release_ref_obj_id;
struct bpf_reg_state *regs = cur_regs(env);
const char *func_name, *ptr_type_name;
bool sleepable, rcu_lock, rcu_unlock;
struct bpf_kfunc_call_arg_meta meta;
- u32 i, nargs, func_id, ptr_type_id;
int err, insn_idx = *insn_idx_p;
const struct btf_param *args;
const struct btf_type *ret_t;
@@ -9182,6 +9875,35 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
}
+ if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front] ||
+ meta.func_id == special_kfunc_list[KF_bpf_list_push_back] ||
+ meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
+ release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
+ err = ref_convert_owning_non_owning(env, release_ref_obj_id);
+ if (err) {
+ verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
+ func_name, func_id);
+ return err;
+ }
+
+ err = release_reference(env, release_ref_obj_id);
+ if (err) {
+ verbose(env, "kfunc %s#%d reference has not been acquired before\n",
+ func_name, func_id);
+ return err;
+ }
+ }
+
+ if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
+ err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+ set_rbtree_add_callback_state);
+ if (err) {
+ verbose(env, "kfunc %s#%d failed callback verification\n",
+ func_name, func_id);
+ return err;
+ }
+ }
+
for (i = 0; i < CALLER_SAVED_REGS; i++)
mark_reg_not_init(env, regs, caller_saved[i]);
@@ -9246,11 +9968,12 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
struct btf_field *field = meta.arg_list_head.field;
- mark_reg_known_zero(env, regs, BPF_REG_0);
- regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
- regs[BPF_REG_0].btf = field->list_head.btf;
- regs[BPF_REG_0].btf_id = field->list_head.value_btf_id;
- regs[BPF_REG_0].off = field->list_head.node_offset;
+ mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
+ } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
+ meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
+ struct btf_field *field = meta.arg_rbtree_root.field;
+
+ mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
@@ -9316,7 +10039,13 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (is_kfunc_ret_null(&meta))
regs[BPF_REG_0].id = id;
regs[BPF_REG_0].ref_obj_id = id;
+ } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
+ ref_set_non_owning(env, &regs[BPF_REG_0]);
}
+
+ if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove])
+ invalidate_non_owning_refs(env);
+
if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
regs[BPF_REG_0].id = ++env->id_gen;
} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
@@ -11502,8 +12231,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
*/
if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
return;
- if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL) && WARN_ON_ONCE(reg->off))
+ if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
+ WARN_ON_ONCE(reg->off))
return;
+
if (is_null) {
reg->type = SCALAR_VALUE;
/* We don't need id and ref_obj_id from this point
@@ -12969,6 +13700,13 @@ static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
{
unsigned int i;
+ /* either both IDs should be set or both should be zero */
+ if (!!old_id != !!cur_id)
+ return false;
+
+ if (old_id == 0) /* cur_id == 0 as well */
+ return true;
+
for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
if (!idmap[i].old) {
/* Reached an empty slot; haven't seen this id before */
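
The early-exit added above makes the id-mapping contract explicit: ids are opaque
equivalence tokens, and two states match only if their ids can be put into a
one-to-one correspondence. A self-contained userspace model of that contract
(simplified types and a toy map size, not the kernel's structures):

#include <stdbool.h>
#include <stdio.h>

#define ID_MAP_SIZE 8

struct id_pair { unsigned int old, cur; };

static bool check_ids(unsigned int old_id, unsigned int cur_id,
                      struct id_pair *idmap)
{
        /* either both IDs should be set or both should be zero */
        if (!!old_id != !!cur_id)
                return false;
        if (old_id == 0) /* cur_id == 0 as well */
                return true;

        for (int i = 0; i < ID_MAP_SIZE; i++) {
                if (!idmap[i].old) { /* empty slot: record the new pair */
                        idmap[i].old = old_id;
                        idmap[i].cur = cur_id;
                        return true;
                }
                if (idmap[i].old == old_id)
                        return idmap[i].cur == cur_id;
        }
        return false; /* map full: be conservative, as the kernel is */
}

int main(void)
{
        struct id_pair idmap[ID_MAP_SIZE] = {0};

        printf("%d\n", check_ids(1, 7, idmap)); /* 1: pair recorded  */
        printf("%d\n", check_ids(1, 7, idmap)); /* 1: consistent     */
        printf("%d\n", check_ids(1, 9, idmap)); /* 0: 1 maps to 7    */
        printf("%d\n", check_ids(0, 3, idmap)); /* 0: one side unset */
        return 0;
}
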
@@ -13080,79 +13818,74 @@ next:
}
}
+static bool regs_exact(const struct bpf_reg_state *rold,
+ const struct bpf_reg_state *rcur,
+ struct bpf_id_pair *idmap)
+{
+ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
+ check_ids(rold->id, rcur->id, idmap) &&
+ check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
+}
+
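
regs_exact() leans on bpf_reg_state's field layout: everything laid out before the
id field must match byte-for-byte, and only the id fields go through idmap
translation. A toy illustration of that offsetof()-bounded memcmp() pattern
(made-up struct; the real register state has many more fields and is
zero-initialized, which keeps padding bytes comparable):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct reg {
        int type;
        long off;               /* ...more exact-match fields... */
        unsigned int id;        /* from here on, ids need translation */
        unsigned int ref_obj_id;
};

/* stand-in for the real check_ids(); only checks set-ness here */
static bool ids_match(unsigned int a, unsigned int b)
{
        return !!a == !!b;
}

static bool regs_exact(const struct reg *rold, const struct reg *rcur)
{
        return memcmp(rold, rcur, offsetof(struct reg, id)) == 0 &&
               ids_match(rold->id, rcur->id) &&
               ids_match(rold->ref_obj_id, rcur->ref_obj_id);
}
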
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
{
- bool equal;
-
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
-
- equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
-
if (rold->type == NOT_INIT)
/* explored state can't have used this */
return true;
if (rcur->type == NOT_INIT)
return false;
+
+ /* Enforce that register types have to match exactly, including their
+ * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
+ * rule.
+ *
+ * One can make a point that using a pointer register as unbounded
+ * SCALAR would be technically acceptable, but this could lead to
+ * pointer leaks because scalars are allowed to leak while pointers
+ * are not. We could make this safe in special cases if root is
+ * calling us, but it's probably not worth the hassle.
+ *
+ * Also, register types that are *not* MAYBE_NULL could technically be
+ * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
+ * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
+ * to the same map).
+ * However, if the old MAYBE_NULL register then got NULL checked,
+ * doing so could have affected others with the same id, and we can't
+ * check for that because we lost the id when we converted to
+ * a non-MAYBE_NULL variant.
+ * So, as a general rule we don't allow mixing MAYBE_NULL and
+ * non-MAYBE_NULL registers as well.
+ */
+ if (rold->type != rcur->type)
+ return false;
+
switch (base_type(rold->type)) {
case SCALAR_VALUE:
- if (equal)
+ if (regs_exact(rold, rcur, idmap))
return true;
if (env->explore_alu_limits)
return false;
- if (rcur->type == SCALAR_VALUE) {
- if (!rold->precise)
- return true;
- /* new val must satisfy old val knowledge */
- return range_within(rold, rcur) &&
- tnum_in(rold->var_off, rcur->var_off);
- } else {
- /* We're trying to use a pointer in place of a scalar.
- * Even if the scalar was unbounded, this could lead to
- * pointer leaks because scalars are allowed to leak
- * while pointers are not. We could make this safe in
- * special cases if root is calling us, but it's
- * probably not worth the hassle.
- */
- return false;
- }
+ if (!rold->precise)
+ return true;
+ /* new val must satisfy old val knowledge */
+ return range_within(rold, rcur) &&
+ tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_MAP_KEY:
case PTR_TO_MAP_VALUE:
- /* a PTR_TO_MAP_VALUE could be safe to use as a
- * PTR_TO_MAP_VALUE_OR_NULL into the same map.
- * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
- * checked, doing so could have affected others with the same
- * id, and we can't check for that because we lost the id when
- * we converted to a PTR_TO_MAP_VALUE.
- */
- if (type_may_be_null(rold->type)) {
- if (!type_may_be_null(rcur->type))
- return false;
- if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
- return false;
- /* Check our ids match any regs they're supposed to */
- return check_ids(rold->id, rcur->id, idmap);
- }
-
/* If the new min/max/var_off satisfy the old ones and
* everything else matches, we are OK.
- * 'id' is not compared, since it's only used for maps with
- * bpf_spin_lock inside map element and in such cases if
- * the rest of the prog is valid for one map element then
- * it's valid for all map elements regardless of the key
- * used in bpf_map_lookup()
*/
- return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
+ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off) &&
check_ids(rold->id, rcur->id, idmap);
case PTR_TO_PACKET_META:
case PTR_TO_PACKET:
- if (rcur->type != rold->type)
- return false;
/* We must have at least as much range as the old ptr
* did, so that any accesses which were safe before are
* still safe. This is true even if old range < old off,
@@ -13167,7 +13900,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
if (rold->off != rcur->off)
return false;
/* id relations must be preserved */
- if (rold->id && !check_ids(rold->id, rcur->id, idmap))
+ if (!check_ids(rold->id, rcur->id, idmap))
return false;
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
@@ -13176,15 +13909,10 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
/* two stack pointers are equal only if they're pointing to
* the same stack frame, since fp-8 in foo != fp-8 in bar
*/
- return equal && rold->frameno == rcur->frameno;
+ return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
default:
- /* Only valid matches are exact, which memcmp() */
- return equal;
+ return regs_exact(rold, rcur, idmap);
}
-
- /* Shouldn't get here; if we do, say it's not safe */
- WARN_ON_ONCE(1);
- return false;
}
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
@@ -13231,10 +13959,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
return false;
if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
continue;
- if (!is_spilled_reg(&old->stack[spi]))
- continue;
- if (!regsafe(env, &old->stack[spi].spilled_ptr,
- &cur->stack[spi].spilled_ptr, idmap))
+ /* Both old and cur have the same slot_type */
+ switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
+ case STACK_SPILL:
/* when explored and current stack slot are both storing
* spilled registers, check that stored pointers types
* are the same as well.
@@ -13245,17 +13972,48 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
* such verifier states are not equivalent.
* return false to continue verification of this path
*/
+ if (!regsafe(env, &old->stack[spi].spilled_ptr,
+ &cur->stack[spi].spilled_ptr, idmap))
+ return false;
+ break;
+ case STACK_DYNPTR:
+ {
+ const struct bpf_reg_state *old_reg, *cur_reg;
+
+ old_reg = &old->stack[spi].spilled_ptr;
+ cur_reg = &cur->stack[spi].spilled_ptr;
+ if (old_reg->dynptr.type != cur_reg->dynptr.type ||
+ old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
+ !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
+ return false;
+ break;
+ }
+ case STACK_MISC:
+ case STACK_ZERO:
+ case STACK_INVALID:
+ continue;
+ /* Ensure that new unhandled slot types return false by default */
+ default:
return false;
+ }
}
return true;
}
-static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
+static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
+ struct bpf_id_pair *idmap)
{
+ int i;
+
if (old->acquired_refs != cur->acquired_refs)
return false;
- return !memcmp(old->refs, cur->refs,
- sizeof(*old->refs) * old->acquired_refs);
+
+ for (i = 0; i < old->acquired_refs; i++) {
+ if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
+ return false;
+ }
+
+ return true;
}
/* compare two verifier states
@@ -13297,7 +14055,7 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
if (!stacksafe(env, old, cur, env->idmap_scratch))
return false;
- if (!refsafe(old, cur))
+ if (!refsafe(old, cur, env->idmap_scratch))
return false;
return true;
@@ -13834,7 +14592,7 @@ static int do_check(struct bpf_verifier_env *env)
env->prev_log_len = env->log.len_used;
}
- if (bpf_prog_is_dev_bound(env->prog->aux)) {
+ if (bpf_prog_is_offloaded(env->prog->aux)) {
err = bpf_prog_offload_verify_insn(env, env->insn_idx,
env->prev_insn_idx);
if (err)
@@ -13985,7 +14743,7 @@ static int do_check(struct bpf_verifier_env *env)
if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
(insn->src_reg == BPF_PSEUDO_CALL) ||
(insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
- (insn->off != 0 || !is_bpf_list_api_kfunc(insn->imm)))) {
+ (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
verbose(env, "function calls are not allowed while holding a lock\n");
return -EINVAL;
}
@@ -14021,7 +14779,8 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
- if (env->cur_state->active_lock.ptr) {
+ if (env->cur_state->active_lock.ptr &&
+ !in_rbtree_lock_required_cb(env)) {
verbose(env, "bpf_spin_unlock is missing\n");
return -EINVAL;
}
@@ -14283,9 +15042,10 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
{
enum bpf_prog_type prog_type = resolve_prog_type(prog);
- if (btf_record_has_field(map->record, BPF_LIST_HEAD)) {
+ if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
+ btf_record_has_field(map->record, BPF_RB_ROOT)) {
if (is_tracing_prog_type(prog_type)) {
- verbose(env, "tracing progs cannot use bpf_list_head yet\n");
+ verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");
return -EINVAL;
}
}
@@ -14314,7 +15074,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
}
}
- if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
+ if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
!bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
return -EINVAL;
@@ -14795,7 +15555,7 @@ static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
unsigned int orig_prog_len = env->prog->len;
int err;
- if (bpf_prog_is_dev_bound(env->prog->aux))
+ if (bpf_prog_is_offloaded(env->prog->aux))
bpf_prog_offload_remove_insns(env, off, cnt);
err = bpf_remove_insns(env->prog, off, cnt);
@@ -14876,7 +15636,7 @@ static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
else
continue;
- if (bpf_prog_is_dev_bound(env->prog->aux))
+ if (bpf_prog_is_offloaded(env->prog->aux))
bpf_prog_offload_replace_insn(env, i, &ja);
memcpy(insn, &ja, sizeof(ja));
@@ -15063,7 +15823,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
}
- if (bpf_prog_is_dev_bound(env->prog->aux))
+ if (bpf_prog_is_offloaded(env->prog->aux))
return 0;
insn = env->prog->insnsi + delta;
@@ -15463,7 +16223,7 @@ static int fixup_call_args(struct bpf_verifier_env *env)
int err = 0;
if (env->prog->jit_requested &&
- !bpf_prog_is_dev_bound(env->prog->aux)) {
+ !bpf_prog_is_offloaded(env->prog->aux)) {
err = jit_subprogs(env);
if (err == 0)
return 0;
@@ -15507,12 +16267,25 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct bpf_insn *insn_buf, int insn_idx, int *cnt)
{
const struct bpf_kfunc_desc *desc;
+ void *xdp_kfunc;
if (!insn->imm) {
verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
return -EINVAL;
}
+ *cnt = 0;
+
+ if (bpf_dev_bound_kfunc_id(insn->imm)) {
+ xdp_kfunc = bpf_dev_bound_resolve_kfunc(env->prog, insn->imm);
+ if (xdp_kfunc) {
+ insn->imm = BPF_CALL_IMM(xdp_kfunc);
+ return 0;
+ }
+
+ /* fallback to default kfunc when not supported by netdev */
+ }
+
/* insn->imm has the btf func_id. Replace it with
* an address (relative to __bpf_call_base).
*/
@@ -15523,7 +16296,6 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return -EFAULT;
}
- *cnt = 0;
insn->imm = desc->imm;
if (insn->off)
return 0;
@@ -16449,7 +17221,7 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
}
if (st_ops->check_member) {
- int err = st_ops->check_member(t, member);
+ int err = st_ops->check_member(t, member, prog);
if (err) {
verbose(env, "attach to unsupported member %s of struct %s\n",
@@ -16530,6 +17302,12 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
if (tgt_prog) {
struct bpf_prog_aux *aux = tgt_prog->aux;
+ if (bpf_prog_is_dev_bound(prog->aux) &&
+ !bpf_prog_dev_bound_match(prog, tgt_prog)) {
+ bpf_log(log, "Target program bound device mismatch");
+ return -EINVAL;
+ }
+
for (i = 0; i < aux->func_info_cnt; i++)
if (aux->func_info[i].type_id == btf_id) {
subprog = i;
@@ -16751,6 +17529,24 @@ BTF_ID(func, rcu_read_unlock_strict)
#endif
BTF_SET_END(btf_id_deny)
+static bool can_be_sleepable(struct bpf_prog *prog)
+{
+ if (prog->type == BPF_PROG_TYPE_TRACING) {
+ switch (prog->expected_attach_type) {
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ case BPF_TRACE_ITER:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return prog->type == BPF_PROG_TYPE_LSM ||
+ prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
+ prog->type == BPF_PROG_TYPE_STRUCT_OPS;
+}
+
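
can_be_sleepable() is now the single place that decides which program types may set
BPF_F_SLEEPABLE; iterators, uprobes (via the kprobe program type) and struct_ops
are the newly allowed cases. From user space, sleepability is usually requested
through libbpf's ".s" section suffix; a hedged sketch (the binary path, symbol and
exact section spelling are illustrative, not taken from this patch):

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

char buf[64];

SEC("uprobe.s//usr/bin/bash:readline") /* ".s" marks the program sleepable */
int handle_readline(struct pt_regs *ctx)
{
        /* sleepable context: the copy may fault in user pages and sleep */
        bpf_copy_from_user(buf, sizeof(buf), (void *)PT_REGS_PARM1(ctx));
        return 0;
}
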
static int check_attach_btf_id(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
@@ -16769,9 +17565,8 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
return -EINVAL;
}
- if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
- prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) {
- verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n");
+ if (prog->aux->sleepable && !can_be_sleepable(prog)) {
+ verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
return -EINVAL;
}
@@ -16950,7 +17745,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
if (ret < 0)
goto skip_full_check;
- if (bpf_prog_is_dev_bound(env->prog->aux)) {
+ if (bpf_prog_is_offloaded(env->prog->aux)) {
ret = bpf_prog_offload_verifier_prep(env->prog);
if (ret)
goto skip_full_check;
@@ -16963,7 +17758,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
ret = do_check_subprogs(env);
ret = ret ?: do_check_main(env);
- if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
+ if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
ret = bpf_prog_offload_finalize(env);
skip_full_check:
@@ -16998,7 +17793,7 @@ skip_full_check:
/* do 32-bit optimization after insn patching has done so those patched
* insns could be handled correctly.
*/
- if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
+ if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
: false;
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 793ecff29038..831f1f472bb8 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -26,7 +26,7 @@ static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
* rstat_cpu->updated_children list. See the comment on top of
* cgroup_rstat_cpu definition for details.
*/
-void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
+__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
unsigned long flags;
@@ -231,7 +231,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
*
* This function may block.
*/
-void cgroup_rstat_flush(struct cgroup *cgrp)
+__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
might_sleep();
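
The __bpf_kfunc annotations threaded through this series tag functions that are
exposed to BPF programs as kfuncs; the tag keeps the symbol and its BTF prototype
from being inlined or eliminated by the compiler. In this cycle it expands to
roughly the following (an approximation of include/linux/btf.h, quoted from
memory; verify against the tree):

/* Keep a callable out-of-line symbol with accurate BTF in the final image. */
#define __bpf_kfunc __used noinline
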
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 969e8f52f7da..b1cf259854ca 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
@@ -975,7 +976,7 @@ void __noclone __crash_kexec(struct pt_regs *regs)
}
STACK_FRAME_NON_STANDARD(__crash_kexec);
-void crash_kexec(struct pt_regs *regs)
+__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
int old_cpu, this_cpu;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 201f0c0482fb..c973ed9e42f8 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -118,7 +118,6 @@ static struct klp_object *klp_find_object(struct klp_patch *patch,
}
struct klp_find_arg {
- const char *objname;
const char *name;
unsigned long addr;
unsigned long count;
@@ -148,15 +147,9 @@ static int klp_find_callback(void *data, const char *name,
{
struct klp_find_arg *args = data;
- if ((mod && !args->objname) || (!mod && args->objname))
- return 0;
-
if (strcmp(args->name, name))
return 0;
- if (args->objname && strcmp(args->objname, mod->name))
- return 0;
-
return klp_match_callback(data, addr);
}
@@ -164,7 +157,6 @@ static int klp_find_object_symbol(const char *objname, const char *name,
unsigned long sympos, unsigned long *addr)
{
struct klp_find_arg args = {
- .objname = objname,
.name = name,
.addr = 0,
.count = 0,
@@ -172,7 +164,7 @@ static int klp_find_object_symbol(const char *objname, const char *name,
};
if (objname)
- module_kallsyms_on_each_symbol(klp_find_callback, &args);
+ module_kallsyms_on_each_symbol(objname, klp_find_callback, &args);
else
kallsyms_on_each_match_symbol(klp_match_callback, name, &args);
diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c
index 4523f99b0358..ab2376a1be88 100644
--- a/kernel/module/kallsyms.c
+++ b/kernel/module/kallsyms.c
@@ -494,7 +494,8 @@ unsigned long module_kallsyms_lookup_name(const char *name)
return ret;
}
-int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data)
{
@@ -509,6 +510,9 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
if (mod->state == MODULE_STATE_UNFORMED)
continue;
+ if (modname && strcmp(modname, mod->name))
+ continue;
+
/* Use rcu_dereference_sched() to remain compliant with the sparse tool */
preempt_disable();
kallsyms = rcu_dereference_sched(mod->kallsyms);
@@ -525,6 +529,13 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
if (ret != 0)
goto out;
}
+
+ /*
+ * The given module has been found; the remaining modules do
+ * not need to be compared.
+ */
+ if (modname)
+ break;
}
out:
mutex_unlock(&module_mutex);
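
With the new modname argument, a caller interested in one module no longer pays for
a walk over every loaded module's symbol table. A hedged in-kernel usage sketch
(the callback and module name are illustrative; the callback signature is the one
shown in this hunk):

static int count_symbol(void *data, const char *name,
                        struct module *mod, unsigned long addr)
{
        (*(unsigned long *)data)++;
        return 0; /* returning non-zero stops the walk early */
}

static unsigned long count_module_symbols(const char *modname)
{
        unsigned long count = 0;

        /* only modname's symbols are visited; NULL would visit them all */
        module_kallsyms_on_each_symbol(modname, count_symbol, &count);
        return count;
}
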
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d93c3379e901..051aaf65c749 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -3,6 +3,8 @@
* Scheduler topology setup/handling methods
*/
+#include <linux/bsearch.h>
+
DEFINE_MUTEX(sched_domains_mutex);
/* Protected by sched_domains_mutex: */
@@ -2067,6 +2069,99 @@ unlock:
return found;
}
+struct __cmp_key {
+ const struct cpumask *cpus;
+ struct cpumask ***masks;
+ int node;
+ int cpu;
+ int w;
+};
+
+static int hop_cmp(const void *a, const void *b)
+{
+ struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
+ struct __cmp_key *k = (struct __cmp_key *)a;
+
+ if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
+ return 1;
+
+ if (b == k->masks) {
+ k->w = 0;
+ return 0;
+ }
+
+ prev_hop = *((struct cpumask ***)b - 1);
+ k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
+ if (k->w <= k->cpu)
+ return 0;
+
+ return -1;
+}
+
+/**
+ * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth CPU from
+ * @cpus, ordered by increasing NUMA distance from @node.
+ * @cpus: cpumask to find a CPU from
+ * @cpu: index of the CPU to find (the Nth one)
+ * @node: the node to measure NUMA distances from
+ *
+ * Return: the Nth CPU, or nr_cpu_ids when nothing is found.
+ */
+int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+{
+ struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu };
+ struct cpumask ***hop_masks;
+ int hop, ret = nr_cpu_ids;
+
+ rcu_read_lock();
+
+ k.masks = rcu_dereference(sched_domains_numa_masks);
+ if (!k.masks)
+ goto unlock;
+
+ hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
+ hop = hop_masks - k.masks;
+
+ ret = hop ?
+ cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
+ cpumask_nth_and(cpu, cpus, k.masks[0][node]);
+unlock:
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
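
The bsearch() above exploits the fact that the hop masks are cumulative: the set of
CPUs within h hops contains the set within h-1 hops, so per-hop weights grow
monotonically and the right hop can be binary-searched. A self-contained userspace
model of the same lookup (64-bit toy bitmaps and a linear scan in place of
bsearch()):

#include <stdio.h>

/* return the n'th (0-based) set bit of mask, or -1 */
static int nth_bit(unsigned long mask, int n)
{
        for (int bit = 0; bit < 64; bit++)
                if ((mask >> bit) & 1 && n-- == 0)
                        return bit;
        return -1;
}

/* hops[h] = bitmap of CPUs reachable within h hops; cumulative by construction */
static int find_nth_cpu(const unsigned long *hops, int nhops,
                        unsigned long cpus, int n)
{
        int prev_w = 0;

        for (int h = 0; h < nhops; h++) {
                int w = __builtin_popcountl(cpus & hops[h]);

                if (n < w) {
                        /* CPUs that are new in this hop: hops[h] \ hops[h-1] */
                        unsigned long newly = cpus & hops[h] &
                                              (h ? ~hops[h - 1] : ~0UL);
                        return nth_bit(newly, n - prev_w);
                }
                prev_w = w;
        }
        return -1; /* analogous to returning nr_cpu_ids */
}

int main(void)
{
        unsigned long hops[] = { 0x0f, 0xff }; /* local: CPUs 0-3; +1 hop: 0-7 */

        printf("%d\n", find_nth_cpu(hops, 2, 0xaa, 0)); /* -> 1 */
        printf("%d\n", find_nth_cpu(hops, 2, 0xaa, 2)); /* -> 5 */
        return 0;
}
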
+
+/**
+ * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
+ * @node
+ * @node: The node to count hops from.
+ * @hops: Include CPUs up to that many hops away. 0 means local node.
+ *
+ * Return: On success, a pointer to a cpumask of CPUs at most @hops away from
+ * @node, an error value otherwise.
+ *
+ * Requires rcu_read_lock() to be held. The returned cpumask is only valid
+ * within that read-side critical section; copy it if needed beyond that.
+ *
+ * Note that not all hops are equal in distance; see sched_init_numa() for how
+ * distances and masks are handled.
+ * Also note that this is a reflection of sched_domains_numa_masks, which may change
+ * during the lifetime of the system (offline nodes are taken out of the masks).
+ */
+const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
+{
+ struct cpumask ***masks;
+
+ if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
+ return ERR_PTR(-EINVAL);
+
+ masks = rcu_dereference(sched_domains_numa_masks);
+ if (!masks)
+ return ERR_PTR(-EBUSY);
+
+ return masks[hops][node];
+}
+EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
+
#endif /* CONFIG_NUMA */
static int __sdt_alloc(const struct cpumask *cpu_map)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index b8ac8b09c86f..e8da032bb6fc 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -369,8 +369,6 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
return &bpf_probe_write_user_proto;
}
-static DEFINE_RAW_SPINLOCK(trace_printk_lock);
-
#define MAX_TRACE_PRINTK_VARARGS 3
#define BPF_TRACE_PRINTK_SIZE 1024
@@ -378,23 +376,22 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
u64, arg2, u64, arg3)
{
u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
- u32 *bin_args;
- static char buf[BPF_TRACE_PRINTK_SIZE];
- unsigned long flags;
+ struct bpf_bprintf_data data = {
+ .get_bin_args = true,
+ .get_buf = true,
+ };
int ret;
- ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
- MAX_TRACE_PRINTK_VARARGS);
+ ret = bpf_bprintf_prepare(fmt, fmt_size, args,
+ MAX_TRACE_PRINTK_VARARGS, &data);
if (ret < 0)
return ret;
- raw_spin_lock_irqsave(&trace_printk_lock, flags);
- ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
+ ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
- trace_bpf_trace_printk(buf);
- raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+ trace_bpf_trace_printk(data.buf);
- bpf_bprintf_cleanup();
+ bpf_bprintf_cleanup(&data);
return ret;
}
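
The conversion above retires trace_printk_lock: bpf_bprintf_prepare() now hands out
per-CPU scratch buffers via struct bpf_bprintf_data (get_buf), so no global lock
serializes the printk-style helpers. Callers are unaffected; typical program-side
usage stays as simple as this sketch (section name illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_exec(void *ctx)
{
        /* bpf_printk() expands to the bpf_trace_printk() helper */
        bpf_printk("execve by pid %d", (int)(bpf_get_current_pid_tgid() >> 32));
        return 0;
}
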
@@ -427,30 +424,29 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
return &bpf_trace_printk_proto;
}
-BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
+BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
u32, data_len)
{
- static char buf[BPF_TRACE_PRINTK_SIZE];
- unsigned long flags;
+ struct bpf_bprintf_data data = {
+ .get_bin_args = true,
+ .get_buf = true,
+ };
int ret, num_args;
- u32 *bin_args;
if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
- (data_len && !data))
+ (data_len && !args))
return -EINVAL;
num_args = data_len / 8;
- ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
+ ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
if (ret < 0)
return ret;
- raw_spin_lock_irqsave(&trace_printk_lock, flags);
- ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
+ ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
- trace_bpf_trace_printk(buf);
- raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+ trace_bpf_trace_printk(data.buf);
- bpf_bprintf_cleanup();
+ bpf_bprintf_cleanup(&data);
return ret;
}
@@ -472,23 +468,25 @@ const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
}
BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
- const void *, data, u32, data_len)
+ const void *, args, u32, data_len)
{
+ struct bpf_bprintf_data data = {
+ .get_bin_args = true,
+ };
int err, num_args;
- u32 *bin_args;
if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
- (data_len && !data))
+ (data_len && !args))
return -EINVAL;
num_args = data_len / 8;
- err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
+ err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
if (err < 0)
return err;
- seq_bprintf(m, fmt, bin_args);
+ seq_bprintf(m, fmt, data.bin_args);
- bpf_bprintf_cleanup();
+ bpf_bprintf_cleanup(&data);
return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}
@@ -1237,7 +1235,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* Return: a bpf_key pointer with a valid key pointer if the key is found, a
* NULL pointer otherwise.
*/
-struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
+__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
key_ref_t key_ref;
struct bpf_key *bkey;
@@ -1286,7 +1284,7 @@ struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
* Return: a bpf_key pointer with an invalid key pointer set from the
* pre-determined ID on success, a NULL pointer otherwise
*/
-struct bpf_key *bpf_lookup_system_key(u64 id)
+__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
struct bpf_key *bkey;
@@ -1310,7 +1308,7 @@ struct bpf_key *bpf_lookup_system_key(u64 id)
* Decrement the reference count of the key inside *bkey*, if the pointer
* is valid, and free *bkey*.
*/
-void bpf_key_put(struct bpf_key *bkey)
+__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
if (bkey->has_ref)
key_put(bkey->key);
@@ -1330,7 +1328,7 @@ void bpf_key_put(struct bpf_key *bkey)
*
* Return: 0 on success, a negative value on error.
*/
-int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
+__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
struct bpf_dynptr_kern *sig_ptr,
struct bpf_key *trusted_keyring)
{
@@ -2686,69 +2684,77 @@ static void symbols_swap_r(void *a, void *b, int size, const void *priv)
}
}
-struct module_addr_args {
- unsigned long *addrs;
- u32 addrs_cnt;
+struct modules_array {
struct module **mods;
int mods_cnt;
int mods_cap;
};
-static int module_callback(void *data, const char *name,
- struct module *mod, unsigned long addr)
+static int add_module(struct modules_array *arr, struct module *mod)
{
- struct module_addr_args *args = data;
struct module **mods;
- /* We iterate all modules symbols and for each we:
- * - search for it in provided addresses array
- * - if found we check if we already have the module pointer stored
- * (we iterate modules sequentially, so we can check just the last
- * module pointer)
- * - take module reference and store it
- */
- if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
- bpf_kprobe_multi_addrs_cmp))
- return 0;
-
- if (args->mods && args->mods[args->mods_cnt - 1] == mod)
- return 0;
-
- if (args->mods_cnt == args->mods_cap) {
- args->mods_cap = max(16, args->mods_cap * 3 / 2);
- mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
+ if (arr->mods_cnt == arr->mods_cap) {
+ arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
+ mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
if (!mods)
return -ENOMEM;
- args->mods = mods;
+ arr->mods = mods;
}
- if (!try_module_get(mod))
- return -EINVAL;
-
- args->mods[args->mods_cnt] = mod;
- args->mods_cnt++;
+ arr->mods[arr->mods_cnt] = mod;
+ arr->mods_cnt++;
return 0;
}
+static bool has_module(struct modules_array *arr, struct module *mod)
+{
+ int i;
+
+ for (i = arr->mods_cnt - 1; i >= 0; i--) {
+ if (arr->mods[i] == mod)
+ return true;
+ }
+ return false;
+}
+
static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
{
- struct module_addr_args args = {
- .addrs = addrs,
- .addrs_cnt = addrs_cnt,
- };
- int err;
+ struct modules_array arr = {};
+ u32 i, err = 0;
+
+ for (i = 0; i < addrs_cnt; i++) {
+ struct module *mod;
+
+ preempt_disable();
+ mod = __module_address(addrs[i]);
+ /* Either no module or it's already stored */
+ if (!mod || has_module(&arr, mod)) {
+ preempt_enable();
+ continue;
+ }
+ if (!try_module_get(mod))
+ err = -EINVAL;
+ preempt_enable();
+ if (err)
+ break;
+ err = add_module(&arr, mod);
+ if (err) {
+ module_put(mod);
+ break;
+ }
+ }
/* We return either err < 0 in case of error, ... */
- err = module_kallsyms_on_each_symbol(module_callback, &args);
if (err) {
- kprobe_multi_put_modules(args.mods, args.mods_cnt);
- kfree(args.mods);
+ kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
+ kfree(arr.mods);
return err;
}
/* or number of modules found if everything is ok. */
- *mods = args.mods;
- return args.mods_cnt;
+ *mods = arr.mods;
+ return arr.mods_cnt;
}
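
add_module() uses the familiar amortized-growth pattern: capacity grows by 3/2 with
a floor of 16, via krealloc_array(). A userspace rendition of the same shape
(realloc() standing in for krealloc_array()):

#include <stdlib.h>

struct ptr_array {
        void **items;
        int cnt, cap;
};

static int array_add(struct ptr_array *arr, void *p)
{
        if (arr->cnt == arr->cap) {
                int cap = arr->cap * 3 / 2;
                void **items;

                if (cap < 16)
                        cap = 16;
                items = realloc(arr->items, cap * sizeof(*items));
                if (!items)
                        return -1; /* old array stays valid; caller frees */
                arr->items = items;
                arr->cap = cap;
        }
        arr->items[arr->cnt++] = p;
        return 0;
}
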
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
@@ -2861,13 +2867,6 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
bpf_kprobe_multi_cookie_cmp,
bpf_kprobe_multi_cookie_swap,
link);
- } else {
- /*
- * We need to sort addrs array even if there are no cookies
- * provided, to allow bsearch in get_modules_for_addrs.
- */
- sort(addrs, cnt, sizeof(*addrs),
- bpf_kprobe_multi_addrs_cmp, NULL);
}
err = get_modules_for_addrs(&link->mods, addrs, cnt);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 51896b610414..29baa97d0d53 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -8444,7 +8444,7 @@ int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *a
found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
if (found_all)
return 0;
- found_all = module_kallsyms_on_each_symbol(kallsyms_callback, &args);
+ found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
return found_all ? 0 : -ESRCH;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 02ee440f7be3..3509115a0e85 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -389,6 +389,15 @@ config PAHOLE_HAS_BTF_TAG
btf_decl_tag) or not. Currently only clang compiler implements
these attributes, so make the config depend on CC_IS_CLANG.
+config PAHOLE_HAS_LANG_EXCLUDE
+ def_bool PAHOLE_VERSION >= 124
+ help
+ Support for the --lang_exclude flag which makes pahole exclude
+ compilation units from the supplied language. Used in Kbuild to
+ omit Rust CUs which are not supported in version 1.24 of pahole,
+ otherwise it would emit malformed kernel and module binaries when
+ using DEBUG_INFO_BTF_MODULES.
+
config DEBUG_INFO_BTF_MODULES
def_bool y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c7c392514fd3..e7258836b60b 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -110,15 +110,33 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
#endif
/**
- * cpumask_local_spread - select the i'th cpu with local numa cpu's first
+ * cpumask_local_spread - select the i'th cpu based on NUMA distances
* @i: index number
* @node: local numa_node
*
- * This function selects an online CPU according to a numa aware policy;
- * local cpus are returned first, followed by non-local ones, then it
- * wraps around.
+ * Returns an online CPU according to a NUMA-aware policy; local CPUs are
+ * returned first, followed by non-local ones, then it wraps around.
*
- * It's not very efficient, but useful for setup.
+ * For those who want to enumerate all CPUs based on their NUMA distances,
+ * i.e. call this function in a loop, like:
+ *
+ * for (i = 0; i < num_online_cpus(); i++) {
+ * cpu = cpumask_local_spread(i, node);
+ * do_something(cpu);
+ * }
+ *
+ * There's a better alternative based on for_each()-like iterators:
+ *
+ * for_each_numa_hop_mask(mask, node) {
+ * for_each_cpu_andnot(cpu, mask, prev)
+ * do_something(cpu);
+ * prev = mask;
+ * }
+ *
+ * The iterator-based version is more verbose, but also cheaper: the
+ * complexity of iterator-based enumeration is
+ * O(sched_domains_numa_levels * nr_cpu_ids), while calling
+ * cpumask_local_spread() for each cpu costs
+ * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
@@ -127,24 +145,12 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
/* Wrap: we always want a cpu. */
i %= num_online_cpus();
- if (node == NUMA_NO_NODE) {
- cpu = cpumask_nth(i, cpu_online_mask);
- if (cpu < nr_cpu_ids)
- return cpu;
- } else {
- /* NUMA first. */
- cpu = cpumask_nth_and(i, cpu_online_mask, cpumask_of_node(node));
- if (cpu < nr_cpu_ids)
- return cpu;
-
- i -= cpumask_weight_and(cpu_online_mask, cpumask_of_node(node));
-
- /* Skip NUMA nodes, done above. */
- cpu = cpumask_nth_andnot(i, cpu_online_mask, cpumask_of_node(node));
- if (cpu < nr_cpu_ids)
- return cpu;
- }
- BUG();
+ cpu = (node == NUMA_NO_NODE) ?
+ cpumask_nth(i, cpu_online_mask) :
+ sched_numa_find_nth_cpu(cpu_online_mask, i, node);
+
+ WARN_ON(cpu >= nr_cpu_ids);
+ return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 18bc0a7ac8ee..c10920e66788 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -155,6 +155,15 @@ unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned l
}
EXPORT_SYMBOL(__find_nth_andnot_bit);
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & addr2[idx] & ~addr3[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_and_andnot_bit);
+
#ifndef find_next_and_bit
unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long nbits, unsigned long start)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 73afff8062f9..49f40730e711 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -88,6 +88,9 @@ static bool cgroup_memory_nosocket __ro_after_init;
/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;
+/* BPF memory accounting disabled? */
+static bool cgroup_memory_nobpf __ro_after_init;
+
#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif
@@ -347,6 +350,9 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
*/
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
+
+DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
+EXPORT_SYMBOL(memcg_bpf_enabled_key);
#endif
/**
@@ -5357,6 +5363,11 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
static_branch_inc(&memcg_sockets_enabled_key);
+#if defined(CONFIG_MEMCG_KMEM)
+ if (!cgroup_memory_nobpf)
+ static_branch_inc(&memcg_bpf_enabled_key);
+#endif
+
return &memcg->css;
}
@@ -5441,6 +5452,11 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
static_branch_dec(&memcg_sockets_enabled_key);
+#if defined(CONFIG_MEMCG_KMEM)
+ if (!cgroup_memory_nobpf)
+ static_branch_dec(&memcg_bpf_enabled_key);
+#endif
+
vmpressure_cleanup(&memcg->vmpressure);
cancel_work_sync(&memcg->high_work);
mem_cgroup_remove_from_trees(memcg);
@@ -7269,6 +7285,8 @@ static int __init cgroup_memory(char *s)
cgroup_memory_nosocket = true;
if (!strcmp(token, "nokmem"))
cgroup_memory_nokmem = true;
+ if (!strcmp(token, "nobpf"))
+ cgroup_memory_nobpf = true;
}
return 1;
}
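
With the new token, booting with e.g. cgroup.memory=nokmem,nobpf disables both kmem
and BPF memory accounting. The parser is a plain comma-separated token walk; a
userspace model of the same shape (illustrative names, strsep() standing in for the
kernel's parsing helpers):

#define _GNU_SOURCE
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool nosocket, nokmem, nobpf;

static void parse_cgroup_memory(char *s)
{
        char *token;

        while ((token = strsep(&s, ",")) != NULL) {
                if (!*token)
                        continue;
                if (!strcmp(token, "nosocket"))
                        nosocket = true;
                if (!strcmp(token, "nokmem"))
                        nokmem = true;
                if (!strcmp(token, "nobpf"))
                        nobpf = true;
        }
}

int main(void)
{
        char arg[] = "nokmem,nobpf";

        parse_cgroup_memory(arg);
        printf("nosocket=%d nokmem=%d nobpf=%d\n", nosocket, nokmem, nobpf);
        return 0;
}
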
diff --git a/net/Makefile b/net/Makefile
index 6a62e5b27378..0914bea9c335 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_BPFILTER) += bpfilter/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_BRIDGE) += bridge/
+obj-$(CONFIG_NET_DEVLINK) += devlink/
obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_ATALK) += appletalk/
obj-$(CONFIG_X25) += x25/
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 114ee5da261f..828fb393ee94 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -27,7 +27,6 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/pkt_sched.h>
-#include <linux/prandom.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index f9a58fb5442e..acff565849ae 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -21,7 +21,6 @@
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
-#include <linux/prandom.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index addfd8c4fe95..e710e9afe78f 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -21,7 +21,6 @@
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
-#include <linux/prandom.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -800,8 +799,8 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
/* only unknown & newer OGMs contain TVLVs we are interested in */
if (seqno_age > 0 && if_outgoing == BATADV_IF_DEFAULT)
- batadv_tvlv_containers_process(bat_priv, true, orig_node,
- NULL, NULL,
+ batadv_tvlv_containers_process(bat_priv, BATADV_OGM2, orig_node,
+ NULL,
(unsigned char *)(ogm2 + 1),
ntohs(ogm2->tvlv_len));
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index fefb51a5f606..6968e55eb971 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -822,7 +822,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
batadv_dat_start_timer(bat_priv);
batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
- NULL, BATADV_TVLV_DAT, 1,
+ NULL, NULL, BATADV_TVLV_DAT, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
batadv_dat_tvlv_container_update(bat_priv);
return 0;
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 9349c76f30c5..6a964a773f57 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -259,7 +259,7 @@ void batadv_gw_init(struct batadv_priv *bat_priv)
atomic_set(&bat_priv->gw.sel_class, 1);
batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
- NULL, BATADV_TVLV_GW, 1,
+ NULL, NULL, BATADV_TVLV_GW, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
}
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index c48803b32bb0..156ed39eded1 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2022.3"
+#define BATADV_SOURCE_VERSION "2023.1"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index b238455913df..315394f12c55 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -26,7 +26,6 @@
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
-#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
@@ -1137,222 +1136,19 @@ static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_forw_tt_node_get() - get a multicast tt node
- * @bat_priv: the bat priv with all the soft interface information
- * @ethhdr: the ether header containing the multicast destination
- *
- * Return: an orig_node matching the multicast address provided by ethhdr
- * via a translation table lookup. This increases the returned nodes refcount.
- */
-static struct batadv_orig_node *
-batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
- struct ethhdr *ethhdr)
-{
- return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
- BATADV_NO_FLAGS);
-}
-
-/**
- * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
- * @bat_priv: the bat priv with all the soft interface information
- *
- * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
- * increases its refcount.
- */
-static struct batadv_orig_node *
-batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
-{
- struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_orig_node,
- &bat_priv->mcast.want_all_ipv4_list,
- mcast_want_all_ipv4_node) {
- if (!kref_get_unless_zero(&tmp_orig_node->refcount))
- continue;
-
- orig_node = tmp_orig_node;
- break;
- }
- rcu_read_unlock();
-
- return orig_node;
-}
-
-/**
- * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
- * @bat_priv: the bat priv with all the soft interface information
- *
- * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
- * and increases its refcount.
- */
-static struct batadv_orig_node *
-batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
-{
- struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_orig_node,
- &bat_priv->mcast.want_all_ipv6_list,
- mcast_want_all_ipv6_node) {
- if (!kref_get_unless_zero(&tmp_orig_node->refcount))
- continue;
-
- orig_node = tmp_orig_node;
- break;
- }
- rcu_read_unlock();
-
- return orig_node;
-}
-
-/**
- * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
- * @bat_priv: the bat priv with all the soft interface information
- * @ethhdr: an ethernet header to determine the protocol family from
- *
- * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
- * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, sets and
- * increases its refcount.
- */
-static struct batadv_orig_node *
-batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
- struct ethhdr *ethhdr)
-{
- switch (ntohs(ethhdr->h_proto)) {
- case ETH_P_IP:
- return batadv_mcast_forw_ipv4_node_get(bat_priv);
- case ETH_P_IPV6:
- return batadv_mcast_forw_ipv6_node_get(bat_priv);
- default:
- /* we shouldn't be here... */
- return NULL;
- }
-}
-
-/**
- * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
- * @bat_priv: the bat priv with all the soft interface information
- *
- * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
- * set and increases its refcount.
- */
-static struct batadv_orig_node *
-batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
-{
- struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_orig_node,
- &bat_priv->mcast.want_all_unsnoopables_list,
- mcast_want_all_unsnoopables_node) {
- if (!kref_get_unless_zero(&tmp_orig_node->refcount))
- continue;
-
- orig_node = tmp_orig_node;
- break;
- }
- rcu_read_unlock();
-
- return orig_node;
-}
-
-/**
- * batadv_mcast_forw_rtr4_node_get() - get a node with an ipv4 mcast router flag
- * @bat_priv: the bat priv with all the soft interface information
- *
- * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR4 flag unset and
- * increases its refcount.
- */
-static struct batadv_orig_node *
-batadv_mcast_forw_rtr4_node_get(struct batadv_priv *bat_priv)
-{
- struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_orig_node,
- &bat_priv->mcast.want_all_rtr4_list,
- mcast_want_all_rtr4_node) {
- if (!kref_get_unless_zero(&tmp_orig_node->refcount))
- continue;
-
- orig_node = tmp_orig_node;
- break;
- }
- rcu_read_unlock();
-
- return orig_node;
-}
-
-/**
- * batadv_mcast_forw_rtr6_node_get() - get a node with an ipv6 mcast router flag
- * @bat_priv: the bat priv with all the soft interface information
- *
- * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR6 flag unset
- * and increases its refcount.
- */
-static struct batadv_orig_node *
-batadv_mcast_forw_rtr6_node_get(struct batadv_priv *bat_priv)
-{
- struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_orig_node,
- &bat_priv->mcast.want_all_rtr6_list,
- mcast_want_all_rtr6_node) {
- if (!kref_get_unless_zero(&tmp_orig_node->refcount))
- continue;
-
- orig_node = tmp_orig_node;
- break;
- }
- rcu_read_unlock();
-
- return orig_node;
-}
-
-/**
- * batadv_mcast_forw_rtr_node_get() - get a node with an ipv4/ipv6 router flag
- * @bat_priv: the bat priv with all the soft interface information
- * @ethhdr: an ethernet header to determine the protocol family from
- *
- * Return: an orig_node which has no BATADV_MCAST_WANT_NO_RTR4 or
- * BATADV_MCAST_WANT_NO_RTR6 flag, depending on the provided ethhdr, set and
- * increases its refcount.
- */
-static struct batadv_orig_node *
-batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
- struct ethhdr *ethhdr)
-{
- switch (ntohs(ethhdr->h_proto)) {
- case ETH_P_IP:
- return batadv_mcast_forw_rtr4_node_get(bat_priv);
- case ETH_P_IPV6:
- return batadv_mcast_forw_rtr6_node_get(bat_priv);
- default:
- /* we shouldn't be here... */
- return NULL;
- }
-}
-
-/**
* batadv_mcast_forw_mode() - check on how to forward a multicast packet
* @bat_priv: the bat priv with all the soft interface information
- * @skb: The multicast packet to check
- * @orig: an originator to be set to forward the skb to
+ * @skb: the multicast packet to check
* @is_routable: stores whether the destination is routable
*
- * Return: the forwarding mode as enum batadv_forw_mode and in case of
- * BATADV_FORW_SINGLE set the orig to the single originator the skb
- * should be forwarded to.
+ * Return: The forwarding mode as enum batadv_forw_mode.
*/
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **orig, int *is_routable)
+ int *is_routable)
{
int ret, tt_count, ip_count, unsnoop_count, total_count;
bool is_unsnoopable = false;
- unsigned int mcast_fanout;
struct ethhdr *ethhdr;
int rtr_count = 0;
@@ -1361,7 +1157,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (ret == -ENOMEM)
return BATADV_FORW_NONE;
else if (ret < 0)
- return BATADV_FORW_ALL;
+ return BATADV_FORW_BCAST;
ethhdr = eth_hdr(skb);
@@ -1374,32 +1170,15 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
total_count = tt_count + ip_count + unsnoop_count + rtr_count;
- switch (total_count) {
- case 1:
- if (tt_count)
- *orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
- else if (ip_count)
- *orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
- else if (unsnoop_count)
- *orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
- else if (rtr_count)
- *orig = batadv_mcast_forw_rtr_node_get(bat_priv,
- ethhdr);
-
- if (*orig)
- return BATADV_FORW_SINGLE;
-
- fallthrough;
- case 0:
+ if (!total_count)
return BATADV_FORW_NONE;
- default:
- mcast_fanout = atomic_read(&bat_priv->multicast_fanout);
+ else if (unsnoop_count)
+ return BATADV_FORW_BCAST;
- if (!unsnoop_count && total_count <= mcast_fanout)
- return BATADV_FORW_SOME;
- }
+ if (total_count <= atomic_read(&bat_priv->multicast_fanout))
+ return BATADV_FORW_UCASTS;
- return BATADV_FORW_ALL;
+ return BATADV_FORW_BCAST;
}
/**
@@ -1411,10 +1190,10 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
*
* Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
*/
-int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- unsigned short vid,
- struct batadv_orig_node *orig_node)
+static int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned short vid,
+ struct batadv_orig_node *orig_node)
{
/* Avoid sending multicast-in-unicast packets to other BLA
* gateways - they already got the frame from the LAN side
@@ -2039,7 +1818,7 @@ static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
void batadv_mcast_init(struct batadv_priv *bat_priv)
{
batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
- NULL, BATADV_TVLV_MCAST, 2,
+ NULL, NULL, BATADV_TVLV_MCAST, 2,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 8aec818d0bf6..a9770d8d6d36 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -17,23 +17,16 @@
*/
enum batadv_forw_mode {
/**
- * @BATADV_FORW_ALL: forward the packet to all nodes (currently via
- * classic flooding)
+ * @BATADV_FORW_BCAST: forward the packet to all nodes via a batman-adv
+ * broadcast packet
*/
- BATADV_FORW_ALL,
+ BATADV_FORW_BCAST,
/**
- * @BATADV_FORW_SOME: forward the packet to some nodes (currently via
- * a multicast-to-unicast conversion and the BATMAN unicast routing
- * protocol)
+ * @BATADV_FORW_UCASTS: forward the packet to some nodes via one
+ * or more batman-adv unicast packets
*/
- BATADV_FORW_SOME,
-
- /**
- * @BATADV_FORW_SINGLE: forward the packet to a single node (currently
- * via the BATMAN unicast routing protocol)
- */
- BATADV_FORW_SINGLE,
+ BATADV_FORW_UCASTS,
/** @BATADV_FORW_NONE: don't forward, drop it */
BATADV_FORW_NONE,
@@ -43,14 +36,8 @@ enum batadv_forw_mode {
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **mcast_single_orig,
int *is_routable);
-int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- unsigned short vid,
- struct batadv_orig_node *orig_node);
-
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, int is_routable);
@@ -69,20 +56,9 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
static inline enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **mcast_single_orig,
int *is_routable)
{
- return BATADV_FORW_ALL;
-}
-
-static inline int
-batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- unsigned short vid,
- struct batadv_orig_node *orig_node)
-{
- kfree_skb(skb);
- return NET_XMIT_DROP;
+ return BATADV_FORW_BCAST;
}
static inline int
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index bf29fba4dde5..71ebd0284f95 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -25,8 +25,8 @@
#include <linux/lockdep.h>
#include <linux/net.h>
#include <linux/netdevice.h>
-#include <linux/prandom.h>
#include <linux/printk.h>
+#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
@@ -160,7 +160,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
batadv_nc_start_timer(bat_priv);
batadv_tvlv_handler_register(bat_priv, batadv_nc_tvlv_ogm_handler_v1,
- NULL, BATADV_TVLV_NC, 1,
+ NULL, NULL, BATADV_TVLV_NC, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
batadv_nc_tvlv_container_update(bat_priv);
return 0;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 83f31494ea4d..163cd43c4821 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1073,10 +1073,9 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
if (tvlv_buff_len > skb->len - hdr_size)
goto free_skb;
- ret = batadv_tvlv_containers_process(bat_priv, false, NULL,
- unicast_tvlv_packet->src,
- unicast_tvlv_packet->dst,
- tvlv_buff, tvlv_buff_len);
+ ret = batadv_tvlv_containers_process(bat_priv, BATADV_UNICAST_TVLV,
+ NULL, skb, tvlv_buff,
+ tvlv_buff_len);
if (ret != NET_RX_SUCCESS) {
ret = batadv_route_unicast_packet(skb, recv_if);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 0f5c0679b55a..125f4628687c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -48,7 +48,6 @@
#include "hard-interface.h"
#include "multicast.h"
#include "network-coding.h"
-#include "originator.h"
#include "send.h"
#include "translation-table.h"
@@ -196,8 +195,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
unsigned short vid;
u32 seqno;
int gw_mode;
- enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
- struct batadv_orig_node *mcast_single_orig = NULL;
+ enum batadv_forw_mode forw_mode = BATADV_FORW_BCAST;
int mcast_is_routable = 0;
int network_offset = ETH_HLEN;
__be16 proto;
@@ -301,14 +299,18 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
send:
if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
- &mcast_single_orig,
&mcast_is_routable);
- if (forw_mode == BATADV_FORW_NONE)
- goto dropped;
-
- if (forw_mode == BATADV_FORW_SINGLE ||
- forw_mode == BATADV_FORW_SOME)
+ switch (forw_mode) {
+ case BATADV_FORW_BCAST:
+ break;
+ case BATADV_FORW_UCASTS:
do_bcast = false;
+ break;
+ case BATADV_FORW_NONE:
+ fallthrough;
+ default:
+ goto dropped;
+ }
}
}
@@ -357,10 +359,7 @@ send:
if (ret)
goto dropped;
ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
- } else if (mcast_single_orig) {
- ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
- mcast_single_orig);
- } else if (forw_mode == BATADV_FORW_SOME) {
+ } else if (forw_mode == BATADV_FORW_UCASTS) {
ret = batadv_mcast_forw_send(bat_priv, skb, vid,
mcast_is_routable);
} else {
@@ -386,7 +385,6 @@ dropped:
dropped_freed:
batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
- batadv_orig_node_put(mcast_single_orig);
batadv_hardif_put(primary_if);
return NETDEV_TX_OK;
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 01d30c1e412c..36ca31252a73 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -4168,11 +4168,11 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
}
batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
- batadv_tt_tvlv_unicast_handler_v1,
+ batadv_tt_tvlv_unicast_handler_v1, NULL,
BATADV_TVLV_TT, 1, BATADV_NO_FLAGS);
batadv_tvlv_handler_register(bat_priv, NULL,
- batadv_roam_tvlv_unicast_handler_v1,
+ batadv_roam_tvlv_unicast_handler_v1, NULL,
BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS);
INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 7ec2e2343884..2a583215d439 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -352,10 +352,9 @@ end:
* appropriate handlers
* @bat_priv: the bat priv with all the soft interface information
* @tvlv_handler: tvlv callback function handling the tvlv content
- * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
+ * @packet_type: indicates for which packet type the TVLV handler is called
* @orig_node: orig node emitting the ogm packet
- * @src: source mac address of the unicast packet
- * @dst: destination mac address of the unicast packet
+ * @skb: the skb the TVLV handler is called for
* @tvlv_value: tvlv content
* @tvlv_value_len: tvlv content length
*
@@ -364,15 +363,20 @@ end:
*/
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
struct batadv_tvlv_handler *tvlv_handler,
- bool ogm_source,
+ u8 packet_type,
struct batadv_orig_node *orig_node,
- u8 *src, u8 *dst,
- void *tvlv_value, u16 tvlv_value_len)
+ struct sk_buff *skb, void *tvlv_value,
+ u16 tvlv_value_len)
{
+ unsigned int tvlv_offset;
+ u8 *src, *dst;
+
if (!tvlv_handler)
return NET_RX_SUCCESS;
- if (ogm_source) {
+ switch (packet_type) {
+ case BATADV_IV_OGM:
+ case BATADV_OGM2:
if (!tvlv_handler->ogm_handler)
return NET_RX_SUCCESS;
@@ -383,19 +387,32 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
BATADV_NO_FLAGS,
tvlv_value, tvlv_value_len);
tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
- } else {
- if (!src)
- return NET_RX_SUCCESS;
-
- if (!dst)
+ break;
+ case BATADV_UNICAST_TVLV:
+ if (!skb)
return NET_RX_SUCCESS;
if (!tvlv_handler->unicast_handler)
return NET_RX_SUCCESS;
+ src = ((struct batadv_unicast_tvlv_packet *)skb->data)->src;
+ dst = ((struct batadv_unicast_tvlv_packet *)skb->data)->dst;
+
return tvlv_handler->unicast_handler(bat_priv, src,
dst, tvlv_value,
tvlv_value_len);
+ case BATADV_MCAST:
+ if (!skb)
+ return NET_RX_SUCCESS;
+
+ if (!tvlv_handler->mcast_handler)
+ return NET_RX_SUCCESS;
+
+ tvlv_offset = (unsigned char *)tvlv_value - skb->data;
+ skb_set_network_header(skb, tvlv_offset);
+ skb_set_transport_header(skb, tvlv_offset + tvlv_value_len);
+
+ return tvlv_handler->mcast_handler(bat_priv, skb);
}
return NET_RX_SUCCESS;
@@ -405,10 +422,9 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
* batadv_tvlv_containers_process() - parse the given tvlv buffer to call the
* appropriate handlers
* @bat_priv: the bat priv with all the soft interface information
- * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
+ * @packet_type: indicates for which packet type the TVLV handler is called
* @orig_node: orig node emitting the ogm packet
- * @src: source mac address of the unicast packet
- * @dst: destination mac address of the unicast packet
+ * @skb: the skb the TVLV handler is called for
* @tvlv_value: tvlv content
* @tvlv_value_len: tvlv content length
*
@@ -416,10 +432,10 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
* handler callbacks.
*/
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
- bool ogm_source,
+ u8 packet_type,
struct batadv_orig_node *orig_node,
- u8 *src, u8 *dst,
- void *tvlv_value, u16 tvlv_value_len)
+ struct sk_buff *skb, void *tvlv_value,
+ u16 tvlv_value_len)
{
struct batadv_tvlv_handler *tvlv_handler;
struct batadv_tvlv_hdr *tvlv_hdr;
@@ -441,20 +457,24 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
tvlv_hdr->version);
ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
- ogm_source, orig_node,
- src, dst, tvlv_value,
+ packet_type, orig_node, skb,
+ tvlv_value,
tvlv_value_cont_len);
batadv_tvlv_handler_put(tvlv_handler);
tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
tvlv_value_len -= tvlv_value_cont_len;
}
- if (!ogm_source)
+ if (packet_type != BATADV_IV_OGM &&
+ packet_type != BATADV_OGM2)
return ret;
rcu_read_lock();
hlist_for_each_entry_rcu(tvlv_handler,
&bat_priv->tvlv.handler_list, list) {
+ if (!tvlv_handler->ogm_handler)
+ continue;
+
if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
!(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
tvlv_handler->ogm_handler(bat_priv, orig_node,
@@ -490,7 +510,7 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
tvlv_value = batadv_ogm_packet + 1;
- batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
+ batadv_tvlv_containers_process(bat_priv, BATADV_IV_OGM, orig_node, NULL,
tvlv_value, tvlv_value_len);
}
@@ -504,6 +524,10 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
* @uptr: unicast tvlv handler callback function. This function receives the
* source & destination of the unicast packet as well as the tvlv content
* to process.
+ * @mptr: multicast packet tvlv handler callback function. This function
+ * receives the full skb to process, with the skb network header pointing
+ * to the current tvlv and the skb transport header pointing to the first
+ * byte after the current tvlv.
* @type: tvlv handler type to be registered
* @version: tvlv handler version to be registered
* @flags: flags to enable or disable TVLV API behavior
@@ -518,6 +542,8 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
u8 *src, u8 *dst,
void *tvlv_value,
u16 tvlv_value_len),
+ int (*mptr)(struct batadv_priv *bat_priv,
+ struct sk_buff *skb),
u8 type, u8 version, u8 flags)
{
struct batadv_tvlv_handler *tvlv_handler;
@@ -539,6 +565,7 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
tvlv_handler->ogm_handler = optr;
tvlv_handler->unicast_handler = uptr;
+ tvlv_handler->mcast_handler = mptr;
tvlv_handler->type = type;
tvlv_handler->version = version;
tvlv_handler->flags = flags;
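
A minimal sketch of a handler for the new mcast_handler slot, relying only on the header setup batadv_tvlv_call_handler() performs above; the handler name is hypothetical:

static int my_mcast_tvlv_handler(struct batadv_priv *bat_priv,
				 struct sk_buff *skb)
{
	/* per the mptr contract: the network header marks the start of
	 * this TVLV, the transport header the first byte after it
	 */
	unsigned char *tvlv = skb_network_header(skb);
	unsigned int tvlv_len = skb_transport_header(skb) - tvlv;

	if (!tvlv_len)
		return NET_RX_DROP;

	/* ... parse tvlv[0 .. tvlv_len) ... */
	return NET_RX_SUCCESS;
}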
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
index 4cf8af00fc11..e5697230d991 100644
--- a/net/batman-adv/tvlv.h
+++ b/net/batman-adv/tvlv.h
@@ -9,6 +9,7 @@
#include "main.h"
+#include <linux/skbuff.h>
#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>
@@ -34,14 +35,16 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
u8 *src, u8 *dst,
void *tvlv_value,
u16 tvlv_value_len),
+ int (*mptr)(struct batadv_priv *bat_priv,
+ struct sk_buff *skb),
u8 type, u8 version, u8 flags);
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
u8 type, u8 version);
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
- bool ogm_source,
+ u8 packet_type,
struct batadv_orig_node *orig_node,
- u8 *src, u8 *dst,
- void *tvlv_buff, u16 tvlv_buff_len);
+ struct sk_buff *skb, void *tvlv_buff,
+ u16 tvlv_buff_len);
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src,
const u8 *dst, u8 type, u8 version,
void *tvlv_value, u16 tvlv_value_len);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 758cd797a063..ca9449ec9836 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -2335,6 +2335,12 @@ struct batadv_tvlv_handler {
u8 *src, u8 *dst,
void *tvlv_value, u16 tvlv_value_len);
+ /**
+ * @mcast_handler: handler callback which is given the tvlv payload to
+ * process on incoming mcast packet
+ */
+ int (*mcast_handler)(struct batadv_priv *bat_priv, struct sk_buff *skb);
+
/** @type: tvlv type this handler feels responsible for */
u8 type;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index acf563fbdfd9..17b946f9ba31 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1061,8 +1061,15 @@ int hci_conn_del(struct hci_conn *conn)
if (conn->type == ACL_LINK) {
struct hci_conn *sco = conn->link;
- if (sco)
+ if (sco) {
sco->link = NULL;
+ /* Due to a race, the SCO connection might not be
+ * established yet at this point. Delete it now,
+ * otherwise it may get stuck and can never be deleted.
+ */
+ if (sco->handle == HCI_CONN_HANDLE_UNSET)
+ hci_conn_del(sco);
+ }
/* Unacked frames */
hdev->acl_cnt += conn->sent;
@@ -1243,6 +1250,8 @@ static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
if (conn != hci_lookup_le_connect(hdev))
goto done;
+ /* Flush to make sure we send create conn cancel command if needed */
+ flush_delayed_work(&conn->le_conn_timeout);
hci_conn_failed(conn, bt_status(err));
done:
@@ -1981,16 +1990,14 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
qos->latency = conn->le_conn_latency;
}
-static struct hci_conn *hci_bind_bis(struct hci_conn *conn,
- struct bt_iso_qos *qos)
+static void hci_bind_bis(struct hci_conn *conn,
+ struct bt_iso_qos *qos)
{
/* Update LINK PHYs according to QoS preference */
conn->le_tx_phy = qos->out.phy;
conn->le_rx_phy = qos->in.phy;
conn->iso_qos = *qos;
conn->state = BT_BOUND;
-
- return conn;
}
static int create_big_sync(struct hci_dev *hdev, void *data)
@@ -2119,11 +2126,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
if (IS_ERR(conn))
return conn;
- conn = hci_bind_bis(conn, qos);
- if (!conn) {
- hci_conn_drop(conn);
- return ERR_PTR(-ENOMEM);
- }
+ hci_bind_bis(conn, qos);
/* Add Basic Announcement into Periodic Adv Data if BASE is set */
if (base_len && base) {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a3e0dc6a6e73..adfc3ea06d08 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2683,14 +2683,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
if (IS_ERR(skb))
return PTR_ERR(skb);
- /* Channel lock is released before requesting new skb and then
- * reacquired thus we need to recheck channel state.
- */
- if (chan->state != BT_CONNECTED) {
- kfree_skb(skb);
- return -ENOTCONN;
- }
-
l2cap_do_send(chan, skb);
return len;
}
@@ -2735,14 +2727,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
if (IS_ERR(skb))
return PTR_ERR(skb);
- /* Channel lock is released before requesting new skb and then
- * reacquired thus we need to recheck channel state.
- */
- if (chan->state != BT_CONNECTED) {
- kfree_skb(skb);
- return -ENOTCONN;
- }
-
l2cap_do_send(chan, skb);
err = len;
break;
@@ -2763,14 +2747,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
*/
err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
- /* The channel could have been closed while segmenting,
- * check that it is still connected.
- */
- if (chan->state != BT_CONNECTED) {
- __skb_queue_purge(&seg_queue);
- err = -ENOTCONN;
- }
-
if (err)
break;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index ca8f07f3542b..eebe256104bc 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1624,6 +1624,14 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
if (!skb)
return ERR_PTR(err);
+ /* Channel lock is released before requesting new skb and then
+ * reacquired thus we need to recheck channel state.
+ */
+ if (chan->state != BT_CONNECTED) {
+ kfree_skb(skb);
+ return ERR_PTR(-ENOTCONN);
+ }
+
skb->priority = sk->sk_priority;
bt_cb(skb)->l2cap.chan = chan;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index d2ea8e19aa1b..7add66f30e4d 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -859,6 +859,12 @@ static u32 get_supported_settings(struct hci_dev *hdev)
hdev->set_bdaddr)
settings |= MGMT_SETTING_CONFIGURATION;
+ if (cis_central_capable(hdev))
+ settings |= MGMT_SETTING_CIS_CENTRAL;
+
+ if (cis_peripheral_capable(hdev))
+ settings |= MGMT_SETTING_CIS_PERIPHERAL;
+
settings |= MGMT_SETTING_PHY_CONFIGURATION;
return settings;
@@ -932,6 +938,12 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
settings |= MGMT_SETTING_WIDEBAND_SPEECH;
+ if (cis_central_capable(hdev))
+ settings |= MGMT_SETTING_CIS_CENTRAL;
+
+ if (cis_peripheral_capable(hdev))
+ settings |= MGMT_SETTING_CIS_PERIPHERAL;
+
return settings;
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8d6fce9005bd..053ef8f25fae 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -35,6 +35,8 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/rfcomm.h>
+#include <trace/events/sock.h>
+
#define VERSION "1.11"
static bool disable_cfc;
@@ -186,6 +188,8 @@ static void rfcomm_l2state_change(struct sock *sk)
static void rfcomm_l2data_ready(struct sock *sk)
{
+ trace_sk_data_ready(sk);
+
BT_DBG("%p", sk);
rfcomm_schedule();
}
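
The sk_data_ready tracepoint wired up here (and in ceph below) can be attached from BPF; a hedged sketch following libbpf tp_btf conventions:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("tp_btf/sk_data_ready")
int BPF_PROG(on_sk_data_ready, struct sock *sk)
{
	bpf_printk("data ready on sk %p", sk);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";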
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 1ac4467928a9..ff4f89a2b02a 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -154,6 +154,23 @@ static bool bpf_dummy_ops_is_valid_access(int off, int size,
return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
+static int bpf_dummy_ops_check_member(const struct btf_type *t,
+ const struct btf_member *member,
+ const struct bpf_prog *prog)
+{
+ u32 moff = __btf_member_bit_offset(t, member) / 8;
+
+ switch (moff) {
+ case offsetof(struct bpf_dummy_ops, test_sleepable):
+ break;
+ default:
+ if (prog->aux->sleepable)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
@@ -208,6 +225,7 @@ static void bpf_dummy_unreg(void *kdata)
struct bpf_struct_ops bpf_bpf_dummy_ops = {
.verifier_ops = &bpf_dummy_verifier_ops,
.init = bpf_dummy_init,
+ .check_member = bpf_dummy_ops_check_member,
.init_member = bpf_dummy_init_member,
.reg = bpf_dummy_reg,
.unreg = bpf_dummy_unreg,
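
On the BPF side, the new check_member means only the test_sleepable member may be backed by a sleepable program; a sketch mirroring the selftest conventions (libbpf "struct_ops.s" section for sleepable struct_ops):

SEC("struct_ops.s/test_sleepable")
int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state)
{
	return 0;	/* may use sleepable kfuncs/helpers here */
}

SEC(".struct_ops")
struct bpf_dummy_ops dummy_ops = {
	.test_sleepable = (void *)test_sleepable,
};

Attaching the same sleepable program to any other member (e.g. test_1) is rejected with -EINVAL by bpf_dummy_ops_check_member().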
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 2723623429ac..6f3d654b3339 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -234,7 +234,7 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
int i, n;
LIST_HEAD(list);
- n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
+ n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
if (unlikely(n == 0)) {
for (i = 0; i < nframes; i++)
xdp_return_frame(frames[i]);
@@ -396,10 +396,12 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
do {
run_ctx.prog_item = &item;
+ local_bh_disable();
if (xdp)
*retval = bpf_prog_run_xdp(prog, ctx);
else
*retval = bpf_prog_run(prog, ctx);
+ local_bh_enable();
} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
bpf_reset_run_ctx(old_ctx);
bpf_test_timer_leave(&t);
@@ -484,7 +486,7 @@ out:
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in vmlinux BTF");
-int noinline bpf_fentry_test1(int a)
+__bpf_kfunc int bpf_fentry_test1(int a)
{
return a + 1;
}
@@ -529,27 +531,35 @@ int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
return (long)arg->a;
}
-int noinline bpf_modify_return_test(int a, int *b)
+__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
*b += 1;
return a + *b;
}
-u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
+__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
return a + b + c + d;
}
-int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
+__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
return a + b;
}
-struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
+__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
return sk;
}
+long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
+{
+ /* Provoke the compiler to assume that the caller has sign-extended a,
+ * b and c on platforms where this is required (e.g. s390x).
+ */
+ return (long)a + (long)b + (long)c + d;
+}
+
struct prog_test_member1 {
int a;
};
@@ -574,21 +584,21 @@ static struct prog_test_ref_kfunc prog_test_struct = {
.cnt = REFCOUNT_INIT(1),
};
-noinline struct prog_test_ref_kfunc *
+__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
refcount_inc(&prog_test_struct.cnt);
return &prog_test_struct;
}
-noinline struct prog_test_member *
+__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
WARN_ON_ONCE(1);
return NULL;
}
-noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
+__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
if (!p)
return;
@@ -596,11 +606,11 @@ noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
refcount_dec(&p->cnt);
}
-noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
+__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}
-noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
+__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
WARN_ON_ONCE(1);
}
@@ -613,12 +623,14 @@ static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const i
return (int *)p;
}
-noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size)
+__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
+ const int rdwr_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}
-noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
+__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
+ const int rdonly_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
@@ -628,16 +640,17 @@ noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
* Acquire functions must return struct pointers, so these ones are
* failing.
*/
-noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
+__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
+ const int rdonly_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
-noinline void bpf_kfunc_call_int_mem_release(int *p)
+__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}
-noinline struct prog_test_ref_kfunc *
+__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
@@ -686,50 +699,55 @@ struct prog_test_fail3 {
char arr2[];
};
-noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
+__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}
-noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
+__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}
-noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
+__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}
-noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
+__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}
-noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
+__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}
-noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
+__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}
-noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}
-noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}
-noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}
-noinline void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
+__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
}
-noinline void bpf_kfunc_call_test_destructive(void)
+__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}
+__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
+{
+ return arg;
+}
+
__diag_pop();
BTF_SET8_START(bpf_test_modify_return_ids)
@@ -746,6 +764,7 @@ BTF_SET8_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
@@ -767,6 +786,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_SET8_END(test_sk_check_kfunc_ids)
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
@@ -1300,6 +1320,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
return -EINVAL;
+ if (bpf_prog_is_dev_bound(prog->aux))
+ return -EINVAL;
+
if (do_live) {
if (!batch_size)
batch_size = NAPI_POLL_WEIGHT;
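
bpf_kfunc_call_test4 exists to exercise sign extension across the kfunc call boundary; a hedged sketch of the calling side, with the kfunc declared via __ksym as is conventional:

extern long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym;

SEC("tc")
int kfunc_call_test4(struct __sk_buff *skb)
{
	/* negative values only sum correctly if a, b and c were
	 * properly sign-extended at the call boundary (e.g. on s390x)
	 */
	long sum = bpf_kfunc_call_test4(-3, -30, -200, -1000);

	return sum == -1233 ? 0 : 1;
}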
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ad13b48e3e08..24f01ff113f0 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -269,7 +269,7 @@ static void brport_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t
net_ns_get_ownership(dev_net(p->dev), uid, gid);
}
-static struct kobj_type brport_ktype = {
+static const struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
.sysfs_ops = &brport_sysfs_ops,
#endif
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 00e5743647b0..25c48d81a597 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -259,7 +259,7 @@ static int __mdb_fill_info(struct sk_buff *skb,
#endif
} else {
ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
- e.state = MDB_PG_FLAGS_PERMANENT;
+ e.state = MDB_PERMANENT;
}
e.addr.proto = mp->addr.proto;
nest_ent = nla_nest_start_noflag(skb,
@@ -421,8 +421,6 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
- cb->seq = net->dev_base_seq;
-
for_each_netdev_rcu(net, dev) {
if (netif_is_bridge_master(dev)) {
struct net_bridge *br = netdev_priv(dev);
@@ -685,51 +683,58 @@ static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};
-static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
- struct netlink_ext_ack *extack)
+static int validate_mdb_entry(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
{
+ struct br_mdb_entry *entry = nla_data(attr);
+
+ if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
+ return -EINVAL;
+ }
+
if (entry->ifindex == 0) {
NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
- return false;
+ return -EINVAL;
}
if (entry->addr.proto == htons(ETH_P_IP)) {
if (!ipv4_is_multicast(entry->addr.u.ip4)) {
NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
- return false;
+ return -EINVAL;
}
if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
- return false;
+ return -EINVAL;
}
#if IS_ENABLED(CONFIG_IPV6)
} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
- return false;
+ return -EINVAL;
}
#endif
} else if (entry->addr.proto == 0) {
/* L2 mdb */
if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
- return false;
+ return -EINVAL;
}
} else {
NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
- return false;
+ return -EINVAL;
}
if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
- return false;
+ return -EINVAL;
}
if (entry->vid >= VLAN_VID_MASK) {
NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
@@ -849,11 +854,10 @@ static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
}
p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
- MCAST_INCLUDE, cfg->rt_protocol);
- if (unlikely(!p)) {
- NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new (S, G) port group");
+ MCAST_INCLUDE, cfg->rt_protocol, extack);
+ if (unlikely(!p))
return -ENOMEM;
- }
+
rcu_assign_pointer(*pp, p);
if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
mod_timer(&p->timer,
@@ -1075,11 +1079,10 @@ static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
}
p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
- cfg->filter_mode, cfg->rt_protocol);
- if (unlikely(!p)) {
- NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new (*, G) port group");
+ cfg->filter_mode, cfg->rt_protocol,
+ extack);
+ if (unlikely(!p))
return -ENOMEM;
- }
err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
if (err)
@@ -1101,8 +1104,7 @@ static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
return 0;
err_del_port_group:
- hlist_del_init(&p->mglist);
- kfree(p);
+ br_multicast_del_port_group(p);
return err;
}
@@ -1297,6 +1299,14 @@ static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
return 0;
}
+static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
+ [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
+ [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
+ validate_mdb_entry,
+ sizeof(struct br_mdb_entry)),
+ [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
+};
+
static int br_mdb_config_init(struct net *net, const struct nlmsghdr *nlh,
struct br_mdb_config *cfg,
struct netlink_ext_ack *extack)
@@ -1307,7 +1317,7 @@ static int br_mdb_config_init(struct net *net, const struct nlmsghdr *nlh,
int err;
err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
- MDBA_SET_ENTRY_MAX, NULL, extack);
+ MDBA_SET_ENTRY_MAX, mdba_policy, extack);
if (err)
return err;
@@ -1349,14 +1359,8 @@ static int br_mdb_config_init(struct net *net, const struct nlmsghdr *nlh,
NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
return -EINVAL;
}
- if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
- return -EINVAL;
- }
cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);
- if (!is_valid_mdb_entry(cfg->entry, extack))
- return -EINVAL;
if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
struct net_device *pdev;
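
The shape of the NLA_POLICY_VALIDATE_FN pattern used above, generalized; the my_* names are placeholders. Validation runs during netlink parsing, so later handlers can assume a well-formed, correctly sized payload:

static int my_validate(const struct nlattr *attr,
		       struct netlink_ext_ack *extack)
{
	const struct my_entry *e = nla_data(attr);

	if (nla_len(attr) != sizeof(*e)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid attribute length");
		return -EINVAL;
	}

	/* ... further semantic checks on *e ... */
	return 0;
}

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, my_validate,
						 sizeof(struct my_entry)),
};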
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index dea1ee1bd095..96d1fc78dd39 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -31,6 +31,7 @@
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
+#include <trace/events/bridge.h>
#include "br_private.h"
#include "br_private_mcast_eht.h"
@@ -234,6 +235,29 @@ out:
return pmctx;
}
+static struct net_bridge_mcast_port *
+br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
+{
+ struct net_bridge_mcast_port *pmctx = NULL;
+ struct net_bridge_vlan *vlan;
+
+ lockdep_assert_held_once(&port->br->multicast_lock);
+
+ if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
+ return NULL;
+
+ /* Take RCU to access the vlan. */
+ rcu_read_lock();
+
+ vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
+ if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
+ pmctx = &vlan->port_mcast_ctx;
+
+ rcu_read_unlock();
+
+ return pmctx;
+}
+
/* when snooping we need to check if the contexts should be used
* in the following order:
* - if pmctx is non-NULL (port), check if it should be used
@@ -668,6 +692,101 @@ void br_multicast_del_group_src(struct net_bridge_group_src *src,
__br_multicast_del_group_src(src);
}
+static int
+br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
+ struct netlink_ext_ack *extack,
+ const char *what)
+{
+ u32 max = READ_ONCE(pmctx->mdb_max_entries);
+ u32 n = READ_ONCE(pmctx->mdb_n_entries);
+
+ if (max && n >= max) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
+ what, n, max);
+ return -E2BIG;
+ }
+
+ WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
+ return 0;
+}
+
+static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
+{
+ u32 n = READ_ONCE(pmctx->mdb_n_entries);
+
+ WARN_ON_ONCE(n == 0);
+ WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
+}
+
+static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
+ const struct br_ip *group,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge_mcast_port *pmctx;
+ int err;
+
+ lockdep_assert_held_once(&port->br->multicast_lock);
+
+ /* Always count on the port context. */
+ err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
+ "Port");
+ if (err) {
+ trace_br_mdb_full(port->dev, group);
+ return err;
+ }
+
+ /* Only count on the VLAN context if VID is given, and if snooping on
+ * that VLAN is enabled.
+ */
+ if (!group->vid)
+ return 0;
+
+ pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
+ if (!pmctx)
+ return 0;
+
+ err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
+ if (err) {
+ trace_br_mdb_full(port->dev, group);
+ goto dec_one_out;
+ }
+
+ return 0;
+
+dec_one_out:
+ br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
+ return err;
+}
+
+static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
+{
+ struct net_bridge_mcast_port *pmctx;
+
+ lockdep_assert_held_once(&port->br->multicast_lock);
+
+ if (vid) {
+ pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
+ if (pmctx)
+ br_multicast_port_ngroups_dec_one(pmctx);
+ }
+ br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
+}
+
+u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
+{
+ return READ_ONCE(pmctx->mdb_n_entries);
+}
+
+void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
+{
+ WRITE_ONCE(pmctx->mdb_max_entries, max);
+}
+
+u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
+{
+ return READ_ONCE(pmctx->mdb_max_entries);
+}
+
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
struct net_bridge_port_group *pg;
@@ -702,6 +821,7 @@ void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
} else {
br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
}
+ br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
queue_work(system_long_wq, &br->mcast_gc_work);
@@ -1165,6 +1285,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
return mp;
if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
+ trace_br_mdb_full(br->dev, group);
br_mc_disabled_update(br->dev, false, NULL);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
return ERR_PTR(-E2BIG);
@@ -1284,14 +1405,22 @@ struct net_bridge_port_group *br_multicast_new_port_group(
unsigned char flags,
const unsigned char *src,
u8 filter_mode,
- u8 rt_protocol)
+ u8 rt_protocol,
+ struct netlink_ext_ack *extack)
{
struct net_bridge_port_group *p;
+ int err;
- p = kzalloc(sizeof(*p), GFP_ATOMIC);
- if (unlikely(!p))
+ err = br_multicast_port_ngroups_inc(port, group, extack);
+ if (err)
return NULL;
+ p = kzalloc(sizeof(*p), GFP_ATOMIC);
+ if (unlikely(!p)) {
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
+ goto dec_out;
+ }
+
p->key.addr = *group;
p->key.port = port;
p->flags = flags;
@@ -1305,8 +1434,8 @@ struct net_bridge_port_group *br_multicast_new_port_group(
if (!br_multicast_is_star_g(group) &&
rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
br_sg_port_rht_params)) {
- kfree(p);
- return NULL;
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
+ goto free_out;
}
rcu_assign_pointer(p->next, next);
@@ -1320,6 +1449,25 @@ struct net_bridge_port_group *br_multicast_new_port_group(
eth_broadcast_addr(p->eth_addr);
return p;
+
+free_out:
+ kfree(p);
+dec_out:
+ br_multicast_port_ngroups_dec(port, group->vid);
+ return NULL;
+}
+
+void br_multicast_del_port_group(struct net_bridge_port_group *p)
+{
+ struct net_bridge_port *port = p->key.port;
+ __u16 vid = p->key.addr.vid;
+
+ hlist_del_init(&p->mglist);
+ if (!br_multicast_is_star_g(&p->key.addr))
+ rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
+ br_sg_port_rht_params);
+ kfree(p);
+ br_multicast_port_ngroups_dec(port, vid);
}
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
@@ -1387,7 +1535,7 @@ __br_multicast_add_group(struct net_bridge_mcast *brmctx,
}
p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
- filter_mode, RTPROT_KERNEL);
+ filter_mode, RTPROT_KERNEL, NULL);
if (unlikely(!p)) {
p = ERR_PTR(-ENOMEM);
goto out;
@@ -1933,6 +2081,25 @@ static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
br_ip4_multicast_add_router(brmctx, pmctx);
br_ip6_multicast_add_router(brmctx, pmctx);
}
+
+ if (br_multicast_port_ctx_is_vlan(pmctx)) {
+ struct net_bridge_port_group *pg;
+ u32 n = 0;
+
+ /* The mcast_n_groups counter might be wrong. First,
+ * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
+ * are flushed, so right after the toggle mcast_n_groups does not
+ * reflect the true value. Second, permanent entries added while
+ * BR_VLFLAG_MCAST_ENABLED was disabled are not counted either.
+ * Hence the counter has to be refreshed here.
+ */
+
+ hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
+ if (pg->key.addr.vid == pmctx->vlan->vid)
+ n++;
+ }
+ WRITE_ONCE(pmctx->mdb_n_entries, n);
+ }
}
void br_multicast_enable_port(struct net_bridge_port *port)
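
Usage note: the new per-port and per-port-VLAN limits are driven through the IFLA_BRPORT_MCAST_MAX_GROUPS and BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS attributes added below; with a matching iproute2 this is expected to surface as roughly "ip link set dev swp1 type bridge_slave mcast_max_groups 1024" (command shape assumed here, not verified). A max of zero means no limit, per the "if (max && n >= max)" check above.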
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 9554abcfd5b4..638a4d5359db 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -214,7 +214,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto csum_error;
- len = ntohs(iph->tot_len);
+ len = skb_ip_totlen(skb);
if (skb->len < len) {
__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
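
Why skb_ip_totlen() replaces ntohs(iph->tot_len) here (and in nf_conntrack_bridge.c below): with IPv4 BIG TCP, a GSO packet larger than 64KiB carries tot_len == 0 and the true length must be derived from the skb. A simplified sketch of the helper's logic; the real one lives in include/linux/ip.h:

static inline u32 example_skb_ip_totlen(const struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	u32 len = ntohs(iph->tot_len);

	/* tot_len == 0 on a GSO TCP skb means "longer than 64KiB" */
	if (!len && skb_is_gso(skb) && skb_is_gso_tcp(skb))
		len = skb->len - skb_network_offset(skb);

	return len;
}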
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4316cc82ae17..9173e52b89e2 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -202,6 +202,8 @@ static inline size_t br_port_info_size(void)
+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */
+ + nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_N_GROUPS */
+ + nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_MAX_GROUPS */
#endif
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_GROUP_FWD_MASK */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_RING_OPEN */
@@ -298,7 +300,11 @@ static int br_port_fill_attrs(struct sk_buff *skb,
nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
p->multicast_eht_hosts_limit) ||
nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
- p->multicast_eht_hosts_cnt))
+ p->multicast_eht_hosts_cnt) ||
+ nla_put_u32(skb, IFLA_BRPORT_MCAST_N_GROUPS,
+ br_multicast_ngroups_get(&p->multicast_ctx)) ||
+ nla_put_u32(skb, IFLA_BRPORT_MCAST_MAX_GROUPS,
+ br_multicast_ngroups_get_max(&p->multicast_ctx)))
return -EMSGSIZE;
#endif
@@ -858,6 +864,8 @@ static int br_afspec(struct net_bridge *br,
}
static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
+ [IFLA_BRPORT_UNSPEC] = { .strict_start_type =
+ IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + 1 },
[IFLA_BRPORT_STATE] = { .type = NLA_U8 },
[IFLA_BRPORT_COST] = { .type = NLA_U32 },
[IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
@@ -881,6 +889,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_MAB] = { .type = NLA_U8 },
[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 },
+ [IFLA_BRPORT_MCAST_N_GROUPS] = { .type = NLA_REJECT },
+ [IFLA_BRPORT_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
};
/* Change the state of the port and notify spanning tree */
@@ -1015,6 +1025,13 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
if (err)
return err;
}
+
+ if (tb[IFLA_BRPORT_MCAST_MAX_GROUPS]) {
+ u32 max_groups;
+
+ max_groups = nla_get_u32(tb[IFLA_BRPORT_MCAST_MAX_GROUPS]);
+ br_multicast_ngroups_set_max(&p->multicast_ctx, max_groups);
+ }
#endif
if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c
index 8914290c75d4..17abf092f7ca 100644
--- a/net/bridge/br_netlink_tunnel.c
+++ b/net/bridge/br_netlink_tunnel.c
@@ -188,6 +188,9 @@ initvars:
}
static const struct nla_policy vlan_tunnel_policy[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1] = {
+ [IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC] = {
+ .strict_start_type = IFLA_BRIDGE_VLAN_TUNNEL_FLAGS + 1
+ },
[IFLA_BRIDGE_VLAN_TUNNEL_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_VLAN_TUNNEL_VID] = { .type = NLA_U16 },
[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS] = { .type = NLA_U16 },
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 15ef7fd508ee..cef5f6ea850c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -126,6 +126,8 @@ struct net_bridge_mcast_port {
struct hlist_node ip6_rlist;
#endif /* IS_ENABLED(CONFIG_IPV6) */
unsigned char multicast_router;
+ u32 mdb_n_entries;
+ u32 mdb_max_entries;
#endif /* CONFIG_BRIDGE_IGMP_SNOOPING */
};
@@ -956,7 +958,9 @@ br_multicast_new_port_group(struct net_bridge_port *port,
const struct br_ip *group,
struct net_bridge_port_group __rcu *next,
unsigned char flags, const unsigned char *src,
- u8 filter_mode, u8 rt_protocol);
+ u8 filter_mode, u8 rt_protocol,
+ struct netlink_ext_ack *extack);
+void br_multicast_del_port_group(struct net_bridge_port_group *p);
int br_mdb_hash_init(struct net_bridge *br);
void br_mdb_hash_fini(struct net_bridge *br);
void br_mdb_notify(struct net_device *dev, struct net_bridge_mdb_entry *mp,
@@ -974,6 +978,9 @@ void br_multicast_uninit_stats(struct net_bridge *br);
void br_multicast_get_stats(const struct net_bridge *br,
const struct net_bridge_port *p,
struct br_mcast_stats *dest);
+u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx);
+void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max);
+u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx);
void br_mdb_init(void);
void br_mdb_uninit(void);
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
@@ -1757,7 +1764,8 @@ static inline u16 br_vlan_flags(const struct net_bridge_vlan *v, u16 pvid)
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
const struct net_bridge_vlan *range_end);
-bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v);
+bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v,
+ const struct net_bridge_port *p);
size_t br_vlan_opts_nl_size(void);
int br_vlan_process_options(const struct net_bridge *br,
const struct net_bridge_port *p,
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 7eb6fd5bb917..de18e9c1d7a7 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -104,9 +104,8 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
return 0;
if (err) {
- if (extack && !extack->_msg)
- NL_SET_ERR_MSG_MOD(extack,
- "bridge flag offload is not supported");
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "bridge flag offload is not supported");
return -EOPNOTSUPP;
}
@@ -115,9 +114,8 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
err = switchdev_port_attr_set(p->dev, &attr, extack);
if (err) {
- if (extack && !extack->_msg)
- NL_SET_ERR_MSG_MOD(extack,
- "error setting offload flag on port");
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "error setting offload flag on port");
return err;
}
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bc75fa1e4666..8a3dbc09ba38 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1816,6 +1816,7 @@ out_err:
/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
const struct net_bridge_vlan *v_opts,
+ const struct net_bridge_port *p,
u16 flags,
bool dump_stats)
{
@@ -1842,7 +1843,7 @@ static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
goto out_err;
if (v_opts) {
- if (!br_vlan_opts_fill(skb, v_opts))
+ if (!br_vlan_opts_fill(skb, v_opts, p))
goto out_err;
if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
@@ -1925,7 +1926,7 @@ void br_vlan_notify(const struct net_bridge *br,
goto out_kfree;
}
- if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
+ if (!br_vlan_fill_vids(skb, vid, vid_range, v, p, flags, false))
goto out_err;
nlmsg_end(skb, nlh);
@@ -2030,7 +2031,7 @@ static int br_vlan_dump_dev(const struct net_device *dev,
if (!br_vlan_fill_vids(skb, range_start->vid,
range_end->vid, range_start,
- vlan_flags, dump_stats)) {
+ p, vlan_flags, dump_stats)) {
err = -EMSGSIZE;
break;
}
@@ -2056,7 +2057,7 @@ update_end:
else if (!dump_global &&
!br_vlan_fill_vids(skb, range_start->vid,
range_end->vid, range_start,
- br_vlan_flags(range_start, pvid),
+ p, br_vlan_flags(range_start, pvid),
dump_stats))
err = -EMSGSIZE;
}
@@ -2131,6 +2132,8 @@ static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] =
[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 },
+ [BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS] = { .type = NLA_REJECT },
+ [BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
};
static int br_vlan_rtm_process_one(struct net_device *dev,
diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
index a2724d03278c..e378c2f3a9e2 100644
--- a/net/bridge/br_vlan_options.c
+++ b/net/bridge/br_vlan_options.c
@@ -48,7 +48,8 @@ bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
curr_mc_rtr == range_mc_rtr;
}
-bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
+bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v,
+ const struct net_bridge_port *p)
{
if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) ||
!__vlan_tun_put(skb, v))
@@ -58,6 +59,12 @@ bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
br_vlan_multicast_router(v)))
return false;
+ if (p && !br_multicast_port_ctx_vlan_disabled(&v->port_mcast_ctx) &&
+ (nla_put_u32(skb, BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS,
+ br_multicast_ngroups_get(&v->port_mcast_ctx)) ||
+ nla_put_u32(skb, BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS,
+ br_multicast_ngroups_get_max(&v->port_mcast_ctx))))
+ return false;
#endif
return true;
@@ -70,6 +77,8 @@ size_t br_vlan_opts_nl_size(void)
+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_TINFO_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_MCAST_ROUTER */
+ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS */
+ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS */
#endif
+ 0;
}
@@ -212,6 +221,22 @@ static int br_vlan_process_one_opts(const struct net_bridge *br,
return err;
*changed = true;
}
+ if (tb[BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS]) {
+ u32 val;
+
+ if (!p) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't set mcast_max_groups for non-port vlans");
+ return -EINVAL;
+ }
+ if (br_multicast_port_ctx_vlan_disabled(&v->port_mcast_ctx)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multicast snooping disabled on this VLAN");
+ return -EINVAL;
+ }
+
+ val = nla_get_u32(tb[BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS]);
+ br_multicast_ngroups_set_max(&v->port_mcast_ctx, val);
+ *changed = true;
+ }
#endif
return 0;
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 5c5dd437f1c2..71056ee84773 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -212,7 +212,7 @@ static int nf_ct_br_ip_check(const struct sk_buff *skb)
iph->version != 4)
return -1;
- len = ntohs(iph->tot_len);
+ len = skb_ip_totlen(skb);
if (skb->len < nhoff + len ||
len < (iph->ihl * 4))
return -1;
@@ -256,7 +256,7 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return NF_ACCEPT;
- len = ntohs(ip_hdr(skb)->tot_len);
+ len = skb_ip_totlen(skb);
if (pskb_trim_rcsum(skb, len))
return NF_ACCEPT;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 78c9729a6057..4eebcc66c19a 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -533,10 +533,6 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
if (msg->msg_namelen)
goto err;
- ret = -EINVAL;
- if (unlikely(msg->msg_iter.nr_segs == 0) ||
- unlikely(msg->msg_iter.iov->iov_base == NULL))
- goto err;
noblock = msg->msg_flags & MSG_DONTWAIT;
timeo = sock_sndtimeo(sk, noblock);
diff --git a/net/can/gw.c b/net/can/gw.c
index 23a3d89cad81..37528826935e 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1139,6 +1139,13 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
if (gwj->dst.dev->type != ARPHRD_CAN)
goto out;
+ /* is sending the skb back to the incoming interface intended? */
+ if (gwj->src.dev == gwj->dst.dev &&
+ !(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK)) {
+ err = -EINVAL;
+ goto out;
+ }
+
ASSERT_RTNL();
err = cgw_register_filter(net, gwj);
diff --git a/net/can/isotp.c b/net/can/isotp.c
index fc81d77724a1..9bc344851704 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -1220,6 +1220,9 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
if (len < ISOTP_MIN_NAMELEN)
return -EINVAL;
+ if (addr->can_family != AF_CAN)
+ return -EINVAL;
+
/* sanitize tx CAN identifier */
if (tx_id & CAN_EFF_FLAG)
tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
diff --git a/net/can/raw.c b/net/can/raw.c
index ba86782ba8bb..f64469b98260 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -523,6 +523,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
struct can_filter sfilter; /* single filter */
struct net_device *dev = NULL;
can_err_mask_t err_mask = 0;
+ int fd_frames;
int count = 0;
int err = 0;
@@ -664,17 +665,17 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
break;
case CAN_RAW_FD_FRAMES:
- if (optlen != sizeof(ro->fd_frames))
+ if (optlen != sizeof(fd_frames))
return -EINVAL;
- if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
+ if (copy_from_sockptr(&fd_frames, optval, optlen))
return -EFAULT;
/* Enabling CAN XL includes CAN FD */
- if (ro->xl_frames && !ro->fd_frames) {
- ro->fd_frames = ro->xl_frames;
+ if (ro->xl_frames && !fd_frames)
return -EINVAL;
- }
+
+ ro->fd_frames = fd_frames;
break;
case CAN_RAW_XL_FRAMES:
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1d06e114ba3f..cd7b0bf5369e 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -17,6 +17,7 @@
#endif /* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>
+#include <trace/events/sock.h>
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
@@ -344,6 +345,9 @@ static void con_sock_state_closed(struct ceph_connection *con)
static void ceph_sock_data_ready(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
+
+ trace_sk_data_ready(sk);
+
if (atomic_read(&con->msgr->stopping)) {
return;
}
diff --git a/net/core/Makefile b/net/core/Makefile
index 5857cec87b83..8f367813bc68 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -12,7 +12,8 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
- fib_notifier.o xdp.o flow_offload.o gro.o
+ fib_notifier.o xdp.o flow_offload.o gro.o \
+ netdev-genl.o netdev-genl-gen.o
obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
@@ -33,7 +34,6 @@ obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o
obj-$(CONFIG_DST_CACHE) += dst_cache.o
obj-$(CONFIG_HWBM) += hwbm.o
-obj-$(CONFIG_NET_DEVLINK) += devlink.o
obj-$(CONFIG_GRO_CELLS) += gro_cells.o
obj-$(CONFIG_FAILOVER) += failover.o
obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
diff --git a/net/core/dev.c b/net/core/dev.c
index f23e287602b7..18dc8d75ead9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1614,6 +1614,7 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
+ N(XDP_FEAT_CHANGE)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
@@ -1840,7 +1841,7 @@ EXPORT_SYMBOL(register_netdevice_notifier_net);
* @nb: notifier
*
* Unregister a notifier previously registered by
- * register_netdevice_notifier(). The notifier is unlinked into the
+ * register_netdevice_notifier_net(). The notifier is unlinked from the
* kernel structures and may then be reused. A negative errno code
* is returned on a failure.
*
@@ -2993,6 +2994,8 @@ void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
dev->tso_max_size = min(GSO_MAX_SIZE, size);
if (size < READ_ONCE(dev->gso_max_size))
netif_set_gso_max_size(dev, size);
+ if (size < READ_ONCE(dev->gso_ipv4_max_size))
+ netif_set_gso_ipv4_max_size(dev, size);
}
EXPORT_SYMBOL(netif_set_tso_max_size);
@@ -5016,7 +5019,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
WARN_ON(refcount_read(&skb->users));
if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
- trace_consume_skb(skb);
+ trace_consume_skb(skb, net_tx_action);
else
trace_kfree_skb(skb, net_tx_action,
SKB_DROP_REASON_NOT_SPECIFIED);
@@ -6608,17 +6611,16 @@ static int napi_threaded_poll(void *data)
static void skb_defer_free_flush(struct softnet_data *sd)
{
struct sk_buff *skb, *next;
- unsigned long flags;
/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
if (!READ_ONCE(sd->defer_list))
return;
- spin_lock_irqsave(&sd->defer_lock, flags);
+ spin_lock_irq(&sd->defer_lock);
skb = sd->defer_list;
sd->defer_list = NULL;
sd->defer_count = 0;
- spin_unlock_irqrestore(&sd->defer_lock, flags);
+ spin_unlock_irq(&sd->defer_lock);
while (skb != NULL) {
next = skb->next;
@@ -8311,9 +8313,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
}
}
if (dev->flags != old_flags) {
- pr_info("device %s %s promiscuous mode\n",
- dev->name,
- dev->flags & IFF_PROMISC ? "entered" : "left");
+ netdev_info(dev, "%s promiscuous mode\n",
+ dev->flags & IFF_PROMISC ? "entered" : "left");
if (audit_enabled) {
current_uid_gid(&uid, &gid);
audit_log(audit_context(), GFP_ATOMIC,
@@ -8381,6 +8382,8 @@ static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
}
}
if (dev->flags ^ old_flags) {
+ netdev_info(dev, "%s allmulticast mode\n",
+ dev->flags & IFF_ALLMULTI ? "entered" : "left");
dev_change_rx_flags(dev, IFF_ALLMULTI);
dev_set_rx_mode(dev);
if (notify)
@@ -9216,8 +9219,12 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
return -EEXIST;
}
- if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
- NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
+ if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
+ NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
+ return -EINVAL;
+ }
+ if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
+ NL_SET_ERR_MSG(extack, "Program bound to different device");
return -EINVAL;
}
if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
@@ -10603,6 +10610,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
dev->gso_max_segs = GSO_MAX_SEGS;
dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
+ dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
+ dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
dev->tso_max_segs = TSO_MAX_SEGS;
dev->upper_level = 1;
@@ -10822,6 +10831,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
dev_shutdown(dev);
dev_xdp_uninstall(dev);
+ bpf_dev_bound_netdev_unregister(dev);
netdev_offload_xstats_disable_all(dev);
diff --git a/net/core/dev.h b/net/core/dev.h
index 814ed5b7b960..e075e198092c 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -9,6 +9,7 @@ struct net_device;
struct netdev_bpf;
struct netdev_phys_item_id;
struct netlink_ext_ack;
+struct cpumask;
/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
@@ -100,6 +101,8 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
{
/* dev->gso_max_size is read locklessly from sk_setup_caps() */
WRITE_ONCE(dev->gso_max_size, size);
+ if (size <= GSO_LEGACY_MAX_SIZE)
+ WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
static inline void netif_set_gso_max_segs(struct net_device *dev,
@@ -114,6 +117,23 @@ static inline void netif_set_gro_max_size(struct net_device *dev,
{
/* This pairs with the READ_ONCE() in skb_gro_receive() */
WRITE_ONCE(dev->gro_max_size, size);
+ if (size <= GRO_LEGACY_MAX_SIZE)
+ WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
+static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
+ unsigned int size)
+{
+ /* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
+ WRITE_ONCE(dev->gso_ipv4_max_size, size);
+}
+
+static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
+ unsigned int size)
+{
+ /* This pairs with the READ_ONCE() in skb_gro_receive() */
+ WRITE_ONCE(dev->gro_ipv4_max_size, size);
+}
+
+int rps_cpumask_housekeeping(struct cpumask *mask);
#endif
diff --git a/net/core/dst.c b/net/core/dst.c
index 6d2dd03dafa8..31c08a3386d3 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -82,12 +82,8 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
if (ops->gc &&
!(flags & DST_NOCOUNT) &&
- dst_entries_get_fast(ops) > ops->gc_thresh) {
- if (ops->gc(ops)) {
- pr_notice_ratelimited("Route cache is full: consider increasing sysctl net.ipv6.route.max_size.\n");
- return NULL;
- }
- }
+ dst_entries_get_fast(ops) > ops->gc_thresh)
+ ops->gc(ops);
dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
if (!dst)
diff --git a/net/core/filter.c b/net/core/filter.c
index 43cc1fe58a2c..1d6f165923bf 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3381,13 +3381,17 @@ static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
+#define BPF_F_ADJ_ROOM_DECAP_L3_MASK (BPF_F_ADJ_ROOM_DECAP_L3_IPV4 | \
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV6)
+
#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \
BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \
BPF_F_ADJ_ROOM_ENCAP_L2( \
- BPF_ADJ_ROOM_ENCAP_L2_MASK))
+ BPF_ADJ_ROOM_ENCAP_L2_MASK) | \
+ BPF_F_ADJ_ROOM_DECAP_L3_MASK)
static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
u64 flags)
@@ -3501,6 +3505,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
int ret;
if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO |
+ BPF_F_ADJ_ROOM_DECAP_L3_MASK |
BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
return -EINVAL;
@@ -3519,6 +3524,14 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
if (unlikely(ret < 0))
return ret;
+ /* Match skb->protocol to new outer l3 protocol */
+ if (skb->protocol == htons(ETH_P_IP) &&
+ flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV6)
+ skb->protocol = htons(ETH_P_IPV6);
+ else if (skb->protocol == htons(ETH_P_IPV6) &&
+ flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV4)
+ skb->protocol = htons(ETH_P_IP);
+
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -3608,6 +3621,22 @@ BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
return -ENOTSUPP;
}
+ if (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) {
+ if (!shrink)
+ return -EINVAL;
+
+ switch (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) {
+ case BPF_F_ADJ_ROOM_DECAP_L3_IPV4:
+ len_min = sizeof(struct iphdr);
+ break;
+ case BPF_F_ADJ_ROOM_DECAP_L3_IPV6:
+ len_min = sizeof(struct ipv6hdr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
len_cur = skb->len - skb_network_offset(skb);
if ((shrink && (len_diff_abs >= len_cur ||
len_cur - len_diff_abs < len_min)) ||
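
A hedged sketch of the new decap flags from a tc BPF program: shrink away an outer IPv6 header while telling the helper the remaining inner L3 is IPv4, so skb->protocol and the len_min check above line up (function name and exact mode choice are illustrative):

SEC("tc")
int decap_outer_ipv6(struct __sk_buff *skb)
{
	/* remove the outer IPv6 header; inner packet is IPv4 */
	if (bpf_skb_adjust_room(skb, -(__s32)sizeof(struct ipv6hdr),
				BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_DECAP_L3_IPV4))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}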
@@ -4128,9 +4157,13 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
* bpf_redirect_info to actually enqueue the frame into a map type-specific
* bulk queue structure.
*
- * 3. Before exiting its NAPI poll loop, the driver will call xdp_do_flush(),
- * which will flush all the different bulk queues, thus completing the
- * redirect.
+ * 3. Before exiting its NAPI poll loop, the driver will call
+ * xdp_do_flush(), which will flush all the different bulk queues,
+ * thus completing the redirect. Note that xdp_do_flush() must be
+ * called before napi_complete_done() in the driver, as the
+ * XDP_REDIRECT logic relies on being inside a single NAPI instance
+ * through to the xdp_do_flush() call for RCU protection of all
+ * in-kernel data structures.
*/
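
The ordering requirement spelled out above, as a driver-shaped sketch; the mydrv_* names are hypothetical:

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	int work = mydrv_clean_rx_irq(napi, budget); /* may XDP_REDIRECT */

	/* must run before napi_complete_done(): the redirect bulk
	 * queues are only guaranteed valid within this NAPI instance
	 */
	xdp_do_flush();

	if (work < budget)
		napi_complete_done(napi, work);

	return work;
}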
/*
* Pointers to the map entries will be kept around for this whole sequence of
@@ -4285,16 +4318,13 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
enum bpf_map_type map_type = ri->map_type;
- /* XDP_REDIRECT is not fully supported yet for xdp frags since
- * not all XDP capable drivers can map non-linear xdp_frame in
- * ndo_xdp_xmit.
- */
- if (unlikely(xdp_buff_has_frags(xdp) &&
- map_type != BPF_MAP_TYPE_CPUMAP))
- return -EOPNOTSUPP;
+ if (map_type == BPF_MAP_TYPE_XSKMAP) {
+ /* XDP_REDIRECT is not supported AF_XDP yet. */
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ return -EOPNOTSUPP;
- if (map_type == BPF_MAP_TYPE_XSKMAP)
return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+ }
return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
xdp_prog);
@@ -4618,7 +4648,8 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
struct ip_tunnel_info *info;
if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
- BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
+ BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER |
+ BPF_F_NO_TUNNEL_KEY)))
return -EINVAL;
if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
switch (size) {
@@ -4656,6 +4687,8 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
info->key.tun_flags &= ~TUNNEL_CSUM;
if (flags & BPF_F_SEQ_NUMBER)
info->key.tun_flags |= TUNNEL_SEQ;
+ if (flags & BPF_F_NO_TUNNEL_KEY)
+ info->key.tun_flags &= ~TUNNEL_KEY;
info->key.tun_id = cpu_to_be64(from->tunnel_id);
info->key.tos = from->tunnel_tos;
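A hedged tc egress sketch of the new flag, modelled on its intended use with a collect_md GRE device configured without a key (addresses, TTL and section name are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int gre_set_tunnel_no_key(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	key.remote_ipv4 = 0xac100164;	/* 172.16.1.100, example peer */
	key.tunnel_ttl = 64;

	/* BPF_F_NO_TUNNEL_KEY clears TUNNEL_KEY so the stack does not
	 * emit a keyed GRE header on a keyless tunnel device.
	 */
	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX |
				   BPF_F_NO_TUNNEL_KEY))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";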
@@ -5172,7 +5205,7 @@ static int sol_tcp_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{
- if (sk->sk_prot->setsockopt != tcp_setsockopt)
+ if (sk->sk_protocol != IPPROTO_TCP)
return -EINVAL;
switch (optname) {
@@ -5689,12 +5722,8 @@ static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
#endif
#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
-static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
- const struct neighbour *neigh,
- const struct net_device *dev, u32 mtu)
+static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, u32 mtu)
{
- memcpy(params->dmac, neigh->ha, ETH_ALEN);
- memcpy(params->smac, dev->dev_addr, ETH_ALEN);
params->h_vlan_TCI = 0;
params->h_vlan_proto = 0;
if (mtu)
@@ -5805,21 +5834,29 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
if (likely(nhc->nhc_gw_family != AF_INET6)) {
if (nhc->nhc_gw_family)
params->ipv4_dst = nhc->nhc_gw.ipv4;
-
- neigh = __ipv4_neigh_lookup_noref(dev,
- (__force u32)params->ipv4_dst);
} else {
struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
params->family = AF_INET6;
*dst = nhc->nhc_gw.ipv6;
- neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
}
- if (!neigh)
+ if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
+ goto set_fwd_params;
+
+ if (likely(nhc->nhc_gw_family != AF_INET6))
+ neigh = __ipv4_neigh_lookup_noref(dev,
+ (__force u32)params->ipv4_dst);
+ else
+ neigh = __ipv6_neigh_lookup_noref_stub(dev, params->ipv6_dst);
+
+ if (!neigh || !(neigh->nud_state & NUD_VALID))
return BPF_FIB_LKUP_RET_NO_NEIGH;
+ memcpy(params->dmac, neigh->ha, ETH_ALEN);
+ memcpy(params->smac, dev->dev_addr, ETH_ALEN);
- return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
+set_fwd_params:
+ return bpf_fib_set_fwd_params(params, mtu);
}
#endif
@@ -5927,24 +5964,33 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
params->rt_metric = res.f6i->fib6_metric;
params->ifindex = dev->ifindex;
+ if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
+ goto set_fwd_params;
+
/* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
* not needed here.
*/
neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
- if (!neigh)
+ if (!neigh || !(neigh->nud_state & NUD_VALID))
return BPF_FIB_LKUP_RET_NO_NEIGH;
+ memcpy(params->dmac, neigh->ha, ETH_ALEN);
+ memcpy(params->smac, dev->dev_addr, ETH_ALEN);
- return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
+set_fwd_params:
+ return bpf_fib_set_fwd_params(params, mtu);
}
#endif
+#define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \
+ BPF_FIB_LOOKUP_SKIP_NEIGH)
+
BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
struct bpf_fib_lookup *, params, int, plen, u32, flags)
{
if (plen < sizeof(*params))
return -EINVAL;
- if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
+ if (flags & ~BPF_FIB_LOOKUP_MASK)
return -EINVAL;
switch (params->family) {
@@ -5982,7 +6028,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
if (plen < sizeof(*params))
return -EINVAL;
- if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
+ if (flags & ~BPF_FIB_LOOKUP_MASK)
return -EINVAL;
if (params->tot_len)
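For illustration, an XDP sketch of BPF_FIB_LOOKUP_SKIP_NEIGH (hypothetical program; packet parsing elided). Only the route is resolved: dmac/smac stay untouched and a missing neighbour entry no longer yields BPF_FIB_LKUP_RET_NO_NEIGH.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef AF_INET
#define AF_INET 2
#endif

SEC("xdp")
int xdp_route_only(struct xdp_md *ctx)
{
	struct bpf_fib_lookup params = {};

	params.family = AF_INET;
	params.ifindex = ctx->ingress_ifindex;
	/* params.ipv4_src/ipv4_dst and friends would be filled from
	 * the parsed packet here.
	 */

	if (bpf_fib_lookup(ctx, &params, sizeof(params),
			   BPF_FIB_LOOKUP_SKIP_NEIGH) !=
	    BPF_FIB_LKUP_RET_SUCCESS)
		return XDP_PASS;

	/* No L2 addresses were filled in; use the result for routing
	 * decisions only.
	 */
	return bpf_redirect(params.ifindex, 0);
}

char _license[] SEC("license") = "GPL";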
@@ -6844,9 +6890,6 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
FIELD)); \
} while (0)
- if (insn > insn_buf)
- return insn - insn_buf;
-
switch (si->off) {
case offsetof(struct bpf_tcp_sock, rtt_min):
BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
@@ -7503,7 +7546,7 @@ static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv4_proto = {
.arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
.arg1_size = sizeof(struct iphdr),
.arg2_type = ARG_PTR_TO_MEM,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv6, struct ipv6hdr *, iph,
@@ -7535,7 +7578,7 @@ static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv6_proto = {
.arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
.arg1_size = sizeof(struct ipv6hdr),
.arg2_type = ARG_PTR_TO_MEM,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv4, struct iphdr *, iph,
@@ -8731,7 +8774,7 @@ static bool xdp_is_valid_access(int off, int size,
}
if (type == BPF_WRITE) {
- if (bpf_prog_is_dev_bound(prog->aux)) {
+ if (bpf_prog_is_offloaded(prog->aux)) {
switch (off) {
case offsetof(struct xdp_md, rx_queue_index):
return __is_valid_xdp_access(off, size);
@@ -10144,9 +10187,6 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
} while (0)
- if (insn > insn_buf)
- return insn - insn_buf;
-
switch (si->off) {
case offsetof(struct bpf_sock_ops, op):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
diff --git a/net/core/gro.c b/net/core/gro.c
index 4bac7ea6e025..a606705a0859 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -171,16 +171,18 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
if (p->pp_recycle != skb->pp_recycle)
return -ETOOMANYREFS;
- /* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
- gro_max_size = READ_ONCE(p->dev->gro_max_size);
+ /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
+ gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
+ READ_ONCE(p->dev->gro_max_size) :
+ READ_ONCE(p->dev->gro_ipv4_max_size);
if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
return -E2BIG;
if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
- if (p->protocol != htons(ETH_P_IPV6) ||
- skb_headroom(p) < sizeof(struct hop_jumbo_hdr) ||
- ipv6_hdr(p)->nexthdr != IPPROTO_TCP ||
+ if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
+ (p->protocol == htons(ETH_P_IPV6) &&
+ skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
p->encapsulation)
return -E2BIG;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 4edd2176e238..6798f6d2423b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1674,11 +1674,21 @@ static void neigh_proxy_process(struct timer_list *t)
spin_unlock(&tbl->proxy_queue.lock);
}
+static unsigned long neigh_proxy_delay(struct neigh_parms *p)
+{
+ /* If proxy_delay is zero, do not call get_random_u32_below()
+ * as it is undefined behavior.
+ */
+ unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);
+
+ return proxy_delay ?
+ jiffies + get_random_u32_below(proxy_delay) : jiffies;
+}
+
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb)
{
- unsigned long sched_next = jiffies +
- get_random_u32_below(NEIGH_VAR(p, PROXY_DELAY));
+ unsigned long sched_next = neigh_proxy_delay(p);
if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
kfree_skb(skb);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index ca55dd747d6c..15e3f4606b5f 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -831,42 +831,18 @@ static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
return len < PAGE_SIZE ? len : -EINVAL;
}
-static ssize_t store_rps_map(struct netdev_rx_queue *queue,
- const char *buf, size_t len)
+static int netdev_rx_queue_set_rps_mask(struct netdev_rx_queue *queue,
+ cpumask_var_t mask)
{
- struct rps_map *old_map, *map;
- cpumask_var_t mask;
- int err, cpu, i;
static DEFINE_MUTEX(rps_map_mutex);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
-
- err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
- if (err) {
- free_cpumask_var(mask);
- return err;
- }
-
- if (!cpumask_empty(mask)) {
- cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
- cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ));
- if (cpumask_empty(mask)) {
- free_cpumask_var(mask);
- return -EINVAL;
- }
- }
+ struct rps_map *old_map, *map;
+ int cpu, i;
map = kzalloc(max_t(unsigned int,
RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
GFP_KERNEL);
- if (!map) {
- free_cpumask_var(mask);
+ if (!map)
return -ENOMEM;
- }
i = 0;
for_each_cpu_and(cpu, mask, cpu_online_mask)
@@ -893,9 +869,45 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
if (old_map)
kfree_rcu(old_map, rcu);
+ return 0;
+}
+
+int rps_cpumask_housekeeping(struct cpumask *mask)
+{
+ if (!cpumask_empty(mask)) {
+ cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
+ cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ));
+ if (cpumask_empty(mask))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t store_rps_map(struct netdev_rx_queue *queue,
+ const char *buf, size_t len)
+{
+ cpumask_var_t mask;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err)
+ goto out;
+
+ err = rps_cpumask_housekeeping(mask);
+ if (err)
+ goto out;
+ err = netdev_rx_queue_set_rps_mask(queue, mask);
+
+out:
free_cpumask_var(mask);
- return len;
+ return err ? : len;
}
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
@@ -1040,7 +1052,7 @@ static void rx_queue_get_ownership(const struct kobject *kobj,
net_ns_get_ownership(net, uid, gid);
}
-static struct kobj_type rx_queue_ktype __ro_after_init = {
+static const struct kobj_type rx_queue_ktype = {
.sysfs_ops = &rx_queue_sysfs_ops,
.release = rx_queue_release,
.default_groups = rx_queue_default_groups,
@@ -1048,6 +1060,18 @@ static struct kobj_type rx_queue_ktype __ro_after_init = {
.get_ownership = rx_queue_get_ownership,
};
+static int rx_queue_default_mask(struct net_device *dev,
+ struct netdev_rx_queue *queue)
+{
+#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
+ struct cpumask *rps_default_mask = READ_ONCE(dev_net(dev)->core.rps_default_mask);
+
+ if (rps_default_mask && !cpumask_empty(rps_default_mask))
+ return netdev_rx_queue_set_rps_mask(queue, rps_default_mask);
+#endif
+ return 0;
+}
+
static int rx_queue_add_kobject(struct net_device *dev, int index)
{
struct netdev_rx_queue *queue = dev->_rx + index;
@@ -1071,6 +1095,10 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
goto err;
}
+ error = rx_queue_default_mask(dev, queue);
+ if (error)
+ goto err;
+
kobject_uevent(kobj, KOBJ_ADD);
return error;
@@ -1643,7 +1671,7 @@ static void netdev_queue_get_ownership(const struct kobject *kobj,
net_ns_get_ownership(net, uid, gid);
}
-static struct kobj_type netdev_queue_ktype __ro_after_init = {
+static const struct kobj_type netdev_queue_ktype = {
.sysfs_ops = &netdev_queue_sysfs_ops,
.release = netdev_queue_release,
.default_groups = netdev_queue_default_groups,
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index c40cd8dd75c7..805b7385dd8d 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -41,6 +41,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_external_learn_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(fdb_delete);
EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(br_mdb_full);
#endif
#if IS_ENABLED(CONFIG_PAGE_POOL)
@@ -61,3 +62,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset);
EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_bad_csum);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(sk_data_ready);
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
new file mode 100644
index 000000000000..48812ec843f5
--- /dev/null
+++ b/net/core/netdev-genl-gen.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netdev-genl-gen.h"
+
+#include <linux/netdev.h>
+
+/* NETDEV_CMD_DEV_GET - do */
+static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1] = {
+ [NETDEV_A_DEV_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+/* Ops table for netdev */
+static const struct genl_split_ops netdev_nl_ops[2] = {
+ {
+ .cmd = NETDEV_CMD_DEV_GET,
+ .doit = netdev_nl_dev_get_doit,
+ .policy = netdev_dev_get_nl_policy,
+ .maxattr = NETDEV_A_DEV_IFINDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NETDEV_CMD_DEV_GET,
+ .dumpit = netdev_nl_dev_get_dumpit,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+};
+
+static const struct genl_multicast_group netdev_nl_mcgrps[] = {
+ [NETDEV_NLGRP_MGMT] = { "mgmt", },
+};
+
+struct genl_family netdev_nl_family __ro_after_init = {
+ .name = NETDEV_FAMILY_NAME,
+ .version = NETDEV_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = netdev_nl_ops,
+ .n_split_ops = ARRAY_SIZE(netdev_nl_ops),
+ .mcgrps = netdev_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(netdev_nl_mcgrps),
+};
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
new file mode 100644
index 000000000000..b16dc7e026bb
--- /dev/null
+++ b/net/core/netdev-genl-gen.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_NETDEV_GEN_H
+#define _LINUX_NETDEV_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <linux/netdev.h>
+
+int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info);
+int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+
+enum {
+ NETDEV_NLGRP_MGMT,
+};
+
+extern struct genl_family netdev_nl_family;
+
+#endif /* _LINUX_NETDEV_GEN_H */
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
new file mode 100644
index 000000000000..a4270fafdf11
--- /dev/null
+++ b/net/core/netdev-genl.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+#include "netdev-genl-gen.h"
+
+static int
+netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
+ u32 portid, u32 seq, int flags, u32 cmd)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(rsp, portid, seq, &netdev_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
+ nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
+ netdev->xdp_features, NETDEV_A_DEV_PAD)) {
+ genlmsg_cancel(rsp, hdr);
+ return -EINVAL;
+ }
+
+ genlmsg_end(rsp, hdr);
+
+ return 0;
+}
+
+static void
+netdev_genl_dev_notify(struct net_device *netdev, int cmd)
+{
+ struct sk_buff *ntf;
+
+ if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
+ NETDEV_NLGRP_MGMT))
+ return;
+
+ ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!ntf)
+ return;
+
+ if (netdev_nl_dev_fill(netdev, ntf, 0, 0, 0, cmd)) {
+ nlmsg_free(ntf);
+ return;
+ }
+
+ genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
+ 0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
+}
+
+int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *netdev;
+ struct sk_buff *rsp;
+ u32 ifindex;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
+ return -EINVAL;
+
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(genl_info_net(info), ifindex);
+ if (netdev)
+ err = netdev_nl_dev_fill(netdev, rsp, info->snd_portid,
+ info->snd_seq, 0, info->genlhdr->cmd);
+ else
+ err = -ENODEV;
+
+ rtnl_unlock();
+
+ if (err)
+ goto err_free_msg;
+
+ return genlmsg_reply(rsp, info);
+
+err_free_msg:
+ nlmsg_free(rsp);
+ return err;
+}
+
+int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct net_device *netdev;
+ int idx = 0, s_idx;
+ int h, s_h;
+ int err = 0;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+
+ rtnl_lock();
+
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ struct hlist_head *head;
+
+ idx = 0;
+ head = &net->dev_index_head[h];
+ hlist_for_each_entry(netdev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ err = netdev_nl_dev_fill(netdev, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, 0,
+ NETDEV_CMD_DEV_GET);
+ if (err < 0)
+ break;
+cont:
+ idx++;
+ }
+ }
+
+ rtnl_unlock();
+
+ if (err != -EMSGSIZE)
+ return err;
+
+ cb->args[1] = idx;
+ cb->args[0] = h;
+ cb->seq = net->dev_base_seq;
+
+ return skb->len;
+}
+
+static int netdev_genl_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
+ break;
+ case NETDEV_UNREGISTER:
+ netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
+ break;
+ case NETDEV_XDP_FEAT_CHANGE:
+ netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block netdev_genl_nb = {
+ .notifier_call = netdev_genl_netdevice_event,
+};
+
+static int __init netdev_genl_init(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&netdev_genl_nb);
+ if (err)
+ return err;
+
+ err = genl_register_family(&netdev_nl_family);
+ if (err)
+ goto err_unreg_ntf;
+
+ return 0;
+
+err_unreg_ntf:
+ unregister_netdevice_notifier(&netdev_genl_nb);
+ return err;
+}
+
+subsys_initcall(netdev_genl_init);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9be762e1d042..a089b704b986 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -682,7 +682,7 @@ int netpoll_setup(struct netpoll *np)
}
if (!netif_running(ndev)) {
- unsigned long atmost, atleast;
+ unsigned long atmost;
np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
@@ -694,7 +694,6 @@ int netpoll_setup(struct netpoll *np)
}
rtnl_unlock();
- atleast = jiffies + HZ/10;
atmost = jiffies + carrier_timeout * HZ;
while (!netif_carrier_ok(ndev)) {
if (time_after(jiffies, atmost)) {
@@ -704,15 +703,6 @@ int netpoll_setup(struct netpoll *np)
msleep(1);
}
- /* If carrier appears to come up instantly, we don't
- * trust it and pause so that we don't pump all our
- * queued console messages into the bitbucket.
- */
-
- if (time_before(jiffies, atleast)) {
- np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
- msleep(4000);
- }
rtnl_lock();
}
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 9b203d8660e4..193c18799865 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -511,8 +511,8 @@ static void page_pool_return_page(struct page_pool *pool, struct page *page)
static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
int ret;
- /* BH protection not needed if current is serving softirq */
- if (in_serving_softirq())
+ /* BH protection not needed if current is in softirq context */
+ if (in_softirq())
ret = ptr_ring_produce(&pool->ring, page);
else
ret = ptr_ring_produce_bh(&pool->ring, page);
@@ -570,7 +570,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
page_pool_dma_sync_for_device(pool, page,
dma_sync_size);
- if (allow_direct && in_serving_softirq() &&
+ if (allow_direct && in_softirq() &&
page_pool_recycle_in_cache(page, pool))
return NULL;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 64289bc98887..5d8eb57867a9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -58,7 +58,7 @@
#include "dev.h"
#define RTNL_MAX_TYPE 50
-#define RTNL_SLAVE_MAX_TYPE 40
+#define RTNL_SLAVE_MAX_TYPE 42
struct rtnl_link {
rtnl_doit_func doit;
@@ -1074,6 +1074,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
+ nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
+ nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
+ + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
+ + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
+ nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
+ nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
+ nla_total_size(1) /* IFLA_OPERSTATE */
@@ -1807,6 +1809,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
+ nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
+ nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
@@ -1968,6 +1972,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
[IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
[IFLA_ALLMULTI] = { .type = NLA_REJECT },
+ [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
+ [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -2883,6 +2889,29 @@ static int do_setlink(const struct sk_buff *skb,
}
}
+ if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
+ u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
+
+ if (max_size > dev->tso_max_size) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ if (dev->gso_ipv4_max_size ^ max_size) {
+ netif_set_gso_ipv4_max_size(dev, max_size);
+ status |= DO_SETLINK_MODIFIED;
+ }
+ }
+
+ if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
+ u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
+
+ if (dev->gro_ipv4_max_size ^ gro_max_size) {
+ netif_set_gro_ipv4_max_size(dev, gro_max_size);
+ status |= DO_SETLINK_MODIFIED;
+ }
+ }
+
if (tb[IFLA_OPERSTATE])
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
@@ -3325,6 +3354,10 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
if (tb[IFLA_GRO_MAX_SIZE])
netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
+ if (tb[IFLA_GSO_IPV4_MAX_SIZE])
+ netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
+ if (tb[IFLA_GRO_IPV4_MAX_SIZE])
+ netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
return dev;
}
diff --git a/net/core/scm.c b/net/core/scm.c
index 5c356f0dee30..acb7d776fa6e 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -229,6 +229,8 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
if (msg->msg_control_is_user) {
struct cmsghdr __user *cm = msg->msg_control_user;
+ check_object_size(data, cmlen - sizeof(*cm), true);
+
if (!user_write_access_begin(cm, cmlen))
goto efault;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a31ff4d83ecc..eb7d33b41e71 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,15 +79,44 @@
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
+#include <linux/textsearch.h>
#include "dev.h"
#include "sock_destructor.h"
-struct kmem_cache *skbuff_head_cache __ro_after_init;
+struct kmem_cache *skbuff_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
+
+/* skb_small_head_cache and related code are only supported
+ * for CONFIG_SLAB and CONFIG_SLUB.
+ * As soon as SLOB is removed from the kernel, we can clean this up.
+ */
+#if !defined(CONFIG_SLOB)
+# define HAVE_SKB_SMALL_HEAD_CACHE 1
+#endif
+
+#ifdef HAVE_SKB_SMALL_HEAD_CACHE
+static struct kmem_cache *skb_small_head_cache __ro_after_init;
+
+#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
+
+/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
+ * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
+ * size, and we can differentiate heads from skb_small_head_cache
+ * vs system slabs by looking at their size (skb_end_offset()).
+ */
+#define SKB_SMALL_HEAD_CACHE_SIZE \
+ (is_power_of_2(SKB_SMALL_HEAD_SIZE) ? \
+ (SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \
+ SKB_SMALL_HEAD_SIZE)
+
+#define SKB_SMALL_HEAD_HEADROOM \
+ SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
+#endif /* HAVE_SKB_SMALL_HEAD_CACHE */
+
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
@@ -256,7 +285,7 @@ static struct sk_buff *napi_skb_cache_get(void)
struct sk_buff *skb;
if (unlikely(!nc->skb_count)) {
- nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
+ nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
GFP_ATOMIC,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
@@ -265,7 +294,7 @@ static struct sk_buff *napi_skb_cache_get(void)
}
skb = nc->skb_cache[--nc->skb_count];
- kasan_unpoison_object_data(skbuff_head_cache, skb);
+ kasan_unpoison_object_data(skbuff_cache, skb);
return skb;
}
@@ -323,7 +352,7 @@ struct sk_buff *slab_build_skb(void *data)
struct sk_buff *skb;
unsigned int size;
- skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -374,7 +403,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -386,8 +415,6 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
/* build_skb() is wrapper over __build_skb(), that specifically
* takes care of skb->head and skb->pfmemalloc
- * This means that if @frag_size is not zero, then @data must be backed
- * by a page fragment, not kmalloc() or vmalloc()
*/
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
@@ -406,7 +433,7 @@ EXPORT_SYMBOL(build_skb);
* build_skb_around - build a network buffer around provided skb
* @skb: sk_buff provide by caller, must be memset cleared
* @data: data buffer provided by caller
- * @frag_size: size of data, or 0 if head was kmalloced
+ * @frag_size: size of data
*/
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size)
@@ -428,7 +455,7 @@ EXPORT_SYMBOL(build_skb_around);
/**
* __napi_build_skb - build a network buffer
* @data: data buffer provided by caller
- * @frag_size: size of data, or 0 if head was kmalloced
+ * @frag_size: size of data
*
* Version of __build_skb() that uses NAPI percpu caches to obtain
* skbuff_head instead of inplace allocation.
@@ -452,7 +479,7 @@ static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
/**
* napi_build_skb - build a network buffer
* @data: data buffer provided by caller
- * @frag_size: size of data, or 0 if head was kmalloced
+ * @frag_size: size of data
*
* Version of __napi_build_skb() that takes care of skb->head_frag
* and skb->pfmemalloc when the data is a page or page fragment.
@@ -479,17 +506,37 @@ EXPORT_SYMBOL(napi_build_skb);
* may be used. Otherwise, the packet data may be discarded until enough
* memory is free
*/
-static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
+static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
bool *pfmemalloc)
{
- void *obj;
bool ret_pfmemalloc = false;
+ unsigned int obj_size;
+ void *obj;
+
+ obj_size = SKB_HEAD_ALIGN(*size);
+#ifdef HAVE_SKB_SMALL_HEAD_CACHE
+ if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
+ !(flags & KMALLOC_NOT_NORMAL_BITS)) {
+ /* skb_small_head_cache has a non-power-of-two size,
+ * likely forcing SLUB to use order-3 pages.
+ * We deliberately attempt a NOMEMALLOC allocation only.
+ */
+ obj = kmem_cache_alloc_node(skb_small_head_cache,
+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+ node);
+ if (obj) {
+ *size = SKB_SMALL_HEAD_CACHE_SIZE;
+ goto out;
+ }
+ }
+#endif
+ *size = obj_size = kmalloc_size_roundup(obj_size);
/*
* Try a regular allocation, when that fails and we're not entitled
* to the reserves, fail.
*/
- obj = kmalloc_node_track_caller(size,
+ obj = kmalloc_node_track_caller(obj_size,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
if (obj || !(gfp_pfmemalloc_allowed(flags)))
@@ -497,7 +544,7 @@ static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
- obj = kmalloc_node_track_caller(size, flags, node);
+ obj = kmalloc_node_track_caller(obj_size, flags, node);
out:
if (pfmemalloc)
@@ -534,12 +581,11 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
{
struct kmem_cache *cache;
struct sk_buff *skb;
- unsigned int osize;
bool pfmemalloc;
u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
- ? skbuff_fclone_cache : skbuff_head_cache;
+ ? skbuff_fclone_cache : skbuff_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
@@ -559,18 +605,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
- size = SKB_DATA_ALIGN(size);
- size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- osize = kmalloc_size_roundup(size);
- data = kmalloc_reserve(osize, gfp_mask, node, &pfmemalloc);
+ data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
if (unlikely(!data))
goto nodata;
/* kmalloc_size_roundup() might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
- size = SKB_WITH_OVERHEAD(osize);
- prefetchw(data + size);
+ prefetchw(data + SKB_WITH_OVERHEAD(size));
/*
* Only clear those fields we need to clear, not those that we will
@@ -578,7 +620,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- __build_skb_around(skb, data, osize);
+ __build_skb_around(skb, data, size);
skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) {
@@ -633,8 +675,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
goto skb_success;
}
- len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- len = SKB_DATA_ALIGN(len);
+ len = SKB_HEAD_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
@@ -733,8 +774,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
} else {
- len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- len = SKB_DATA_ALIGN(len);
+ len = SKB_HEAD_ALIGN(len);
data = page_frag_alloc(&nc->page, len, gfp_mask);
pfmemalloc = nc->page.pfmemalloc;
@@ -810,6 +850,16 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data)
return page_pool_return_skb_page(virt_to_page(data));
}
+static void skb_kfree_head(void *head, unsigned int end_offset)
+{
+#ifdef HAVE_SKB_SMALL_HEAD_CACHE
+ if (end_offset == SKB_SMALL_HEAD_HEADROOM)
+ kmem_cache_free(skb_small_head_cache, head);
+ else
+#endif
+ kfree(head);
+}
+
static void skb_free_head(struct sk_buff *skb)
{
unsigned char *head = skb->head;
@@ -819,7 +869,7 @@ static void skb_free_head(struct sk_buff *skb)
return;
skb_free_frag(head);
} else {
- kfree(head);
+ skb_kfree_head(head, skb_end_offset(skb));
}
}
@@ -871,7 +921,7 @@ static void kfree_skbmem(struct sk_buff *skb)
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
- kmem_cache_free(skbuff_head_cache, skb);
+ kmem_cache_free(skbuff_cache, skb);
return;
case SKB_FCLONE_ORIG:
@@ -932,6 +982,21 @@ void __kfree_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(__kfree_skb);
+static __always_inline
+bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+{
+ if (unlikely(!skb_unref(skb)))
+ return false;
+
+ DEBUG_NET_WARN_ON_ONCE(reason <= 0 || reason >= SKB_DROP_REASON_MAX);
+
+ if (reason == SKB_CONSUMED)
+ trace_consume_skb(skb, __builtin_return_address(0));
+ else
+ trace_kfree_skb(skb, __builtin_return_address(0), reason);
+ return true;
+}
+
/**
* kfree_skb_reason - free an sk_buff with special reason
* @skb: buffer to free
@@ -944,28 +1009,58 @@ EXPORT_SYMBOL(__kfree_skb);
void __fix_address
kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
- if (unlikely(!skb_unref(skb)))
+ if (__kfree_skb_reason(skb, reason))
+ __kfree_skb(skb);
+}
+EXPORT_SYMBOL(kfree_skb_reason);
+
+#define KFREE_SKB_BULK_SIZE 16
+
+struct skb_free_array {
+ unsigned int skb_count;
+ void *skb_array[KFREE_SKB_BULK_SIZE];
+};
+
+static void kfree_skb_add_bulk(struct sk_buff *skb,
+ struct skb_free_array *sa,
+ enum skb_drop_reason reason)
+{
+ /* if SKB is a clone, don't handle this case */
+ if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
+ __kfree_skb(skb);
return;
+ }
- DEBUG_NET_WARN_ON_ONCE(reason <= 0 || reason >= SKB_DROP_REASON_MAX);
+ skb_release_all(skb, reason);
+ sa->skb_array[sa->skb_count++] = skb;
- if (reason == SKB_CONSUMED)
- trace_consume_skb(skb);
- else
- trace_kfree_skb(skb, __builtin_return_address(0), reason);
- __kfree_skb(skb);
+ if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
+ kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
+ sa->skb_array);
+ sa->skb_count = 0;
+ }
}
-EXPORT_SYMBOL(kfree_skb_reason);
-void kfree_skb_list_reason(struct sk_buff *segs,
- enum skb_drop_reason reason)
+void __fix_address
+kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
{
+ struct skb_free_array sa;
+
+ sa.skb_count = 0;
+
while (segs) {
struct sk_buff *next = segs->next;
- kfree_skb_reason(segs, reason);
+ if (__kfree_skb_reason(segs, reason)) {
+ skb_poison_list(segs);
+ kfree_skb_add_bulk(segs, &sa, reason);
+ }
+
segs = next;
}
+
+ if (sa.skb_count)
+ kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
@@ -1094,7 +1189,7 @@ void consume_skb(struct sk_buff *skb)
if (!skb_unref(skb))
return;
- trace_consume_skb(skb);
+ trace_consume_skb(skb, __builtin_return_address(0));
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
@@ -1109,7 +1204,7 @@ EXPORT_SYMBOL(consume_skb);
*/
void __consume_stateless_skb(struct sk_buff *skb)
{
- trace_consume_skb(skb);
+ trace_consume_skb(skb, __builtin_return_address(0));
skb_release_data(skb, SKB_CONSUMED);
kfree_skbmem(skb);
}
@@ -1119,15 +1214,15 @@ static void napi_skb_cache_put(struct sk_buff *skb)
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
u32 i;
- kasan_poison_object_data(skbuff_head_cache, skb);
+ kasan_poison_object_data(skbuff_cache, skb);
nc->skb_cache[nc->skb_count++] = skb;
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
- kasan_unpoison_object_data(skbuff_head_cache,
+ kasan_unpoison_object_data(skbuff_cache,
nc->skb_cache[i]);
- kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
+ kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
nc->skb_count = NAPI_SKB_CACHE_HALF;
}
@@ -1165,7 +1260,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
return;
/* if reaching here SKB is ready to free */
- trace_consume_skb(skb);
+ trace_consume_skb(skb, __builtin_return_address(0));
/* if SKB is a clone, don't handle this case */
if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
@@ -1311,14 +1406,18 @@ EXPORT_SYMBOL_GPL(skb_morph);
int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
- unsigned long max_pg, num_pg, new_pg, old_pg;
+ unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
struct user_struct *user;
if (capable(CAP_IPC_LOCK) || !size)
return 0;
+ rlim = rlimit(RLIMIT_MEMLOCK);
+ if (rlim == RLIM_INFINITY)
+ return 0;
+
num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
- max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ max_pg = rlim >> PAGE_SHIFT;
user = mmp->user ? : current_user();
old_pg = atomic_long_read(&user->locked_vm);
@@ -1711,7 +1810,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+ n = kmem_cache_alloc(skbuff_cache, gfp_mask);
if (!n)
return NULL;
@@ -1893,10 +1992,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- size = SKB_DATA_ALIGN(size);
- size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- size = kmalloc_size_roundup(size);
- data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
size = SKB_WITH_OVERHEAD(size);
@@ -1959,7 +2055,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
return 0;
nofrags:
- kfree(data);
+ skb_kfree_head(data, size);
nodata:
return -ENOMEM;
}
@@ -4584,7 +4680,7 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
- skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+ skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@@ -4596,6 +4692,19 @@ void __init skb_init(void)
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
+#ifdef HAVE_SKB_SMALL_HEAD_CACHE
+ /* usercopy should only access the first SKB_SMALL_HEAD_HEADROOM bytes.
+ * struct skb_shared_info is located at the end of skb->head,
+ * and should not be copied to/from user.
+ */
+ skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
+ SKB_SMALL_HEAD_CACHE_SIZE,
+ 0,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+ 0,
+ SKB_SMALL_HEAD_HEADROOM,
+ NULL);
+#endif
skb_extensions_init();
}
@@ -5450,7 +5559,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen) {
skb_release_head_state(skb);
- kmem_cache_free(skbuff_head_cache, skb);
+ kmem_cache_free(skbuff_cache, skb);
} else {
__kfree_skb(skb);
}
@@ -6244,10 +6353,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- size = SKB_DATA_ALIGN(size);
- size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- size = kmalloc_size_roundup(size);
- data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
size = SKB_WITH_OVERHEAD(size);
@@ -6263,7 +6369,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
if (skb_cloned(skb)) {
/* drop the old head gracefully */
if (skb_orphan_frags(skb, gfp_mask)) {
- kfree(data);
+ skb_kfree_head(data, size);
return -ENOMEM;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -6363,10 +6469,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- size = SKB_DATA_ALIGN(size);
- size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- size = kmalloc_size_roundup(size);
- data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
size = SKB_WITH_OVERHEAD(size);
@@ -6374,7 +6477,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
memcpy((struct skb_shared_info *)(data + size),
skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
if (skb_orphan_frags(skb, gfp_mask)) {
- kfree(data);
+ skb_kfree_head(data, size);
return -ENOMEM;
}
shinfo = (struct skb_shared_info *)(data + size);
@@ -6410,7 +6513,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
if (skb_has_frag_list(skb))
kfree_skb_list(skb_shinfo(skb)->frag_list);
- kfree(data);
+ skb_kfree_head(data, size);
return -ENOMEM;
}
skb_release_data(skb, SKB_CONSUMED);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 53d0251788aa..f81883759d38 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -8,6 +8,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
+#include <trace/events/sock.h>
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
@@ -1114,6 +1115,8 @@ static void sk_psock_strp_data_ready(struct sock *sk)
{
struct sk_psock *psock;
+ trace_sk_data_ready(sk);
+
rcu_read_lock();
psock = sk_psock(sk);
if (likely(psock)) {
@@ -1210,6 +1213,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
+ trace_sk_data_ready(sk);
+
if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
return;
sock->ops->read_skb(sk, sk_psock_verdict_recv);
diff --git a/net/core/sock.c b/net/core/sock.c
index 6f27c24016fe..341c565dbc26 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2340,17 +2340,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
smp_wmb();
refcount_set(&newsk->sk_refcnt, 2);
- /* Increment the counter in the same struct proto as the master
- * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
- * is the same as sk->sk_prot->socks, as this field was copied
- * with memcpy).
- *
- * This _changes_ the previous behaviour, where
- * tcp_create_openreq_child always was incrementing the
- * equivalent to tcp_prot->socks (inet_sock_nr), so this have
- * to be taken into account in all callers. -acme
- */
- sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
sk_tx_queue_clear(newsk);
RCU_INIT_POINTER(newsk->sk_wq, NULL);
@@ -2375,17 +2364,22 @@ void sk_free_unlock_clone(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
-static void sk_trim_gso_size(struct sock *sk)
+static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
{
- if (sk->sk_gso_max_size <= GSO_LEGACY_MAX_SIZE)
- return;
+ bool is_ipv6 = false;
+ u32 max_size;
+
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6 &&
- sk_is_tcp(sk) &&
- !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
- return;
+ is_ipv6 = (sk->sk_family == AF_INET6 &&
+ !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
#endif
- sk->sk_gso_max_size = GSO_LEGACY_MAX_SIZE;
+ /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
+ max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) :
+ READ_ONCE(dst->dev->gso_ipv4_max_size);
+ if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
+ max_size = GSO_LEGACY_MAX_SIZE;
+
+ return max_size - (MAX_TCP_HEADER + 1);
}
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
@@ -2405,10 +2399,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
- /* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */
- sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size);
- sk_trim_gso_size(sk);
- sk->sk_gso_max_size -= (MAX_TCP_HEADER + 1);
+ sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
}
@@ -3293,6 +3284,8 @@ void sock_def_readable(struct sock *sk)
{
struct socket_wq *wq;
+ trace_sk_data_ready(sk);
+
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
@@ -3381,7 +3374,7 @@ void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
}
EXPORT_SYMBOL(sk_stop_timer_sync);
-void sock_init_data(struct socket *sock, struct sock *sk)
+void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
{
sk_init_common(sk);
sk->sk_send_head = NULL;
@@ -3401,11 +3394,10 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_type = sock->type;
RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
sock->sk = sk;
- sk->sk_uid = SOCK_INODE(sock)->i_uid;
} else {
RCU_INIT_POINTER(sk->sk_wq, NULL);
- sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
}
+ sk->sk_uid = uid;
rwlock_init(&sk->sk_callback_lock);
if (sk->sk_kern_sock)
@@ -3463,6 +3455,16 @@ void sock_init_data(struct socket *sock, struct sock *sk)
refcount_set(&sk->sk_refcnt, 1);
atomic_set(&sk->sk_drops, 0);
}
+EXPORT_SYMBOL(sock_init_data_uid);
+
+void sock_init_data(struct socket *sock, struct sock *sk)
+{
+ kuid_t uid = sock ?
+ SOCK_INODE(sock)->i_uid :
+ make_kuid(sock_net(sk)->user_ns, 0);
+
+ sock_init_data_uid(sock, sk, uid);
+}
EXPORT_SYMBOL(sock_init_data);
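A hedged sketch of the caller pattern the new variant enables (mydrv name hypothetical): a driver that instantiates a struct sock without a backing socket inode can record the opening task's uid instead of inheriting the netns root default.

static void mydrv_init_sock(struct socket *sock, struct sock *sk)
{
	/* No inode uid is available here, so pass the opener's uid
	 * explicitly rather than letting sock_init_data() fall back
	 * to kuid 0 in this namespace.
	 */
	sock_init_data_uid(sock, sk, current_fsuid());
}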
void lock_sock_nested(struct sock *sk, int subclass)
@@ -3697,8 +3699,6 @@ void sk_common_release(struct sock *sk)
xfrm_sk_free_policy(sk);
- sk_refcnt_debug_release(sk);
-
sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 5b1ce656baa1..74842b453407 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/sched/isolation.h>
#include <net/ip.h>
#include <net/sock.h>
@@ -45,7 +46,82 @@ EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);
int sysctl_devconf_inherit_init_net __read_mostly;
EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
+#if IS_ENABLED(CONFIG_NET_FLOW_LIMIT) || IS_ENABLED(CONFIG_RPS)
+static void dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos,
+ struct cpumask *mask)
+{
+ char kbuf[128];
+ int len;
+
+ if (*ppos || !*lenp) {
+ *lenp = 0;
+ return;
+ }
+
+ len = min(sizeof(kbuf) - 1, *lenp);
+ len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
+ if (!len) {
+ *lenp = 0;
+ return;
+ }
+
+ if (len < *lenp)
+ kbuf[len++] = '\n';
+ memcpy(buffer, kbuf, len);
+ *lenp = len;
+ *ppos += len;
+}
+#endif
+
#ifdef CONFIG_RPS
+
+static struct cpumask *rps_default_mask_cow_alloc(struct net *net)
+{
+ struct cpumask *rps_default_mask;
+
+ if (net->core.rps_default_mask)
+ return net->core.rps_default_mask;
+
+ rps_default_mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!rps_default_mask)
+ return NULL;
+
+ /* pairs with READ_ONCE in rx_queue_default_mask() */
+ WRITE_ONCE(net->core.rps_default_mask, rps_default_mask);
+ return rps_default_mask;
+}
+
+static int rps_default_mask_sysctl(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = (struct net *)table->data;
+ int err = 0;
+
+ rtnl_lock();
+ if (write) {
+ struct cpumask *rps_default_mask = rps_default_mask_cow_alloc(net);
+
+ err = -ENOMEM;
+ if (!rps_default_mask)
+ goto done;
+
+ err = cpumask_parse(buffer, rps_default_mask);
+ if (err)
+ goto done;
+
+ err = rps_cpumask_housekeeping(rps_default_mask);
+ if (err)
+ goto done;
+ } else {
+ dump_cpumask(buffer, lenp, ppos,
+ net->core.rps_default_mask ? : cpu_none_mask);
+ }
+
+done:
+ rtnl_unlock();
+ return err;
+}
+
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
@@ -155,13 +231,6 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
write_unlock:
mutex_unlock(&flow_limit_update_mutex);
} else {
- char kbuf[128];
-
- if (*ppos || !*lenp) {
- *lenp = 0;
- goto done;
- }
-
cpumask_clear(mask);
rcu_read_lock();
for_each_possible_cpu(i) {
@@ -171,17 +240,7 @@ write_unlock:
}
rcu_read_unlock();
- len = min(sizeof(kbuf) - 1, *lenp);
- len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
- if (!len) {
- *lenp = 0;
- goto done;
- }
- if (len < *lenp)
- kbuf[len++] = '\n';
- memcpy(buffer, kbuf, len);
- *lenp = len;
- *ppos += len;
+ dump_cpumask(buffer, lenp, ppos, mask);
}
done:
@@ -598,6 +657,14 @@ static struct ctl_table net_core_table[] = {
};
static struct ctl_table netns_core_table[] = {
+#if IS_ENABLED(CONFIG_RPS)
+ {
+ .procname = "rps_default_mask",
+ .data = &init_net,
+ .mode = 0644,
+ .proc_handler = rps_default_mask_sysctl
+ },
+#endif
{
.procname = "somaxconn",
.data = &init_net.core.sysctl_somaxconn,
@@ -643,11 +710,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
for (tmp = tbl; tmp->procname; tmp++)
tmp->data += (char *)net - (char *)&init_net;
-
- /* Don't export any sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns) {
- tbl[0].procname = NULL;
- }
}
net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
@@ -670,6 +732,9 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
tbl = net->core.sysctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->core.sysctl_hdr);
BUG_ON(tbl == netns_core_table);
+#if IS_ENABLED(CONFIG_RPS)
+ kfree(net->core.rps_default_mask);
+#endif
kfree(tbl);
}
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 844c9d99dc0e..8c92fc553317 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -4,6 +4,8 @@
* Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -602,8 +604,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
- n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
- n_skb, skbs);
+ n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
if (unlikely(!n_skb))
return -ENOMEM;
@@ -672,7 +673,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -709,3 +710,84 @@ struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
return nxdpf;
}
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+ "Global functions as their definitions will be in vmlinux BTF");
+
+/**
+ * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
+ * @ctx: XDP context pointer.
+ * @timestamp: Return value pointer.
+ *
+ * Returns 0 on success or ``-errno`` on error.
+ */
+__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
+ * @ctx: XDP context pointer.
+ * @hash: Return value pointer.
+ *
+ * Returns 0 on success or ``-errno`` on error.
+ */
+__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ return -EOPNOTSUPP;
+}
+
+__diag_pop();
+
+BTF_SET8_START(xdp_metadata_kfunc_ids)
+#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, 0)
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+BTF_SET8_END(xdp_metadata_kfunc_ids)
+
+static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &xdp_metadata_kfunc_ids,
+};
+
+BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
+#define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str)
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+
+u32 bpf_xdp_metadata_kfunc_id(int id)
+{
+ /* xdp_metadata_kfunc_ids is sorted, so it can't be indexed by enum order; use the unsorted list. */
+ return xdp_metadata_kfunc_ids_unsorted[id];
+}
+
+bool bpf_dev_bound_kfunc_id(u32 btf_id)
+{
+ return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
+}
+
+static int __init xdp_metadata_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
+}
+late_initcall(xdp_metadata_init);
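For reference, a sketch of consuming these kfuncs from an XDP program, using libbpf's __ksym extern convention (driver support is required; the stubs above return -EOPNOTSUPP):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

SEC("xdp")
int xdp_read_rx_ts(struct xdp_md *ctx)
{
	__u64 ts;

	/* Degrades gracefully when the driver does not implement the
	 * kfunc: the default stub simply reports -EOPNOTSUPP.
	 */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx hw timestamp: %llu", ts);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";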
+
+void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
+{
+ dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+ if (support_sg)
+ dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT_SG;
+
+ call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);
+
+void xdp_features_clear_redirect_target(struct net_device *dev)
+{
+ dev->xdp_features &= ~(NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG);
+ call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
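Driver-side usage sketch (hypothetical mydrv helper): the base feature bits are set directly, while the redirect-target helpers toggle NDO_XMIT/NDO_XMIT_SG as ndo_xdp_xmit support comes and goes, each change raising NETDEV_XDP_FEAT_CHANGE for userspace.

static void mydrv_update_xdp_features(struct net_device *netdev, bool xmit_ok)
{
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
			       NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_RX_SG;

	if (xmit_ok)
		xdp_features_set_redirect_target(netdev, true);
	else
		xdp_features_clear_redirect_target(netdev);
}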
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index f9949e051f49..c0c438128575 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -178,6 +178,7 @@ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
};
static LIST_HEAD(dcb_app_list);
+static LIST_HEAD(dcb_rewr_list);
static DEFINE_SPINLOCK(dcb_lock);
static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector)
@@ -1099,11 +1100,46 @@ out:
return err;
}
+/* Set or delete APP table or rewrite table entries. The APP struct is validated
+ * and the appropriate callback function is called.
+ */
+static int dcbnl_app_table_setdel(struct nlattr *attr,
+ struct net_device *netdev,
+ int (*setdel)(struct net_device *dev,
+ struct dcb_app *app))
+{
+ struct dcb_app *app_data;
+ enum ieee_attrs_app type;
+ struct nlattr *attr_itr;
+ int rem, err;
+
+ nla_for_each_nested(attr_itr, attr, rem) {
+ type = nla_type(attr_itr);
+
+ if (!dcbnl_app_attr_type_validate(type))
+ continue;
+
+ if (nla_len(attr_itr) < sizeof(struct dcb_app))
+ return -ERANGE;
+
+ app_data = nla_data(attr_itr);
+
+ if (!dcbnl_app_selector_validate(type, app_data->selector))
+ return -EINVAL;
+
+ err = setdel(netdev, app_data);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
- struct nlattr *ieee, *app;
+ struct nlattr *ieee, *app, *rewr;
struct dcb_app_type *itr;
int dcbx;
int err;
@@ -1206,6 +1242,27 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
spin_unlock_bh(&dcb_lock);
nla_nest_end(skb, app);
+ rewr = nla_nest_start(skb, DCB_ATTR_DCB_REWR_TABLE);
+ if (!rewr)
+ return -EMSGSIZE;
+
+ spin_lock_bh(&dcb_lock);
+ list_for_each_entry(itr, &dcb_rewr_list, list) {
+ if (itr->ifindex == netdev->ifindex) {
+ enum ieee_attrs_app type =
+ dcbnl_app_attr_type_get(itr->app.selector);
+ err = nla_put(skb, type, sizeof(itr->app), &itr->app);
+ if (err) {
+ spin_unlock_bh(&dcb_lock);
+ nla_nest_cancel(skb, rewr);
+ return -EMSGSIZE;
+ }
+ }
+ }
+
+ spin_unlock_bh(&dcb_lock);
+ nla_nest_end(skb, rewr);
+
if (ops->dcbnl_getapptrust) {
err = dcbnl_getapptrust(netdev, skb);
if (err)
@@ -1567,37 +1624,20 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
goto err;
}
- if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
- struct nlattr *attr;
- int rem;
-
- nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
- enum ieee_attrs_app type = nla_type(attr);
- struct dcb_app *app_data;
-
- if (!dcbnl_app_attr_type_validate(type))
- continue;
-
- if (nla_len(attr) < sizeof(struct dcb_app)) {
- err = -ERANGE;
- goto err;
- }
-
- app_data = nla_data(attr);
-
- if (!dcbnl_app_selector_validate(type,
- app_data->selector)) {
- err = -EINVAL;
- goto err;
- }
+ if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
+ err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
+ netdev,
+ ops->dcbnl_setrewr ?: dcb_setrewr);
+ if (err)
+ goto err;
+ }
- if (ops->ieee_setapp)
- err = ops->ieee_setapp(netdev, app_data);
- else
- err = dcb_ieee_setapp(netdev, app_data);
- if (err)
- goto err;
- }
+ if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
+ err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
+ netdev, ops->ieee_setapp ?:
+ dcb_ieee_setapp);
+ if (err)
+ goto err;
}
if (ieee[DCB_ATTR_DCB_APP_TRUST_TABLE]) {
@@ -1684,31 +1724,19 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
return err;
if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
- struct nlattr *attr;
- int rem;
-
- nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
- enum ieee_attrs_app type = nla_type(attr);
- struct dcb_app *app_data;
-
- if (!dcbnl_app_attr_type_validate(type))
- continue;
-
- app_data = nla_data(attr);
-
- if (!dcbnl_app_selector_validate(type,
- app_data->selector)) {
- err = -EINVAL;
- goto err;
- }
+ err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
+ netdev, ops->ieee_delapp ?:
+ dcb_ieee_delapp);
+ if (err)
+ goto err;
+ }
- if (ops->ieee_delapp)
- err = ops->ieee_delapp(netdev, app_data);
- else
- err = dcb_ieee_delapp(netdev, app_data);
- if (err)
- goto err;
- }
+ if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
+ err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
+ netdev,
+ ops->dcbnl_delrewr ?: dcb_delrewr);
+ if (err)
+ goto err;
}
err:
@@ -1939,6 +1967,22 @@ out:
return ret;
}
+static struct dcb_app_type *dcb_rewr_lookup(const struct dcb_app *app,
+ int ifindex, int proto)
+{
+ struct dcb_app_type *itr;
+
+ list_for_each_entry(itr, &dcb_rewr_list, list) {
+ if (itr->app.selector == app->selector &&
+ itr->app.priority == app->priority &&
+ itr->ifindex == ifindex &&
+ ((proto == -1) || itr->app.protocol == proto))
+ return itr;
+ }
+
+ return NULL;
+}
+
static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
int ifindex, int prio)
{
@@ -1955,7 +1999,8 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
return NULL;
}
-static int dcb_app_add(const struct dcb_app *app, int ifindex)
+static int dcb_app_add(struct list_head *list, const struct dcb_app *app,
+ int ifindex)
{
struct dcb_app_type *entry;
@@ -1965,7 +2010,7 @@ static int dcb_app_add(const struct dcb_app *app, int ifindex)
memcpy(&entry->app, app, sizeof(*app));
entry->ifindex = ifindex;
- list_add(&entry->list, &dcb_app_list);
+ list_add(&entry->list, list);
return 0;
}
@@ -2028,7 +2073,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
}
 /* App type does not exist, add new application type */
if (new->priority)
- err = dcb_app_add(new, dev->ifindex);
+ err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
out:
spin_unlock_bh(&dcb_lock);
if (!err)
@@ -2061,6 +2106,63 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);
+/* Get protocol value from rewrite entry. */
+u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app_type *itr;
+ u16 proto = 0;
+
+ spin_lock_bh(&dcb_lock);
+ itr = dcb_rewr_lookup(app, dev->ifindex, -1);
+ if (itr)
+ proto = itr->app.protocol;
+ spin_unlock_bh(&dcb_lock);
+
+ return proto;
+}
+EXPORT_SYMBOL(dcb_getrewr);
+
+/* Add rewrite entry to the rewrite list. */
+int dcb_setrewr(struct net_device *dev, struct dcb_app *new)
+{
+ int err;
+
+ spin_lock_bh(&dcb_lock);
+ /* Search for existing match and abort if found. */
+ if (dcb_rewr_lookup(new, dev->ifindex, new->protocol)) {
+ err = -EEXIST;
+ goto out;
+ }
+
+ err = dcb_app_add(&dcb_rewr_list, new, dev->ifindex);
+out:
+ spin_unlock_bh(&dcb_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(dcb_setrewr);
+
+/* Delete rewrite entry from the rewrite list. */
+int dcb_delrewr(struct net_device *dev, struct dcb_app *del)
+{
+ struct dcb_app_type *itr;
+ int err = -ENOENT;
+
+ spin_lock_bh(&dcb_lock);
+ /* Search for existing match and remove it. */
+ itr = dcb_rewr_lookup(del, dev->ifindex, del->protocol);
+ if (itr) {
+ list_del(&itr->list);
+ kfree(itr);
+ err = 0;
+ }
+
+ spin_unlock_bh(&dcb_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(dcb_delrewr);
+
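
As a rough illustration of this new rewrite-entry API: in dcbnl_ieee_set()/dcbnl_ieee_del() above, ops->dcbnl_setrewr ?: dcb_setrewr picks the driver callback when one exists and otherwise falls back to these generic list helpers. The hypothetical sketch below only demonstrates the calling convention of the helpers; it is not part of the patch.

/* Hedged sketch: map priority 3 to DSCP 24 via the shared rewrite list,
 * query it back, then remove it again.
 */
static int example_rewr_usage(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.priority = 3,	/* rewrite packets with priority 3... */
		.protocol = 24,	/* ...to DSCP value 24 */
	};
	u16 proto;
	int err;

	err = dcb_setrewr(dev, &app);	/* -EEXIST if already present */
	if (err)
		return err;

	proto = dcb_getrewr(dev, &app);	/* 24 here; 0 if no entry matches */
	if (proto != app.protocol)
		return -EIO;

	return dcb_delrewr(dev, &app);	/* -ENOENT once the entry is gone */
}
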
/**
* dcb_ieee_setapp - add IEEE dcb application data to app list
* @dev: network interface
@@ -2088,7 +2190,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
goto out;
}
- err = dcb_app_add(new, dev->ifindex);
+ err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
out:
spin_unlock_bh(&dcb_lock);
if (!err)
@@ -2130,6 +2232,58 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
}
EXPORT_SYMBOL(dcb_ieee_delapp);
+/* dcb_getrewr_prio_pcp_mask_map - For a given device, find the mapping from
+ * priorities to the PCP and DEI values assigned to each priority.
+ */
+void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev,
+ struct dcb_rewr_prio_pcp_map *p_map)
+{
+ int ifindex = dev->ifindex;
+ struct dcb_app_type *itr;
+ u8 prio;
+
+ memset(p_map->map, 0, sizeof(p_map->map));
+
+ spin_lock_bh(&dcb_lock);
+ list_for_each_entry(itr, &dcb_rewr_list, list) {
+ if (itr->ifindex == ifindex &&
+ itr->app.selector == DCB_APP_SEL_PCP &&
+ itr->app.protocol < 16 &&
+ itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
+ prio = itr->app.priority;
+ p_map->map[prio] |= 1 << itr->app.protocol;
+ }
+ }
+ spin_unlock_bh(&dcb_lock);
+}
+EXPORT_SYMBOL(dcb_getrewr_prio_pcp_mask_map);
+
+/* dcb_getrewr_prio_dscp_mask_map - For a given device, find the mapping from
+ * priorities to the DSCP values assigned to each priority.
+ */
+void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map)
+{
+ int ifindex = dev->ifindex;
+ struct dcb_app_type *itr;
+ u8 prio;
+
+ memset(p_map->map, 0, sizeof(p_map->map));
+
+ spin_lock_bh(&dcb_lock);
+ list_for_each_entry(itr, &dcb_rewr_list, list) {
+ if (itr->ifindex == ifindex &&
+ itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
+ itr->app.protocol < 64 &&
+ itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
+ prio = itr->app.priority;
+ p_map->map[prio] |= 1ULL << itr->app.protocol;
+ }
+ }
+ spin_unlock_bh(&dcb_lock);
+}
+EXPORT_SYMBOL(dcb_getrewr_prio_dscp_mask_map);
+
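
A driver consuming the two helpers above receives, for each priority, a bitmask of the PCP(+DEI) or DSCP values that rewrite to that priority. A minimal hypothetical decode loop follows; hw_set_egress_dscp() is a placeholder for a driver-specific programming routine, not a real function.

/* Hedged sketch: walk the DSCP rewrite map and program each
 * (priority -> DSCP) pair into hardware.
 */
void hw_set_egress_dscp(struct net_device *dev, int prio, int dscp); /* hypothetical */

static void example_program_dscp_rewrite(struct net_device *dev)
{
	struct dcb_ieee_app_prio_map dscp_map;
	int prio, dscp;

	dcb_getrewr_prio_dscp_mask_map(dev, &dscp_map);

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		u64 mask = dscp_map.map[prio];

		for (dscp = 0; dscp < 64; dscp++)
			if (mask & (1ULL << dscp))
				hw_set_egress_dscp(dev, prio, dscp);
	}
}
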
/*
* dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
* priorities to the DSCP values assigned to that priority. Initialize p_map
diff --git a/net/devlink/Makefile b/net/devlink/Makefile
new file mode 100644
index 000000000000..ef91a76646a3
--- /dev/null
+++ b/net/devlink/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y := leftover.o core.o netlink.o dev.o health.o
diff --git a/net/devlink/core.c b/net/devlink/core.c
new file mode 100644
index 000000000000..777b091ef74d
--- /dev/null
+++ b/net/devlink/core.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include <net/genetlink.h>
+
+#include "devl_internal.h"
+
+DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
+
+void *devlink_priv(struct devlink *devlink)
+{
+ return &devlink->priv;
+}
+EXPORT_SYMBOL_GPL(devlink_priv);
+
+struct devlink *priv_to_devlink(void *priv)
+{
+ return container_of(priv, struct devlink, priv);
+}
+EXPORT_SYMBOL_GPL(priv_to_devlink);
+
+struct device *devlink_to_dev(const struct devlink *devlink)
+{
+ return devlink->dev;
+}
+EXPORT_SYMBOL_GPL(devlink_to_dev);
+
+struct net *devlink_net(const struct devlink *devlink)
+{
+ return read_pnet(&devlink->_net);
+}
+EXPORT_SYMBOL_GPL(devlink_net);
+
+void devl_assert_locked(struct devlink *devlink)
+{
+ lockdep_assert_held(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_assert_locked);
+
+#ifdef CONFIG_LOCKDEP
+/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
+bool devl_lock_is_held(struct devlink *devlink)
+{
+ return lockdep_is_held(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_lock_is_held);
+#endif
+
+void devl_lock(struct devlink *devlink)
+{
+ mutex_lock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_lock);
+
+int devl_trylock(struct devlink *devlink)
+{
+ return mutex_trylock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_trylock);
+
+void devl_unlock(struct devlink *devlink)
+{
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_unlock);
+
+/**
+ * devlink_try_get() - try to obtain a reference on a devlink instance
+ * @devlink: instance to reference
+ *
+ * Obtain a reference on a devlink instance. A reference on a devlink instance
+ * only implies that it's safe to take the instance lock. It does not imply
+ * that the instance is registered, use devl_is_registered() after taking
+ * the instance lock to check registration status.
+ */
+struct devlink *__must_check devlink_try_get(struct devlink *devlink)
+{
+ if (refcount_inc_not_zero(&devlink->refcount))
+ return devlink;
+ return NULL;
+}
+
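
The kernel-doc above captures the key subtlety of the new lifetime model: holding a reference only makes it safe to take the instance lock, it does not imply the instance is still registered. A minimal sketch of the intended pattern, the same shape as devlink_pernet_pre_exit() later in this file:

/* Hedged sketch: safely act on a devlink pointer obtained elsewhere. */
static void example_use(struct devlink *devlink)
{
	if (!devlink_try_get(devlink))
		return;		/* instance is on its way to being freed */

	devl_lock(devlink);
	if (devl_is_registered(devlink)) {
		/* safe to operate on instance objects here */
	}
	devl_unlock(devlink);
	devlink_put(devlink);
}
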
+static void devlink_release(struct work_struct *work)
+{
+ struct devlink *devlink;
+
+ devlink = container_of(to_rcu_work(work), struct devlink, rwork);
+
+ mutex_destroy(&devlink->lock);
+ lockdep_unregister_key(&devlink->lock_key);
+ kfree(devlink);
+}
+
+void devlink_put(struct devlink *devlink)
+{
+ if (refcount_dec_and_test(&devlink->refcount))
+ queue_rcu_work(system_wq, &devlink->rwork);
+}
+
+struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
+{
+ struct devlink *devlink = NULL;
+
+ rcu_read_lock();
+retry:
+ devlink = xa_find(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
+ if (!devlink)
+ goto unlock;
+
+ if (!devlink_try_get(devlink))
+ goto next;
+ if (!net_eq(devlink_net(devlink), net)) {
+ devlink_put(devlink);
+ goto next;
+ }
+unlock:
+ rcu_read_unlock();
+ return devlink;
+
+next:
+ (*indexp)++;
+ goto retry;
+}
+
+/**
+ * devl_register - Register devlink instance
+ * @devlink: devlink
+ */
+int devl_register(struct devlink *devlink)
+{
+ ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+ devl_assert_locked(devlink);
+
+ xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
+ devlink_notify_register(devlink);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_register);
+
+void devlink_register(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ devl_register(devlink);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_register);
+
+/**
+ * devl_unregister - Unregister devlink instance
+ * @devlink: devlink
+ */
+void devl_unregister(struct devlink *devlink)
+{
+ ASSERT_DEVLINK_REGISTERED(devlink);
+ devl_assert_locked(devlink);
+
+ devlink_notify_unregister(devlink);
+ xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
+}
+EXPORT_SYMBOL_GPL(devl_unregister);
+
+void devlink_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_unregister);
+
+/**
+ * devlink_alloc_ns - Allocate new devlink instance resources
+ * in specific namespace
+ *
+ * @ops: ops
+ * @priv_size: size of user private data
+ * @net: net namespace
+ * @dev: parent device
+ *
+ * Allocate new devlink instance resources, including devlink index
+ * and name.
+ */
+struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
+ size_t priv_size, struct net *net,
+ struct device *dev)
+{
+ struct devlink *devlink;
+ static u32 last_id;
+ int ret;
+
+ WARN_ON(!ops || !dev);
+ if (!devlink_reload_actions_valid(ops))
+ return NULL;
+
+ devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
+ if (!devlink)
+ return NULL;
+
+ ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
+ &last_id, GFP_KERNEL);
+ if (ret < 0)
+ goto err_xa_alloc;
+
+ devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event;
+ ret = register_netdevice_notifier(&devlink->netdevice_nb);
+ if (ret)
+ goto err_register_netdevice_notifier;
+
+ devlink->dev = dev;
+ devlink->ops = ops;
+ xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
+ xa_init_flags(&devlink->params, XA_FLAGS_ALLOC);
+ xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
+ write_pnet(&devlink->_net, net);
+ INIT_LIST_HEAD(&devlink->rate_list);
+ INIT_LIST_HEAD(&devlink->linecard_list);
+ INIT_LIST_HEAD(&devlink->sb_list);
+ INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
+ INIT_LIST_HEAD(&devlink->resource_list);
+ INIT_LIST_HEAD(&devlink->region_list);
+ INIT_LIST_HEAD(&devlink->reporter_list);
+ INIT_LIST_HEAD(&devlink->trap_list);
+ INIT_LIST_HEAD(&devlink->trap_group_list);
+ INIT_LIST_HEAD(&devlink->trap_policer_list);
+ INIT_RCU_WORK(&devlink->rwork, devlink_release);
+ lockdep_register_key(&devlink->lock_key);
+ mutex_init(&devlink->lock);
+ lockdep_set_class(&devlink->lock, &devlink->lock_key);
+ refcount_set(&devlink->refcount, 1);
+
+ return devlink;
+
+err_register_netdevice_notifier:
+ xa_erase(&devlinks, devlink->index);
+err_xa_alloc:
+ kfree(devlink);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(devlink_alloc_ns);
+
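
For context, the usual driver-side lifecycle around this allocator looks as follows; devlink_alloc() is the existing init_net convenience wrapper around devlink_alloc_ns(), while the foo_* names and the priv layout are hypothetical.

/* Hedged sketch of a probe/remove pair using the instance lifecycle. */
struct foo_priv {
	int dummy;	/* driver state would live here */
};

static const struct devlink_ops foo_devlink_ops = {};

static int foo_probe(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv), dev);
	if (!devlink)
		return -ENOMEM;

	/* ... init hardware, add ports, etc. using devlink_priv(devlink) ... */

	devlink_register(devlink);	/* opens the instance to user space */
	return 0;
}

static void foo_remove(struct devlink *devlink)
{
	devlink_unregister(devlink);
	devlink_free(devlink);
}
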
+/**
+ * devlink_free - Free devlink instance resources
+ *
+ * @devlink: devlink
+ */
+void devlink_free(struct devlink *devlink)
+{
+ ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+
+ WARN_ON(!list_empty(&devlink->trap_policer_list));
+ WARN_ON(!list_empty(&devlink->trap_group_list));
+ WARN_ON(!list_empty(&devlink->trap_list));
+ WARN_ON(!list_empty(&devlink->reporter_list));
+ WARN_ON(!list_empty(&devlink->region_list));
+ WARN_ON(!list_empty(&devlink->resource_list));
+ WARN_ON(!list_empty(&devlink->dpipe_table_list));
+ WARN_ON(!list_empty(&devlink->sb_list));
+ WARN_ON(!list_empty(&devlink->rate_list));
+ WARN_ON(!list_empty(&devlink->linecard_list));
+ WARN_ON(!xa_empty(&devlink->ports));
+
+ xa_destroy(&devlink->snapshot_ids);
+ xa_destroy(&devlink->params);
+ xa_destroy(&devlink->ports);
+
+ WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb));
+
+ xa_erase(&devlinks, devlink->index);
+
+ devlink_put(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_free);
+
+static void __net_exit devlink_pernet_pre_exit(struct net *net)
+{
+ struct devlink *devlink;
+ u32 actions_performed;
+ unsigned long index;
+ int err;
+
+ /* In case network namespace is getting destroyed, reload
+ * all devlink instances from this namespace into init_net.
+ */
+ devlinks_xa_for_each_registered_get(net, index, devlink) {
+ devl_lock(devlink);
+ err = 0;
+ if (devl_is_registered(devlink))
+ err = devlink_reload(devlink, &init_net,
+ DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+ DEVLINK_RELOAD_LIMIT_UNSPEC,
+ &actions_performed, NULL);
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ if (err && err != -EOPNOTSUPP)
+ pr_warn("Failed to reload devlink instance into init_net\n");
+ }
+}
+
+static struct pernet_operations devlink_pernet_ops __net_initdata = {
+ .pre_exit = devlink_pernet_pre_exit,
+};
+
+static int __init devlink_init(void)
+{
+ int err;
+
+ err = genl_register_family(&devlink_nl_family);
+ if (err)
+ goto out;
+ err = register_pernet_subsys(&devlink_pernet_ops);
+
+out:
+ WARN_ON(err);
+ return err;
+}
+
+subsys_initcall(devlink_init);
diff --git a/net/devlink/dev.c b/net/devlink/dev.c
new file mode 100644
index 000000000000..bf1d6f1bcfc7
--- /dev/null
+++ b/net/devlink/dev.c
@@ -0,0 +1,1346 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include <net/genetlink.h>
+#include <net/sock.h>
+#include "devl_internal.h"
+
+struct devlink_info_req {
+ struct sk_buff *msg;
+ void (*version_cb)(const char *version_name,
+ enum devlink_info_version_type version_type,
+ void *version_cb_priv);
+ void *version_cb_priv;
+};
+
+struct devlink_reload_combination {
+ enum devlink_reload_action action;
+ enum devlink_reload_limit limit;
+};
+
+static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = {
+ {
+ /* can't reinitialize driver with no down time */
+ .action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+ .limit = DEVLINK_RELOAD_LIMIT_NO_RESET,
+ },
+};
+
+static bool
+devlink_reload_combination_is_invalid(enum devlink_reload_action action,
+ enum devlink_reload_limit limit)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)
+ if (devlink_reload_invalid_combinations[i].action == action &&
+ devlink_reload_invalid_combinations[i].limit == limit)
+ return true;
+ return false;
+}
+
+static bool
+devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action)
+{
+ return test_bit(action, &devlink->ops->reload_actions);
+}
+
+static bool
+devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit)
+{
+ return test_bit(limit, &devlink->ops->reload_limits);
+}
+
+static int devlink_reload_stat_put(struct sk_buff *msg,
+ enum devlink_reload_limit limit, u32 value)
+{
+ struct nlattr *reload_stats_entry;
+
+ reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY);
+ if (!reload_stats_entry)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
+ nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
+ goto nla_put_failure;
+ nla_nest_end(msg, reload_stats_entry);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, reload_stats_entry);
+ return -EMSGSIZE;
+}
+
+static int
+devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
+{
+ struct nlattr *reload_stats_attr, *act_info, *act_stats;
+ int i, j, stat_idx;
+ u32 value;
+
+ if (!is_remote)
+ reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS);
+ else
+ reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS);
+
+ if (!reload_stats_attr)
+ return -EMSGSIZE;
+
+ for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
+ if ((!is_remote &&
+ !devlink_reload_action_is_supported(devlink, i)) ||
+ i == DEVLINK_RELOAD_ACTION_UNSPEC)
+ continue;
+ act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO);
+ if (!act_info)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i))
+ goto action_info_nest_cancel;
+ act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS);
+ if (!act_stats)
+ goto action_info_nest_cancel;
+
+ for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
+ /* Remote stats are shown even if not locally supported.
+ * Stats of actions with an unspecified limit are shown
+ * even though drivers don't need to register the
+ * unspecified limit.
+ */
+ if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
+ !devlink_reload_limit_is_supported(devlink, j)) ||
+ devlink_reload_combination_is_invalid(i, j))
+ continue;
+
+ stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i;
+ if (!is_remote)
+ value = devlink->stats.reload_stats[stat_idx];
+ else
+ value = devlink->stats.remote_reload_stats[stat_idx];
+ if (devlink_reload_stat_put(msg, j, value))
+ goto action_stats_nest_cancel;
+ }
+ nla_nest_end(msg, act_stats);
+ nla_nest_end(msg, act_info);
+ }
+ nla_nest_end(msg, reload_stats_attr);
+ return 0;
+
+action_stats_nest_cancel:
+ nla_nest_cancel(msg, act_stats);
+action_info_nest_cancel:
+ nla_nest_cancel(msg, act_info);
+nla_put_failure:
+ nla_nest_cancel(msg, reload_stats_attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags)
+{
+ struct nlattr *dev_stats;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
+ goto nla_put_failure;
+
+ dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS);
+ if (!dev_stats)
+ goto nla_put_failure;
+
+ if (devlink_reload_stats_put(msg, devlink, false))
+ goto dev_stats_nest_cancel;
+ if (devlink_reload_stats_put(msg, devlink, true))
+ goto dev_stats_nest_cancel;
+
+ nla_nest_end(msg, dev_stats);
+ genlmsg_end(msg, hdr);
+ return 0;
+
+dev_stats_nest_cancel:
+ nla_nest_cancel(msg, dev_stats);
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
+ WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int
+devlink_nl_cmd_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI);
+}
+
+const struct devlink_cmd devl_cmd_get = {
+ .dump_one = devlink_nl_cmd_get_dump_one,
+};
+
+static void devlink_reload_failed_set(struct devlink *devlink,
+ bool reload_failed)
+{
+ if (devlink->reload_failed == reload_failed)
+ return;
+ devlink->reload_failed = reload_failed;
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+}
+
+bool devlink_is_reload_failed(const struct devlink *devlink)
+{
+ return devlink->reload_failed;
+}
+EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
+
+static void
+__devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats,
+ enum devlink_reload_limit limit, u32 actions_performed)
+{
+ unsigned long actions = actions_performed;
+ int stat_idx;
+ int action;
+
+ for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) {
+ stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action;
+ reload_stats[stat_idx]++;
+ }
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+}
+
+static void
+devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit,
+ u32 actions_performed)
+{
+ __devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit,
+ actions_performed);
+}
+
+/**
+ * devlink_remote_reload_actions_performed - Update devlink on reload actions
+ * performed which are not a direct result of devlink reload call.
+ *
+ * This should be called by a driver after performing reload actions that were not
+ * the direct result of a devlink reload call. For example, fw_activate may have been
+ * performed because a devlink reload triggered fw_activate on another host.
+ * The motivation for this function is to keep track of reload actions performed on
+ * this device, whether or not they resulted from a direct devlink reload call.
+ *
+ * @devlink: devlink
+ * @limit: reload limit
+ * @actions_performed: bitmask of actions performed
+ */
+void devlink_remote_reload_actions_performed(struct devlink *devlink,
+ enum devlink_reload_limit limit,
+ u32 actions_performed)
+{
+ if (WARN_ON(!actions_performed ||
+ actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
+ actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) ||
+ limit > DEVLINK_RELOAD_LIMIT_MAX))
+ return;
+
+ __devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit,
+ actions_performed);
+}
+EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed);
+
+static struct net *devlink_netns_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID];
+ struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD];
+ struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID];
+ struct net *net;
+
+ if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) {
+ NL_SET_ERR_MSG(info->extack, "multiple netns identifying attributes specified");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (netns_pid_attr) {
+ net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr));
+ } else if (netns_fd_attr) {
+ net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr));
+ } else if (netns_id_attr) {
+ net = get_net_ns_by_id(sock_net(skb->sk),
+ nla_get_u32(netns_id_attr));
+ if (!net)
+ net = ERR_PTR(-EINVAL);
+ } else {
+ WARN_ON(1);
+ net = ERR_PTR(-EINVAL);
+ }
+ if (IS_ERR(net)) {
+ NL_SET_ERR_MSG(info->extack, "Unknown network namespace");
+ return ERR_PTR(-EINVAL);
+ }
+ if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+ put_net(net);
+ return ERR_PTR(-EPERM);
+ }
+ return net;
+}
+
+static void devlink_reload_netns_change(struct devlink *devlink,
+ struct net *curr_net,
+ struct net *dest_net)
+{
+ /* Userspace needs to be notified about devlink objects
+ * removed from the original namespace and entering the new one.
+ * The rest of the devlink objects are re-created during the
+ * reload process, so their notifications are generated separately.
+ */
+ devlink_notify_unregister(devlink);
+ write_pnet(&devlink->_net, dest_net);
+ devlink_notify_register(devlink);
+}
+
+int devlink_reload(struct devlink *devlink, struct net *dest_net,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed, struct netlink_ext_ack *extack)
+{
+ u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+ struct net *curr_net;
+ int err;
+
+ memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
+ sizeof(remote_reload_stats));
+
+ err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
+ if (err)
+ return err;
+
+ curr_net = devlink_net(devlink);
+ if (dest_net && !net_eq(dest_net, curr_net))
+ devlink_reload_netns_change(devlink, curr_net, dest_net);
+
+ if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ devlink_params_driverinit_load_new(devlink);
+
+ err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
+ devlink_reload_failed_set(devlink, !!err);
+ if (err)
+ return err;
+
+ WARN_ON(!(*actions_performed & BIT(action)));
+ /* Catch driver on updating the remote action within devlink reload */
+ WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
+ sizeof(remote_reload_stats)));
+ devlink_reload_stats_update(devlink, limit, *actions_performed);
+ return 0;
+}
+
+static int
+devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed,
+ enum devlink_command cmd, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd);
+ if (!hdr)
+ goto free_msg;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed,
+ actions_performed))
+ goto nla_put_failure;
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
+int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ enum devlink_reload_action action;
+ enum devlink_reload_limit limit;
+ struct net *dest_net = NULL;
+ u32 actions_performed;
+ int err;
+
+ err = devlink_resources_validate(devlink, NULL, info);
+ if (err) {
+ NL_SET_ERR_MSG(info->extack, "resources size validation failed");
+ return err;
+ }
+
+ if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
+ action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
+ else
+ action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
+
+ if (!devlink_reload_action_is_supported(devlink, action)) {
+ NL_SET_ERR_MSG(info->extack, "Requested reload action is not supported by the driver");
+ return -EOPNOTSUPP;
+ }
+
+ limit = DEVLINK_RELOAD_LIMIT_UNSPEC;
+ if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) {
+ struct nla_bitfield32 limits;
+ u32 limits_selected;
+
+ limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]);
+ limits_selected = limits.value & limits.selector;
+ if (!limits_selected) {
+ NL_SET_ERR_MSG(info->extack, "Invalid limit selected");
+ return -EINVAL;
+ }
+ for (limit = 0 ; limit <= DEVLINK_RELOAD_LIMIT_MAX ; limit++)
+ if (limits_selected & BIT(limit))
+ break;
+ /* UAPI enables multiselection, but currently it is not used */
+ if (limits_selected != BIT(limit)) {
+ NL_SET_ERR_MSG(info->extack, "Multiselection of limit is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (!devlink_reload_limit_is_supported(devlink, limit)) {
+ NL_SET_ERR_MSG(info->extack, "Requested limit is not supported by the driver");
+ return -EOPNOTSUPP;
+ }
+ if (devlink_reload_combination_is_invalid(action, limit)) {
+ NL_SET_ERR_MSG(info->extack, "Requested limit is invalid for this action");
+ return -EINVAL;
+ }
+ }
+ if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+ info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+ info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+ dest_net = devlink_netns_get(skb, info);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+ if (!net_eq(dest_net, devlink_net(devlink)) &&
+ action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "Changing namespace is only supported for reinit action");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
+
+ if (dest_net)
+ put_net(dest_net);
+
+ if (err)
+ return err;
+ /* For backward compatibility, generate a reply only if the user passed attributes */
+ if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS])
+ return 0;
+
+ return devlink_nl_reload_actions_performed_snd(devlink, actions_performed,
+ DEVLINK_CMD_RELOAD, info);
+}
+
+bool devlink_reload_actions_valid(const struct devlink_ops *ops)
+{
+ const struct devlink_reload_combination *comb;
+ int i;
+
+ if (!devlink_reload_supported(ops)) {
+ if (WARN_ON(ops->reload_actions))
+ return false;
+ return true;
+ }
+
+ if (WARN_ON(!ops->reload_actions ||
+ ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
+ ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX)))
+ return false;
+
+ if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) ||
+ ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX)))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) {
+ comb = &devlink_reload_invalid_combinations[i];
+ if (ops->reload_actions == BIT(comb->action) &&
+ ops->reload_limits == BIT(comb->limit))
+ return false;
+ }
+ return true;
+}
+
+static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags)
+{
+ const struct devlink_ops *ops = devlink->ops;
+ enum devlink_eswitch_encap_mode encap_mode;
+ u8 inline_mode;
+ void *hdr;
+ int err = 0;
+ u16 mode;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = devlink_nl_put_handle(msg, devlink);
+ if (err)
+ goto nla_put_failure;
+
+ if (ops->eswitch_mode_get) {
+ err = ops->eswitch_mode_get(devlink, &mode);
+ if (err)
+ goto nla_put_failure;
+ err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ if (ops->eswitch_inline_mode_get) {
+ err = ops->eswitch_inline_mode_get(devlink, &inline_mode);
+ if (err)
+ goto nla_put_failure;
+ err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE,
+ inline_mode);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ if (ops->eswitch_encap_mode_get) {
+ err = ops->eswitch_encap_mode_get(devlink, &encap_mode);
+ if (err)
+ goto nla_put_failure;
+ err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET,
+ info->snd_portid, info->snd_seq, 0);
+
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ const struct devlink_ops *ops = devlink->ops;
+ enum devlink_eswitch_encap_mode encap_mode;
+ u8 inline_mode;
+ int err = 0;
+ u16 mode;
+
+ if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
+ if (!ops->eswitch_mode_set)
+ return -EOPNOTSUPP;
+ mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
+ err = devlink_rate_nodes_check(devlink, mode, info->extack);
+ if (err)
+ return err;
+ err = ops->eswitch_mode_set(devlink, mode, info->extack);
+ if (err)
+ return err;
+ }
+
+ if (info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) {
+ if (!ops->eswitch_inline_mode_set)
+ return -EOPNOTSUPP;
+ inline_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
+ err = ops->eswitch_inline_mode_set(devlink, inline_mode,
+ info->extack);
+ if (err)
+ return err;
+ }
+
+ if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) {
+ if (!ops->eswitch_encap_mode_set)
+ return -EOPNOTSUPP;
+ encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
+ err = ops->eswitch_encap_mode_set(devlink, encap_mode,
+ info->extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
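
The doit above only fans out to per-attribute driver ops; a hypothetical driver implementing the mode pair would supply something like the following, where foo_priv and its eswitch_mode field are assumptions made for the sketch.

/* Hedged sketch of eswitch mode ops backing the get/set commands. */
struct foo_priv {
	u16 eswitch_mode;	/* hypothetical driver state */
};

static int foo_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct foo_priv *priv = devlink_priv(devlink);

	*mode = priv->eswitch_mode;	/* e.g. DEVLINK_ESWITCH_MODE_LEGACY */
	return 0;
}

static int foo_eswitch_mode_set(struct devlink *devlink, u16 mode,
				struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_priv(devlink);

	if (mode != DEVLINK_ESWITCH_MODE_LEGACY &&
	    mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		NL_SET_ERR_MSG(extack, "Unsupported eswitch mode");
		return -EOPNOTSUPP;
	}
	/* ... reconfigure the embedded switch ... */
	priv->eswitch_mode = mode;
	return 0;
}
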
+int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
+{
+ if (!req->msg)
+ return 0;
+ return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
+}
+EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
+
+int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+ const char *bsn)
+{
+ if (!req->msg)
+ return 0;
+ return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER,
+ bsn);
+}
+EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put);
+
+static int devlink_info_version_put(struct devlink_info_req *req, int attr,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type)
+{
+ struct nlattr *nest;
+ int err;
+
+ if (req->version_cb)
+ req->version_cb(version_name, version_type,
+ req->version_cb_priv);
+
+ if (!req->msg)
+ return 0;
+
+ nest = nla_nest_start_noflag(req->msg, attr);
+ if (!nest)
+ return -EMSGSIZE;
+
+ err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
+ version_name);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
+ version_value);
+ if (err)
+ goto nla_put_failure;
+
+ nla_nest_end(req->msg, nest);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(req->msg, nest);
+ return err;
+}
+
+int devlink_info_version_fixed_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
+ version_name, version_value,
+ DEVLINK_INFO_VERSION_TYPE_NONE);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put);
+
+int devlink_info_version_stored_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
+ version_name, version_value,
+ DEVLINK_INFO_VERSION_TYPE_NONE);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_stored_put);
+
+int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
+ version_name, version_value,
+ version_type);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext);
+
+int devlink_info_version_running_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
+ version_name, version_value,
+ DEVLINK_INFO_VERSION_TYPE_NONE);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_running_put);
+
+int devlink_info_version_running_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
+ version_name, version_value,
+ version_type);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext);
+
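
Taken together, the put helpers above are what a driver's ->info_get() callback is built from; a minimal hypothetical implementation follows (the version names mimic the usual fixed/running conventions, the values are made up).

/* Hedged sketch of an ->info_get() implementation. */
static int foo_info_get(struct devlink *devlink,
			struct devlink_info_req *req,
			struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_info_serial_number_put(req, "ABC0123456789");
	if (err)
		return err;

	err = devlink_info_version_fixed_put(req, "board.rev", "A1");
	if (err)
		return err;

	return devlink_info_version_running_put(req, "fw.mgmt", "1.2.3");
}
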
+static int devlink_nl_driver_info_get(struct device_driver *drv,
+ struct devlink_info_req *req)
+{
+ if (!drv)
+ return 0;
+
+ if (drv->name[0])
+ return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME,
+ drv->name);
+
+ return 0;
+}
+
+static int
+devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags, struct netlink_ext_ack *extack)
+{
+ struct device *dev = devlink_to_dev(devlink);
+ struct devlink_info_req req = {};
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = -EMSGSIZE;
+ if (devlink_nl_put_handle(msg, devlink))
+ goto err_cancel_msg;
+
+ req.msg = msg;
+ if (devlink->ops->info_get) {
+ err = devlink->ops->info_get(devlink, &req, extack);
+ if (err)
+ goto err_cancel_msg;
+ }
+
+ err = devlink_nl_driver_info_get(dev->driver, &req);
+ if (err)
+ goto err_cancel_msg;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+int devlink_nl_cmd_info_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int
+devlink_nl_cmd_info_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb)
+{
+ int err;
+
+ err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->extack);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+
+const struct devlink_cmd devl_cmd_info_get = {
+ .dump_one = devlink_nl_cmd_info_get_dump_one,
+};
+
+static int devlink_nl_flash_update_fill(struct sk_buff *msg,
+ struct devlink *devlink,
+ enum devlink_command cmd,
+ struct devlink_flash_notify *params)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS)
+ goto out;
+
+ if (params->status_msg &&
+ nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG,
+ params->status_msg))
+ goto nla_put_failure;
+ if (params->component &&
+ nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
+ params->component))
+ goto nla_put_failure;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
+ params->done, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
+ params->total, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
+ params->timeout, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+out:
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void __devlink_flash_update_notify(struct devlink *devlink,
+ enum devlink_command cmd,
+ struct devlink_flash_notify *params)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
+ cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
+ cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
+
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_flash_update_fill(msg, devlink, cmd, params);
+ if (err)
+ goto out_free_msg;
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+ return;
+
+out_free_msg:
+ nlmsg_free(msg);
+}
+
+static void devlink_flash_update_begin_notify(struct devlink *devlink)
+{
+ struct devlink_flash_notify params = {};
+
+ __devlink_flash_update_notify(devlink,
+ DEVLINK_CMD_FLASH_UPDATE,
+ &params);
+}
+
+static void devlink_flash_update_end_notify(struct devlink *devlink)
+{
+ struct devlink_flash_notify params = {};
+
+ __devlink_flash_update_notify(devlink,
+ DEVLINK_CMD_FLASH_UPDATE_END,
+ &params);
+}
+
+void devlink_flash_update_status_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long done,
+ unsigned long total)
+{
+ struct devlink_flash_notify params = {
+ .status_msg = status_msg,
+ .component = component,
+ .done = done,
+ .total = total,
+ };
+
+ __devlink_flash_update_notify(devlink,
+ DEVLINK_CMD_FLASH_UPDATE_STATUS,
+ &params);
+}
+EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify);
+
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout)
+{
+ struct devlink_flash_notify params = {
+ .status_msg = status_msg,
+ .component = component,
+ .timeout = timeout,
+ };
+
+ __devlink_flash_update_notify(devlink,
+ DEVLINK_CMD_FLASH_UPDATE_STATUS,
+ &params);
+}
+EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
+
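
On the driver side, these notifiers are intended to be called from within ->flash_update() to stream progress to user space; a hedged sketch, where the chunked write stands in for real device I/O:

/* Hedged sketch of a ->flash_update() op reporting coarse progress. */
static int foo_flash_update(struct devlink *devlink,
			    struct devlink_flash_update_params *params,
			    struct netlink_ext_ack *extack)
{
	size_t total = params->fw->size, off = 0;

	while (off < total) {
		size_t len = min_t(size_t, 4096, total - off);

		/* ... write len bytes from params->fw->data + off ... */
		off += len;
		devlink_flash_update_status_notify(devlink, "Flashing",
						   NULL, off, total);
	}
	return 0;
}
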
+struct devlink_flash_component_lookup_ctx {
+ const char *lookup_name;
+ bool lookup_name_found;
+};
+
+static void
+devlink_flash_component_lookup_cb(const char *version_name,
+ enum devlink_info_version_type version_type,
+ void *version_cb_priv)
+{
+ struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv;
+
+ if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT ||
+ lookup_ctx->lookup_name_found)
+ return;
+
+ lookup_ctx->lookup_name_found =
+ !strcmp(lookup_ctx->lookup_name, version_name);
+}
+
+static int devlink_flash_component_get(struct devlink *devlink,
+ struct nlattr *nla_component,
+ const char **p_component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_flash_component_lookup_ctx lookup_ctx = {};
+ struct devlink_info_req req = {};
+ const char *component;
+ int ret;
+
+ if (!nla_component)
+ return 0;
+
+ component = nla_data(nla_component);
+
+ if (!devlink->ops->info_get) {
+ NL_SET_ERR_MSG_ATTR(extack, nla_component,
+ "component update is not supported by this device");
+ return -EOPNOTSUPP;
+ }
+
+ lookup_ctx.lookup_name = component;
+ req.version_cb = devlink_flash_component_lookup_cb;
+ req.version_cb_priv = &lookup_ctx;
+
+ ret = devlink->ops->info_get(devlink, &req, NULL);
+ if (ret)
+ return ret;
+
+ if (!lookup_ctx.lookup_name_found) {
+ NL_SET_ERR_MSG_ATTR(extack, nla_component,
+ "selected component is not supported by this device");
+ return -EINVAL;
+ }
+ *p_component = component;
+ return 0;
+}
+
+int devlink_nl_cmd_flash_update(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *nla_overwrite_mask, *nla_file_name;
+ struct devlink_flash_update_params params = {};
+ struct devlink *devlink = info->user_ptr[0];
+ const char *file_name;
+ u32 supported_params;
+ int ret;
+
+ if (!devlink->ops->flash_update)
+ return -EOPNOTSUPP;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME))
+ return -EINVAL;
+
+ ret = devlink_flash_component_get(devlink,
+ info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT],
+ &params.component, info->extack);
+ if (ret)
+ return ret;
+
+ supported_params = devlink->ops->supported_flash_update_params;
+
+ nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK];
+ if (nla_overwrite_mask) {
+ struct nla_bitfield32 sections;
+
+ if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) {
+ NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask,
+ "overwrite settings are not supported by this device");
+ return -EOPNOTSUPP;
+ }
+ sections = nla_get_bitfield32(nla_overwrite_mask);
+ params.overwrite_mask = sections.value & sections.selector;
+ }
+
+ nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME];
+ file_name = nla_data(nla_file_name);
+ ret = request_firmware(&params.fw, file_name, devlink->dev);
+ if (ret) {
+ NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name,
+ "failed to locate the requested firmware file");
+ return ret;
+ }
+
+ devlink_flash_update_begin_notify(devlink);
+ ret = devlink->ops->flash_update(devlink, &params, info->extack);
+ devlink_flash_update_end_notify(devlink);
+
+ release_firmware(params.fw);
+
+ return ret;
+}
+
+static void __devlink_compat_running_version(struct devlink *devlink,
+ char *buf, size_t len)
+{
+ struct devlink_info_req req = {};
+ const struct nlattr *nlattr;
+ struct sk_buff *msg;
+ int rem, err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ req.msg = msg;
+ err = devlink->ops->info_get(devlink, &req, NULL);
+ if (err)
+ goto free_msg;
+
+ nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) {
+ const struct nlattr *kv;
+ int rem_kv;
+
+ if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING)
+ continue;
+
+ nla_for_each_nested(kv, nlattr, rem_kv) {
+ if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
+ continue;
+
+ strlcat(buf, nla_data(kv), len);
+ strlcat(buf, " ", len);
+ }
+ }
+free_msg:
+ nlmsg_free(msg);
+}
+
+void devlink_compat_running_version(struct devlink *devlink,
+ char *buf, size_t len)
+{
+ if (!devlink->ops->info_get)
+ return;
+
+ devl_lock(devlink);
+ if (devl_is_registered(devlink))
+ __devlink_compat_running_version(devlink, buf, len);
+ devl_unlock(devlink);
+}
+
+int devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
+{
+ struct devlink_flash_update_params params = {};
+ int ret;
+
+ devl_lock(devlink);
+ if (!devl_is_registered(devlink)) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (!devlink->ops->flash_update) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
+ ret = request_firmware(&params.fw, file_name, devlink->dev);
+ if (ret)
+ goto out_unlock;
+
+ devlink_flash_update_begin_notify(devlink);
+ ret = devlink->ops->flash_update(devlink, &params, NULL);
+ devlink_flash_update_end_notify(devlink);
+
+ release_firmware(params.fw);
+out_unlock:
+ devl_unlock(devlink);
+
+ return ret;
+}
+
+static int
+devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink,
+ u32 portid, u32 seq, int flags,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *selftests;
+ void *hdr;
+ int err;
+ int i;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags,
+ DEVLINK_CMD_SELFTESTS_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = -EMSGSIZE;
+ if (devlink_nl_put_handle(msg, devlink))
+ goto err_cancel_msg;
+
+ selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
+ if (!selftests)
+ goto err_cancel_msg;
+
+ for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
+ i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
+ if (devlink->ops->selftest_check(devlink, i, extack)) {
+ err = nla_put_flag(msg, i);
+ if (err)
+ goto err_cancel_msg;
+ }
+ }
+
+ nla_nest_end(msg, selftests);
+ genlmsg_end(msg, hdr);
+ return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ if (!devlink->ops->selftest_check)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid,
+ info->snd_seq, 0, info->extack);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int
+devlink_nl_cmd_selftests_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb)
+{
+ if (!devlink->ops->selftest_check)
+ return 0;
+
+ return devlink_nl_selftests_fill(msg, devlink,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->extack);
+}
+
+const struct devlink_cmd devl_cmd_selftests_get = {
+ .dump_one = devlink_nl_cmd_selftests_get_dump_one,
+};
+
+static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
+ enum devlink_selftest_status test_status)
+{
+ struct nlattr *result_attr;
+
+ result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT);
+ if (!result_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) ||
+ nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS,
+ test_status))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, result_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, result_attr);
+ return -EMSGSIZE;
+}
+
+static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
+ [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
+};
+
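
For reference, the two ops this command relies on have roughly this shape in a driver; the flash selftest is the only ID defined so far, and the body here is a hypothetical stub.

/* Hedged sketch of selftest ops: only the flash selftest is claimed. */
static bool foo_selftest_check(struct devlink *devlink, unsigned int id,
			       struct netlink_ext_ack *extack)
{
	return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}

static enum devlink_selftest_status
foo_selftest_run(struct devlink *devlink, unsigned int id,
		 struct netlink_ext_ack *extack)
{
	if (id != DEVLINK_ATTR_SELFTEST_ID_FLASH)
		return DEVLINK_SELFTEST_STATUS_SKIP;

	/* ... exercise the flash and verify the result ... */
	return DEVLINK_SELFTEST_STATUS_PASS;
}
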
+int devlink_nl_cmd_selftests_run(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
+ struct devlink *devlink = info->user_ptr[0];
+ struct nlattr *attrs, *selftests;
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+ int i;
+
+ if (!devlink->ops->selftest_run || !devlink->ops->selftest_check)
+ return -EOPNOTSUPP;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS))
+ return -EINVAL;
+
+ attrs = info->attrs[DEVLINK_ATTR_SELFTESTS];
+
+ err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs,
+ devlink_selftest_nl_policy, info->extack);
+ if (err < 0)
+ return err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = -EMSGSIZE;
+ hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN);
+ if (!hdr)
+ goto free_msg;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto genlmsg_cancel;
+
+ selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
+ if (!selftests)
+ goto genlmsg_cancel;
+
+ for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
+ i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
+ enum devlink_selftest_status test_status;
+
+ if (nla_get_flag(tb[i])) {
+ if (!devlink->ops->selftest_check(devlink, i,
+ info->extack)) {
+ if (devlink_selftest_result_put(msg, i,
+ DEVLINK_SELFTEST_STATUS_SKIP))
+ goto selftests_nest_cancel;
+ continue;
+ }
+
+ test_status = devlink->ops->selftest_run(devlink, i,
+ info->extack);
+ if (devlink_selftest_result_put(msg, i, test_status))
+ goto selftests_nest_cancel;
+ }
+ }
+
+ nla_nest_end(msg, selftests);
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+selftests_nest_cancel:
+ nla_nest_cancel(msg, selftests);
+genlmsg_cancel:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return err;
+}
diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h
new file mode 100644
index 000000000000..e133f423294a
--- /dev/null
+++ b/net/devlink/devl_internal.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+#include <net/devlink.h>
+#include <net/net_namespace.h>
+
+#define DEVLINK_REGISTERED XA_MARK_1
+
+#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
+ (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
+
+struct devlink_dev_stats {
+ u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+ u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+};
+
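
Both arrays are flattened (limit, action) matrices; the inline arithmetic used in dev.c (stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action) can be read as this standalone helper, written out here purely for illustration and not present in the patch:

/* Illustrative only: index of the counter for a (limit, action) pair,
 * matching __devlink_reload_stats_update() and devlink_reload_stats_put().
 */
static inline int devlink_reload_stat_index(enum devlink_reload_limit limit,
					    enum devlink_reload_action action)
{
	return limit * __DEVLINK_RELOAD_ACTION_MAX + action;
}
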
+struct devlink {
+ u32 index;
+ struct xarray ports;
+ struct list_head rate_list;
+ struct list_head sb_list;
+ struct list_head dpipe_table_list;
+ struct list_head resource_list;
+ struct xarray params;
+ struct list_head region_list;
+ struct list_head reporter_list;
+ struct devlink_dpipe_headers *dpipe_headers;
+ struct list_head trap_list;
+ struct list_head trap_group_list;
+ struct list_head trap_policer_list;
+ struct list_head linecard_list;
+ const struct devlink_ops *ops;
+ struct xarray snapshot_ids;
+ struct devlink_dev_stats stats;
+ struct device *dev;
+ possible_net_t _net;
+ /* Serializes access to devlink instance specific objects such as
+ * port, sb, dpipe, resource, params, region, traps and more.
+ */
+ struct mutex lock;
+ struct lock_class_key lock_key;
+ u8 reload_failed:1;
+ refcount_t refcount;
+ struct rcu_work rwork;
+ struct notifier_block netdevice_nb;
+ char priv[] __aligned(NETDEV_ALIGN);
+};
+
+extern struct xarray devlinks;
+extern struct genl_family devlink_nl_family;
+
+/* devlink instances are open to access from user space after the
+ * devlink_register() call. Such a logical barrier allows us to have certain
+ * expectations related to locking.
+ *
+ * Before *_register() - we are in the initialization stage and no parallel
+ * access to the devlink instance is possible. All drivers perform that phase
+ * while implicitly holding the device_lock.
+ *
+ * After *_register() - users and the driver can access the devlink instance
+ * at the same time.
+ */
+#define ASSERT_DEVLINK_REGISTERED(d) \
+ WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
+#define ASSERT_DEVLINK_NOT_REGISTERED(d) \
+ WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
+
+/* Iterate over the devlink pointers we managed to take a reference on.
+ * devlink_put() needs to be called for each iterated devlink pointer
+ * in the loop body in order to release the reference.
+ */
+#define devlinks_xa_for_each_registered_get(net, index, devlink) \
+ for (index = 0; (devlink = devlinks_xa_find_get(net, &index)); index++)
+
+struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp);
+
+static inline bool devl_is_registered(struct devlink *devlink)
+{
+ devl_assert_locked(devlink);
+ return xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
+}
+
+/* Netlink */
+#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
+#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
+#define DEVLINK_NL_FLAG_NEED_RATE BIT(2)
+#define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
+#define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4)
+
+enum devlink_multicast_groups {
+ DEVLINK_MCGRP_CONFIG,
+};
+
+/* state held across netlink dumps */
+struct devlink_nl_dump_state {
+ unsigned long instance;
+ int idx;
+ union {
+ /* DEVLINK_CMD_REGION_READ */
+ struct {
+ u64 start_offset;
+ };
+ /* DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET */
+ struct {
+ u64 dump_ts;
+ };
+ };
+};
+
+struct devlink_cmd {
+ int (*dump_one)(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb);
+};
+
+extern const struct genl_small_ops devlink_nl_ops[56];
+
+struct devlink *
+devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs);
+
+void devlink_notify_unregister(struct devlink *devlink);
+void devlink_notify_register(struct devlink *devlink);
+
+int devlink_nl_instance_iter_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb);
+
+static inline struct devlink_nl_dump_state *
+devlink_dump_state(struct netlink_callback *cb)
+{
+ NL_ASSERT_DUMP_CTX_FITS(struct devlink_nl_dump_state);
+
+ return (struct devlink_nl_dump_state *)cb->ctx;
+}
+
+static inline int
+devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
+{
+ if (nla_put_string(msg, DEVLINK_ATTR_BUS_NAME, devlink->dev->bus->name))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, DEVLINK_ATTR_DEV_NAME, dev_name(devlink->dev)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+/* Commands */
+extern const struct devlink_cmd devl_cmd_get;
+extern const struct devlink_cmd devl_cmd_port_get;
+extern const struct devlink_cmd devl_cmd_sb_get;
+extern const struct devlink_cmd devl_cmd_sb_pool_get;
+extern const struct devlink_cmd devl_cmd_sb_port_pool_get;
+extern const struct devlink_cmd devl_cmd_sb_tc_pool_bind_get;
+extern const struct devlink_cmd devl_cmd_param_get;
+extern const struct devlink_cmd devl_cmd_region_get;
+extern const struct devlink_cmd devl_cmd_info_get;
+extern const struct devlink_cmd devl_cmd_health_reporter_get;
+extern const struct devlink_cmd devl_cmd_trap_get;
+extern const struct devlink_cmd devl_cmd_trap_group_get;
+extern const struct devlink_cmd devl_cmd_trap_policer_get;
+extern const struct devlink_cmd devl_cmd_rate_get;
+extern const struct devlink_cmd devl_cmd_linecard_get;
+extern const struct devlink_cmd devl_cmd_selftests_get;
+
+/* Notify */
+void devlink_notify(struct devlink *devlink, enum devlink_command cmd);
+
+/* Ports */
+int devlink_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr);
+
+struct devlink_port *
+devlink_port_get_from_info(struct devlink *devlink, struct genl_info *info);
+struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
+ struct nlattr **attrs);
+
+/* Reload */
+bool devlink_reload_actions_valid(const struct devlink_ops *ops);
+int devlink_reload(struct devlink *devlink, struct net *dest_net,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed, struct netlink_ext_ack *extack);
+
+static inline bool devlink_reload_supported(const struct devlink_ops *ops)
+{
+ return ops->reload_down && ops->reload_up;
+}
+
+/* Params */
+void devlink_params_driverinit_load_new(struct devlink *devlink);
+
+/* Resources */
+struct devlink_resource;
+int devlink_resources_validate(struct devlink *devlink,
+ struct devlink_resource *resource,
+ struct genl_info *info);
+
+/* Line cards */
+struct devlink_linecard;
+
+struct devlink_linecard *
+devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info);
+
+/* Rates */
+int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
+struct devlink_rate *
+devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info);
+struct devlink_rate *
+devlink_rate_node_get_from_info(struct devlink *devlink,
+ struct genl_info *info);
+/* Devlink nl cmds */
+int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_info_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_flash_update(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_selftests_run(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
+ struct genl_info *info);
diff --git a/net/devlink/health.c b/net/devlink/health.c
new file mode 100644
index 000000000000..0839706d5741
--- /dev/null
+++ b/net/devlink/health.c
@@ -0,0 +1,1333 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include <net/genetlink.h>
+#include <net/sock.h>
+#include <trace/events/devlink.h>
+#include "devl_internal.h"
+
+struct devlink_fmsg_item {
+ struct list_head list;
+ int attrtype;
+ u8 nla_type;
+ u16 len;
+ int value[];
+};
+
+struct devlink_fmsg {
+ struct list_head item_list;
+ bool putting_binary; /* This flag forces enclosing of binary data
+ * in array brackets. It forces use of
+ * the dedicated API:
+ * devlink_fmsg_binary_pair_nest_start()
+ * devlink_fmsg_binary_pair_nest_end()
+ */
+};
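+
+/* Illustrative sketch of the call order that putting_binary enforces
+ * ("chunk" and "chunk_len" are hypothetical driver-side names); any
+ * non-binary put between start and end fails with -EINVAL:
+ *
+ *   devlink_fmsg_binary_pair_nest_start(fmsg, "data");
+ *   devlink_fmsg_binary_put(fmsg, chunk, chunk_len);   // once per chunk
+ *   devlink_fmsg_binary_pair_nest_end(fmsg);
+ */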
+
+static struct devlink_fmsg *devlink_fmsg_alloc(void)
+{
+ struct devlink_fmsg *fmsg;
+
+ fmsg = kzalloc(sizeof(*fmsg), GFP_KERNEL);
+ if (!fmsg)
+ return NULL;
+
+ INIT_LIST_HEAD(&fmsg->item_list);
+
+ return fmsg;
+}
+
+static void devlink_fmsg_free(struct devlink_fmsg *fmsg)
+{
+ struct devlink_fmsg_item *item, *tmp;
+
+ list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {
+ list_del(&item->list);
+ kfree(item);
+ }
+ kfree(fmsg);
+}
+
+struct devlink_health_reporter {
+ struct list_head list;
+ void *priv;
+ const struct devlink_health_reporter_ops *ops;
+ struct devlink *devlink;
+ struct devlink_port *devlink_port;
+ struct devlink_fmsg *dump_fmsg;
+ struct mutex dump_lock; /* serializes parallel reads/writes of the dump buffers */
+ u64 graceful_period;
+ bool auto_recover;
+ bool auto_dump;
+ u8 health_state;
+ u64 dump_ts;
+ u64 dump_real_ts;
+ u64 error_count;
+ u64 recovery_count;
+ u64 last_recovery_ts;
+};
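+
+/* Reporter lifecycle in brief: devlink_health_report() marks the reporter
+ * ERROR and bumps error_count; if auto_dump is set, a dump is captured
+ * under dump_lock, and if auto_recover is set, ops->recover() is invoked,
+ * which on success bumps recovery_count and returns the state to HEALTHY.
+ */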
+
+void *
+devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
+{
+ return reporter->priv;
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_priv);
+
+static struct devlink_health_reporter *
+__devlink_health_reporter_find_by_name(struct list_head *reporter_list,
+ const char *reporter_name)
+{
+ struct devlink_health_reporter *reporter;
+
+ list_for_each_entry(reporter, reporter_list, list)
+ if (!strcmp(reporter->ops->name, reporter_name))
+ return reporter;
+ return NULL;
+}
+
+static struct devlink_health_reporter *
+devlink_health_reporter_find_by_name(struct devlink *devlink,
+ const char *reporter_name)
+{
+ return __devlink_health_reporter_find_by_name(&devlink->reporter_list,
+ reporter_name);
+}
+
+static struct devlink_health_reporter *
+devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port,
+ const char *reporter_name)
+{
+ return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list,
+ reporter_name);
+}
+
+static struct devlink_health_reporter *
+__devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv)
+{
+ struct devlink_health_reporter *reporter;
+
+ if (WARN_ON(graceful_period && !ops->recover))
+ return ERR_PTR(-EINVAL);
+
+ reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
+ if (!reporter)
+ return ERR_PTR(-ENOMEM);
+
+ reporter->priv = priv;
+ reporter->ops = ops;
+ reporter->devlink = devlink;
+ reporter->graceful_period = graceful_period;
+ reporter->auto_recover = !!ops->recover;
+ reporter->auto_dump = !!ops->dump;
+ mutex_init(&reporter->dump_lock);
+ return reporter;
+}
+
+/**
+ * devl_port_health_reporter_create() - create devlink health reporter for
+ * the specified port instance
+ *
+ * @port: devlink_port to which health reports will relate
+ * @ops: devlink health reporter ops
+ * @graceful_period: min time (in msec) between recovery attempts
+ * @priv: driver priv pointer
+ */
+struct devlink_health_reporter *
+devl_port_health_reporter_create(struct devlink_port *port,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv)
+{
+ struct devlink_health_reporter *reporter;
+
+ devl_assert_locked(port->devlink);
+
+ if (__devlink_health_reporter_find_by_name(&port->reporter_list,
+ ops->name))
+ return ERR_PTR(-EEXIST);
+
+ reporter = __devlink_health_reporter_create(port->devlink, ops,
+ graceful_period, priv);
+ if (IS_ERR(reporter))
+ return reporter;
+
+ reporter->devlink_port = port;
+ list_add_tail(&reporter->list, &port->reporter_list);
+ return reporter;
+}
+EXPORT_SYMBOL_GPL(devl_port_health_reporter_create);
+
+struct devlink_health_reporter *
+devlink_port_health_reporter_create(struct devlink_port *port,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv)
+{
+ struct devlink_health_reporter *reporter;
+ struct devlink *devlink = port->devlink;
+
+ devl_lock(devlink);
+ reporter = devl_port_health_reporter_create(port, ops,
+ graceful_period, priv);
+ devl_unlock(devlink);
+ return reporter;
+}
+EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create);
+
+/**
+ * devl_health_reporter_create() - create devlink health reporter
+ *
+ * @devlink: devlink instance to which the health reports will relate
+ * @ops: devlink health reporter ops
+ * @graceful_period: min time (in msec) between recovery attempts
+ * @priv: driver priv pointer
+ */
+struct devlink_health_reporter *
+devl_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv)
+{
+ struct devlink_health_reporter *reporter;
+
+ devl_assert_locked(devlink);
+
+ if (devlink_health_reporter_find_by_name(devlink, ops->name))
+ return ERR_PTR(-EEXIST);
+
+ reporter = __devlink_health_reporter_create(devlink, ops,
+ graceful_period, priv);
+ if (IS_ERR(reporter))
+ return reporter;
+
+ list_add_tail(&reporter->list, &devlink->reporter_list);
+ return reporter;
+}
+EXPORT_SYMBOL_GPL(devl_health_reporter_create);
+
+struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv)
+{
+ struct devlink_health_reporter *reporter;
+
+ devl_lock(devlink);
+ reporter = devl_health_reporter_create(devlink, ops,
+ graceful_period, priv);
+ devl_unlock(devlink);
+ return reporter;
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
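+
+/* Minimal driver-side creation sketch (all names are hypothetical, error
+ * handling elided); .recover and .dump are optional, and supplying them
+ * enables auto_recover and auto_dump respectively:
+ *
+ *   static const struct devlink_health_reporter_ops my_ops = {
+ *           .name = "my_reporter",
+ *           .recover = my_recover,
+ *           .dump = my_dump,
+ *   };
+ *
+ *   reporter = devlink_health_reporter_create(devlink, &my_ops,
+ *                                             graceful_period_ms, priv);
+ *   if (IS_ERR(reporter))
+ *           return PTR_ERR(reporter);
+ */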
+
+static void
+devlink_health_reporter_free(struct devlink_health_reporter *reporter)
+{
+ mutex_destroy(&reporter->dump_lock);
+ if (reporter->dump_fmsg)
+ devlink_fmsg_free(reporter->dump_fmsg);
+ kfree(reporter);
+}
+
+/**
+ * devl_health_reporter_destroy() - destroy devlink health reporter
+ *
+ * @reporter: devlink health reporter to destroy
+ */
+void
+devl_health_reporter_destroy(struct devlink_health_reporter *reporter)
+{
+ devl_assert_locked(reporter->devlink);
+
+ list_del(&reporter->list);
+ devlink_health_reporter_free(reporter);
+}
+EXPORT_SYMBOL_GPL(devl_health_reporter_destroy);
+
+void
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
+{
+ struct devlink *devlink = reporter->devlink;
+
+ devl_lock(devlink);
+ devl_health_reporter_destroy(reporter);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);
+
+static int
+devlink_nl_health_reporter_fill(struct sk_buff *msg,
+ struct devlink_health_reporter *reporter,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags)
+{
+ struct devlink *devlink = reporter->devlink;
+ struct nlattr *reporter_attr;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto genlmsg_cancel;
+
+ if (reporter->devlink_port) {
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index))
+ goto genlmsg_cancel;
+ }
+ reporter_attr = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_HEALTH_REPORTER);
+ if (!reporter_attr)
+ goto genlmsg_cancel;
+ if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ reporter->ops->name))
+ goto reporter_nest_cancel;
+ if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE,
+ reporter->health_state))
+ goto reporter_nest_cancel;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
+ reporter->error_count, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
+ reporter->recovery_count, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (reporter->ops->recover &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
+ reporter->graceful_period,
+ DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (reporter->ops->recover &&
+ nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
+ reporter->auto_recover))
+ goto reporter_nest_cancel;
+ if (reporter->dump_fmsg &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
+ jiffies_to_msecs(reporter->dump_ts),
+ DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (reporter->dump_fmsg &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
+ reporter->dump_real_ts, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (reporter->ops->dump &&
+ nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
+ reporter->auto_dump))
+ goto reporter_nest_cancel;
+
+ nla_nest_end(msg, reporter_attr);
+ genlmsg_end(msg, hdr);
+ return 0;
+
+reporter_nest_cancel:
+ nla_nest_cancel(msg, reporter_attr);
+genlmsg_cancel:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static struct devlink_health_reporter *
+devlink_health_reporter_get_from_attrs(struct devlink *devlink,
+ struct nlattr **attrs)
+{
+ struct devlink_port *devlink_port;
+ char *reporter_name;
+
+ if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
+ return NULL;
+
+ reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
+ devlink_port = devlink_port_get_from_attrs(devlink, attrs);
+ if (IS_ERR(devlink_port))
+ return devlink_health_reporter_find_by_name(devlink,
+ reporter_name);
+ else
+ return devlink_port_health_reporter_find_by_name(devlink_port,
+ reporter_name);
+}
+
+static struct devlink_health_reporter *
+devlink_health_reporter_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ return devlink_health_reporter_get_from_attrs(devlink, info->attrs);
+}
+
+int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ struct sk_buff *msg;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_health_reporter_fill(msg, reporter,
+ DEVLINK_CMD_HEALTH_REPORTER_GET,
+ info->snd_portid, info->snd_seq,
+ 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int
+devlink_nl_cmd_health_reporter_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_health_reporter *reporter;
+ struct devlink_port *port;
+ unsigned long port_index;
+ int idx = 0;
+ int err;
+
+ list_for_each_entry(reporter, &devlink->reporter_list, list) {
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_health_reporter_fill(msg, reporter,
+ DEVLINK_CMD_HEALTH_REPORTER_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ state->idx = idx;
+ return err;
+ }
+ idx++;
+ }
+ xa_for_each(&devlink->ports, port_index, port) {
+ list_for_each_entry(reporter, &port->reporter_list, list) {
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_health_reporter_fill(msg, reporter,
+ DEVLINK_CMD_HEALTH_REPORTER_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ state->idx = idx;
+ return err;
+ }
+ idx++;
+ }
+ }
+
+ return 0;
+}
+
+const struct devlink_cmd devl_cmd_health_reporter_get = {
+ .dump_one = devlink_nl_cmd_health_reporter_get_dump_one,
+};
+
+int devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->recover &&
+ (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
+ info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]))
+ return -EOPNOTSUPP;
+
+ if (!reporter->ops->dump &&
+ info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP])
+ return -EOPNOTSUPP;
+
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
+ reporter->graceful_period =
+ nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]);
+
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])
+ reporter->auto_recover =
+ nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
+
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP])
+ reporter->auto_dump =
+ nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]);
+
+ return 0;
+}
+
+static void devlink_recover_notify(struct devlink_health_reporter *reporter,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = reporter->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+ WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_health_reporter_fill(msg, reporter, cmd, 0, 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+ 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+void
+devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter)
+{
+ reporter->recovery_count++;
+ reporter->last_recovery_ts = jiffies;
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_recovery_done);
+
+static int
+devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx, struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (reporter->health_state == DEVLINK_HEALTH_REPORTER_STATE_HEALTHY)
+ return 0;
+
+ if (!reporter->ops->recover)
+ return -EOPNOTSUPP;
+
+ err = reporter->ops->recover(reporter, priv_ctx, extack);
+ if (err)
+ return err;
+
+ devlink_health_reporter_recovery_done(reporter);
+ reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+ devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+
+ return 0;
+}
+
+static void
+devlink_health_dump_clear(struct devlink_health_reporter *reporter)
+{
+ if (!reporter->dump_fmsg)
+ return;
+ devlink_fmsg_free(reporter->dump_fmsg);
+ reporter->dump_fmsg = NULL;
+}
+
+static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!reporter->ops->dump)
+ return 0;
+
+ if (reporter->dump_fmsg)
+ return 0;
+
+ reporter->dump_fmsg = devlink_fmsg_alloc();
+ if (!reporter->dump_fmsg)
+ return -ENOMEM;
+
+ err = devlink_fmsg_obj_nest_start(reporter->dump_fmsg);
+ if (err)
+ goto dump_err;
+
+ err = reporter->ops->dump(reporter, reporter->dump_fmsg,
+ priv_ctx, extack);
+ if (err)
+ goto dump_err;
+
+ err = devlink_fmsg_obj_nest_end(reporter->dump_fmsg);
+ if (err)
+ goto dump_err;
+
+ reporter->dump_ts = jiffies;
+ reporter->dump_real_ts = ktime_get_real_ns();
+
+ return 0;
+
+dump_err:
+ devlink_health_dump_clear(reporter);
+ return err;
+}
+
+int devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ enum devlink_health_reporter_state prev_health_state;
+ struct devlink *devlink = reporter->devlink;
+ unsigned long recover_ts_threshold;
+ int ret;
+
+ /* log the current error */
+ WARN_ON(!msg);
+ trace_devlink_health_report(devlink, reporter->ops->name, msg);
+ reporter->error_count++;
+ prev_health_state = reporter->health_state;
+ reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+ devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+
+ /* abort if the previous error wasn't recovered, or if we are
+ * still within the graceful period since the last recovery
+ */
+ recover_ts_threshold = reporter->last_recovery_ts +
+ msecs_to_jiffies(reporter->graceful_period);
+ if (reporter->auto_recover &&
+ (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
+ (reporter->last_recovery_ts && reporter->recovery_count &&
+ time_is_after_jiffies(recover_ts_threshold)))) {
+ trace_devlink_health_recover_aborted(devlink,
+ reporter->ops->name,
+ reporter->health_state,
+ jiffies -
+ reporter->last_recovery_ts);
+ return -ECANCELED;
+ }
+
+ if (reporter->auto_dump) {
+ mutex_lock(&reporter->dump_lock);
+ /* store a dump of the current error for later analysis */
+ devlink_health_do_dump(reporter, priv_ctx, NULL);
+ mutex_unlock(&reporter->dump_lock);
+ }
+
+ if (!reporter->auto_recover)
+ return 0;
+
+ devl_lock(devlink);
+ ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL);
+ devl_unlock(devlink);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devlink_health_report);
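+
+/* Usage sketch (hypothetical context): a driver that detected an error
+ * reports it and lets the auto dump/recover machinery above take over:
+ *
+ *   err = devlink_health_report(reporter, "TX timeout", &err_ctx);
+ *
+ * A return of -ECANCELED means recovery was aborted because the previous
+ * error was not recovered or the graceful period has not yet elapsed.
+ */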
+
+void
+devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
+ enum devlink_health_reporter_state state)
+{
+ if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
+ state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
+ return;
+
+ if (reporter->health_state == state)
+ return;
+
+ reporter->health_state = state;
+ trace_devlink_health_reporter_state_update(reporter->devlink,
+ reporter->ops->name, state);
+ devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
+
+int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ return devlink_health_reporter_recover(reporter, NULL, info->extack);
+}
+
+static int devlink_fmsg_nest_common(struct devlink_fmsg *fmsg,
+ int attrtype)
+{
+ struct devlink_fmsg_item *item;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->attrtype = attrtype;
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
+
+static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END);
+}
+
+int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end);
+
+#define DEVLINK_FMSG_MAX_SIZE (GENLMSG_DEFAULT_SIZE - GENL_HDRLEN - NLA_HDRLEN)
+
+static int devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name)
+{
+ struct devlink_fmsg_item *item;
+
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE)
+ return -EMSGSIZE;
+
+ item = kzalloc(sizeof(*item) + strlen(name) + 1, GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->nla_type = NLA_NUL_STRING;
+ item->len = strlen(name) + 1;
+ item->attrtype = DEVLINK_ATTR_FMSG_OBJ_NAME;
+ memcpy(&item->value, name, item->len);
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ int err;
+
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_put_name(fmsg, name);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start);
+
+int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end);
+
+int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name)
+{
+ int err;
+
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_ARR_NEST_START);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_start);
+
+int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ err = devlink_fmsg_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
+
+int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ fmsg->putting_binary = true;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start);
+
+int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ if (!fmsg->putting_binary)
+ return -EINVAL;
+
+ fmsg->putting_binary = false;
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end);
+
+static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
+ const void *value, u16 value_len,
+ u8 value_nla_type)
+{
+ struct devlink_fmsg_item *item;
+
+ if (value_len > DEVLINK_FMSG_MAX_SIZE)
+ return -EMSGSIZE;
+
+ item = kzalloc(sizeof(*item) + value_len, GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->nla_type = value_nla_type;
+ item->len = value_len;
+ item->attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
+ memcpy(&item->value, value, item->len);
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+static int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG);
+}
+
+static int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8);
+}
+
+int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
+
+static int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64);
+}
+
+int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
+{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1,
+ NLA_NUL_STRING);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
+
+int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len)
+{
+ if (!fmsg->putting_binary)
+ return -EINVAL;
+
+ return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
+
+int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ bool value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_bool_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_bool_pair_put);
+
+int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u8 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u8_pair_put);
+
+int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u32 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u32_pair_put);
+
+int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u64 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u64_pair_put);
+
+int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const char *value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_string_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
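+
+/* Sketch of a diagnose/dump callback body built from the pair helpers
+ * above (attribute names are hypothetical; the doit handlers already wrap
+ * the callback in devlink_fmsg_obj_nest_start()/_end()):
+ *
+ *   err = devlink_fmsg_u32_pair_put(fmsg, "tx_errors", tx_errors);
+ *   if (err)
+ *           return err;
+ *   return devlink_fmsg_string_pair_put(fmsg, "state", "ok");
+ */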
+
+int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const void *value, u32 value_len)
+{
+ u32 data_size;
+ int end_err;
+ u32 offset;
+ int err;
+
+ err = devlink_fmsg_binary_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ for (offset = 0; offset < value_len; offset += data_size) {
+ data_size = value_len - offset;
+ if (data_size > DEVLINK_FMSG_MAX_SIZE)
+ data_size = DEVLINK_FMSG_MAX_SIZE;
+ err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
+ if (err)
+ break;
+ /* Exit the loop with a break (instead of a
+ * return) so that putting_binary is turned off by
+ * devlink_fmsg_binary_pair_nest_end()
+ */
+ }
+
+ end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
+ if (end_err)
+ err = end_err;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
+
+static int
+devlink_fmsg_item_fill_type(struct devlink_fmsg_item *msg, struct sk_buff *skb)
+{
+ switch (msg->nla_type) {
+ case NLA_FLAG:
+ case NLA_U8:
+ case NLA_U32:
+ case NLA_U64:
+ case NLA_NUL_STRING:
+ case NLA_BINARY:
+ return nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE,
+ msg->nla_type);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
+{
+ int attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
+ u8 tmp;
+
+ switch (msg->nla_type) {
+ case NLA_FLAG:
+ /* Always provide flag data, regardless of its value */
+ tmp = *(bool *)msg->value;
+
+ return nla_put_u8(skb, attrtype, tmp);
+ case NLA_U8:
+ return nla_put_u8(skb, attrtype, *(u8 *)msg->value);
+ case NLA_U32:
+ return nla_put_u32(skb, attrtype, *(u32 *)msg->value);
+ case NLA_U64:
+ return nla_put_u64_64bit(skb, attrtype, *(u64 *)msg->value,
+ DEVLINK_ATTR_PAD);
+ case NLA_NUL_STRING:
+ return nla_put_string(skb, attrtype, (char *)&msg->value);
+ case NLA_BINARY:
+ return nla_put(skb, attrtype, msg->len, (void *)&msg->value);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
+ int *start)
+{
+ struct devlink_fmsg_item *item;
+ struct nlattr *fmsg_nlattr;
+ int err = 0;
+ int i = 0;
+
+ fmsg_nlattr = nla_nest_start_noflag(skb, DEVLINK_ATTR_FMSG);
+ if (!fmsg_nlattr)
+ return -EMSGSIZE;
+
+ list_for_each_entry(item, &fmsg->item_list, list) {
+ if (i < *start) {
+ i++;
+ continue;
+ }
+
+ switch (item->attrtype) {
+ case DEVLINK_ATTR_FMSG_OBJ_NEST_START:
+ case DEVLINK_ATTR_FMSG_PAIR_NEST_START:
+ case DEVLINK_ATTR_FMSG_ARR_NEST_START:
+ case DEVLINK_ATTR_FMSG_NEST_END:
+ err = nla_put_flag(skb, item->attrtype);
+ break;
+ case DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA:
+ err = devlink_fmsg_item_fill_type(item, skb);
+ if (err)
+ break;
+ err = devlink_fmsg_item_fill_data(item, skb);
+ break;
+ case DEVLINK_ATTR_FMSG_OBJ_NAME:
+ err = nla_put_string(skb, item->attrtype,
+ (char *)&item->value);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if (!err)
+ *start = ++i;
+ else
+ break;
+ }
+
+ nla_nest_end(skb, fmsg_nlattr);
+ return err;
+}
+
+static int devlink_fmsg_snd(struct devlink_fmsg *fmsg,
+ struct genl_info *info,
+ enum devlink_command cmd, int flags)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ bool last = false;
+ int index = 0;
+ void *hdr;
+ int err;
+
+ while (!last) {
+ int tmp_index = index;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, flags | NLM_F_MULTI, cmd);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
+ if (!err)
+ last = true;
+ else if (err != -EMSGSIZE || tmp_index == index)
+ goto nla_put_failure;
+
+ genlmsg_end(skb, hdr);
+ err = genlmsg_reply(skb, info);
+ if (err)
+ return err;
+ }
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+ NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ return genlmsg_reply(skb, info);
+
+nla_put_failure:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int devlink_fmsg_dumpit(struct devlink_fmsg *fmsg, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ enum devlink_command cmd)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ int index = state->idx;
+ int tmp_index = index;
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, cmd);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
+ if ((err && err != -EMSGSIZE) || tmp_index == index)
+ goto nla_put_failure;
+
+ state->idx = index;
+ genlmsg_end(skb, hdr);
+ return skb->len;
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return err;
+}
+
+int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ struct devlink_fmsg *fmsg;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->diagnose)
+ return -EOPNOTSUPP;
+
+ fmsg = devlink_fmsg_alloc();
+ if (!fmsg)
+ return -ENOMEM;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ goto out;
+
+ err = reporter->ops->diagnose(reporter, fmsg, info->extack);
+ if (err)
+ goto out;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ goto out;
+
+ err = devlink_fmsg_snd(fmsg, info,
+ DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 0);
+
+out:
+ devlink_fmsg_free(fmsg);
+ return err;
+}
+
+static struct devlink_health_reporter *
+devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
+{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ struct devlink_health_reporter *reporter;
+ struct nlattr **attrs = info->attrs;
+ struct devlink *devlink;
+
+ devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
+ if (IS_ERR(devlink))
+ return NULL;
+ devl_unlock(devlink);
+
+ reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
+ devlink_put(devlink);
+ return reporter;
+}
+
+int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_health_reporter *reporter;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_cb(cb);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->dump)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&reporter->dump_lock);
+ if (!state->idx) {
+ err = devlink_health_do_dump(reporter, NULL, cb->extack);
+ if (err)
+ goto unlock;
+ state->dump_ts = reporter->dump_ts;
+ }
+ if (!reporter->dump_fmsg || state->dump_ts != reporter->dump_ts) {
+ NL_SET_ERR_MSG(cb->extack, "Dump trampled, please retry");
+ err = -EAGAIN;
+ goto unlock;
+ }
+
+ err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
+ DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
+unlock:
+ mutex_unlock(&reporter->dump_lock);
+ return err;
+}
+
+int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->dump)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&reporter->dump_lock);
+ devlink_health_dump_clear(reporter);
+ mutex_unlock(&reporter->dump_lock);
+ return 0;
+}
+
+int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->test)
+ return -EOPNOTSUPP;
+
+ return reporter->ops->test(reporter, info->extack);
+}
diff --git a/net/core/devlink.c b/net/devlink/leftover.c
index 0bfc144df8b9..dffca2f9bfa7 100644
--- a/net/core/devlink.c
+++ b/net/devlink/leftover.c
@@ -31,58 +31,12 @@
#define CREATE_TRACE_POINTS
#include <trace/events/devlink.h>
-#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
- (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
-
-struct devlink_dev_stats {
- u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
- u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
-};
-
-struct devlink {
- u32 index;
- struct xarray ports;
- struct list_head rate_list;
- struct list_head sb_list;
- struct list_head dpipe_table_list;
- struct list_head resource_list;
- struct list_head param_list;
- struct list_head region_list;
- struct list_head reporter_list;
- struct mutex reporters_lock; /* protects reporter_list */
- struct devlink_dpipe_headers *dpipe_headers;
- struct list_head trap_list;
- struct list_head trap_group_list;
- struct list_head trap_policer_list;
- struct list_head linecard_list;
- struct mutex linecards_lock; /* protects linecard_list */
- const struct devlink_ops *ops;
- u64 features;
- struct xarray snapshot_ids;
- struct devlink_dev_stats stats;
- struct device *dev;
- possible_net_t _net;
- /* Serializes access to devlink instance specific objects such as
- * port, sb, dpipe, resource, params, region, traps and more.
- */
- struct mutex lock;
- struct lock_class_key lock_key;
- u8 reload_failed:1;
- refcount_t refcount;
- struct completion comp;
- struct rcu_head rcu;
- struct notifier_block netdevice_nb;
- char priv[] __aligned(NETDEV_ALIGN);
-};
-
-struct devlink_linecard_ops;
-struct devlink_linecard_type;
+#include "devl_internal.h"
struct devlink_linecard {
struct list_head list;
struct devlink *devlink;
unsigned int index;
- refcount_t refcount;
const struct devlink_linecard_ops *ops;
void *priv;
enum devlink_linecard_state state;
@@ -122,24 +76,6 @@ struct devlink_resource {
void *occ_get_priv;
};
-void *devlink_priv(struct devlink *devlink)
-{
- return &devlink->priv;
-}
-EXPORT_SYMBOL_GPL(devlink_priv);
-
-struct devlink *priv_to_devlink(void *priv)
-{
- return container_of(priv, struct devlink, priv);
-}
-EXPORT_SYMBOL_GPL(priv_to_devlink);
-
-struct device *devlink_to_dev(const struct devlink *devlink)
-{
- return devlink->dev;
-}
-EXPORT_SYMBOL_GPL(devlink_to_dev);
-
static struct devlink_dpipe_field devlink_dpipe_fields_ethernet[] = {
{
.name = "destination mac",
@@ -207,176 +143,6 @@ static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_
NLA_POLICY_BITFIELD32(DEVLINK_PORT_FN_CAPS_VALID_MASK),
};
-static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
- [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
-};
-
-static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
-#define DEVLINK_REGISTERED XA_MARK_1
-#define DEVLINK_UNREGISTERING XA_MARK_2
-
-/* devlink instances are open to the access from the user space after
- * devlink_register() call. Such logical barrier allows us to have certain
- * expectations related to locking.
- *
- * Before *_register() - we are in initialization stage and no parallel
- * access possible to the devlink instance. All drivers perform that phase
- * by implicitly holding device_lock.
- *
- * After *_register() - users and driver can access devlink instance at
- * the same time.
- */
-#define ASSERT_DEVLINK_REGISTERED(d) \
- WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
-#define ASSERT_DEVLINK_NOT_REGISTERED(d) \
- WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
-
-struct net *devlink_net(const struct devlink *devlink)
-{
- return read_pnet(&devlink->_net);
-}
-EXPORT_SYMBOL_GPL(devlink_net);
-
-static void __devlink_put_rcu(struct rcu_head *head)
-{
- struct devlink *devlink = container_of(head, struct devlink, rcu);
-
- complete(&devlink->comp);
-}
-
-void devlink_put(struct devlink *devlink)
-{
- if (refcount_dec_and_test(&devlink->refcount))
- /* Make sure unregister operation that may await the completion
- * is unblocked only after all users are after the end of
- * RCU grace period.
- */
- call_rcu(&devlink->rcu, __devlink_put_rcu);
-}
-
-struct devlink *__must_check devlink_try_get(struct devlink *devlink)
-{
- if (refcount_inc_not_zero(&devlink->refcount))
- return devlink;
- return NULL;
-}
-
-void devl_assert_locked(struct devlink *devlink)
-{
- lockdep_assert_held(&devlink->lock);
-}
-EXPORT_SYMBOL_GPL(devl_assert_locked);
-
-#ifdef CONFIG_LOCKDEP
-/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
-bool devl_lock_is_held(struct devlink *devlink)
-{
- return lockdep_is_held(&devlink->lock);
-}
-EXPORT_SYMBOL_GPL(devl_lock_is_held);
-#endif
-
-void devl_lock(struct devlink *devlink)
-{
- mutex_lock(&devlink->lock);
-}
-EXPORT_SYMBOL_GPL(devl_lock);
-
-int devl_trylock(struct devlink *devlink)
-{
- return mutex_trylock(&devlink->lock);
-}
-EXPORT_SYMBOL_GPL(devl_trylock);
-
-void devl_unlock(struct devlink *devlink)
-{
- mutex_unlock(&devlink->lock);
-}
-EXPORT_SYMBOL_GPL(devl_unlock);
-
-static struct devlink *
-devlinks_xa_find_get(struct net *net, unsigned long *indexp, xa_mark_t filter,
- void * (*xa_find_fn)(struct xarray *, unsigned long *,
- unsigned long, xa_mark_t))
-{
- struct devlink *devlink;
-
- rcu_read_lock();
-retry:
- devlink = xa_find_fn(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
- if (!devlink)
- goto unlock;
-
- /* In case devlink_unregister() was already called and "unregistering"
- * mark was set, do not allow to get a devlink reference here.
- * This prevents live-lock of devlink_unregister() wait for completion.
- */
- if (xa_get_mark(&devlinks, *indexp, DEVLINK_UNREGISTERING))
- goto retry;
-
- /* For a possible retry, the xa_find_after() should be always used */
- xa_find_fn = xa_find_after;
- if (!devlink_try_get(devlink))
- goto retry;
- if (!net_eq(devlink_net(devlink), net)) {
- devlink_put(devlink);
- goto retry;
- }
-unlock:
- rcu_read_unlock();
- return devlink;
-}
-
-static struct devlink *devlinks_xa_find_get_first(struct net *net,
- unsigned long *indexp,
- xa_mark_t filter)
-{
- return devlinks_xa_find_get(net, indexp, filter, xa_find);
-}
-
-static struct devlink *devlinks_xa_find_get_next(struct net *net,
- unsigned long *indexp,
- xa_mark_t filter)
-{
- return devlinks_xa_find_get(net, indexp, filter, xa_find_after);
-}
-
-/* Iterate over devlink pointers which were possible to get reference to.
- * devlink_put() needs to be called for each iterated devlink pointer
- * in loop body in order to release the reference.
- */
-#define devlinks_xa_for_each_get(net, index, devlink, filter) \
- for (index = 0, \
- devlink = devlinks_xa_find_get_first(net, &index, filter); \
- devlink; devlink = devlinks_xa_find_get_next(net, &index, filter))
-
-#define devlinks_xa_for_each_registered_get(net, index, devlink) \
- devlinks_xa_for_each_get(net, index, devlink, DEVLINK_REGISTERED)
-
-static struct devlink *devlink_get_from_attrs(struct net *net,
- struct nlattr **attrs)
-{
- struct devlink *devlink;
- unsigned long index;
- char *busname;
- char *devname;
-
- if (!attrs[DEVLINK_ATTR_BUS_NAME] || !attrs[DEVLINK_ATTR_DEV_NAME])
- return ERR_PTR(-EINVAL);
-
- busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
- devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
-
- devlinks_xa_for_each_registered_get(net, index, devlink) {
- if (strcmp(devlink->dev->bus->name, busname) == 0 &&
- strcmp(dev_name(devlink->dev), devname) == 0)
- return devlink;
- devlink_put(devlink);
- }
-
- return ERR_PTR(-ENODEV);
-}
-
#define ASSERT_DEVLINK_PORT_REGISTERED(devlink_port) \
WARN_ON_ONCE(!(devlink_port)->registered)
#define ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port) \
@@ -390,8 +156,8 @@ static struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
return xa_load(&devlink->ports, port_index);
}
-static struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
- struct nlattr **attrs)
+struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
+ struct nlattr **attrs)
{
if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
u32 port_index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
@@ -405,8 +171,8 @@ static struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
return ERR_PTR(-EINVAL);
}
-static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
- struct genl_info *info)
+struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
{
return devlink_port_get_from_attrs(devlink, info->attrs);
}
@@ -466,13 +232,13 @@ devlink_rate_node_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
return devlink_rate_node_get_by_name(devlink, rate_node_name);
}
-static struct devlink_rate *
+struct devlink_rate *
devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info)
{
return devlink_rate_node_get_from_attrs(devlink, info->attrs);
}
-static struct devlink_rate *
+struct devlink_rate *
devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
{
struct nlattr **attrs = info->attrs;
@@ -511,11 +277,7 @@ devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]);
struct devlink_linecard *linecard;
- mutex_lock(&devlink->linecards_lock);
linecard = devlink_linecard_get_by_index(devlink, linecard_index);
- if (linecard)
- refcount_inc(&linecard->refcount);
- mutex_unlock(&devlink->linecards_lock);
if (!linecard)
return ERR_PTR(-ENODEV);
return linecard;
@@ -523,20 +285,12 @@ devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
return ERR_PTR(-EINVAL);
}
-static struct devlink_linecard *
+struct devlink_linecard *
devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
{
return devlink_linecard_get_from_attrs(devlink, info->attrs);
}
-static void devlink_linecard_put(struct devlink_linecard *linecard)
-{
- if (refcount_dec_and_test(&linecard->refcount)) {
- mutex_destroy(&linecard->state_lock);
- kfree(linecard);
- }
-}
-
struct devlink_sb {
struct list_head list;
unsigned int index;
@@ -838,104 +592,6 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
return NULL;
}
-#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
-#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
-#define DEVLINK_NL_FLAG_NEED_RATE BIT(2)
-#define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
-#define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4)
-
-static int devlink_nl_pre_doit(const struct genl_split_ops *ops,
- struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink_linecard *linecard;
- struct devlink_port *devlink_port;
- struct devlink *devlink;
- int err;
-
- devlink = devlink_get_from_attrs(genl_info_net(info), info->attrs);
- if (IS_ERR(devlink))
- return PTR_ERR(devlink);
- devl_lock(devlink);
- info->user_ptr[0] = devlink;
- if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
- devlink_port = devlink_port_get_from_info(devlink, info);
- if (IS_ERR(devlink_port)) {
- err = PTR_ERR(devlink_port);
- goto unlock;
- }
- info->user_ptr[1] = devlink_port;
- } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
- devlink_port = devlink_port_get_from_info(devlink, info);
- if (!IS_ERR(devlink_port))
- info->user_ptr[1] = devlink_port;
- } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE) {
- struct devlink_rate *devlink_rate;
-
- devlink_rate = devlink_rate_get_from_info(devlink, info);
- if (IS_ERR(devlink_rate)) {
- err = PTR_ERR(devlink_rate);
- goto unlock;
- }
- info->user_ptr[1] = devlink_rate;
- } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE_NODE) {
- struct devlink_rate *rate_node;
-
- rate_node = devlink_rate_node_get_from_info(devlink, info);
- if (IS_ERR(rate_node)) {
- err = PTR_ERR(rate_node);
- goto unlock;
- }
- info->user_ptr[1] = rate_node;
- } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
- linecard = devlink_linecard_get_from_info(devlink, info);
- if (IS_ERR(linecard)) {
- err = PTR_ERR(linecard);
- goto unlock;
- }
- info->user_ptr[1] = linecard;
- }
- return 0;
-
-unlock:
- devl_unlock(devlink);
- devlink_put(devlink);
- return err;
-}
-
-static void devlink_nl_post_doit(const struct genl_split_ops *ops,
- struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink_linecard *linecard;
- struct devlink *devlink;
-
- devlink = info->user_ptr[0];
- if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
- linecard = info->user_ptr[1];
- devlink_linecard_put(linecard);
- }
- devl_unlock(devlink);
- devlink_put(devlink);
-}
-
-static struct genl_family devlink_nl_family;
-
-enum devlink_multicast_groups {
- DEVLINK_MCGRP_CONFIG,
-};
-
-static const struct genl_multicast_group devlink_nl_mcgrps[] = {
- [DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
-};
-
-static int devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
-{
- if (nla_put_string(msg, DEVLINK_ATTR_BUS_NAME, devlink->dev->bus->name))
- return -EMSGSIZE;
- if (nla_put_string(msg, DEVLINK_ATTR_DEV_NAME, dev_name(devlink->dev)))
- return -EMSGSIZE;
- return 0;
-}
-
static int devlink_nl_put_nested_handle(struct sk_buff *msg, struct devlink *devlink)
{
struct nlattr *nested_attr;
@@ -972,185 +628,6 @@ size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port)
+ nla_total_size(4); /* DEVLINK_ATTR_PORT_INDEX */
}
-struct devlink_reload_combination {
- enum devlink_reload_action action;
- enum devlink_reload_limit limit;
-};
-
-static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = {
- {
- /* can't reinitialize driver with no down time */
- .action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
- .limit = DEVLINK_RELOAD_LIMIT_NO_RESET,
- },
-};
-
-static bool
-devlink_reload_combination_is_invalid(enum devlink_reload_action action,
- enum devlink_reload_limit limit)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)
- if (devlink_reload_invalid_combinations[i].action == action &&
- devlink_reload_invalid_combinations[i].limit == limit)
- return true;
- return false;
-}
-
-static bool
-devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action)
-{
- return test_bit(action, &devlink->ops->reload_actions);
-}
-
-static bool
-devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit)
-{
- return test_bit(limit, &devlink->ops->reload_limits);
-}
-
-static int devlink_reload_stat_put(struct sk_buff *msg,
- enum devlink_reload_limit limit, u32 value)
-{
- struct nlattr *reload_stats_entry;
-
- reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY);
- if (!reload_stats_entry)
- return -EMSGSIZE;
-
- if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
- nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
- goto nla_put_failure;
- nla_nest_end(msg, reload_stats_entry);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, reload_stats_entry);
- return -EMSGSIZE;
-}
-
-static int devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
-{
- struct nlattr *reload_stats_attr, *act_info, *act_stats;
- int i, j, stat_idx;
- u32 value;
-
- if (!is_remote)
- reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS);
- else
- reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS);
-
- if (!reload_stats_attr)
- return -EMSGSIZE;
-
- for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
- if ((!is_remote &&
- !devlink_reload_action_is_supported(devlink, i)) ||
- i == DEVLINK_RELOAD_ACTION_UNSPEC)
- continue;
- act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO);
- if (!act_info)
- goto nla_put_failure;
-
- if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i))
- goto action_info_nest_cancel;
- act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS);
- if (!act_stats)
- goto action_info_nest_cancel;
-
- for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
- /* Remote stats are shown even if not locally supported.
- * Stats of actions with unspecified limit are shown
- * though drivers don't need to register unspecified
- * limit.
- */
- if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
- !devlink_reload_limit_is_supported(devlink, j)) ||
- devlink_reload_combination_is_invalid(i, j))
- continue;
-
- stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i;
- if (!is_remote)
- value = devlink->stats.reload_stats[stat_idx];
- else
- value = devlink->stats.remote_reload_stats[stat_idx];
- if (devlink_reload_stat_put(msg, j, value))
- goto action_stats_nest_cancel;
- }
- nla_nest_end(msg, act_stats);
- nla_nest_end(msg, act_info);
- }
- nla_nest_end(msg, reload_stats_attr);
- return 0;
-
-action_stats_nest_cancel:
- nla_nest_cancel(msg, act_stats);
-action_info_nest_cancel:
- nla_nest_cancel(msg, act_info);
-nla_put_failure:
- nla_nest_cancel(msg, reload_stats_attr);
- return -EMSGSIZE;
-}
-
-static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
- enum devlink_command cmd, u32 portid,
- u32 seq, int flags)
-{
- struct nlattr *dev_stats;
- void *hdr;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
- if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
- goto nla_put_failure;
-
- dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS);
- if (!dev_stats)
- goto nla_put_failure;
-
- if (devlink_reload_stats_put(msg, devlink, false))
- goto dev_stats_nest_cancel;
- if (devlink_reload_stats_put(msg, devlink, true))
- goto dev_stats_nest_cancel;
-
- nla_nest_end(msg, dev_stats);
- genlmsg_end(msg, hdr);
- return 0;
-
-dev_stats_nest_cancel:
- nla_nest_cancel(msg, dev_stats);
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
-{
- struct sk_buff *msg;
- int err;
-
- WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
- WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
- msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
static int devlink_nl_port_attrs_put(struct sk_buff *msg,
struct devlink_port *devlink_port)
{
@@ -1333,13 +810,12 @@ static int devlink_port_fn_state_fill(const struct devlink_ops *ops,
}
if (!devlink_port_fn_state_valid(state)) {
WARN_ON_ONCE(1);
- NL_SET_ERR_MSG_MOD(extack, "Invalid state read from driver");
+ NL_SET_ERR_MSG(extack, "Invalid state read from driver");
return -EINVAL;
}
if (!devlink_port_fn_opstate_valid(opstate)) {
WARN_ON_ONCE(1);
- NL_SET_ERR_MSG_MOD(extack,
- "Invalid operational state read from driver");
+ NL_SET_ERR_MSG(extack, "Invalid operational state read from driver");
return -EINVAL;
}
if (nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_STATE, state) ||
@@ -1537,47 +1013,40 @@ static void devlink_rate_notify(struct devlink_rate *devlink_rate,
0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
-static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb)
{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_rate *devlink_rate;
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
int idx = 0;
int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- devl_lock(devlink);
- list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
- enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
- u32 id = NETLINK_CB(cb->skb).portid;
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+ enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
+ u32 id = NETLINK_CB(cb->skb).portid;
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI, NULL);
- if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
+ if (idx < state->idx) {
idx++;
+ continue;
+ }
+ err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, NULL);
+ if (err) {
+ state->idx = idx;
+ break;
}
- devl_unlock(devlink);
- devlink_put(devlink);
+ idx++;
}
-out:
- if (err != -EMSGSIZE)
- return err;
- cb->args[0] = idx;
- return msg->len;
+ return err;
}
+const struct devlink_cmd devl_cmd_rate_get = {
+ .dump_one = devlink_nl_cmd_rate_get_dump_one,
+};
+
static int devlink_nl_cmd_rate_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
@@ -1612,58 +1081,6 @@ devlink_rate_is_parent_node(struct devlink_rate *devlink_rate,
return false;
}
-static int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct sk_buff *msg;
- int err;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
- info->snd_portid, info->snd_seq, 0);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
-{
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
- int idx = 0;
- int err;
-
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- if (idx < start) {
- idx++;
- devlink_put(devlink);
- continue;
- }
-
- devl_lock(devlink);
- err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI);
- devl_unlock(devlink);
- devlink_put(devlink);
-
- if (err)
- goto out;
- idx++;
- }
-out:
- cb->args[0] = idx;
- return msg->len;
-}
-
static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
@@ -1686,43 +1103,34 @@ static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
-static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb)
{
- struct devlink *devlink;
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_port *devlink_port;
- unsigned long index, port_index;
- int start = cb->args[0];
- int idx = 0;
- int err;
+ unsigned long port_index;
+ int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- devl_lock(devlink);
- xa_for_each(&devlink->ports, port_index, devlink_port) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_port_fill(msg, devlink_port,
- DEVLINK_CMD_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI, cb->extack);
- if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
- idx++;
+ xa_for_each_start(&devlink->ports, port_index, devlink_port, state->idx) {
+ err = devlink_nl_port_fill(msg, devlink_port,
+ DEVLINK_CMD_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, cb->extack);
+ if (err) {
+ state->idx = port_index;
+ break;
}
- devl_unlock(devlink);
- devlink_put(devlink);
}
-out:
- cb->args[0] = idx;
- return msg->len;
+
+ return err;
}
+const struct devlink_cmd devl_cmd_port_get = {
+ .dump_one = devlink_nl_cmd_port_get_dump_one,
+};
+
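For xarray-backed objects such as ports the saved cursor is the object's own index, so xa_for_each_start() resumes exactly at the stored slot and the list-style "skip the first N entries" loop disappears. A small userspace model of resuming a sparse-index walk (the table and macro are illustrative, not kernel API):

    #include <stdio.h>

    struct port { unsigned long index; const char *name; };

    /* Sparse port table: indices need not be contiguous. */
    static const struct port ports[] = {
        { 0, "p0" }, { 4, "p4" }, { 9, "p9" },
    };

    /* Models xa_for_each_start(): visit entries at or above 'start'. */
    #define for_each_port_start(p, start)                                  \
        for (size_t _i = 0; _i < sizeof(ports) / sizeof(ports[0]); _i++)   \
            if ((p = &ports[_i])->index >= (start))

    int main(void)
    {
        const struct port *p;
        unsigned long resume = 4;   /* state->idx saved by a previous pass */

        for_each_port_start(p, resume)
            printf("port %lu: %s\n", p->index, p->name);  /* p4, p9 */
        return 0;
    }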
static int devlink_port_type_set(struct devlink_port *devlink_port,
enum devlink_port_type port_type)
@@ -1756,16 +1164,16 @@ static int devlink_port_function_hw_addr_set(struct devlink_port *port,
hw_addr = nla_data(attr);
hw_addr_len = nla_len(attr);
if (hw_addr_len > MAX_ADDR_LEN) {
- NL_SET_ERR_MSG_MOD(extack, "Port function hardware address too long");
+ NL_SET_ERR_MSG(extack, "Port function hardware address too long");
return -EINVAL;
}
if (port->type == DEVLINK_PORT_TYPE_ETH) {
if (hw_addr_len != ETH_ALEN) {
- NL_SET_ERR_MSG_MOD(extack, "Address must be 6 bytes for Ethernet device");
+ NL_SET_ERR_MSG(extack, "Address must be 6 bytes for Ethernet device");
return -EINVAL;
}
if (!is_unicast_ether_addr(hw_addr)) {
- NL_SET_ERR_MSG_MOD(extack, "Non-unicast hardware address unsupported");
+ NL_SET_ERR_MSG(extack, "Non-unicast hardware address unsupported");
return -EINVAL;
}
}
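The NL_SET_ERR_MSG_MOD conversions repeated through the rest of the patch only drop the KBUILD_MODNAME prefix ("devlink: ") that the _MOD variant prepends; presumably the prefix adds nothing for an always-present netlink family. A compilable model of the two macros (the real ones store the string in struct netlink_ext_ack rather than printing it):

    #include <stdio.h>

    #define KBUILD_MODNAME "devlink"
    #define NL_SET_ERR_MSG(extack, msg)      ((void)(extack), puts(msg))
    #define NL_SET_ERR_MSG_MOD(extack, msg)  \
        NL_SET_ERR_MSG(extack, KBUILD_MODNAME ": " msg)

    int main(void)
    {
        void *extack = NULL;

        NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); /* devlink: Port cannot be split */
        NL_SET_ERR_MSG(extack, "Port cannot be split");     /* Port cannot be split */
        return 0;
    }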
@@ -1841,7 +1249,7 @@ static int devlink_port_function_set(struct devlink_port *port,
err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr,
devlink_function_nl_policy, extack);
if (err < 0) {
- NL_SET_ERR_MSG_MOD(extack, "Fail to parse port function attributes");
+ NL_SET_ERR_MSG(extack, "Fail to parse port function attributes");
return err;
}
@@ -1920,14 +1328,14 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
if (!devlink_port->attrs.splittable) {
/* Split ports cannot be split. */
if (devlink_port->attrs.split)
- NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split further");
+ NL_SET_ERR_MSG(info->extack, "Port cannot be split further");
else
- NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split");
+ NL_SET_ERR_MSG(info->extack, "Port cannot be split");
return -EINVAL;
}
if (count < 2 || !is_power_of_2(count) || count > devlink_port->attrs.lanes) {
- NL_SET_ERR_MSG_MOD(info->extack, "Invalid split count");
+ NL_SET_ERR_MSG(info->extack, "Invalid split count");
return -EINVAL;
}
@@ -1991,7 +1399,7 @@ static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb,
if (!info->attrs[DEVLINK_ATTR_PORT_FLAVOUR] ||
!info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]) {
- NL_SET_ERR_MSG_MOD(extack, "Port flavour or PCI PF are not specified");
+ NL_SET_ERR_MSG(extack, "Port flavour or PCI PF are not specified");
return -EINVAL;
}
new_attrs.flavour = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_FLAVOUR]);
@@ -2039,7 +1447,7 @@ static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb,
return -EOPNOTSUPP;
if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_INDEX)) {
- NL_SET_ERR_MSG_MOD(extack, "Port index is not specified");
+ NL_SET_ERR_MSG(extack, "Port index is not specified");
return -EINVAL;
}
port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
@@ -2081,13 +1489,13 @@ devlink_nl_rate_parent_node_set(struct devlink_rate *devlink_rate,
return -ENODEV;
if (parent == devlink_rate) {
- NL_SET_ERR_MSG_MOD(info->extack, "Parent to self is not allowed");
+ NL_SET_ERR_MSG(info->extack, "Parent to self is not allowed");
return -EINVAL;
}
if (devlink_rate_is_node(devlink_rate) &&
devlink_rate_is_parent_node(devlink_rate, parent->parent)) {
- NL_SET_ERR_MSG_MOD(info->extack, "Node is already a parent of parent node.");
+ NL_SET_ERR_MSG(info->extack, "Node is already a parent of parent node.");
return -EEXIST;
}
@@ -2196,16 +1604,16 @@ static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
if (type == DEVLINK_RATE_TYPE_LEAF) {
if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_leaf_tx_share_set) {
- NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the leafs");
+ NL_SET_ERR_MSG(info->extack, "TX share set isn't supported for the leafs");
return false;
}
if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_leaf_tx_max_set) {
- NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the leafs");
+ NL_SET_ERR_MSG(info->extack, "TX max set isn't supported for the leafs");
return false;
}
if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
!ops->rate_leaf_parent_set) {
- NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the leafs");
+ NL_SET_ERR_MSG(info->extack, "Parent set isn't supported for the leafs");
return false;
}
if (attrs[DEVLINK_ATTR_RATE_TX_PRIORITY] && !ops->rate_leaf_tx_priority_set) {
@@ -2222,16 +1630,16 @@ static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
}
} else if (type == DEVLINK_RATE_TYPE_NODE) {
if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_node_tx_share_set) {
- NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the nodes");
+ NL_SET_ERR_MSG(info->extack, "TX share set isn't supported for the nodes");
return false;
}
if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_node_tx_max_set) {
- NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the nodes");
+ NL_SET_ERR_MSG(info->extack, "TX max set isn't supported for the nodes");
return false;
}
if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
!ops->rate_node_parent_set) {
- NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the nodes");
+ NL_SET_ERR_MSG(info->extack, "Parent set isn't supported for the nodes");
return false;
}
if (attrs[DEVLINK_ATTR_RATE_TX_PRIORITY] && !ops->rate_node_tx_priority_set) {
@@ -2282,7 +1690,7 @@ static int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb,
ops = devlink->ops;
if (!ops || !ops->rate_node_new || !ops->rate_node_del) {
- NL_SET_ERR_MSG_MOD(info->extack, "Rate nodes aren't supported");
+ NL_SET_ERR_MSG(info->extack, "Rate nodes aren't supported");
return -EOPNOTSUPP;
}
@@ -2338,7 +1746,7 @@ static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb,
int err;
if (refcount_read(&rate_node->refcnt) > 1) {
- NL_SET_ERR_MSG_MOD(info->extack, "Node has children. Cannot delete node.");
+ NL_SET_ERR_MSG(info->extack, "Node has children. Cannot delete node.");
return -EBUSY;
}
@@ -2465,46 +1873,42 @@ static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
-static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int devlink_nl_cmd_linecard_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb)
{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_linecard *linecard;
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
int idx = 0;
- int err;
+ int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- mutex_lock(&devlink->linecards_lock);
- list_for_each_entry(linecard, &devlink->linecard_list, list) {
- if (idx < start) {
- idx++;
- continue;
- }
- mutex_lock(&linecard->state_lock);
- err = devlink_nl_linecard_fill(msg, devlink, linecard,
- DEVLINK_CMD_LINECARD_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI,
- cb->extack);
- mutex_unlock(&linecard->state_lock);
- if (err) {
- mutex_unlock(&devlink->linecards_lock);
- devlink_put(devlink);
- goto out;
- }
+ list_for_each_entry(linecard, &devlink->linecard_list, list) {
+ if (idx < state->idx) {
idx++;
+ continue;
}
- mutex_unlock(&devlink->linecards_lock);
- devlink_put(devlink);
+ mutex_lock(&linecard->state_lock);
+ err = devlink_nl_linecard_fill(msg, devlink, linecard,
+ DEVLINK_CMD_LINECARD_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
+ cb->extack);
+ mutex_unlock(&linecard->state_lock);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
}
-out:
- cb->args[0] = idx;
- return msg->len;
+
+ return err;
}
+const struct devlink_cmd devl_cmd_linecard_get = {
+ .dump_one = devlink_nl_cmd_linecard_get_dump_one,
+};
+
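As in the rate hunk, the devl_lock()/devl_unlock()/devlink_put() boilerplate disappears from the callback: the common iterator (added elsewhere in the series, not shown here) presumably holds the instance lock and reference around each dump_one call, leaving the callback only object-local locking such as linecard->state_lock. A sketch of that ownership split, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    struct linecard { pthread_mutex_t state_lock; int id; };
    struct instance { pthread_mutex_t lock; struct linecard lc; };

    static int dump_one(struct instance *inst)      /* inst->lock held */
    {
        pthread_mutex_lock(&inst->lc.state_lock);   /* object-local lock only */
        printf("linecard %d\n", inst->lc.id);
        pthread_mutex_unlock(&inst->lc.state_lock);
        return 0;
    }

    static int dump(struct instance *inst)
    {
        int err;

        pthread_mutex_lock(&inst->lock);            /* iterator owns this */
        err = dump_one(inst);
        pthread_mutex_unlock(&inst->lock);
        return err;
    }

    int main(void)
    {
        struct instance inst = {
            PTHREAD_MUTEX_INITIALIZER,
            { PTHREAD_MUTEX_INITIALIZER, 0 },
        };

        return dump(&inst);
    }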
static struct devlink_linecard_type *
devlink_linecard_type_lookup(struct devlink_linecard *linecard,
const char *type)
@@ -2530,26 +1934,26 @@ static int devlink_linecard_type_set(struct devlink_linecard *linecard,
mutex_lock(&linecard->state_lock);
if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
- NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
+ NL_SET_ERR_MSG(extack, "Line card is currently being provisioned");
err = -EBUSY;
goto out;
}
if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
- NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
+ NL_SET_ERR_MSG(extack, "Line card is currently being unprovisioned");
err = -EBUSY;
goto out;
}
linecard_type = devlink_linecard_type_lookup(linecard, type);
if (!linecard_type) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported line card type provided");
+ NL_SET_ERR_MSG(extack, "Unsupported line card type provided");
err = -EINVAL;
goto out;
}
if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED &&
linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
- NL_SET_ERR_MSG_MOD(extack, "Line card already provisioned");
+ NL_SET_ERR_MSG(extack, "Line card already provisioned");
err = -EBUSY;
/* Check if the line card is provisioned in the same
* way the user asks. In case it is, make the operation
@@ -2593,12 +1997,12 @@ static int devlink_linecard_type_unset(struct devlink_linecard *linecard,
mutex_lock(&linecard->state_lock);
if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
- NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
+ NL_SET_ERR_MSG(extack, "Line card is currently being provisioned");
err = -EBUSY;
goto out;
}
if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
- NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
+ NL_SET_ERR_MSG(extack, "Line card is currently being unprovisioned");
err = -EBUSY;
goto out;
}
@@ -2611,7 +2015,7 @@ static int devlink_linecard_type_unset(struct devlink_linecard *linecard,
}
if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) {
- NL_SET_ERR_MSG_MOD(extack, "Line card is not provisioned");
+ NL_SET_ERR_MSG(extack, "Line card is not provisioned");
err = 0;
goto out;
}
@@ -2727,43 +2131,39 @@ static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
-static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb)
{
- struct devlink *devlink;
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_sb *devlink_sb;
- int start = cb->args[0];
- unsigned long index;
int idx = 0;
- int err;
+ int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- devl_lock(devlink);
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
- DEVLINK_CMD_SB_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
- if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ if (idx < state->idx) {
idx++;
+ continue;
+ }
+ err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+ DEVLINK_CMD_SB_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ state->idx = idx;
+ break;
}
- devl_unlock(devlink);
- devlink_put(devlink);
+ idx++;
}
-out:
- cb->args[0] = idx;
- return msg->len;
+
+ return err;
}
+const struct devlink_cmd devl_cmd_sb_get = {
+ .dump_one = devlink_nl_cmd_sb_get_dump_one,
+};
+
static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
struct devlink_sb *devlink_sb,
u16 pool_index, enum devlink_command cmd,
@@ -2869,46 +2269,39 @@ static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
return 0;
}
-static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_sb_pool_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb)
{
- struct devlink *devlink;
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_sb *devlink_sb;
- int start = cb->args[0];
- unsigned long index;
- int idx = 0;
int err = 0;
+ int idx = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- if (!devlink->ops->sb_pool_get)
- goto retry;
+ if (!devlink->ops->sb_pool_get)
+ return 0;
- devl_lock(devlink);
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
- devlink_sb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq);
- if (err == -EOPNOTSUPP) {
- err = 0;
- } else if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ err = __sb_pool_get_dumpit(msg, state->idx, &idx,
+ devlink, devlink_sb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq);
+ if (err == -EOPNOTSUPP) {
+ err = 0;
+ } else if (err) {
+ state->idx = idx;
+ break;
}
- devl_unlock(devlink);
-retry:
- devlink_put(devlink);
}
-out:
- if (err != -EMSGSIZE)
- return err;
- cb->args[0] = idx;
- return msg->len;
+ return err;
}
+const struct devlink_cmd devl_cmd_sb_pool_get = {
+ .dump_one = devlink_nl_cmd_sb_pool_get_dump_one,
+};
+
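The shared-buffer dumps keep their convention that missing support is not an error: an absent op means "nothing to dump" for that instance, and a per-entry -EOPNOTSUPP is swallowed so the walk continues across heterogeneous devices. A compilable model of that skip-versus-stop distinction (the error value is a stand-in):

    #include <stdio.h>

    #define EOPNOTSUPP 95

    /* Per-instance fill: instance 1 lacks the sb_pool_get-style op. */
    static int sb_pool_dump_one(int instance)
    {
        if (instance == 1)
            return -EOPNOTSUPP;
        printf("instance %d: pool dumped\n", instance);
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            int err = sb_pool_dump_one(i);

            if (err == -EOPNOTSUPP)
                err = 0;        /* unsupported => skip, keep dumping */
            else if (err)
                return err;     /* real failure => stop, resume later */
        }
        return 0;
    }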
static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
@@ -3084,46 +2477,39 @@ static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
return 0;
}
-static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_sb_port_pool_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb)
{
- struct devlink *devlink;
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_sb *devlink_sb;
- int start = cb->args[0];
- unsigned long index;
int idx = 0;
int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- if (!devlink->ops->sb_port_pool_get)
- goto retry;
-
- devl_lock(devlink);
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- err = __sb_port_pool_get_dumpit(msg, start, &idx,
- devlink, devlink_sb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq);
- if (err == -EOPNOTSUPP) {
- err = 0;
- } else if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
+ if (!devlink->ops->sb_port_pool_get)
+ return 0;
+
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ err = __sb_port_pool_get_dumpit(msg, state->idx, &idx,
+ devlink, devlink_sb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq);
+ if (err == -EOPNOTSUPP) {
+ err = 0;
+ } else if (err) {
+ state->idx = idx;
+ break;
}
- devl_unlock(devlink);
-retry:
- devlink_put(devlink);
}
-out:
- if (err != -EMSGSIZE)
- return err;
- cb->args[0] = idx;
- return msg->len;
+ return err;
}
+const struct devlink_cmd devl_cmd_sb_port_pool_get = {
+ .dump_one = devlink_nl_cmd_sb_port_pool_get_dump_one,
+};
+
static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 threshold,
@@ -3327,47 +2713,38 @@ static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
}
static int
-devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+devlink_nl_cmd_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb)
{
- struct devlink *devlink;
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_sb *devlink_sb;
- int start = cb->args[0];
- unsigned long index;
int idx = 0;
int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- if (!devlink->ops->sb_tc_pool_bind_get)
- goto retry;
+ if (!devlink->ops->sb_tc_pool_bind_get)
+ return 0;
- devl_lock(devlink);
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
- devlink,
- devlink_sb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq);
- if (err == -EOPNOTSUPP) {
- err = 0;
- } else if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ err = __sb_tc_pool_bind_get_dumpit(msg, state->idx, &idx,
+ devlink, devlink_sb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq);
+ if (err == -EOPNOTSUPP) {
+ err = 0;
+ } else if (err) {
+ state->idx = idx;
+ break;
}
- devl_unlock(devlink);
-retry:
- devlink_put(devlink);
}
-out:
- if (err != -EMSGSIZE)
- return err;
- cb->args[0] = idx;
- return msg->len;
+ return err;
}
+const struct devlink_cmd devl_cmd_sb_tc_pool_bind_get = {
+ .dump_one = devlink_nl_cmd_sb_tc_pool_bind_get_dump_one,
+};
+
static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
@@ -3455,142 +2832,19 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
return -EOPNOTSUPP;
}
-static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
- enum devlink_command cmd, u32 portid,
- u32 seq, int flags)
-{
- const struct devlink_ops *ops = devlink->ops;
- enum devlink_eswitch_encap_mode encap_mode;
- u8 inline_mode;
- void *hdr;
- int err = 0;
- u16 mode;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- err = devlink_nl_put_handle(msg, devlink);
- if (err)
- goto nla_put_failure;
-
- if (ops->eswitch_mode_get) {
- err = ops->eswitch_mode_get(devlink, &mode);
- if (err)
- goto nla_put_failure;
- err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
- if (err)
- goto nla_put_failure;
- }
-
- if (ops->eswitch_inline_mode_get) {
- err = ops->eswitch_inline_mode_get(devlink, &inline_mode);
- if (err)
- goto nla_put_failure;
- err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE,
- inline_mode);
- if (err)
- goto nla_put_failure;
- }
-
- if (ops->eswitch_encap_mode_get) {
- err = ops->eswitch_encap_mode_get(devlink, &encap_mode);
- if (err)
- goto nla_put_failure;
- err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode);
- if (err)
- goto nla_put_failure;
- }
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return err;
-}
-
-static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct sk_buff *msg;
- int err;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET,
- info->snd_portid, info->snd_seq, 0);
-
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
- struct netlink_ext_ack *extack)
+int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct devlink_rate *devlink_rate;
list_for_each_entry(devlink_rate, &devlink->rate_list, list)
if (devlink_rate_is_node(devlink_rate)) {
- NL_SET_ERR_MSG_MOD(extack, "Rate node(s) exists.");
+ NL_SET_ERR_MSG(extack, "Rate node(s) exists.");
return -EBUSY;
}
return 0;
}
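devlink_rate_nodes_check() loses its static linkage while its only caller in this file, devlink_nl_cmd_eswitch_set_doit(), is deleted just below; presumably the eswitch handlers move to another compilation unit within net/devlink and reach the helper through a directory-internal header. A generic sketch of that split (all file and symbol names illustrative):

    /* internal.h (illustrative): declaration shared inside the directory */
    int shared_check(int mode);

    /* leftover.c (illustrative): the helper stays here, minus `static` */
    int shared_check(int mode)
    {
        return mode ? -16 /* -EBUSY stand-in */ : 0;
    }

    /* dev.c (illustrative): the relocated handler keeps calling it */
    int handler_eswitch_set(int mode)
    {
        return shared_check(mode);
    }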
-static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- const struct devlink_ops *ops = devlink->ops;
- enum devlink_eswitch_encap_mode encap_mode;
- u8 inline_mode;
- int err = 0;
- u16 mode;
-
- if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
- if (!ops->eswitch_mode_set)
- return -EOPNOTSUPP;
- mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
- err = devlink_rate_nodes_check(devlink, mode, info->extack);
- if (err)
- return err;
- err = ops->eswitch_mode_set(devlink, mode, info->extack);
- if (err)
- return err;
- }
-
- if (info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) {
- if (!ops->eswitch_inline_mode_set)
- return -EOPNOTSUPP;
- inline_mode = nla_get_u8(
- info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
- err = ops->eswitch_inline_mode_set(devlink, inline_mode,
- info->extack);
- if (err)
- return err;
- }
-
- if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) {
- if (!ops->eswitch_encap_mode_set)
- return -EOPNOTSUPP;
- encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
- err = ops->eswitch_encap_mode_set(devlink, encap_mode,
- info->extack);
- if (err)
- return err;
- }
-
- return 0;
-}
-
int devlink_dpipe_match_put(struct sk_buff *skb,
struct devlink_dpipe_match *match)
{
@@ -4351,18 +3605,18 @@ devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
int err = 0;
if (size > resource->size_params.size_max) {
- NL_SET_ERR_MSG_MOD(extack, "Size larger than maximum");
+ NL_SET_ERR_MSG(extack, "Size larger than maximum");
err = -EINVAL;
}
if (size < resource->size_params.size_min) {
- NL_SET_ERR_MSG_MOD(extack, "Size smaller than minimum");
+ NL_SET_ERR_MSG(extack, "Size smaller than minimum");
err = -EINVAL;
}
div64_u64_rem(size, resource->size_params.size_granularity, &reminder);
if (reminder) {
- NL_SET_ERR_MSG_MOD(extack, "Wrong granularity");
+ NL_SET_ERR_MSG(extack, "Wrong granularity");
err = -EINVAL;
}
@@ -4561,10 +3815,9 @@ static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
}
-static int
-devlink_resources_validate(struct devlink *devlink,
- struct devlink_resource *resource,
- struct genl_info *info)
+int devlink_resources_validate(struct devlink *devlink,
+ struct devlink_resource *resource,
+ struct genl_info *info)
{
struct list_head *resource_list;
int err = 0;
@@ -4584,740 +3837,6 @@ devlink_resources_validate(struct devlink *devlink,
return err;
}
-static struct net *devlink_netns_get(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID];
- struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD];
- struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID];
- struct net *net;
-
- if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) {
- NL_SET_ERR_MSG_MOD(info->extack, "multiple netns identifying attributes specified");
- return ERR_PTR(-EINVAL);
- }
-
- if (netns_pid_attr) {
- net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr));
- } else if (netns_fd_attr) {
- net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr));
- } else if (netns_id_attr) {
- net = get_net_ns_by_id(sock_net(skb->sk),
- nla_get_u32(netns_id_attr));
- if (!net)
- net = ERR_PTR(-EINVAL);
- } else {
- WARN_ON(1);
- net = ERR_PTR(-EINVAL);
- }
- if (IS_ERR(net)) {
- NL_SET_ERR_MSG_MOD(info->extack, "Unknown network namespace");
- return ERR_PTR(-EINVAL);
- }
- if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
- put_net(net);
- return ERR_PTR(-EPERM);
- }
- return net;
-}
-
-static void devlink_param_notify(struct devlink *devlink,
- unsigned int port_index,
- struct devlink_param_item *param_item,
- enum devlink_command cmd);
-
-static void devlink_ns_change_notify(struct devlink *devlink,
- struct net *dest_net, struct net *curr_net,
- bool new)
-{
- struct devlink_param_item *param_item;
- enum devlink_command cmd;
-
- /* Userspace needs to be notified about devlink objects
- * removed from original and entering new network namespace.
- * The rest of the devlink objects are re-created during
- * reload process so the notifications are generated separatelly.
- */
-
- if (!dest_net || net_eq(dest_net, curr_net))
- return;
-
- if (new)
- devlink_notify(devlink, DEVLINK_CMD_NEW);
-
- cmd = new ? DEVLINK_CMD_PARAM_NEW : DEVLINK_CMD_PARAM_DEL;
- list_for_each_entry(param_item, &devlink->param_list, list)
- devlink_param_notify(devlink, 0, param_item, cmd);
-
- if (!new)
- devlink_notify(devlink, DEVLINK_CMD_DEL);
-}
-
-static bool devlink_reload_supported(const struct devlink_ops *ops)
-{
- return ops->reload_down && ops->reload_up;
-}
-
-static void devlink_reload_failed_set(struct devlink *devlink,
- bool reload_failed)
-{
- if (devlink->reload_failed == reload_failed)
- return;
- devlink->reload_failed = reload_failed;
- devlink_notify(devlink, DEVLINK_CMD_NEW);
-}
-
-bool devlink_is_reload_failed(const struct devlink *devlink)
-{
- return devlink->reload_failed;
-}
-EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
-
-static void
-__devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats,
- enum devlink_reload_limit limit, u32 actions_performed)
-{
- unsigned long actions = actions_performed;
- int stat_idx;
- int action;
-
- for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) {
- stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action;
- reload_stats[stat_idx]++;
- }
- devlink_notify(devlink, DEVLINK_CMD_NEW);
-}
-
-static void
-devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit,
- u32 actions_performed)
-{
- __devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit,
- actions_performed);
-}
-
-/**
- * devlink_remote_reload_actions_performed - Update devlink on reload actions
- * performed which are not a direct result of devlink reload call.
- *
- * This should be called by a driver after performing reload actions in case it was not
- * a result of devlink reload call. For example fw_activate was performed as a result
- * of devlink reload triggered fw_activate on another host.
- * The motivation for this function is to keep data on reload actions performed on this
- * function whether it was done due to direct devlink reload call or not.
- *
- * @devlink: devlink
- * @limit: reload limit
- * @actions_performed: bitmask of actions performed
- */
-void devlink_remote_reload_actions_performed(struct devlink *devlink,
- enum devlink_reload_limit limit,
- u32 actions_performed)
-{
- if (WARN_ON(!actions_performed ||
- actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
- actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) ||
- limit > DEVLINK_RELOAD_LIMIT_MAX))
- return;
-
- __devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit,
- actions_performed);
-}
-EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed);
-
-static int devlink_reload(struct devlink *devlink, struct net *dest_net,
- enum devlink_reload_action action, enum devlink_reload_limit limit,
- u32 *actions_performed, struct netlink_ext_ack *extack)
-{
- u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
- struct net *curr_net;
- int err;
-
- memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
- sizeof(remote_reload_stats));
-
- curr_net = devlink_net(devlink);
- devlink_ns_change_notify(devlink, dest_net, curr_net, false);
- err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
- if (err)
- return err;
-
- if (dest_net && !net_eq(dest_net, curr_net))
- write_pnet(&devlink->_net, dest_net);
-
- err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
- devlink_reload_failed_set(devlink, !!err);
- if (err)
- return err;
-
- devlink_ns_change_notify(devlink, dest_net, curr_net, true);
- WARN_ON(!(*actions_performed & BIT(action)));
- /* Catch driver on updating the remote action within devlink reload */
- WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
- sizeof(remote_reload_stats)));
- devlink_reload_stats_update(devlink, limit, *actions_performed);
- return 0;
-}
-
-static int
-devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed,
- enum devlink_command cmd, struct genl_info *info)
-{
- struct sk_buff *msg;
- void *hdr;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd);
- if (!hdr)
- goto free_msg;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
-
- if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed,
- actions_performed))
- goto nla_put_failure;
- genlmsg_end(msg, hdr);
-
- return genlmsg_reply(msg, info);
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
-free_msg:
- nlmsg_free(msg);
- return -EMSGSIZE;
-}
-
-static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- enum devlink_reload_action action;
- enum devlink_reload_limit limit;
- struct net *dest_net = NULL;
- u32 actions_performed;
- int err;
-
- if (!(devlink->features & DEVLINK_F_RELOAD))
- return -EOPNOTSUPP;
-
- err = devlink_resources_validate(devlink, NULL, info);
- if (err) {
- NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
- return err;
- }
-
- if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
- action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
- else
- action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
-
- if (!devlink_reload_action_is_supported(devlink, action)) {
- NL_SET_ERR_MSG_MOD(info->extack,
- "Requested reload action is not supported by the driver");
- return -EOPNOTSUPP;
- }
-
- limit = DEVLINK_RELOAD_LIMIT_UNSPEC;
- if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) {
- struct nla_bitfield32 limits;
- u32 limits_selected;
-
- limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]);
- limits_selected = limits.value & limits.selector;
- if (!limits_selected) {
- NL_SET_ERR_MSG_MOD(info->extack, "Invalid limit selected");
- return -EINVAL;
- }
- for (limit = 0 ; limit <= DEVLINK_RELOAD_LIMIT_MAX ; limit++)
- if (limits_selected & BIT(limit))
- break;
- /* UAPI enables multiselection, but currently it is not used */
- if (limits_selected != BIT(limit)) {
- NL_SET_ERR_MSG_MOD(info->extack,
- "Multiselection of limit is not supported");
- return -EOPNOTSUPP;
- }
- if (!devlink_reload_limit_is_supported(devlink, limit)) {
- NL_SET_ERR_MSG_MOD(info->extack,
- "Requested limit is not supported by the driver");
- return -EOPNOTSUPP;
- }
- if (devlink_reload_combination_is_invalid(action, limit)) {
- NL_SET_ERR_MSG_MOD(info->extack,
- "Requested limit is invalid for this action");
- return -EINVAL;
- }
- }
- if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
- info->attrs[DEVLINK_ATTR_NETNS_FD] ||
- info->attrs[DEVLINK_ATTR_NETNS_ID]) {
- dest_net = devlink_netns_get(skb, info);
- if (IS_ERR(dest_net))
- return PTR_ERR(dest_net);
- }
-
- err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
-
- if (dest_net)
- put_net(dest_net);
-
- if (err)
- return err;
- /* For backward compatibility generate reply only if attributes used by user */
- if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS])
- return 0;
-
- return devlink_nl_reload_actions_performed_snd(devlink, actions_performed,
- DEVLINK_CMD_RELOAD, info);
-}
-
-static int devlink_nl_flash_update_fill(struct sk_buff *msg,
- struct devlink *devlink,
- enum devlink_command cmd,
- struct devlink_flash_notify *params)
-{
- void *hdr;
-
- hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
-
- if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS)
- goto out;
-
- if (params->status_msg &&
- nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG,
- params->status_msg))
- goto nla_put_failure;
- if (params->component &&
- nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
- params->component))
- goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
- params->done, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
- params->total, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
- params->timeout, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
-out:
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-static void __devlink_flash_update_notify(struct devlink *devlink,
- enum devlink_command cmd,
- struct devlink_flash_notify *params)
-{
- struct sk_buff *msg;
- int err;
-
- WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
- cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
- cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
-
- if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
- return;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_flash_update_fill(msg, devlink, cmd, params);
- if (err)
- goto out_free_msg;
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
- msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
- return;
-
-out_free_msg:
- nlmsg_free(msg);
-}
-
-static void devlink_flash_update_begin_notify(struct devlink *devlink)
-{
- struct devlink_flash_notify params = {};
-
- __devlink_flash_update_notify(devlink,
- DEVLINK_CMD_FLASH_UPDATE,
- &params);
-}
-
-static void devlink_flash_update_end_notify(struct devlink *devlink)
-{
- struct devlink_flash_notify params = {};
-
- __devlink_flash_update_notify(devlink,
- DEVLINK_CMD_FLASH_UPDATE_END,
- &params);
-}
-
-void devlink_flash_update_status_notify(struct devlink *devlink,
- const char *status_msg,
- const char *component,
- unsigned long done,
- unsigned long total)
-{
- struct devlink_flash_notify params = {
- .status_msg = status_msg,
- .component = component,
- .done = done,
- .total = total,
- };
-
- __devlink_flash_update_notify(devlink,
- DEVLINK_CMD_FLASH_UPDATE_STATUS,
- &params);
-}
-EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify);
-
-void devlink_flash_update_timeout_notify(struct devlink *devlink,
- const char *status_msg,
- const char *component,
- unsigned long timeout)
-{
- struct devlink_flash_notify params = {
- .status_msg = status_msg,
- .component = component,
- .timeout = timeout,
- };
-
- __devlink_flash_update_notify(devlink,
- DEVLINK_CMD_FLASH_UPDATE_STATUS,
- &params);
-}
-EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
-
-struct devlink_info_req {
- struct sk_buff *msg;
- void (*version_cb)(const char *version_name,
- enum devlink_info_version_type version_type,
- void *version_cb_priv);
- void *version_cb_priv;
-};
-
-struct devlink_flash_component_lookup_ctx {
- const char *lookup_name;
- bool lookup_name_found;
-};
-
-static void
-devlink_flash_component_lookup_cb(const char *version_name,
- enum devlink_info_version_type version_type,
- void *version_cb_priv)
-{
- struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv;
-
- if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT ||
- lookup_ctx->lookup_name_found)
- return;
-
- lookup_ctx->lookup_name_found =
- !strcmp(lookup_ctx->lookup_name, version_name);
-}
-
-static int devlink_flash_component_get(struct devlink *devlink,
- struct nlattr *nla_component,
- const char **p_component,
- struct netlink_ext_ack *extack)
-{
- struct devlink_flash_component_lookup_ctx lookup_ctx = {};
- struct devlink_info_req req = {};
- const char *component;
- int ret;
-
- if (!nla_component)
- return 0;
-
- component = nla_data(nla_component);
-
- if (!devlink->ops->info_get) {
- NL_SET_ERR_MSG_ATTR(extack, nla_component,
- "component update is not supported by this device");
- return -EOPNOTSUPP;
- }
-
- lookup_ctx.lookup_name = component;
- req.version_cb = devlink_flash_component_lookup_cb;
- req.version_cb_priv = &lookup_ctx;
-
- ret = devlink->ops->info_get(devlink, &req, NULL);
- if (ret)
- return ret;
-
- if (!lookup_ctx.lookup_name_found) {
- NL_SET_ERR_MSG_ATTR(extack, nla_component,
- "selected component is not supported by this device");
- return -EINVAL;
- }
- *p_component = component;
- return 0;
-}
-
-static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct nlattr *nla_overwrite_mask, *nla_file_name;
- struct devlink_flash_update_params params = {};
- struct devlink *devlink = info->user_ptr[0];
- const char *file_name;
- u32 supported_params;
- int ret;
-
- if (!devlink->ops->flash_update)
- return -EOPNOTSUPP;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME))
- return -EINVAL;
-
- ret = devlink_flash_component_get(devlink,
- info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT],
- &params.component, info->extack);
- if (ret)
- return ret;
-
- supported_params = devlink->ops->supported_flash_update_params;
-
- nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK];
- if (nla_overwrite_mask) {
- struct nla_bitfield32 sections;
-
- if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) {
- NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask,
- "overwrite settings are not supported by this device");
- return -EOPNOTSUPP;
- }
- sections = nla_get_bitfield32(nla_overwrite_mask);
- params.overwrite_mask = sections.value & sections.selector;
- }
-
- nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME];
- file_name = nla_data(nla_file_name);
- ret = request_firmware(&params.fw, file_name, devlink->dev);
- if (ret) {
- NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name, "failed to locate the requested firmware file");
- return ret;
- }
-
- devlink_flash_update_begin_notify(devlink);
- ret = devlink->ops->flash_update(devlink, &params, info->extack);
- devlink_flash_update_end_notify(devlink);
-
- release_firmware(params.fw);
-
- return ret;
-}
-
-static int
-devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink,
- u32 portid, u32 seq, int flags,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *selftests;
- void *hdr;
- int err;
- int i;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags,
- DEVLINK_CMD_SELFTESTS_GET);
- if (!hdr)
- return -EMSGSIZE;
-
- err = -EMSGSIZE;
- if (devlink_nl_put_handle(msg, devlink))
- goto err_cancel_msg;
-
- selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
- if (!selftests)
- goto err_cancel_msg;
-
- for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
- i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
- if (devlink->ops->selftest_check(devlink, i, extack)) {
- err = nla_put_flag(msg, i);
- if (err)
- goto err_cancel_msg;
- }
- }
-
- nla_nest_end(msg, selftests);
- genlmsg_end(msg, hdr);
- return 0;
-
-err_cancel_msg:
- genlmsg_cancel(msg, hdr);
- return err;
-}
-
-static int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct sk_buff *msg;
- int err;
-
- if (!devlink->ops->selftest_check)
- return -EOPNOTSUPP;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid,
- info->snd_seq, 0, info->extack);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int devlink_nl_cmd_selftests_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
-{
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
- int idx = 0;
- int err = 0;
-
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- if (idx < start || !devlink->ops->selftest_check)
- goto inc;
-
- devl_lock(devlink);
- err = devlink_nl_selftests_fill(msg, devlink,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->extack);
- devl_unlock(devlink);
- if (err) {
- devlink_put(devlink);
- break;
- }
-inc:
- idx++;
- devlink_put(devlink);
- }
-
- if (err != -EMSGSIZE)
- return err;
-
- cb->args[0] = idx;
- return msg->len;
-}
-
-static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
- enum devlink_selftest_status test_status)
-{
- struct nlattr *result_attr;
-
- result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT);
- if (!result_attr)
- return -EMSGSIZE;
-
- if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) ||
- nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS,
- test_status))
- goto nla_put_failure;
-
- nla_nest_end(skb, result_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(skb, result_attr);
- return -EMSGSIZE;
-}
-
-static int devlink_nl_cmd_selftests_run(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
- struct devlink *devlink = info->user_ptr[0];
- struct nlattr *attrs, *selftests;
- struct sk_buff *msg;
- void *hdr;
- int err;
- int i;
-
- if (!devlink->ops->selftest_run || !devlink->ops->selftest_check)
- return -EOPNOTSUPP;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS))
- return -EINVAL;
-
- attrs = info->attrs[DEVLINK_ATTR_SELFTESTS];
-
- err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs,
- devlink_selftest_nl_policy, info->extack);
- if (err < 0)
- return err;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = -EMSGSIZE;
- hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
- &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN);
- if (!hdr)
- goto free_msg;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto genlmsg_cancel;
-
- selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
- if (!selftests)
- goto genlmsg_cancel;
-
- for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
- i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
- enum devlink_selftest_status test_status;
-
- if (nla_get_flag(tb[i])) {
- if (!devlink->ops->selftest_check(devlink, i,
- info->extack)) {
- if (devlink_selftest_result_put(msg, i,
- DEVLINK_SELFTEST_STATUS_SKIP))
- goto selftests_nest_cancel;
- continue;
- }
-
- test_status = devlink->ops->selftest_run(devlink, i,
- info->extack);
- if (devlink_selftest_result_put(msg, i, test_status))
- goto selftests_nest_cancel;
- }
- }
-
- nla_nest_end(msg, selftests);
- genlmsg_end(msg, hdr);
- return genlmsg_reply(msg, info);
-
-selftests_nest_cancel:
- nla_nest_cancel(msg, selftests);
-genlmsg_cancel:
- genlmsg_cancel(msg, hdr);
-free_msg:
- nlmsg_free(msg);
- return err;
-}
-
static const struct devlink_param devlink_param_generic[] = {
{
.id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
@@ -5434,26 +3953,22 @@ static int devlink_param_driver_verify(const struct devlink_param *param)
}
static struct devlink_param_item *
-devlink_param_find_by_name(struct list_head *param_list,
- const char *param_name)
+devlink_param_find_by_name(struct xarray *params, const char *param_name)
{
struct devlink_param_item *param_item;
+ unsigned long param_id;
- list_for_each_entry(param_item, param_list, list)
+ xa_for_each(params, param_id, param_item) {
if (!strcmp(param_item->param->name, param_name))
return param_item;
+ }
return NULL;
}
static struct devlink_param_item *
-devlink_param_find_by_id(struct list_head *param_list, u32 param_id)
+devlink_param_find_by_id(struct xarray *params, u32 param_id)
{
- struct devlink_param_item *param_item;
-
- list_for_each_entry(param_item, param_list, list)
- if (param_item->param->id == param_id)
- return param_item;
- return NULL;
+ return xa_load(params, param_id);
}
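With params now keyed by id in an xarray, find-by-id collapses to a single xa_load() while find-by-name still has to iterate. A userspace model of the two lookups, where a plain array stands in for the xarray:

    #include <stdio.h>
    #include <string.h>

    struct param { const char *name; };

    /* Dense stand-in for the xarray: slot number == param id. */
    static struct param *params[8];

    static struct param *find_by_id(unsigned long id)
    {
        return id < 8 ? params[id] : NULL;        /* models xa_load() */
    }

    static struct param *find_by_name(const char *name)
    {
        for (unsigned long id = 0; id < 8; id++)  /* models xa_for_each() */
            if (params[id] && !strcmp(params[id]->name, name))
                return params[id];
        return NULL;
    }

    int main(void)
    {
        static struct param reset = { "int_err_reset" };

        params[3] = &reset;
        printf("%s %s\n", find_by_id(3)->name,
               find_by_name("int_err_reset")->name);
        return 0;
    }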
static bool
@@ -5572,9 +4087,12 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
if (!devlink_param_cmode_is_supported(param, i))
continue;
if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) {
- if (!param_item->driverinit_value_valid)
+ if (param_item->driverinit_value_new_valid)
+ param_value[i] = param_item->driverinit_value_new;
+ else if (param_item->driverinit_value_valid)
+ param_value[i] = param_item->driverinit_value;
+ else
return -EOPNOTSUPP;
- param_value[i] = param_item->driverinit_value;
} else {
ctx.cmode = i;
err = devlink_param_get(devlink, param, &ctx);
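Driverinit sets no longer overwrite the live value: they land in a driverinit_value_new shadow slot (see the set-doit hunk further down), and the GET path above reports the pending value first. Presumably the staged value is folded into driverinit_value only when the driver reinitializes via reload; that commit step in the model below is an assumption for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    struct param_item {
        int driverinit_value;
        bool driverinit_value_valid;
        int driverinit_value_new;            /* staged, not applied yet */
        bool driverinit_value_new_valid;
    };

    static int param_get(const struct param_item *p, int *val)
    {
        if (p->driverinit_value_new_valid)   /* report the pending value */
            *val = p->driverinit_value_new;
        else if (p->driverinit_value_valid)
            *val = p->driverinit_value;
        else
            return -1;                        /* stand-in for -EOPNOTSUPP */
        return 0;
    }

    static void param_set(struct param_item *p, int val)
    {
        p->driverinit_value_new = val;
        p->driverinit_value_new_valid = true;
    }

    static void param_commit(struct param_item *p)  /* assumed reload step */
    {
        if (p->driverinit_value_new_valid) {
            p->driverinit_value = p->driverinit_value_new;
            p->driverinit_value_valid = true;
            p->driverinit_value_new_valid = false;
        }
    }

    int main(void)
    {
        struct param_item p = { .driverinit_value = 1,
                                .driverinit_value_valid = true };
        int v;

        param_set(&p, 2);
        param_commit(&p);
        if (!param_get(&p, &v))
            printf("driverinit value now %d\n", v);  /* 2 */
        return 0;
    }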
@@ -5651,7 +4169,13 @@ static void devlink_param_notify(struct devlink *devlink,
WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL &&
cmd != DEVLINK_CMD_PORT_PARAM_NEW &&
cmd != DEVLINK_CMD_PORT_PARAM_DEL);
- ASSERT_DEVLINK_REGISTERED(devlink);
+
+ /* devlink_notify_register() / devlink_notify_unregister()
+ * will replay the notifications if the params are added/removed
+ * outside of the lifetime of the instance.
+ */
+ if (!devl_is_registered(devlink))
+ return;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
@@ -5667,48 +4191,36 @@ static void devlink_param_notify(struct devlink *devlink,
msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
-static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_param_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb)
{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_param_item *param_item;
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
- int idx = 0;
+ unsigned long param_id;
int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- devl_lock(devlink);
- list_for_each_entry(param_item, &devlink->param_list, list) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_param_fill(msg, devlink, 0, param_item,
- DEVLINK_CMD_PARAM_GET,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
- if (err == -EOPNOTSUPP) {
- err = 0;
- } else if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
- idx++;
+ xa_for_each_start(&devlink->params, param_id, param_item, state->idx) {
+ err = devlink_nl_param_fill(msg, devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err == -EOPNOTSUPP) {
+ err = 0;
+ } else if (err) {
+ state->idx = param_id;
+ break;
}
- devl_unlock(devlink);
- devlink_put(devlink);
}
-out:
- if (err != -EMSGSIZE)
- return err;
- cb->args[0] = idx;
- return msg->len;
+ return err;
}
+const struct devlink_cmd devl_cmd_param_get = {
+ .dump_one = devlink_nl_cmd_param_get_dump_one,
+};
+
static int
devlink_param_type_get_from_info(struct genl_info *info,
enum devlink_param_type *param_type)
@@ -5785,8 +4297,7 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
}
static struct devlink_param_item *
-devlink_param_get_from_info(struct list_head *param_list,
- struct genl_info *info)
+devlink_param_get_from_info(struct xarray *params, struct genl_info *info)
{
char *param_name;
@@ -5794,7 +4305,7 @@ devlink_param_get_from_info(struct list_head *param_list,
return NULL;
param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
- return devlink_param_find_by_name(param_list, param_name);
+ return devlink_param_find_by_name(params, param_name);
}
static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
@@ -5805,7 +4316,7 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
struct sk_buff *msg;
int err;
- param_item = devlink_param_get_from_info(&devlink->param_list, info);
+ param_item = devlink_param_get_from_info(&devlink->params, info);
if (!param_item)
return -EINVAL;
@@ -5826,7 +4337,7 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
unsigned int port_index,
- struct list_head *param_list,
+ struct xarray *params,
struct genl_info *info,
enum devlink_command cmd)
{
@@ -5838,7 +4349,7 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
union devlink_param_value value;
int err = 0;
- param_item = devlink_param_get_from_info(param_list, info);
+ param_item = devlink_param_get_from_info(params, info);
if (!param_item)
return -EINVAL;
param = param_item->param;
@@ -5863,11 +4374,8 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
return -EOPNOTSUPP;
if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
- if (param->type == DEVLINK_PARAM_TYPE_STRING)
- strcpy(param_item->driverinit_value.vstr, value.vstr);
- else
- param_item->driverinit_value = value;
- param_item->driverinit_value_valid = true;
+ param_item->driverinit_value_new = value;
+ param_item->driverinit_value_new_valid = true;
} else {
if (!param->set)
return -EOPNOTSUPP;
@@ -5887,28 +4395,28 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
{
struct devlink *devlink = info->user_ptr[0];
- return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->param_list,
+ return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->params,
info, DEVLINK_CMD_PARAM_NEW);
}
static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
struct netlink_callback *cb)
{
- NL_SET_ERR_MSG_MOD(cb->extack, "Port params are not supported");
+ NL_SET_ERR_MSG(cb->extack, "Port params are not supported");
return msg->len;
}
static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
- NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported");
+ NL_SET_ERR_MSG(info->extack, "Port params are not supported");
return -EINVAL;
}
static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
- NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported");
+ NL_SET_ERR_MSG(info->extack, "Port params are not supported");
return -EINVAL;
}
@@ -6372,21 +4880,20 @@ out:
return err;
}
-static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb,
- struct devlink *devlink,
- int *idx,
- int start)
+static int
+devlink_nl_cmd_region_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb)
{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_region *region;
struct devlink_port *port;
unsigned long port_index;
- int err = 0;
+ int idx = 0;
+ int err;
- devl_lock(devlink);
list_for_each_entry(region, &devlink->region_list, list) {
- if (*idx < start) {
- (*idx)++;
+ if (idx < state->idx) {
+ idx++;
continue;
}
err = devlink_nl_region_fill(msg, devlink,
@@ -6394,43 +4901,28 @@ static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI, region);
- if (err)
- goto out;
- (*idx)++;
+ if (err) {
+ state->idx = idx;
+ return err;
+ }
+ idx++;
}
xa_for_each(&devlink->ports, port_index, port) {
- err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, idx,
- start);
- if (err)
- goto out;
+ err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, &idx,
+ state->idx);
+ if (err) {
+ state->idx = idx;
+ return err;
+ }
}
-out:
- devl_unlock(devlink);
- return err;
+ return 0;
}
-static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
-{
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
- int idx = 0;
- int err = 0;
-
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- err = devlink_nl_cmd_region_get_devlink_dumpit(msg, cb, devlink,
- &idx, start);
- devlink_put(devlink);
- if (err)
- goto out;
- }
-out:
- cb->args[0] = idx;
- return msg->len;
-}
+const struct devlink_cmd devl_cmd_region_get = {
+ .dump_one = devlink_nl_cmd_region_get_dump_one,
+};
static int devlink_nl_cmd_region_del(struct sk_buff *skb,
struct genl_info *info)
@@ -6493,7 +4985,7 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
int err;
if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME)) {
- NL_SET_ERR_MSG_MOD(info->extack, "No region name provided");
+ NL_SET_ERR_MSG(info->extack, "No region name provided");
return -EINVAL;
}
@@ -6513,19 +5005,19 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
region = devlink_region_get_by_name(devlink, region_name);
if (!region) {
- NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not exist");
+ NL_SET_ERR_MSG(info->extack, "The requested region does not exist");
return -EINVAL;
}
if (!region->ops->snapshot) {
- NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not support taking an immediate snapshot");
+ NL_SET_ERR_MSG(info->extack, "The requested region does not support taking an immediate snapshot");
return -EOPNOTSUPP;
}
mutex_lock(&region->snapshot_lock);
if (region->cur_snapshots == region->max_snapshots) {
- NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots");
+ NL_SET_ERR_MSG(info->extack, "The region has reached the maximum number of stored snapshots");
err = -ENOSPC;
goto unlock;
}
@@ -6535,7 +5027,7 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
snapshot_id = nla_get_u32(snapshot_id_attr);
if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
- NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use");
+ NL_SET_ERR_MSG(info->extack, "The requested snapshot id is already in use");
err = -EEXIST;
goto unlock;
}
@@ -6546,7 +5038,7 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
} else {
err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
if (err) {
- NL_SET_ERR_MSG_MOD(info->extack, "Failed to allocate a new snapshot id");
+ NL_SET_ERR_MSG(info->extack, "Failed to allocate a new snapshot id");
goto unlock;
}
}
@@ -6713,6 +5205,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct nlattr *chunks_attr, *region_attr, *snapshot_attr;
u64 ret_offset, start_offset, end_offset = U64_MAX;
struct nlattr **attrs = info->attrs;
@@ -6726,14 +5219,12 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
void *hdr;
int err;
- start_offset = *((u64 *)&cb->args[0]);
+ start_offset = state->start_offset;
- devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
+ devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
if (IS_ERR(devlink))
return PTR_ERR(devlink);
- devl_lock(devlink);
-
if (!attrs[DEVLINK_ATTR_REGION_NAME]) {
NL_SET_ERR_MSG(cb->extack, "No region name provided");
err = -EINVAL;
@@ -6865,7 +5356,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
goto nla_put_failure;
}
- *((u64 *)&cb->args[0]) = ret_offset;
+ state->start_offset = ret_offset;
nla_nest_end(skb, chunks_attr);
genlmsg_end(skb, hdr);
@@ -6881,1605 +5372,6 @@ out_unlock:
return err;
}
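Two mechanical changes close out the region-read dump: devlink_get_from_attrs_lock() hands back the instance with its lock already taken (removing the separate devl_lock() call), and the resume offset stops being a u64 punned into cb->args[0] in favor of a named field in the typed dump state. A compilable model of keying a typed state struct into an opaque callback scratch area (sizes and names illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Netlink callbacks reserve opaque scratch space; devlink_dump_state()
     * overlays a typed struct on it instead of punning individual slots. */
    struct callback_model { _Alignas(uint64_t) unsigned char ctx[48]; };
    struct dump_state { uint64_t start_offset; unsigned long instance, idx; };

    static struct dump_state *dump_state(struct callback_model *cb)
    {
        _Static_assert(sizeof(struct dump_state) <= sizeof(cb->ctx),
                       "state must fit the scratch area");
        return (struct dump_state *)cb->ctx;
    }

    int main(void)
    {
        struct callback_model cb;

        memset(&cb, 0, sizeof(cb));
        dump_state(&cb)->start_offset = 0x1000; /* was *((u64 *)&cb->args[0]) */
        printf("resume at offset 0x%llx\n",
               (unsigned long long)dump_state(&cb)->start_offset);
        return 0;
    }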
-int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
-{
- if (!req->msg)
- return 0;
- return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
-}
-EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
-
-int devlink_info_board_serial_number_put(struct devlink_info_req *req,
- const char *bsn)
-{
- if (!req->msg)
- return 0;
- return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER,
- bsn);
-}
-EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put);
-
-static int devlink_info_version_put(struct devlink_info_req *req, int attr,
- const char *version_name,
- const char *version_value,
- enum devlink_info_version_type version_type)
-{
- struct nlattr *nest;
- int err;
-
- if (req->version_cb)
- req->version_cb(version_name, version_type,
- req->version_cb_priv);
-
- if (!req->msg)
- return 0;
-
- nest = nla_nest_start_noflag(req->msg, attr);
- if (!nest)
- return -EMSGSIZE;
-
- err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
- version_name);
- if (err)
- goto nla_put_failure;
-
- err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
- version_value);
- if (err)
- goto nla_put_failure;
-
- nla_nest_end(req->msg, nest);
-
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(req->msg, nest);
- return err;
-}
-
-int devlink_info_version_fixed_put(struct devlink_info_req *req,
- const char *version_name,
- const char *version_value)
-{
- return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
- version_name, version_value,
- DEVLINK_INFO_VERSION_TYPE_NONE);
-}
-EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put);
-
-int devlink_info_version_stored_put(struct devlink_info_req *req,
- const char *version_name,
- const char *version_value)
-{
- return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
- version_name, version_value,
- DEVLINK_INFO_VERSION_TYPE_NONE);
-}
-EXPORT_SYMBOL_GPL(devlink_info_version_stored_put);
-
-int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
- const char *version_name,
- const char *version_value,
- enum devlink_info_version_type version_type)
-{
- return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
- version_name, version_value,
- version_type);
-}
-EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext);
-
-int devlink_info_version_running_put(struct devlink_info_req *req,
- const char *version_name,
- const char *version_value)
-{
- return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
- version_name, version_value,
- DEVLINK_INFO_VERSION_TYPE_NONE);
-}
-EXPORT_SYMBOL_GPL(devlink_info_version_running_put);
-
-int devlink_info_version_running_put_ext(struct devlink_info_req *req,
- const char *version_name,
- const char *version_value,
- enum devlink_info_version_type version_type)
-{
- return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
- version_name, version_value,
- version_type);
-}
-EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext);
-
-static int devlink_nl_driver_info_get(struct device_driver *drv,
- struct devlink_info_req *req)
-{
- if (!drv)
- return 0;
-
- if (drv->name[0])
- return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME,
- drv->name);
-
- return 0;
-}
-
-static int
-devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink,
- enum devlink_command cmd, u32 portid,
- u32 seq, int flags, struct netlink_ext_ack *extack)
-{
- struct device *dev = devlink_to_dev(devlink);
- struct devlink_info_req req = {};
- void *hdr;
- int err;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- err = -EMSGSIZE;
- if (devlink_nl_put_handle(msg, devlink))
- goto err_cancel_msg;
-
- req.msg = msg;
- if (devlink->ops->info_get) {
- err = devlink->ops->info_get(devlink, &req, extack);
- if (err)
- goto err_cancel_msg;
- }
-
- err = devlink_nl_driver_info_get(dev->driver, &req);
- if (err)
- goto err_cancel_msg;
-
- genlmsg_end(msg, hdr);
- return 0;
-
-err_cancel_msg:
- genlmsg_cancel(msg, hdr);
- return err;
-}
-
-static int devlink_nl_cmd_info_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct sk_buff *msg;
- int err;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
- info->snd_portid, info->snd_seq, 0,
- info->extack);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
-{
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
- int idx = 0;
- int err = 0;
-
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- if (idx < start)
- goto inc;
-
- devl_lock(devlink);
- err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->extack);
- devl_unlock(devlink);
- if (err == -EOPNOTSUPP)
- err = 0;
- else if (err) {
- devlink_put(devlink);
- break;
- }
-inc:
- idx++;
- devlink_put(devlink);
- }
-
- if (err != -EMSGSIZE)
- return err;
-
- cb->args[0] = idx;
- return msg->len;
-}
-
-struct devlink_fmsg_item {
- struct list_head list;
- int attrtype;
- u8 nla_type;
- u16 len;
- int value[];
-};
-
-struct devlink_fmsg {
- struct list_head item_list;
- bool putting_binary; /* This flag forces enclosing of binary data
- * in array brackets. It forces use of the
- * designated API:
- * devlink_fmsg_binary_pair_nest_start()
- * devlink_fmsg_binary_pair_nest_end()
- */
-};
-
-static struct devlink_fmsg *devlink_fmsg_alloc(void)
-{
- struct devlink_fmsg *fmsg;
-
- fmsg = kzalloc(sizeof(*fmsg), GFP_KERNEL);
- if (!fmsg)
- return NULL;
-
- INIT_LIST_HEAD(&fmsg->item_list);
-
- return fmsg;
-}
-
-static void devlink_fmsg_free(struct devlink_fmsg *fmsg)
-{
- struct devlink_fmsg_item *item, *tmp;
-
- list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {
- list_del(&item->list);
- kfree(item);
- }
- kfree(fmsg);
-}
-
-static int devlink_fmsg_nest_common(struct devlink_fmsg *fmsg,
- int attrtype)
-{
- struct devlink_fmsg_item *item;
-
- item = kzalloc(sizeof(*item), GFP_KERNEL);
- if (!item)
- return -ENOMEM;
-
- item->attrtype = attrtype;
- list_add_tail(&item->list, &fmsg->item_list);
-
- return 0;
-}
-
-int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START);
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
-
-static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END);
-}
-
-int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_nest_end(fmsg);
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end);
-
-#define DEVLINK_FMSG_MAX_SIZE (GENLMSG_DEFAULT_SIZE - GENL_HDRLEN - NLA_HDRLEN)
-
-static int devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name)
-{
- struct devlink_fmsg_item *item;
-
- if (fmsg->putting_binary)
- return -EINVAL;
-
- if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE)
- return -EMSGSIZE;
-
- item = kzalloc(sizeof(*item) + strlen(name) + 1, GFP_KERNEL);
- if (!item)
- return -ENOMEM;
-
- item->nla_type = NLA_NUL_STRING;
- item->len = strlen(name) + 1;
- item->attrtype = DEVLINK_ATTR_FMSG_OBJ_NAME;
- memcpy(&item->value, name, item->len);
- list_add_tail(&item->list, &fmsg->item_list);
-
- return 0;
-}
-
-int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
-{
- int err;
-
- if (fmsg->putting_binary)
- return -EINVAL;
-
- err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START);
- if (err)
- return err;
-
- err = devlink_fmsg_put_name(fmsg, name);
- if (err)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start);
-
-int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_nest_end(fmsg);
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end);
-
-int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
- const char *name)
-{
- int err;
-
- if (fmsg->putting_binary)
- return -EINVAL;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_ARR_NEST_START);
- if (err)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_start);
-
-int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
-{
- int err;
-
- if (fmsg->putting_binary)
- return -EINVAL;
-
- err = devlink_fmsg_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
-
-int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
- const char *name)
-{
- int err;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- fmsg->putting_binary = true;
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start);
-
-int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg)
-{
- if (!fmsg->putting_binary)
- return -EINVAL;
-
- fmsg->putting_binary = false;
- return devlink_fmsg_arr_pair_nest_end(fmsg);
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end);
-
-static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
- const void *value, u16 value_len,
- u8 value_nla_type)
-{
- struct devlink_fmsg_item *item;
-
- if (value_len > DEVLINK_FMSG_MAX_SIZE)
- return -EMSGSIZE;
-
- item = kzalloc(sizeof(*item) + value_len, GFP_KERNEL);
- if (!item)
- return -ENOMEM;
-
- item->nla_type = value_nla_type;
- item->len = value_len;
- item->attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
- memcpy(&item->value, value, item->len);
- list_add_tail(&item->list, &fmsg->item_list);
-
- return 0;
-}
-
-static int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG);
-}
-
-static int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8);
-}
-
-int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32);
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
-
-static int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64);
-}
-
-int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
-{
- if (fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1,
- NLA_NUL_STRING);
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
-
-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len)
-{
- if (!fmsg->putting_binary)
- return -EINVAL;
-
- return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
-
-int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
- bool value)
-{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- err = devlink_fmsg_bool_put(fmsg, value);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_bool_pair_put);
-
-int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u8 value)
-{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_put(fmsg, value);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_u8_pair_put);
-
-int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u32 value)
-{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_put(fmsg, value);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_u32_pair_put);
-
-int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u64 value)
-{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- err = devlink_fmsg_u64_put(fmsg, value);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_u64_pair_put);
-
-int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const char *value)
-{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- err = devlink_fmsg_string_put(fmsg, value);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
-
-int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u32 value_len)
-{
- u32 data_size;
- int end_err;
- u32 offset;
- int err;
-
- err = devlink_fmsg_binary_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- for (offset = 0; offset < value_len; offset += data_size) {
- data_size = value_len - offset;
- if (data_size > DEVLINK_FMSG_MAX_SIZE)
- data_size = DEVLINK_FMSG_MAX_SIZE;
- err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
- if (err)
- break;
- /* Exit the loop with a break (instead of a
- * return) so that putting_binary is turned off in
- * devlink_fmsg_binary_pair_nest_end()
- */
- }
-
- end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
- if (end_err)
- err = end_err;
-
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
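
[For context, these fmsg helpers are meant to be chained from a reporter's dump/diagnose callbacks. A hedged usage sketch follows; the devlink_fmsg_* calls match the exports above, while every foo_* name is hypothetical.]

/* Hypothetical driver types and callback; only the devlink_fmsg_*
 * calls are real, everything foo_* is made up for illustration. */
struct foo_crash_info {
	const char *cause;
	u32 queue_id;
	void *buf;
	u32 buf_len;
};

static int foo_reporter_dump(struct devlink_health_reporter *reporter,
			     struct devlink_fmsg *fmsg, void *priv_ctx,
			     struct netlink_ext_ack *extack)
{
	struct foo_crash_info *info = priv_ctx;
	int err;

	err = devlink_fmsg_string_pair_put(fmsg, "cause", info->cause);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "queue", info->queue_id);
	if (err)
		return err;

	/* devlink_fmsg_binary_pair_put() chunks the blob into pieces
	 * no larger than DEVLINK_FMSG_MAX_SIZE, as seen above. */
	return devlink_fmsg_binary_pair_put(fmsg, "raw", info->buf,
					    info->buf_len);
}
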
-
-static int
-devlink_fmsg_item_fill_type(struct devlink_fmsg_item *msg, struct sk_buff *skb)
-{
- switch (msg->nla_type) {
- case NLA_FLAG:
- case NLA_U8:
- case NLA_U32:
- case NLA_U64:
- case NLA_NUL_STRING:
- case NLA_BINARY:
- return nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE,
- msg->nla_type);
- default:
- return -EINVAL;
- }
-}
-
-static int
-devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
-{
- int attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
- u8 tmp;
-
- switch (msg->nla_type) {
- case NLA_FLAG:
- /* Always provide flag data, regardless of its value */
- tmp = *(bool *) msg->value;
-
- return nla_put_u8(skb, attrtype, tmp);
- case NLA_U8:
- return nla_put_u8(skb, attrtype, *(u8 *) msg->value);
- case NLA_U32:
- return nla_put_u32(skb, attrtype, *(u32 *) msg->value);
- case NLA_U64:
- return nla_put_u64_64bit(skb, attrtype, *(u64 *) msg->value,
- DEVLINK_ATTR_PAD);
- case NLA_NUL_STRING:
- return nla_put_string(skb, attrtype, (char *) &msg->value);
- case NLA_BINARY:
- return nla_put(skb, attrtype, msg->len, (void *) &msg->value);
- default:
- return -EINVAL;
- }
-}
-
-static int
-devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
- int *start)
-{
- struct devlink_fmsg_item *item;
- struct nlattr *fmsg_nlattr;
- int i = 0;
- int err;
-
- fmsg_nlattr = nla_nest_start_noflag(skb, DEVLINK_ATTR_FMSG);
- if (!fmsg_nlattr)
- return -EMSGSIZE;
-
- list_for_each_entry(item, &fmsg->item_list, list) {
- if (i < *start) {
- i++;
- continue;
- }
-
- switch (item->attrtype) {
- case DEVLINK_ATTR_FMSG_OBJ_NEST_START:
- case DEVLINK_ATTR_FMSG_PAIR_NEST_START:
- case DEVLINK_ATTR_FMSG_ARR_NEST_START:
- case DEVLINK_ATTR_FMSG_NEST_END:
- err = nla_put_flag(skb, item->attrtype);
- break;
- case DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA:
- err = devlink_fmsg_item_fill_type(item, skb);
- if (err)
- break;
- err = devlink_fmsg_item_fill_data(item, skb);
- break;
- case DEVLINK_ATTR_FMSG_OBJ_NAME:
- err = nla_put_string(skb, item->attrtype,
- (char *) &item->value);
- break;
- default:
- err = -EINVAL;
- break;
- }
- if (!err)
- *start = ++i;
- else
- break;
- }
-
- nla_nest_end(skb, fmsg_nlattr);
- return err;
-}
-
-static int devlink_fmsg_snd(struct devlink_fmsg *fmsg,
- struct genl_info *info,
- enum devlink_command cmd, int flags)
-{
- struct nlmsghdr *nlh;
- struct sk_buff *skb;
- bool last = false;
- int index = 0;
- void *hdr;
- int err;
-
- while (!last) {
- int tmp_index = index;
-
- skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
- &devlink_nl_family, flags | NLM_F_MULTI, cmd);
- if (!hdr) {
- err = -EMSGSIZE;
- goto nla_put_failure;
- }
-
- err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
- if (!err)
- last = true;
- else if (err != -EMSGSIZE || tmp_index == index)
- goto nla_put_failure;
-
- genlmsg_end(skb, hdr);
- err = genlmsg_reply(skb, info);
- if (err)
- return err;
- }
-
- skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
- NLMSG_DONE, 0, flags | NLM_F_MULTI);
- if (!nlh) {
- err = -EMSGSIZE;
- goto nla_put_failure;
- }
-
- return genlmsg_reply(skb, info);
-
-nla_put_failure:
- nlmsg_free(skb);
- return err;
-}
-
-static int devlink_fmsg_dumpit(struct devlink_fmsg *fmsg, struct sk_buff *skb,
- struct netlink_callback *cb,
- enum devlink_command cmd)
-{
- int index = cb->args[0];
- int tmp_index = index;
- void *hdr;
- int err;
-
- hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, cmd);
- if (!hdr) {
- err = -EMSGSIZE;
- goto nla_put_failure;
- }
-
- err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
- if ((err && err != -EMSGSIZE) || tmp_index == index)
- goto nla_put_failure;
-
- cb->args[0] = index;
- genlmsg_end(skb, hdr);
- return skb->len;
-
-nla_put_failure:
- genlmsg_cancel(skb, hdr);
- return err;
-}
-
-struct devlink_health_reporter {
- struct list_head list;
- void *priv;
- const struct devlink_health_reporter_ops *ops;
- struct devlink *devlink;
- struct devlink_port *devlink_port;
- struct devlink_fmsg *dump_fmsg;
- struct mutex dump_lock; /* lock parallel read/write from dump buffers */
- u64 graceful_period;
- bool auto_recover;
- bool auto_dump;
- u8 health_state;
- u64 dump_ts;
- u64 dump_real_ts;
- u64 error_count;
- u64 recovery_count;
- u64 last_recovery_ts;
- refcount_t refcount;
-};
-
-void *
-devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
-{
- return reporter->priv;
-}
-EXPORT_SYMBOL_GPL(devlink_health_reporter_priv);
-
-static struct devlink_health_reporter *
-__devlink_health_reporter_find_by_name(struct list_head *reporter_list,
- struct mutex *list_lock,
- const char *reporter_name)
-{
- struct devlink_health_reporter *reporter;
-
- lockdep_assert_held(list_lock);
- list_for_each_entry(reporter, reporter_list, list)
- if (!strcmp(reporter->ops->name, reporter_name))
- return reporter;
- return NULL;
-}
-
-static struct devlink_health_reporter *
-devlink_health_reporter_find_by_name(struct devlink *devlink,
- const char *reporter_name)
-{
- return __devlink_health_reporter_find_by_name(&devlink->reporter_list,
- &devlink->reporters_lock,
- reporter_name);
-}
-
-static struct devlink_health_reporter *
-devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port,
- const char *reporter_name)
-{
- return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list,
- &devlink_port->reporters_lock,
- reporter_name);
-}
-
-static struct devlink_health_reporter *
-__devlink_health_reporter_create(struct devlink *devlink,
- const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv)
-{
- struct devlink_health_reporter *reporter;
-
- if (WARN_ON(graceful_period && !ops->recover))
- return ERR_PTR(-EINVAL);
-
- reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
- if (!reporter)
- return ERR_PTR(-ENOMEM);
-
- reporter->priv = priv;
- reporter->ops = ops;
- reporter->devlink = devlink;
- reporter->graceful_period = graceful_period;
- reporter->auto_recover = !!ops->recover;
- reporter->auto_dump = !!ops->dump;
- mutex_init(&reporter->dump_lock);
- refcount_set(&reporter->refcount, 1);
- return reporter;
-}
-
-/**
- * devlink_port_health_reporter_create - create devlink health reporter for
- * specified port instance
- *
- * @port: devlink_port which should contain the new reporter
- * @ops: devlink health reporter ops
- * @graceful_period: minimum time between recoveries, in msecs, to avoid recovery loops
- * @priv: driver private data
- */
-struct devlink_health_reporter *
-devlink_port_health_reporter_create(struct devlink_port *port,
- const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv)
-{
- struct devlink_health_reporter *reporter;
-
- mutex_lock(&port->reporters_lock);
- if (__devlink_health_reporter_find_by_name(&port->reporter_list,
- &port->reporters_lock, ops->name)) {
- reporter = ERR_PTR(-EEXIST);
- goto unlock;
- }
-
- reporter = __devlink_health_reporter_create(port->devlink, ops,
- graceful_period, priv);
- if (IS_ERR(reporter))
- goto unlock;
-
- reporter->devlink_port = port;
- list_add_tail(&reporter->list, &port->reporter_list);
-unlock:
- mutex_unlock(&port->reporters_lock);
- return reporter;
-}
-EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create);
-
-/**
- * devlink_health_reporter_create - create devlink health reporter
- *
- * @devlink: devlink instance
- * @ops: devlink health reporter ops
- * @graceful_period: minimum time between recoveries, in msecs, to avoid recovery loops
- * @priv: driver private data
- */
-struct devlink_health_reporter *
-devlink_health_reporter_create(struct devlink *devlink,
- const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv)
-{
- struct devlink_health_reporter *reporter;
-
- mutex_lock(&devlink->reporters_lock);
- if (devlink_health_reporter_find_by_name(devlink, ops->name)) {
- reporter = ERR_PTR(-EEXIST);
- goto unlock;
- }
-
- reporter = __devlink_health_reporter_create(devlink, ops,
- graceful_period, priv);
- if (IS_ERR(reporter))
- goto unlock;
-
- list_add_tail(&reporter->list, &devlink->reporter_list);
-unlock:
- mutex_unlock(&devlink->reporters_lock);
- return reporter;
-}
-EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
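
[A minimal sketch of registering a reporter against this API. The ops members mirror those referenced in the code above (name/recover/dump); the foo_* callbacks, struct foo_priv, and the 5000 ms graceful period are assumptions.]

/* Illustrative registration; everything foo_* is hypothetical. */
static const struct devlink_health_reporter_ops foo_fw_reporter_ops = {
	.name = "fw",
	.recover = foo_fw_recover,
	.dump = foo_reporter_dump,
};

static int foo_health_init(struct devlink *devlink, struct foo_priv *priv)
{
	priv->fw_reporter =
		devlink_health_reporter_create(devlink, &foo_fw_reporter_ops,
					       5000 /* msecs */, priv);
	return PTR_ERR_OR_ZERO(priv->fw_reporter);
}
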
-
-static void
-devlink_health_reporter_free(struct devlink_health_reporter *reporter)
-{
- mutex_destroy(&reporter->dump_lock);
- if (reporter->dump_fmsg)
- devlink_fmsg_free(reporter->dump_fmsg);
- kfree(reporter);
-}
-
-static void
-devlink_health_reporter_put(struct devlink_health_reporter *reporter)
-{
- if (refcount_dec_and_test(&reporter->refcount))
- devlink_health_reporter_free(reporter);
-}
-
-static void
-__devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
-{
- list_del(&reporter->list);
- devlink_health_reporter_put(reporter);
-}
-
-/**
- * devlink_health_reporter_destroy - destroy devlink health reporter
- *
- * @reporter: devlink health reporter to destroy
- */
-void
-devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
-{
- struct mutex *lock = &reporter->devlink->reporters_lock;
-
- mutex_lock(lock);
- __devlink_health_reporter_destroy(reporter);
- mutex_unlock(lock);
-}
-EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);
-
-/**
- * devlink_port_health_reporter_destroy - destroy devlink port health reporter
- *
- * @reporter: devlink health reporter to destroy
- */
-void
-devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter)
-{
- struct mutex *lock = &reporter->devlink_port->reporters_lock;
-
- mutex_lock(lock);
- __devlink_health_reporter_destroy(reporter);
- mutex_unlock(lock);
-}
-EXPORT_SYMBOL_GPL(devlink_port_health_reporter_destroy);
-
-static int
-devlink_nl_health_reporter_fill(struct sk_buff *msg,
- struct devlink_health_reporter *reporter,
- enum devlink_command cmd, u32 portid,
- u32 seq, int flags)
-{
- struct devlink *devlink = reporter->devlink;
- struct nlattr *reporter_attr;
- void *hdr;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto genlmsg_cancel;
-
- if (reporter->devlink_port) {
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index))
- goto genlmsg_cancel;
- }
- reporter_attr = nla_nest_start_noflag(msg,
- DEVLINK_ATTR_HEALTH_REPORTER);
- if (!reporter_attr)
- goto genlmsg_cancel;
- if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME,
- reporter->ops->name))
- goto reporter_nest_cancel;
- if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE,
- reporter->health_state))
- goto reporter_nest_cancel;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
- reporter->error_count, DEVLINK_ATTR_PAD))
- goto reporter_nest_cancel;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
- reporter->recovery_count, DEVLINK_ATTR_PAD))
- goto reporter_nest_cancel;
- if (reporter->ops->recover &&
- nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
- reporter->graceful_period,
- DEVLINK_ATTR_PAD))
- goto reporter_nest_cancel;
- if (reporter->ops->recover &&
- nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
- reporter->auto_recover))
- goto reporter_nest_cancel;
- if (reporter->dump_fmsg &&
- nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
- jiffies_to_msecs(reporter->dump_ts),
- DEVLINK_ATTR_PAD))
- goto reporter_nest_cancel;
- if (reporter->dump_fmsg &&
- nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
- reporter->dump_real_ts, DEVLINK_ATTR_PAD))
- goto reporter_nest_cancel;
- if (reporter->ops->dump &&
- nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
- reporter->auto_dump))
- goto reporter_nest_cancel;
-
- nla_nest_end(msg, reporter_attr);
- genlmsg_end(msg, hdr);
- return 0;
-
-reporter_nest_cancel:
- nla_nest_end(msg, reporter_attr);
-genlmsg_cancel:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-static void devlink_recover_notify(struct devlink_health_reporter *reporter,
- enum devlink_command cmd)
-{
- struct devlink *devlink = reporter->devlink;
- struct sk_buff *msg;
- int err;
-
- WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
- WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_health_reporter_fill(msg, reporter, cmd, 0, 0, 0);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
- 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-void
-devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter)
-{
- reporter->recovery_count++;
- reporter->last_recovery_ts = jiffies;
-}
-EXPORT_SYMBOL_GPL(devlink_health_reporter_recovery_done);
-
-static int
-devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx, struct netlink_ext_ack *extack)
-{
- int err;
-
- if (reporter->health_state == DEVLINK_HEALTH_REPORTER_STATE_HEALTHY)
- return 0;
-
- if (!reporter->ops->recover)
- return -EOPNOTSUPP;
-
- err = reporter->ops->recover(reporter, priv_ctx, extack);
- if (err)
- return err;
-
- devlink_health_reporter_recovery_done(reporter);
- reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
- devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
-
- return 0;
-}
-
-static void
-devlink_health_dump_clear(struct devlink_health_reporter *reporter)
-{
- if (!reporter->dump_fmsg)
- return;
- devlink_fmsg_free(reporter->dump_fmsg);
- reporter->dump_fmsg = NULL;
-}
-
-static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
-{
- int err;
-
- if (!reporter->ops->dump)
- return 0;
-
- if (reporter->dump_fmsg)
- return 0;
-
- reporter->dump_fmsg = devlink_fmsg_alloc();
- if (!reporter->dump_fmsg) {
- err = -ENOMEM;
- return err;
- }
-
- err = devlink_fmsg_obj_nest_start(reporter->dump_fmsg);
- if (err)
- goto dump_err;
-
- err = reporter->ops->dump(reporter, reporter->dump_fmsg,
- priv_ctx, extack);
- if (err)
- goto dump_err;
-
- err = devlink_fmsg_obj_nest_end(reporter->dump_fmsg);
- if (err)
- goto dump_err;
-
- reporter->dump_ts = jiffies;
- reporter->dump_real_ts = ktime_get_real_ns();
-
- return 0;
-
-dump_err:
- devlink_health_dump_clear(reporter);
- return err;
-}
-
-int devlink_health_report(struct devlink_health_reporter *reporter,
- const char *msg, void *priv_ctx)
-{
- enum devlink_health_reporter_state prev_health_state;
- struct devlink *devlink = reporter->devlink;
- unsigned long recover_ts_threshold;
- int ret;
-
- /* write a log message of the current error */
- WARN_ON(!msg);
- trace_devlink_health_report(devlink, reporter->ops->name, msg);
- reporter->error_count++;
- prev_health_state = reporter->health_state;
- reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
- devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
-
- /* abort if the previous error wasn't recovered */
- recover_ts_threshold = reporter->last_recovery_ts +
- msecs_to_jiffies(reporter->graceful_period);
- if (reporter->auto_recover &&
- (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
- (reporter->last_recovery_ts && reporter->recovery_count &&
- time_is_after_jiffies(recover_ts_threshold)))) {
- trace_devlink_health_recover_aborted(devlink,
- reporter->ops->name,
- reporter->health_state,
- jiffies -
- reporter->last_recovery_ts);
- return -ECANCELED;
- }
-
- if (reporter->auto_dump) {
- mutex_lock(&reporter->dump_lock);
- /* store a dump of the current error, for later analysis */
- devlink_health_do_dump(reporter, priv_ctx, NULL);
- mutex_unlock(&reporter->dump_lock);
- }
-
- if (!reporter->auto_recover)
- return 0;
-
- devl_lock(devlink);
- ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL);
- devl_unlock(devlink);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devlink_health_report);
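
[And the reporting side, sketched under the same hypothetical driver. The behavior described in the comment follows directly from devlink_health_report() above.]

/* Illustrative error report from a driver event handler. On error the
 * core bumps error_count, notifies userspace and, per auto_dump /
 * auto_recover, stores a dump and attempts recovery as shown above. */
static void foo_handle_fw_fatal(struct foo_priv *priv,
				struct foo_crash_info *info)
{
	int err;

	err = devlink_health_report(priv->fw_reporter,
				    "FW fatal event", info);
	if (err == -ECANCELED)
		/* previous error still unrecovered within the
		 * graceful period; fall back to a full reset */
		foo_schedule_reset(priv);	/* hypothetical */
}
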
-
-static struct devlink_health_reporter *
-devlink_health_reporter_get_from_attrs(struct devlink *devlink,
- struct nlattr **attrs)
-{
- struct devlink_health_reporter *reporter;
- struct devlink_port *devlink_port;
- char *reporter_name;
-
- if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
- return NULL;
-
- reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
- devlink_port = devlink_port_get_from_attrs(devlink, attrs);
- if (IS_ERR(devlink_port)) {
- mutex_lock(&devlink->reporters_lock);
- reporter = devlink_health_reporter_find_by_name(devlink, reporter_name);
- if (reporter)
- refcount_inc(&reporter->refcount);
- mutex_unlock(&devlink->reporters_lock);
- } else {
- mutex_lock(&devlink_port->reporters_lock);
- reporter = devlink_port_health_reporter_find_by_name(devlink_port, reporter_name);
- if (reporter)
- refcount_inc(&reporter->refcount);
- mutex_unlock(&devlink_port->reporters_lock);
- }
-
- return reporter;
-}
-
-static struct devlink_health_reporter *
-devlink_health_reporter_get_from_info(struct devlink *devlink,
- struct genl_info *info)
-{
- return devlink_health_reporter_get_from_attrs(devlink, info->attrs);
-}
-
-static struct devlink_health_reporter *
-devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
-{
- const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- struct devlink_health_reporter *reporter;
- struct nlattr **attrs = info->attrs;
- struct devlink *devlink;
-
- devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
- if (IS_ERR(devlink))
- return NULL;
-
- reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
- devlink_put(devlink);
- return reporter;
-}
-
-void
-devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
- enum devlink_health_reporter_state state)
-{
- if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
- state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
- return;
-
- if (reporter->health_state == state)
- return;
-
- reporter->health_state = state;
- trace_devlink_health_reporter_state_update(reporter->devlink,
- reporter->ops->name, state);
- devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
-}
-EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
-
-static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_health_reporter *reporter;
- struct sk_buff *msg;
- int err;
-
- reporter = devlink_health_reporter_get_from_info(devlink, info);
- if (!reporter)
- return -EINVAL;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg) {
- err = -ENOMEM;
- goto out;
- }
-
- err = devlink_nl_health_reporter_fill(msg, reporter,
- DEVLINK_CMD_HEALTH_REPORTER_GET,
- info->snd_portid, info->snd_seq,
- 0);
- if (err) {
- nlmsg_free(msg);
- goto out;
- }
-
- err = genlmsg_reply(msg, info);
-out:
- devlink_health_reporter_put(reporter);
- return err;
-}
-
-static int
-devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
-{
- struct devlink_health_reporter *reporter;
- unsigned long index, port_index;
- struct devlink_port *port;
- struct devlink *devlink;
- int start = cb->args[0];
- int idx = 0;
- int err;
-
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- mutex_lock(&devlink->reporters_lock);
- list_for_each_entry(reporter, &devlink->reporter_list,
- list) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_health_reporter_fill(
- msg, reporter, DEVLINK_CMD_HEALTH_REPORTER_GET,
- NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
- if (err) {
- mutex_unlock(&devlink->reporters_lock);
- devlink_put(devlink);
- goto out;
- }
- idx++;
- }
- mutex_unlock(&devlink->reporters_lock);
- devlink_put(devlink);
- }
-
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- devl_lock(devlink);
- xa_for_each(&devlink->ports, port_index, port) {
- mutex_lock(&port->reporters_lock);
- list_for_each_entry(reporter, &port->reporter_list, list) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_health_reporter_fill(
- msg, reporter,
- DEVLINK_CMD_HEALTH_REPORTER_GET,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI);
- if (err) {
- mutex_unlock(&port->reporters_lock);
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
- idx++;
- }
- mutex_unlock(&port->reporters_lock);
- }
- devl_unlock(devlink);
- devlink_put(devlink);
- }
-out:
- cb->args[0] = idx;
- return msg->len;
-}
-
-static int
-devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_health_reporter *reporter;
- int err;
-
- reporter = devlink_health_reporter_get_from_info(devlink, info);
- if (!reporter)
- return -EINVAL;
-
- if (!reporter->ops->recover &&
- (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
- info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])) {
- err = -EOPNOTSUPP;
- goto out;
- }
- if (!reporter->ops->dump &&
- info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
- reporter->graceful_period =
- nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]);
-
- if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])
- reporter->auto_recover =
- nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
-
- if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP])
- reporter->auto_dump =
- nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]);
-
- devlink_health_reporter_put(reporter);
- return 0;
-out:
- devlink_health_reporter_put(reporter);
- return err;
-}
-
-static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_health_reporter *reporter;
- int err;
-
- reporter = devlink_health_reporter_get_from_info(devlink, info);
- if (!reporter)
- return -EINVAL;
-
- err = devlink_health_reporter_recover(reporter, NULL, info->extack);
-
- devlink_health_reporter_put(reporter);
- return err;
-}
-
-static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_health_reporter *reporter;
- struct devlink_fmsg *fmsg;
- int err;
-
- reporter = devlink_health_reporter_get_from_info(devlink, info);
- if (!reporter)
- return -EINVAL;
-
- if (!reporter->ops->diagnose) {
- devlink_health_reporter_put(reporter);
- return -EOPNOTSUPP;
- }
-
- fmsg = devlink_fmsg_alloc();
- if (!fmsg) {
- devlink_health_reporter_put(reporter);
- return -ENOMEM;
- }
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- goto out;
-
- err = reporter->ops->diagnose(reporter, fmsg, info->extack);
- if (err)
- goto out;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- goto out;
-
- err = devlink_fmsg_snd(fmsg, info,
- DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 0);
-
-out:
- devlink_fmsg_free(fmsg);
- devlink_health_reporter_put(reporter);
- return err;
-}
-
-static int
-devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- struct devlink_health_reporter *reporter;
- u64 start = cb->args[0];
- int err;
-
- reporter = devlink_health_reporter_get_from_cb(cb);
- if (!reporter)
- return -EINVAL;
-
- if (!reporter->ops->dump) {
- err = -EOPNOTSUPP;
- goto out;
- }
- mutex_lock(&reporter->dump_lock);
- if (!start) {
- err = devlink_health_do_dump(reporter, NULL, cb->extack);
- if (err)
- goto unlock;
- cb->args[1] = reporter->dump_ts;
- }
- if (!reporter->dump_fmsg || cb->args[1] != reporter->dump_ts) {
- NL_SET_ERR_MSG_MOD(cb->extack, "Dump trampled, please retry");
- err = -EAGAIN;
- goto unlock;
- }
-
- err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
- DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
-unlock:
- mutex_unlock(&reporter->dump_lock);
-out:
- devlink_health_reporter_put(reporter);
- return err;
-}
-
-static int
-devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_health_reporter *reporter;
-
- reporter = devlink_health_reporter_get_from_info(devlink, info);
- if (!reporter)
- return -EINVAL;
-
- if (!reporter->ops->dump) {
- devlink_health_reporter_put(reporter);
- return -EOPNOTSUPP;
- }
-
- mutex_lock(&reporter->dump_lock);
- devlink_health_dump_clear(reporter);
- mutex_unlock(&reporter->dump_lock);
- devlink_health_reporter_put(reporter);
- return 0;
-}
-
-static int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_health_reporter *reporter;
- int err;
-
- reporter = devlink_health_reporter_get_from_info(devlink, info);
- if (!reporter)
- return -EINVAL;
-
- if (!reporter->ops->test) {
- devlink_health_reporter_put(reporter);
- return -EOPNOTSUPP;
- }
-
- err = reporter->ops->test(reporter, info->extack);
-
- devlink_health_reporter_put(reporter);
- return err;
-}
-
struct devlink_stats {
u64_stats_t rx_bytes;
u64_stats_t rx_packets;
@@ -8790,7 +5682,7 @@ static int devlink_nl_cmd_trap_get_doit(struct sk_buff *skb,
trap_item = devlink_trap_item_get_from_info(devlink, info);
if (!trap_item) {
- NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap");
+ NL_SET_ERR_MSG(extack, "Device did not register this trap");
return -ENOENT;
}
@@ -8811,43 +5703,39 @@ err_trap_fill:
return err;
}
-static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_trap_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb)
{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_trap_item *trap_item;
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
int idx = 0;
- int err;
+ int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- devl_lock(devlink);
- list_for_each_entry(trap_item, &devlink->trap_list, list) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_trap_fill(msg, devlink, trap_item,
- DEVLINK_CMD_TRAP_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
- if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
+ list_for_each_entry(trap_item, &devlink->trap_list, list) {
+ if (idx < state->idx) {
idx++;
+ continue;
}
- devl_unlock(devlink);
- devlink_put(devlink);
+ err = devlink_nl_trap_fill(msg, devlink, trap_item,
+ DEVLINK_CMD_TRAP_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
}
-out:
- cb->args[0] = idx;
- return msg->len;
+
+ return err;
}
+const struct devlink_cmd devl_cmd_trap_get = {
+ .dump_one = devlink_nl_cmd_trap_get_dump_one,
+};
+
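
[The devl_cmd_* structures introduced here plug into a single generic dumpit that walks all devlink instances. A paraphrased sketch of that iterator, reconstructed from the pattern visible in this diff; devl_cmd_from_cb() is a hypothetical lookup stand-in, not the real helper.]

/* Paraphrased from the pattern visible in this diff: one generic
 * dumpit iterates registered instances and delegates to dump_one(). */
static int devlink_nl_instance_iter_dumpit(struct sk_buff *msg,
					   struct netlink_callback *cb)
{
	struct devlink_nl_dump_state *state = devlink_dump_state(cb);
	const struct devlink_cmd *cmd = devl_cmd_from_cb(cb); /* assumed */
	struct devlink *devlink;
	int err = 0;

	devlinks_xa_for_each_registered_get(sock_net(msg->sk),
					    state->instance, devlink) {
		devl_lock(devlink);
		err = cmd->dump_one(msg, devlink, cb);
		devl_unlock(devlink);
		devlink_put(devlink);

		if (err)
			break;

		/* next instance starts its sub-object walk from zero */
		state->idx = 0;
	}

	if (err != -EMSGSIZE)
		return err;
	return msg->len;
}
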
static int __devlink_trap_action_set(struct devlink *devlink,
struct devlink_trap_item *trap_item,
enum devlink_trap_action trap_action,
@@ -8857,7 +5745,7 @@ static int __devlink_trap_action_set(struct devlink *devlink,
if (trap_item->action != trap_action &&
trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP) {
- NL_SET_ERR_MSG_MOD(extack, "Cannot change action of non-drop traps. Skipping");
+ NL_SET_ERR_MSG(extack, "Cannot change action of non-drop traps. Skipping");
return 0;
}
@@ -8883,7 +5771,7 @@ static int devlink_trap_action_set(struct devlink *devlink,
err = devlink_trap_action_get_from_info(info, &trap_action);
if (err) {
- NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action");
+ NL_SET_ERR_MSG(info->extack, "Invalid trap action");
return -EINVAL;
}
@@ -8903,7 +5791,7 @@ static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb,
trap_item = devlink_trap_item_get_from_info(devlink, info);
if (!trap_item) {
- NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap");
+ NL_SET_ERR_MSG(extack, "Device did not register this trap");
return -ENOENT;
}
@@ -9005,7 +5893,7 @@ static int devlink_nl_cmd_trap_group_get_doit(struct sk_buff *skb,
group_item = devlink_trap_group_item_get_from_info(devlink, info);
if (!group_item) {
- NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group");
+ NL_SET_ERR_MSG(extack, "Device did not register this trap group");
return -ENOENT;
}
@@ -9026,46 +5914,41 @@ err_trap_group_fill:
return err;
}
-static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_trap_group_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb)
{
- enum devlink_command cmd = DEVLINK_CMD_TRAP_GROUP_NEW;
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_trap_group_item *group_item;
- u32 portid = NETLINK_CB(cb->skb).portid;
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
int idx = 0;
- int err;
+ int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- devl_lock(devlink);
- list_for_each_entry(group_item, &devlink->trap_group_list,
- list) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_trap_group_fill(msg, devlink,
- group_item, cmd,
- portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
- if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
+
+ list_for_each_entry(group_item, &devlink->trap_group_list, list) {
+ if (idx < state->idx) {
idx++;
+ continue;
}
- devl_unlock(devlink);
- devlink_put(devlink);
+ err = devlink_nl_trap_group_fill(msg, devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
}
-out:
- cb->args[0] = idx;
- return msg->len;
+
+ return err;
}
+const struct devlink_cmd devl_cmd_trap_group_get = {
+ .dump_one = devlink_nl_cmd_trap_group_get_dump_one,
+};
+
static int
__devlink_trap_group_action_set(struct devlink *devlink,
struct devlink_trap_group_item *group_item,
@@ -9119,7 +6002,7 @@ devlink_trap_group_action_set(struct devlink *devlink,
err = devlink_trap_action_get_from_info(info, &trap_action);
if (err) {
- NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action");
+ NL_SET_ERR_MSG(info->extack, "Invalid trap action");
return -EINVAL;
}
@@ -9141,6 +6024,7 @@ static int devlink_trap_group_set(struct devlink *devlink,
struct netlink_ext_ack *extack = info->extack;
const struct devlink_trap_policer *policer;
struct nlattr **attrs = info->attrs;
+ u32 policer_id;
int err;
if (!attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
@@ -9149,17 +6033,11 @@ static int devlink_trap_group_set(struct devlink *devlink,
if (!devlink->ops->trap_group_set)
return -EOPNOTSUPP;
- policer_item = group_item->policer_item;
- if (attrs[DEVLINK_ATTR_TRAP_POLICER_ID]) {
- u32 policer_id;
-
- policer_id = nla_get_u32(attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
- policer_item = devlink_trap_policer_item_lookup(devlink,
- policer_id);
- if (policer_id && !policer_item) {
- NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
- return -ENOENT;
- }
+ policer_id = nla_get_u32(attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
+ policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
+ if (policer_id && !policer_item) {
+ NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
+ return -ENOENT;
}
policer = policer_item ? policer_item->policer : NULL;
@@ -9187,7 +6065,7 @@ static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
group_item = devlink_trap_group_item_get_from_info(devlink, info);
if (!group_item) {
- NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group");
+ NL_SET_ERR_MSG(extack, "Device did not register this trap group");
return -ENOENT;
}
@@ -9204,7 +6082,7 @@ static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
err_trap_group_set:
if (modified)
- NL_SET_ERR_MSG_MOD(extack, "Trap group set failed, but some changes were committed already");
+ NL_SET_ERR_MSG(extack, "Trap group set failed, but some changes were committed already");
return err;
}
@@ -9309,7 +6187,7 @@ static int devlink_nl_cmd_trap_policer_get_doit(struct sk_buff *skb,
policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
if (!policer_item) {
- NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
+ NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
return -ENOENT;
}
@@ -9330,46 +6208,40 @@ err_trap_policer_fill:
return err;
}
-static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+static int
+devlink_nl_cmd_trap_policer_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb)
{
- enum devlink_command cmd = DEVLINK_CMD_TRAP_POLICER_NEW;
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_trap_policer_item *policer_item;
- u32 portid = NETLINK_CB(cb->skb).portid;
- struct devlink *devlink;
- int start = cb->args[0];
- unsigned long index;
int idx = 0;
- int err;
+ int err = 0;
- devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
- devl_lock(devlink);
- list_for_each_entry(policer_item, &devlink->trap_policer_list,
- list) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_trap_policer_fill(msg, devlink,
- policer_item, cmd,
- portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
- if (err) {
- devl_unlock(devlink);
- devlink_put(devlink);
- goto out;
- }
+ list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
+ if (idx < state->idx) {
idx++;
+ continue;
+ }
+ err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ state->idx = idx;
+ break;
}
- devl_unlock(devlink);
- devlink_put(devlink);
+ idx++;
}
-out:
- cb->args[0] = idx;
- return msg->len;
+
+ return err;
}
+const struct devlink_cmd devl_cmd_trap_policer_get = {
+ .dump_one = devlink_nl_cmd_trap_policer_get_dump_one,
+};
+
static int
devlink_trap_policer_set(struct devlink *devlink,
struct devlink_trap_policer_item *policer_item,
@@ -9390,22 +6262,22 @@ devlink_trap_policer_set(struct devlink *devlink,
burst = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_BURST]);
if (rate < policer_item->policer->min_rate) {
- NL_SET_ERR_MSG_MOD(extack, "Policer rate lower than limit");
+ NL_SET_ERR_MSG(extack, "Policer rate lower than limit");
return -EINVAL;
}
if (rate > policer_item->policer->max_rate) {
- NL_SET_ERR_MSG_MOD(extack, "Policer rate higher than limit");
+ NL_SET_ERR_MSG(extack, "Policer rate higher than limit");
return -EINVAL;
}
if (burst < policer_item->policer->min_burst) {
- NL_SET_ERR_MSG_MOD(extack, "Policer burst size lower than limit");
+ NL_SET_ERR_MSG(extack, "Policer burst size lower than limit");
return -EINVAL;
}
if (burst > policer_item->policer->max_burst) {
- NL_SET_ERR_MSG_MOD(extack, "Policer burst size higher than limit");
+ NL_SET_ERR_MSG(extack, "Policer burst size higher than limit");
return -EINVAL;
}
@@ -9435,95 +6307,26 @@ static int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
if (!policer_item) {
- NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
+ NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
return -ENOENT;
}
return devlink_trap_policer_set(devlink, policer_item, info);
}
-static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
- [DEVLINK_ATTR_UNSPEC] = { .strict_start_type =
- DEVLINK_ATTR_TRAP_POLICER_ID },
- [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
- [DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
- DEVLINK_PORT_TYPE_IB),
- [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
- [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
- [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
- [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
- [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
- [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
- [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
- DEVLINK_ESWITCH_MODE_SWITCHDEV),
- [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
- [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
- [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
- [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
- [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
- [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
- [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
- [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
- [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] =
- NLA_POLICY_BITFIELD32(DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS),
- [DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
- [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
- [DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
- [DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
- [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8 },
- [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
- [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
- [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
- [DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
- [DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
- DEVLINK_RELOAD_ACTION_MAX),
- [DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(DEVLINK_RELOAD_LIMITS_VALID_MASK),
- [DEVLINK_ATTR_PORT_FLAVOUR] = { .type = NLA_U16 },
- [DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16 },
- [DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32 },
- [DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32 },
- [DEVLINK_ATTR_RATE_TYPE] = { .type = NLA_U16 },
- [DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64 },
- [DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
- [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
- [DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
- [DEVLINK_ATTR_RATE_TX_PRIORITY] = { .type = NLA_U32 },
- [DEVLINK_ATTR_RATE_TX_WEIGHT] = { .type = NLA_U32 },
- [DEVLINK_ATTR_REGION_DIRECT] = { .type = NLA_FLAG },
-};
-
-static const struct genl_small_ops devlink_nl_ops[] = {
+const struct genl_small_ops devlink_nl_ops[56] = {
{
.cmd = DEVLINK_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_get_doit,
- .dumpit = devlink_nl_cmd_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_PORT_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_get_doit,
- .dumpit = devlink_nl_cmd_port_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
/* can be retrieved by unprivileged users */
},
@@ -9537,7 +6340,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_RATE_GET,
.doit = devlink_nl_cmd_rate_get_doit,
- .dumpit = devlink_nl_cmd_rate_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
/* can be retrieved by unprivileged users */
},
@@ -9585,7 +6388,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_LINECARD_GET,
.doit = devlink_nl_cmd_linecard_get_doit,
- .dumpit = devlink_nl_cmd_linecard_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
/* can be retrieved by unprivileged users */
},
@@ -9599,14 +6402,14 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_SB_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_get_doit,
- .dumpit = devlink_nl_cmd_sb_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_SB_POOL_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_pool_get_doit,
- .dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
@@ -9619,7 +6422,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_port_pool_get_doit,
- .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
/* can be retrieved by unprivileged users */
},
@@ -9634,7 +6437,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
- .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
/* can be retrieved by unprivileged users */
},
@@ -9715,7 +6518,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_PARAM_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_param_get_doit,
- .dumpit = devlink_nl_cmd_param_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
@@ -9743,7 +6546,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_REGION_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_region_get_doit,
- .dumpit = devlink_nl_cmd_region_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
.flags = GENL_ADMIN_PERM,
},
{
@@ -9769,14 +6572,14 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_INFO_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_info_get_doit,
- .dumpit = devlink_nl_cmd_info_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_get_doit,
- .dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
/* can be retrieved by unprivileged users */
},
@@ -9831,7 +6634,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_TRAP_GET,
.doit = devlink_nl_cmd_trap_get_doit,
- .dumpit = devlink_nl_cmd_trap_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
@@ -9842,7 +6645,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_TRAP_GROUP_GET,
.doit = devlink_nl_cmd_trap_group_get_doit,
- .dumpit = devlink_nl_cmd_trap_group_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
@@ -9853,7 +6656,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_TRAP_POLICER_GET,
.doit = devlink_nl_cmd_trap_policer_get_doit,
- .dumpit = devlink_nl_cmd_trap_policer_get_dumpit,
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
@@ -9864,7 +6667,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_SELFTESTS_GET,
.doit = devlink_nl_cmd_selftests_get_doit,
- .dumpit = devlink_nl_cmd_selftests_get_dumpit
+ .dumpit = devlink_nl_instance_iter_dumpit,
/* can be retrieved by unprivileged users */
},
{
@@ -9872,148 +6675,9 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.doit = devlink_nl_cmd_selftests_run,
.flags = GENL_ADMIN_PERM,
},
+ /* -- No new ops here! Use split ops going forward! -- */
};
-static struct genl_family devlink_nl_family __ro_after_init = {
- .name = DEVLINK_GENL_NAME,
- .version = DEVLINK_GENL_VERSION,
- .maxattr = DEVLINK_ATTR_MAX,
- .policy = devlink_nl_policy,
- .netnsok = true,
- .parallel_ops = true,
- .pre_doit = devlink_nl_pre_doit,
- .post_doit = devlink_nl_post_doit,
- .module = THIS_MODULE,
- .small_ops = devlink_nl_ops,
- .n_small_ops = ARRAY_SIZE(devlink_nl_ops),
- .resv_start_op = DEVLINK_CMD_SELFTESTS_RUN + 1,
- .mcgrps = devlink_nl_mcgrps,
- .n_mcgrps = ARRAY_SIZE(devlink_nl_mcgrps),
-};
-
-static bool devlink_reload_actions_valid(const struct devlink_ops *ops)
-{
- const struct devlink_reload_combination *comb;
- int i;
-
- if (!devlink_reload_supported(ops)) {
- if (WARN_ON(ops->reload_actions))
- return false;
- return true;
- }
-
- if (WARN_ON(!ops->reload_actions ||
- ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
- ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX)))
- return false;
-
- if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) ||
- ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX)))
- return false;
-
- for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) {
- comb = &devlink_reload_invalid_combinations[i];
- if (ops->reload_actions == BIT(comb->action) &&
- ops->reload_limits == BIT(comb->limit))
- return false;
- }
- return true;
-}
-
-/**
- * devlink_set_features - Set devlink supported features
- *
- * @devlink: devlink
- * @features: devlink support features
- *
- * This interface allows us to set reload ops separately from
- * the devlink_alloc.
- */
-void devlink_set_features(struct devlink *devlink, u64 features)
-{
- ASSERT_DEVLINK_NOT_REGISTERED(devlink);
-
- WARN_ON(features & DEVLINK_F_RELOAD &&
- !devlink_reload_supported(devlink->ops));
- devlink->features = features;
-}
-EXPORT_SYMBOL_GPL(devlink_set_features);
-
-static int devlink_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr);
-
-/**
- * devlink_alloc_ns - Allocate new devlink instance resources
- * in specific namespace
- *
- * @ops: ops
- * @priv_size: size of user private data
- * @net: net namespace
- * @dev: parent device
- *
- * Allocate new devlink instance resources, including devlink index
- * and name.
- */
-struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
- size_t priv_size, struct net *net,
- struct device *dev)
-{
- struct devlink *devlink;
- static u32 last_id;
- int ret;
-
- WARN_ON(!ops || !dev);
- if (!devlink_reload_actions_valid(ops))
- return NULL;
-
- devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
- if (!devlink)
- return NULL;
-
- ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
- &last_id, GFP_KERNEL);
- if (ret < 0)
- goto err_xa_alloc;
-
- devlink->netdevice_nb.notifier_call = devlink_netdevice_event;
- ret = register_netdevice_notifier(&devlink->netdevice_nb);
- if (ret)
- goto err_register_netdevice_notifier;
-
- devlink->dev = dev;
- devlink->ops = ops;
- xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
- xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
- write_pnet(&devlink->_net, net);
- INIT_LIST_HEAD(&devlink->rate_list);
- INIT_LIST_HEAD(&devlink->linecard_list);
- INIT_LIST_HEAD(&devlink->sb_list);
- INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
- INIT_LIST_HEAD(&devlink->resource_list);
- INIT_LIST_HEAD(&devlink->param_list);
- INIT_LIST_HEAD(&devlink->region_list);
- INIT_LIST_HEAD(&devlink->reporter_list);
- INIT_LIST_HEAD(&devlink->trap_list);
- INIT_LIST_HEAD(&devlink->trap_group_list);
- INIT_LIST_HEAD(&devlink->trap_policer_list);
- lockdep_register_key(&devlink->lock_key);
- mutex_init(&devlink->lock);
- lockdep_set_class(&devlink->lock, &devlink->lock_key);
- mutex_init(&devlink->reporters_lock);
- mutex_init(&devlink->linecards_lock);
- refcount_set(&devlink->refcount, 1);
- init_completion(&devlink->comp);
-
- return devlink;
-
-err_register_netdevice_notifier:
- xa_erase(&devlinks, devlink->index);
-err_xa_alloc:
- kfree(devlink);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(devlink_alloc_ns);
-
static void
devlink_trap_policer_notify(struct devlink *devlink,
const struct devlink_trap_policer_item *policer_item,
@@ -10026,7 +6690,7 @@ static void devlink_trap_notify(struct devlink *devlink,
const struct devlink_trap_item *trap_item,
enum devlink_command cmd);
-static void devlink_notify_register(struct devlink *devlink)
+void devlink_notify_register(struct devlink *devlink)
{
struct devlink_trap_policer_item *policer_item;
struct devlink_trap_group_item *group_item;
@@ -10037,6 +6701,7 @@ static void devlink_notify_register(struct devlink *devlink)
struct devlink_rate *rate_node;
struct devlink_region *region;
unsigned long port_index;
+ unsigned long param_id;
devlink_notify(devlink, DEVLINK_CMD_NEW);
list_for_each_entry(linecard, &devlink->linecard_list, list)
@@ -10062,12 +6727,12 @@ static void devlink_notify_register(struct devlink *devlink)
list_for_each_entry(region, &devlink->region_list, list)
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
- list_for_each_entry(param_item, &devlink->param_list, list)
+ xa_for_each(&devlink->params, param_id, param_item)
devlink_param_notify(devlink, 0, param_item,
DEVLINK_CMD_PARAM_NEW);
}
-static void devlink_notify_unregister(struct devlink *devlink)
+void devlink_notify_unregister(struct devlink *devlink)
{
struct devlink_trap_policer_item *policer_item;
struct devlink_trap_group_item *group_item;
@@ -10077,8 +6742,9 @@ static void devlink_notify_unregister(struct devlink *devlink)
struct devlink_rate *rate_node;
struct devlink_region *region;
unsigned long port_index;
+ unsigned long param_id;
- list_for_each_entry_reverse(param_item, &devlink->param_list, list)
+ xa_for_each(&devlink->params, param_id, param_item)
devlink_param_notify(devlink, 0, param_item,
DEVLINK_CMD_PARAM_DEL);
@@ -10104,78 +6770,6 @@ static void devlink_notify_unregister(struct devlink *devlink)
devlink_notify(devlink, DEVLINK_CMD_DEL);
}
-/**
- * devlink_register - Register devlink instance
- *
- * @devlink: devlink
- */
-void devlink_register(struct devlink *devlink)
-{
- ASSERT_DEVLINK_NOT_REGISTERED(devlink);
- /* Make sure that we are in .probe() routine */
-
- xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
- devlink_notify_register(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_register);
-
-/**
- * devlink_unregister - Unregister devlink instance
- *
- * @devlink: devlink
- */
-void devlink_unregister(struct devlink *devlink)
-{
- ASSERT_DEVLINK_REGISTERED(devlink);
- /* Make sure that we are in .remove() routine */
-
- xa_set_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
- devlink_put(devlink);
- wait_for_completion(&devlink->comp);
-
- devlink_notify_unregister(devlink);
- xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
- xa_clear_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
-}
-EXPORT_SYMBOL_GPL(devlink_unregister);
-
-/**
- * devlink_free - Free devlink instance resources
- *
- * @devlink: devlink
- */
-void devlink_free(struct devlink *devlink)
-{
- ASSERT_DEVLINK_NOT_REGISTERED(devlink);
-
- mutex_destroy(&devlink->linecards_lock);
- mutex_destroy(&devlink->reporters_lock);
- mutex_destroy(&devlink->lock);
- lockdep_unregister_key(&devlink->lock_key);
- WARN_ON(!list_empty(&devlink->trap_policer_list));
- WARN_ON(!list_empty(&devlink->trap_group_list));
- WARN_ON(!list_empty(&devlink->trap_list));
- WARN_ON(!list_empty(&devlink->reporter_list));
- WARN_ON(!list_empty(&devlink->region_list));
- WARN_ON(!list_empty(&devlink->param_list));
- WARN_ON(!list_empty(&devlink->resource_list));
- WARN_ON(!list_empty(&devlink->dpipe_table_list));
- WARN_ON(!list_empty(&devlink->sb_list));
- WARN_ON(!list_empty(&devlink->rate_list));
- WARN_ON(!list_empty(&devlink->linecard_list));
- WARN_ON(!xa_empty(&devlink->ports));
-
- xa_destroy(&devlink->snapshot_ids);
- xa_destroy(&devlink->ports);
-
- WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb));
-
- xa_erase(&devlinks, devlink->index);
-
- kfree(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_free);
-
static void devlink_port_type_warn(struct work_struct *work)
{
WARN(true, "Type was not set for devlink port.");
@@ -10275,12 +6869,9 @@ int devl_port_register(struct devlink *devlink,
devlink_port->index = port_index;
spin_lock_init(&devlink_port->type_lock);
INIT_LIST_HEAD(&devlink_port->reporter_list);
- mutex_init(&devlink_port->reporters_lock);
err = xa_insert(&devlink->ports, port_index, devlink_port, GFP_KERNEL);
- if (err) {
- mutex_destroy(&devlink_port->reporters_lock);
+ if (err)
return err;
- }
INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
devlink_port_type_warn_schedule(devlink_port);
@@ -10331,7 +6922,6 @@ void devl_port_unregister(struct devlink_port *devlink_port)
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
xa_erase(&devlink_port->devlink->ports, devlink_port->index);
WARN_ON(!list_empty(&devlink_port->reporter_list));
- mutex_destroy(&devlink_port->reporters_lock);
devlink_port->registered = false;
}
EXPORT_SYMBOL_GPL(devl_port_unregister);
@@ -10476,8 +7066,8 @@ void devlink_port_type_clear(struct devlink_port *devlink_port)
}
EXPORT_SYMBOL_GPL(devlink_port_type_clear);
-static int devlink_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+int devlink_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct devlink_port *devlink_port = netdev->devlink_port;
@@ -10911,7 +7501,7 @@ static void devlink_linecard_types_fini(struct devlink_linecard *linecard)
}
/**
- * devlink_linecard_create - Create devlink linecard
+ * devl_linecard_create - Create devlink linecard
*
* @devlink: devlink
* @linecard_index: driver-specific numerical identifier of the linecard
@@ -10924,8 +7514,8 @@ static void devlink_linecard_types_fini(struct devlink_linecard *linecard)
* Return: Line card structure or an ERR_PTR() encoded error code.
*/
struct devlink_linecard *
-devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
- const struct devlink_linecard_ops *ops, void *priv)
+devl_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv)
{
struct devlink_linecard *linecard;
int err;
@@ -10934,17 +7524,12 @@ devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
!ops->types_count || !ops->types_get))
return ERR_PTR(-EINVAL);
- mutex_lock(&devlink->linecards_lock);
- if (devlink_linecard_index_exists(devlink, linecard_index)) {
- mutex_unlock(&devlink->linecards_lock);
+ if (devlink_linecard_index_exists(devlink, linecard_index))
return ERR_PTR(-EEXIST);
- }
linecard = kzalloc(sizeof(*linecard), GFP_KERNEL);
- if (!linecard) {
- mutex_unlock(&devlink->linecards_lock);
+ if (!linecard)
return ERR_PTR(-ENOMEM);
- }
linecard->devlink = devlink;
linecard->index = linecard_index;
@@ -10957,35 +7542,29 @@ devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
if (err) {
mutex_destroy(&linecard->state_lock);
kfree(linecard);
- mutex_unlock(&devlink->linecards_lock);
return ERR_PTR(err);
}
list_add_tail(&linecard->list, &devlink->linecard_list);
- refcount_set(&linecard->refcount, 1);
- mutex_unlock(&devlink->linecards_lock);
devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
return linecard;
}
-EXPORT_SYMBOL_GPL(devlink_linecard_create);
+EXPORT_SYMBOL_GPL(devl_linecard_create);
/**
- * devlink_linecard_destroy - Destroy devlink linecard
+ * devl_linecard_destroy - Destroy devlink linecard
*
* @linecard: devlink linecard
*/
-void devlink_linecard_destroy(struct devlink_linecard *linecard)
+void devl_linecard_destroy(struct devlink_linecard *linecard)
{
- struct devlink *devlink = linecard->devlink;
-
devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
- mutex_lock(&devlink->linecards_lock);
list_del(&linecard->list);
devlink_linecard_types_fini(linecard);
- mutex_unlock(&devlink->linecards_lock);
- devlink_linecard_put(linecard);
+ mutex_destroy(&linecard->state_lock);
+ kfree(linecard);
}
-EXPORT_SYMBOL_GPL(devlink_linecard_destroy);
+EXPORT_SYMBOL_GPL(devl_linecard_destroy);
/**
* devlink_linecard_provision_set - Set provisioning on linecard
@@ -11588,8 +8167,53 @@ static int devlink_param_verify(const struct devlink_param *param)
return devlink_param_driver_verify(param);
}
+static int devlink_param_register(struct devlink *devlink,
+ const struct devlink_param *param)
+{
+ struct devlink_param_item *param_item;
+ int err;
+
+ WARN_ON(devlink_param_verify(param));
+ WARN_ON(devlink_param_find_by_name(&devlink->params, param->name));
+
+ if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
+ WARN_ON(param->get || param->set);
+ else
+ WARN_ON(!param->get || !param->set);
+
+ param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
+ if (!param_item)
+ return -ENOMEM;
+
+ param_item->param = param;
+
+ err = xa_insert(&devlink->params, param->id, param_item, GFP_KERNEL);
+ if (err)
+ goto err_xa_insert;
+
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
+ return 0;
+
+err_xa_insert:
+ kfree(param_item);
+ return err;
+}
+
+static void devlink_param_unregister(struct devlink *devlink,
+ const struct devlink_param *param)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(&devlink->params, param->id);
+ if (WARN_ON(!param_item))
+ return;
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_DEL);
+ xa_erase(&devlink->params, param->id);
+ kfree(param_item);
+}
+
/**
- * devlink_params_register - register configuration parameters
+ * devl_params_register - register configuration parameters
*
* @devlink: devlink
* @params: configuration parameters array
@@ -11597,14 +8221,14 @@ static int devlink_param_verify(const struct devlink_param *param)
*
* Register the configuration parameters supported by the driver.
*/
-int devlink_params_register(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
+int devl_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
{
const struct devlink_param *param = params;
int i, err;
- ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+ lockdep_assert_held(&devlink->lock);
for (i = 0; i < params_count; i++, param++) {
err = devlink_param_register(devlink, param);
@@ -11621,124 +8245,103 @@ rollback:
devlink_param_unregister(devlink, param);
return err;
}
+EXPORT_SYMBOL_GPL(devl_params_register);
+
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_params_register(devlink, params, params_count);
+ devl_unlock(devlink);
+ return err;
+}
EXPORT_SYMBOL_GPL(devlink_params_register);
/**
- * devlink_params_unregister - unregister configuration parameters
+ * devl_params_unregister - unregister configuration parameters
* @devlink: devlink
* @params: configuration parameters to unregister
* @params_count: number of parameters provided
*/
-void devlink_params_unregister(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
+void devl_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
{
const struct devlink_param *param = params;
int i;
- ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+ lockdep_assert_held(&devlink->lock);
for (i = 0; i < params_count; i++, param++)
devlink_param_unregister(devlink, param);
}
-EXPORT_SYMBOL_GPL(devlink_params_unregister);
-
-/**
- * devlink_param_register - register one configuration parameter
- *
- * @devlink: devlink
- * @param: one configuration parameter
- *
- * Register the configuration parameter supported by the driver.
- * Return: returns 0 on successful registration or error code otherwise.
- */
-int devlink_param_register(struct devlink *devlink,
- const struct devlink_param *param)
-{
- struct devlink_param_item *param_item;
-
- ASSERT_DEVLINK_NOT_REGISTERED(devlink);
-
- WARN_ON(devlink_param_verify(param));
- WARN_ON(devlink_param_find_by_name(&devlink->param_list, param->name));
-
- if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
- WARN_ON(param->get || param->set);
- else
- WARN_ON(!param->get || !param->set);
-
- param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
- if (!param_item)
- return -ENOMEM;
-
- param_item->param = param;
-
- list_add_tail(&param_item->list, &devlink->param_list);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_param_register);
+EXPORT_SYMBOL_GPL(devl_params_unregister);
-/**
- * devlink_param_unregister - unregister one configuration parameter
- * @devlink: devlink
- * @param: configuration parameter to unregister
- */
-void devlink_param_unregister(struct devlink *devlink,
- const struct devlink_param *param)
+void devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
{
- struct devlink_param_item *param_item;
-
- ASSERT_DEVLINK_NOT_REGISTERED(devlink);
-
- param_item =
- devlink_param_find_by_name(&devlink->param_list, param->name);
- WARN_ON(!param_item);
- list_del(&param_item->list);
- kfree(param_item);
+ devl_lock(devlink);
+ devl_params_unregister(devlink, params, params_count);
+ devl_unlock(devlink);
}
-EXPORT_SYMBOL_GPL(devlink_param_unregister);
+EXPORT_SYMBOL_GPL(devlink_params_unregister);
/**
- * devlink_param_driverinit_value_get - get configuration parameter
- * value for driver initializing
+ * devl_param_driverinit_value_get - get configuration parameter
+ * value for driver initializing
*
* @devlink: devlink
* @param_id: parameter ID
- * @init_val: value of parameter in driverinit configuration mode
+ * @val: pointer to store the value of parameter in driverinit
+ * configuration mode
*
* This function should be used by the driver to get driverinit
* configuration for initialization after reload command.
+ *
+ * Note that a lockless call of this function relies on the
+ * driver to maintain the following basic invariants:
+ * 1) Driver ensures a call to this function cannot race with
+ * registering/unregistering the parameter with the same parameter ID.
+ * 2) Driver ensures a call to this function cannot race with
+ * devl_param_driverinit_value_set() call with the same parameter ID.
+ * 3) Driver ensures a call to this function cannot race with
+ * reload operation.
+ * If the driver is not able to comply, it has to take the devlink->lock
+ * while calling this.
*/
-int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *init_val)
+int devl_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *val)
{
struct devlink_param_item *param_item;
- if (!devlink_reload_supported(devlink->ops))
+ if (WARN_ON(!devlink_reload_supported(devlink->ops)))
return -EOPNOTSUPP;
- param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+ param_item = devlink_param_find_by_id(&devlink->params, param_id);
if (!param_item)
return -EINVAL;
- if (!param_item->driverinit_value_valid ||
- !devlink_param_cmode_is_supported(param_item->param,
- DEVLINK_PARAM_CMODE_DRIVERINIT))
+ if (!param_item->driverinit_value_valid)
return -EOPNOTSUPP;
- if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
- strcpy(init_val->vstr, param_item->driverinit_value.vstr);
- else
- *init_val = param_item->driverinit_value;
+ if (WARN_ON(!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT)))
+ return -EOPNOTSUPP;
+
+ *val = param_item->driverinit_value;
return 0;
}
-EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
+EXPORT_SYMBOL_GPL(devl_param_driverinit_value_get);
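
The comment above lists the three race-freedom rules that make a lockless call safe. When a driver cannot guarantee them, the fallback is simply to take the instance lock around the call, roughly like this (hypothetical driver code; the parameter ID and helper are illustrative):

    union devlink_param_value val;
    int err;

    devl_lock(devlink);
    err = devl_param_driverinit_value_get(devlink, MY_DRV_PARAM_ID_FOO, &val);
    devl_unlock(devlink);
    if (!err)
    	my_drv_apply_foo(priv, val.vu32);	/* illustrative helper */
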
/**
- * devlink_param_driverinit_value_set - set value of configuration
- * parameter for driverinit
- * configuration mode
+ * devl_param_driverinit_value_set - set value of configuration
+ * parameter for driverinit
+ * configuration mode
*
* @devlink: devlink
* @param_id: parameter ID
@@ -11747,34 +8350,48 @@ EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
* This function should be used by the driver to set driverinit
* configuration mode default value.
*/
-int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val)
+void devl_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
{
struct devlink_param_item *param_item;
- ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+ devl_assert_locked(devlink);
- param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
- if (!param_item)
- return -EINVAL;
+ param_item = devlink_param_find_by_id(&devlink->params, param_id);
+ if (WARN_ON(!param_item))
+ return;
- if (!devlink_param_cmode_is_supported(param_item->param,
- DEVLINK_PARAM_CMODE_DRIVERINIT))
- return -EOPNOTSUPP;
+ if (WARN_ON(!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT)))
+ return;
- if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
- strcpy(param_item->driverinit_value.vstr, init_val.vstr);
- else
- param_item->driverinit_value = init_val;
+ param_item->driverinit_value = init_val;
param_item->driverinit_value_valid = true;
- return 0;
+
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devl_param_driverinit_value_set);
+
+void devlink_params_driverinit_load_new(struct devlink *devlink)
+{
+ struct devlink_param_item *param_item;
+ unsigned long param_id;
+
+ xa_for_each(&devlink->params, param_id, param_item) {
+ if (!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT) ||
+ !param_item->driverinit_value_new_valid)
+ continue;
+ param_item->driverinit_value = param_item->driverinit_value_new;
+ param_item->driverinit_value_valid = true;
+ param_item->driverinit_value_new_valid = false;
+ }
}
-EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
/**
- * devlink_param_value_changed - notify devlink on a parameter's value
- * change. Should be called by the driver
- * right after the change.
+ * devl_param_value_changed - notify devlink on a parameter's value
+ * change. Should be called by the driver
+ * right after the change.
*
* @devlink: devlink
* @param_id: parameter ID
@@ -11783,16 +8400,16 @@ EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
* change, excluding driverinit configuration mode.
* For driverinit configuration mode driver should use the function
*/
-void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
+void devl_param_value_changed(struct devlink *devlink, u32 param_id)
{
struct devlink_param_item *param_item;
- param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+ param_item = devlink_param_find_by_id(&devlink->params, param_id);
WARN_ON(!param_item);
devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
}
-EXPORT_SYMBOL_GPL(devlink_param_value_changed);
+EXPORT_SYMBOL_GPL(devl_param_value_changed);
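
devl_param_value_changed() itself carries no value; the PARAM_NEW notification it triggers reads the current value back through the parameter's .get op. A hedged sketch of the intended call site, for a runtime-cmode value that changed outside the .set op (driver and parameter names are illustrative):

    static void my_drv_handle_fw_param_change(struct my_drv *priv)
    {
    	/* firmware flipped the value behind our back; re-notify userspace */
    	devl_param_value_changed(priv->devlink, MY_DRV_PARAM_ID_FOO);
    }
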
/**
* devl_region_create - create a new address region
@@ -12878,76 +9495,6 @@ devl_trap_policers_unregister(struct devlink *devlink,
}
EXPORT_SYMBOL_GPL(devl_trap_policers_unregister);
-static void __devlink_compat_running_version(struct devlink *devlink,
- char *buf, size_t len)
-{
- struct devlink_info_req req = {};
- const struct nlattr *nlattr;
- struct sk_buff *msg;
- int rem, err;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- req.msg = msg;
- err = devlink->ops->info_get(devlink, &req, NULL);
- if (err)
- goto free_msg;
-
- nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) {
- const struct nlattr *kv;
- int rem_kv;
-
- if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING)
- continue;
-
- nla_for_each_nested(kv, nlattr, rem_kv) {
- if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
- continue;
-
- strlcat(buf, nla_data(kv), len);
- strlcat(buf, " ", len);
- }
- }
-free_msg:
- nlmsg_free(msg);
-}
-
-void devlink_compat_running_version(struct devlink *devlink,
- char *buf, size_t len)
-{
- if (!devlink->ops->info_get)
- return;
-
- devl_lock(devlink);
- __devlink_compat_running_version(devlink, buf, len);
- devl_unlock(devlink);
-}
-
-int devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
-{
- struct devlink_flash_update_params params = {};
- int ret;
-
- if (!devlink->ops->flash_update)
- return -EOPNOTSUPP;
-
- ret = request_firmware(&params.fw, file_name, devlink->dev);
- if (ret)
- return ret;
-
- devl_lock(devlink);
- devlink_flash_update_begin_notify(devlink);
- ret = devlink->ops->flash_update(devlink, &params, NULL);
- devlink_flash_update_end_notify(devlink);
- devl_unlock(devlink);
-
- release_firmware(params.fw);
-
- return ret;
-}
-
int devlink_compat_phys_port_name_get(struct net_device *dev,
char *name, size_t len)
{
@@ -12983,47 +9530,3 @@ int devlink_compat_switch_id_get(struct net_device *dev,
return 0;
}
-
-static void __net_exit devlink_pernet_pre_exit(struct net *net)
-{
- struct devlink *devlink;
- u32 actions_performed;
- unsigned long index;
- int err;
-
- /* In case network namespace is getting destroyed, reload
- * all devlink instances from this namespace into init_net.
- */
- devlinks_xa_for_each_registered_get(net, index, devlink) {
- WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
- mutex_lock(&devlink->lock);
- err = devlink_reload(devlink, &init_net,
- DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
- DEVLINK_RELOAD_LIMIT_UNSPEC,
- &actions_performed, NULL);
- mutex_unlock(&devlink->lock);
- if (err && err != -EOPNOTSUPP)
- pr_warn("Failed to reload devlink instance into init_net\n");
- devlink_put(devlink);
- }
-}
-
-static struct pernet_operations devlink_pernet_ops __net_initdata = {
- .pre_exit = devlink_pernet_pre_exit,
-};
-
-static int __init devlink_init(void)
-{
- int err;
-
- err = genl_register_family(&devlink_nl_family);
- if (err)
- goto out;
- err = register_pernet_subsys(&devlink_pernet_ops);
-
-out:
- WARN_ON(err);
- return err;
-}
-
-subsys_initcall(devlink_init);
diff --git a/net/devlink/netlink.c b/net/devlink/netlink.c
new file mode 100644
index 000000000000..7a332eb70f70
--- /dev/null
+++ b/net/devlink/netlink.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include <net/genetlink.h>
+#include <net/sock.h>
+
+#include "devl_internal.h"
+
+static const struct genl_multicast_group devlink_nl_mcgrps[] = {
+ [DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
+};
+
+static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_UNSPEC] = { .strict_start_type =
+ DEVLINK_ATTR_TRAP_POLICER_ID },
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
+ DEVLINK_PORT_TYPE_IB),
+ [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
+ DEVLINK_ESWITCH_MODE_SWITCHDEV),
+ [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
+ [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
+ [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] =
+ NLA_POLICY_BITFIELD32(DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS),
+ [DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
+ [DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+ DEVLINK_RELOAD_ACTION_MAX),
+ [DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(DEVLINK_RELOAD_LIMITS_VALID_MASK),
+ [DEVLINK_ATTR_PORT_FLAVOUR] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_RATE_TYPE] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
+ [DEVLINK_ATTR_RATE_TX_PRIORITY] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_RATE_TX_WEIGHT] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_REGION_DIRECT] = { .type = NLA_FLAG },
+};
+
+struct devlink *
+devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs)
+{
+ struct devlink *devlink;
+ unsigned long index;
+ char *busname;
+ char *devname;
+
+ if (!attrs[DEVLINK_ATTR_BUS_NAME] || !attrs[DEVLINK_ATTR_DEV_NAME])
+ return ERR_PTR(-EINVAL);
+
+ busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
+ devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
+
+ devlinks_xa_for_each_registered_get(net, index, devlink) {
+ devl_lock(devlink);
+ if (devl_is_registered(devlink) &&
+ strcmp(devlink->dev->bus->name, busname) == 0 &&
+ strcmp(dev_name(devlink->dev), devname) == 0)
+ return devlink;
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ }
+
+ return ERR_PTR(-ENODEV);
+}
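
On success this helper returns with a reference held and the instance lock taken, so the caller owns both until it unwinds. The expected caller pattern, mirroring the pre_doit/post_doit pair below:

    devlink = devlink_get_from_attrs_lock(net, attrs);
    if (IS_ERR(devlink))
    	return PTR_ERR(devlink);
    /* ... operate on the instance under devl_lock ... */
    devl_unlock(devlink);
    devlink_put(devlink);
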
+
+static int devlink_nl_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink_linecard *linecard;
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_get_from_attrs_lock(genl_info_net(info), info->attrs);
+ if (IS_ERR(devlink))
+ return PTR_ERR(devlink);
+
+ info->user_ptr[0] = devlink;
+ if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
+ devlink_port = devlink_port_get_from_info(devlink, info);
+ if (IS_ERR(devlink_port)) {
+ err = PTR_ERR(devlink_port);
+ goto unlock;
+ }
+ info->user_ptr[1] = devlink_port;
+ } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
+ devlink_port = devlink_port_get_from_info(devlink, info);
+ if (!IS_ERR(devlink_port))
+ info->user_ptr[1] = devlink_port;
+ } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE) {
+ struct devlink_rate *devlink_rate;
+
+ devlink_rate = devlink_rate_get_from_info(devlink, info);
+ if (IS_ERR(devlink_rate)) {
+ err = PTR_ERR(devlink_rate);
+ goto unlock;
+ }
+ info->user_ptr[1] = devlink_rate;
+ } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE_NODE) {
+ struct devlink_rate *rate_node;
+
+ rate_node = devlink_rate_node_get_from_info(devlink, info);
+ if (IS_ERR(rate_node)) {
+ err = PTR_ERR(rate_node);
+ goto unlock;
+ }
+ info->user_ptr[1] = rate_node;
+ } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
+ linecard = devlink_linecard_get_from_info(devlink, info);
+ if (IS_ERR(linecard)) {
+ err = PTR_ERR(linecard);
+ goto unlock;
+ }
+ info->user_ptr[1] = linecard;
+ }
+ return 0;
+
+unlock:
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ return err;
+}
+
+static void devlink_nl_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink;
+
+ devlink = info->user_ptr[0];
+ devl_unlock(devlink);
+ devlink_put(devlink);
+}
+
+static const struct devlink_cmd *devl_cmds[] = {
+ [DEVLINK_CMD_GET] = &devl_cmd_get,
+ [DEVLINK_CMD_PORT_GET] = &devl_cmd_port_get,
+ [DEVLINK_CMD_SB_GET] = &devl_cmd_sb_get,
+ [DEVLINK_CMD_SB_POOL_GET] = &devl_cmd_sb_pool_get,
+ [DEVLINK_CMD_SB_PORT_POOL_GET] = &devl_cmd_sb_port_pool_get,
+ [DEVLINK_CMD_SB_TC_POOL_BIND_GET] = &devl_cmd_sb_tc_pool_bind_get,
+ [DEVLINK_CMD_PARAM_GET] = &devl_cmd_param_get,
+ [DEVLINK_CMD_REGION_GET] = &devl_cmd_region_get,
+ [DEVLINK_CMD_INFO_GET] = &devl_cmd_info_get,
+ [DEVLINK_CMD_HEALTH_REPORTER_GET] = &devl_cmd_health_reporter_get,
+ [DEVLINK_CMD_TRAP_GET] = &devl_cmd_trap_get,
+ [DEVLINK_CMD_TRAP_GROUP_GET] = &devl_cmd_trap_group_get,
+ [DEVLINK_CMD_TRAP_POLICER_GET] = &devl_cmd_trap_policer_get,
+ [DEVLINK_CMD_RATE_GET] = &devl_cmd_rate_get,
+ [DEVLINK_CMD_LINECARD_GET] = &devl_cmd_linecard_get,
+ [DEVLINK_CMD_SELFTESTS_GET] = &devl_cmd_selftests_get,
+};
+
+int devlink_nl_instance_iter_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ const struct devlink_cmd *cmd;
+ struct devlink *devlink;
+ int err = 0;
+
+ cmd = devl_cmds[info->op.cmd];
+
+ while ((devlink = devlinks_xa_find_get(sock_net(msg->sk),
+ &state->instance))) {
+ devl_lock(devlink);
+
+ if (devl_is_registered(devlink))
+ err = cmd->dump_one(msg, devlink, cb);
+ else
+ err = 0;
+
+ devl_unlock(devlink);
+ devlink_put(devlink);
+
+ if (err)
+ break;
+
+ state->instance++;
+
+ /* restart sub-object walk for the next instance */
+ state->idx = 0;
+ }
+
+ if (err != -EMSGSIZE)
+ return err;
+ return msg->len;
+}
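
The dump state persists across netlink recvmsg() calls: state->instance names the devlink instance to resume from, and state->idx lets each per-command dump_one() resume its sub-object walk after an -EMSGSIZE. Returning msg->len on -EMSGSIZE tells generic netlink the buffer is full and the dump should continue. A hedged sketch of a dump_one() that cooperates with this loop (the command, list, and fill helper are hypothetical):

    static int devl_cmd_foo_dump_one(struct sk_buff *msg, struct devlink *devlink,
    				     struct netlink_callback *cb)
    {
    	struct devlink_nl_dump_state *state = devlink_dump_state(cb);
    	struct devlink_foo *foo;	/* hypothetical sub-object */
    	int idx = 0;

    	list_for_each_entry(foo, &devlink->foo_list, list) {
    		if (idx++ < state->idx)
    			continue;	/* already dumped in a previous message */
    		if (devlink_nl_foo_fill(msg, devlink, foo)) {
    			state->idx = idx - 1;	/* resume from this object */
    			return -EMSGSIZE;
    		}
    	}
    	return 0;
    }
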
+
+struct genl_family devlink_nl_family __ro_after_init = {
+ .name = DEVLINK_GENL_NAME,
+ .version = DEVLINK_GENL_VERSION,
+ .maxattr = DEVLINK_ATTR_MAX,
+ .policy = devlink_nl_policy,
+ .netnsok = true,
+ .parallel_ops = true,
+ .pre_doit = devlink_nl_pre_doit,
+ .post_doit = devlink_nl_post_doit,
+ .module = THIS_MODULE,
+ .small_ops = devlink_nl_ops,
+ .n_small_ops = ARRAY_SIZE(devlink_nl_ops),
+ .resv_start_op = DEVLINK_CMD_SELFTESTS_RUN + 1,
+ .mcgrps = devlink_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(devlink_nl_mcgrps),
+};
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 26d90140d271..22d3f16b0e6d 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -299,7 +299,7 @@ static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
struct net_device *dev = to_net_dev(d);
struct dsa_port *cpu_dp = dev->dsa_ptr;
- return sprintf(buf, "%s\n",
+ return sysfs_emit(buf, "%s\n",
dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}
@@ -464,9 +464,7 @@ int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
if (err) {
- if (extack && !extack->_msg)
- NL_SET_ERR_MSG_MOD(extack,
- "CPU port failed to join LAG");
+ NL_SET_ERR_MSG_WEAK_MOD(extack, "CPU port failed to join LAG");
goto out_master_teardown;
}
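
NL_SET_ERR_MSG_WEAK_MOD() only fills the extack when no lower layer has set a message yet, which is exactly what the removed open-coded check did, so the conversion is behaviour-preserving:

    /* roughly what the macro guards against (paraphrased, not the literal
     * macro body): don't overwrite a more specific message from below
     */
    if (extack && !extack->_msg)
    	NL_SET_ERR_MSG_MOD(extack, "CPU port failed to join LAG");
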
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index aab79c355224..6957971c2db2 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1117,6 +1117,40 @@ static void dsa_slave_net_selftest(struct net_device *ndev,
net_selftest(ndev, etest, buf);
}
+static int dsa_slave_get_mm(struct net_device *dev,
+ struct ethtool_mm_state *state)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->get_mm)
+ return -EOPNOTSUPP;
+
+ return ds->ops->get_mm(ds, dp->index, state);
+}
+
+static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->set_mm)
+ return -EOPNOTSUPP;
+
+ return ds->ops->set_mm(ds, dp->index, cfg, extack);
+}
+
+static void dsa_slave_get_mm_stats(struct net_device *dev,
+ struct ethtool_mm_stats *stats)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_mm_stats)
+ ds->ops->get_mm_stats(ds, dp->index, stats);
+}
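
These wrappers just delegate MAC Merge (IEEE 802.3-2018 clause 99) handling to the switch driver, which advertises support by implementing the ops. A minimal hedged sketch of the driver side (driver name, register helper, and reported values are illustrative):

    static int my_sw_get_mm(struct dsa_switch *ds, int port,
    			    struct ethtool_mm_state *state)
    {
    	struct my_sw_priv *priv = ds->priv;

    	state->tx_enabled = my_sw_mm_tx_enabled(priv, port);
    	state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
    	return 0;
    }

    static const struct dsa_switch_ops my_sw_ops = {
    	/* ... other ops ... */
    	.get_mm	= my_sw_get_mm,
    };
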
+
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -2205,6 +2239,9 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.set_rxnfc = dsa_slave_set_rxnfc,
.get_ts_info = dsa_slave_get_ts_info,
.self_test = dsa_slave_net_selftest,
+ .get_mm = dsa_slave_get_mm,
+ .set_mm = dsa_slave_set_mm,
+ .get_mm_stats = dsa_slave_get_mm_stats,
};
static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
@@ -2655,9 +2692,8 @@ static int dsa_slave_changeupper(struct net_device *dev,
if (!err)
dsa_bridge_mtu_normalization(dp);
if (err == -EOPNOTSUPP) {
- if (extack && !extack->_msg)
- NL_SET_ERR_MSG_MOD(extack,
- "Offloading not supported");
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
@@ -2670,8 +2706,8 @@ static int dsa_slave_changeupper(struct net_device *dev,
err = dsa_port_lag_join(dp, info->upper_dev,
info->upper_info, extack);
if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(info->info.extack,
- "Offloading not supported");
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
@@ -2683,8 +2719,8 @@ static int dsa_slave_changeupper(struct net_device *dev,
if (info->linking) {
err = dsa_port_hsr_join(dp, info->upper_dev);
if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(info->info.extack,
- "Offloading not supported");
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 080e5c369f5b..0eb1c7784c3d 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -4,8 +4,10 @@
* Copyright (c) 2017 Microchip Technology
*/
+#include <linux/dsa/ksz_common.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
+#include <linux/ptp_classify.h>
#include <net/dsa.h>
#include "tag.h"
@@ -16,9 +18,71 @@
#define LAN937X_NAME "lan937x"
/* Typically only one byte is used for tail tag. */
+#define KSZ_PTP_TAG_LEN 4
#define KSZ_EGRESS_TAG_LEN 1
#define KSZ_INGRESS_TAG_LEN 1
+#define KSZ_HWTS_EN 0
+
+struct ksz_tagger_private {
+ struct ksz_tagger_data data; /* Must be first */
+ unsigned long state;
+ struct kthread_worker *xmit_worker;
+};
+
+static struct ksz_tagger_private *
+ksz_tagger_private(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
+static void ksz_hwtstamp_set_state(struct dsa_switch *ds, bool on)
+{
+ struct ksz_tagger_private *priv = ksz_tagger_private(ds);
+
+ if (on)
+ set_bit(KSZ_HWTS_EN, &priv->state);
+ else
+ clear_bit(KSZ_HWTS_EN, &priv->state);
+}
+
+static void ksz_disconnect(struct dsa_switch *ds)
+{
+ struct ksz_tagger_private *priv = ds->tagger_data;
+
+ kthread_destroy_worker(priv->xmit_worker);
+ kfree(priv);
+ ds->tagger_data = NULL;
+}
+
+static int ksz_connect(struct dsa_switch *ds)
+{
+ struct ksz_tagger_data *tagger_data;
+ struct kthread_worker *xmit_worker;
+ struct ksz_tagger_private *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ ds->dst->index, ds->index);
+ if (IS_ERR(xmit_worker)) {
+ ret = PTR_ERR(xmit_worker);
+ kfree(priv);
+ return ret;
+ }
+
+ priv->xmit_worker = xmit_worker;
+ /* Export functions for switch driver use */
+ tagger_data = &priv->data;
+ tagger_data->hwtstamp_set_state = ksz_hwtstamp_set_state;
+ ds->tagger_data = priv;
+
+ return 0;
+}
+
static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
struct net_device *dev,
unsigned int port, unsigned int len)
@@ -92,17 +156,20 @@ DSA_TAG_DRIVER(ksz8795_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
/*
- * For Ingress (Host -> KSZ9477), 2 bytes are added before FCS.
+ * For Ingress (Host -> KSZ9477), 2/6 bytes are added before FCS.
* ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|tag1(1byte)|
+ * FCS(4bytes)
* ---------------------------------------------------------------------------
+ * ts : time stamp (present only if PTP is enabled in the hardware)
* tag0 : Prioritization (not used now)
* tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
*
- * For Egress (KSZ9477 -> Host), 1 byte is added before FCS.
+ * For Egress (KSZ9477 -> Host), 1/5 bytes are added before FCS.
* ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
+ * ts : time stamp (present only if bit 7 of tag0 is set)
* tag0 : zero-based value represents port
* (eg, 0x00=port1, 0x02=port3, 0x06=port7)
*/
@@ -111,12 +178,100 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
#define KSZ9477_PTP_TAG_LEN 4
#define KSZ9477_PTP_TAG_INDICATION 0x80
+#define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7)
#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
+static void ksz_rcv_timestamp(struct sk_buff *skb, u8 *tag)
+{
+ u8 *tstamp_raw = tag - KSZ_PTP_TAG_LEN;
+ ktime_t tstamp;
+
+ tstamp = ksz_decode_tstamp(get_unaligned_be32(tstamp_raw));
+ KSZ_SKB_CB(skb)->tstamp = tstamp;
+}
+
+/* The time stamp tag *needs* to be inserted if PTP is enabled in hardware,
+ * regardless of whether the frame is a PTP frame or not.
+ */
+static void ksz_xmit_timestamp(struct dsa_port *dp, struct sk_buff *skb)
+{
+ struct ksz_tagger_private *priv;
+ struct ptp_header *ptp_hdr;
+ unsigned int ptp_type;
+ u32 tstamp_raw = 0;
+ s64 correction;
+
+ priv = ksz_tagger_private(dp->ds);
+
+ if (!test_bit(KSZ_HWTS_EN, &priv->state))
+ return;
+
+ if (!KSZ_SKB_CB(skb)->update_correction)
+ goto output_tag;
+
+ ptp_type = KSZ_SKB_CB(skb)->ptp_type;
+
+ ptp_hdr = ptp_parse_header(skb, ptp_type);
+ if (!ptp_hdr)
+ goto output_tag;
+
+ correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
+
+ if (correction < 0) {
+ struct timespec64 ts;
+
+ ts = ns_to_timespec64(-correction >> 16);
+ tstamp_raw = ((ts.tv_sec & 3) << 30) | ts.tv_nsec;
+
+ /* Set correction field to 0 and update UDP checksum */
+ ptp_header_update_correction(skb, ptp_type, ptp_hdr, 0);
+ }
+
+output_tag:
+ put_unaligned_be32(tstamp_raw, skb_put(skb, KSZ_PTP_TAG_LEN));
+}
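
A worked example of the folding above, with illustrative numbers: correctionField is a signed quantity in units of 2^-16 ns, so for

    s64 correction = -((s64)2500000000 << 16);	/* -2.5 s, in scaled ns */

the code computes -correction >> 16 == 2500000000 ns, ns_to_timespec64() yields { .tv_sec = 2, .tv_nsec = 500000000 }, and the tag becomes

    tstamp_raw = ((2 & 3) << 30) | 500000000;

i.e. a 2-bit seconds field plus a 30-bit nanoseconds field packed into the 4-byte tail tag, while the frame's correction field is zeroed and the UDP checksum fixed up.
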
+
+/* Defer transmit if waiting for an egress time stamp is required. */
+static struct sk_buff *ksz_defer_xmit(struct dsa_port *dp, struct sk_buff *skb)
+{
+ struct ksz_tagger_data *tagger_data = ksz_tagger_data(dp->ds);
+ struct ksz_tagger_private *priv = ksz_tagger_private(dp->ds);
+ void (*xmit_work_fn)(struct kthread_work *work);
+ struct sk_buff *clone = KSZ_SKB_CB(skb)->clone;
+ struct ksz_deferred_xmit_work *xmit_work;
+ struct kthread_worker *xmit_worker;
+
+ if (!clone)
+ return skb; /* no deferred xmit for this packet */
+
+ xmit_work_fn = tagger_data->xmit_work_fn;
+ xmit_worker = priv->xmit_worker;
+
+ if (!xmit_work_fn || !xmit_worker)
+ return NULL;
+
+ xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+ if (!xmit_work)
+ return NULL;
+
+ kthread_init_work(&xmit_work->work, xmit_work_fn);
+ /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ * won't really free the packet.
+ */
+ xmit_work->dp = dp;
+ xmit_work->skb = skb_get(skb);
+
+ kthread_queue_work(xmit_worker, &xmit_work->work);
+
+ return NULL;
+}
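
The matching work function is supplied by the switch driver through tagger_data->xmit_work_fn. A hedged sketch of its shape (illustrative; the real driver additionally retrieves the egress time stamp for the skb clone before releasing the frame):

    static void my_deferred_xmit(struct kthread_work *work)
    {
    	struct ksz_deferred_xmit_work *xmit_work =
    		container_of(work, struct ksz_deferred_xmit_work, work);

    	/* ... wait for and record the egress time stamp ... */

    	/* hand the held skb reference to the master interface */
    	dsa_enqueue_skb(xmit_work->skb, xmit_work->skb->dev);
    	kfree(xmit_work);
    }
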
+
static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u8 prio = netdev_txq_to_tc(dev, queue_mapping);
struct dsa_port *dp = dsa_slave_to_port(dev);
__be16 *tag;
u8 *addr;
@@ -126,17 +281,21 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
return NULL;
/* Tag encoding */
+ ksz_xmit_timestamp(dp, skb);
+
tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
addr = skb_mac_header(skb);
val = BIT(dp->index);
+ val |= FIELD_PREP(KSZ9477_TAIL_TAG_PRIO, prio);
+
if (is_link_local_ether_addr(addr))
val |= KSZ9477_TAIL_TAG_OVERRIDE;
*tag = cpu_to_be16(val);
- return skb;
+ return ksz_defer_xmit(dp, skb);
}
static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
@@ -147,8 +306,10 @@ static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
unsigned int len = KSZ_EGRESS_TAG_LEN;
/* Extra 4-bytes PTP timestamp */
- if (tag[0] & KSZ9477_PTP_TAG_INDICATION)
- len += KSZ9477_PTP_TAG_LEN;
+ if (tag[0] & KSZ9477_PTP_TAG_INDICATION) {
+ ksz_rcv_timestamp(skb, tag);
+ len += KSZ_PTP_TAG_LEN;
+ }
return ksz_common_rcv(skb, dev, port, len);
}
@@ -158,18 +319,23 @@ static const struct dsa_device_ops ksz9477_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ9477,
.xmit = ksz9477_xmit,
.rcv = ksz9477_rcv,
- .needed_tailroom = KSZ9477_INGRESS_TAG_LEN,
+ .connect = ksz_connect,
+ .disconnect = ksz_disconnect,
+ .needed_tailroom = KSZ9477_INGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9477_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9477, KSZ9477_NAME);
+#define KSZ9893_TAIL_TAG_PRIO GENMASK(4, 3)
#define KSZ9893_TAIL_TAG_OVERRIDE BIT(5)
#define KSZ9893_TAIL_TAG_LOOKUP BIT(6)
static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u8 prio = netdev_txq_to_tc(dev, queue_mapping);
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *addr;
u8 *tag;
@@ -178,15 +344,19 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
return NULL;
/* Tag encoding */
+ ksz_xmit_timestamp(dp, skb);
+
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
addr = skb_mac_header(skb);
*tag = BIT(dp->index);
+ *tag |= FIELD_PREP(KSZ9893_TAIL_TAG_PRIO, prio);
+
if (is_link_local_ether_addr(addr))
*tag |= KSZ9893_TAIL_TAG_OVERRIDE;
- return skb;
+ return ksz_defer_xmit(dp, skb);
}
static const struct dsa_device_ops ksz9893_netdev_ops = {
@@ -194,23 +364,28 @@ static const struct dsa_device_ops ksz9893_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ9893,
.xmit = ksz9893_xmit,
.rcv = ksz9477_rcv,
- .needed_tailroom = KSZ_INGRESS_TAG_LEN,
+ .connect = ksz_connect,
+ .disconnect = ksz_disconnect,
+ .needed_tailroom = KSZ_INGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9893_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9893, KSZ9893_NAME);
-/* For xmit, 2 bytes are added before FCS.
+/* For xmit, 2/6 bytes are added before FCS.
* ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|tag1(1byte)|
+ * FCS(4bytes)
* ---------------------------------------------------------------------------
+ * ts : time stamp (present only if PTP is enabled in the hardware)
* tag0 : represents tag override, lookup and valid
* tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x80=port8)
*
- * For rcv, 1 byte is added before FCS.
+ * For rcv, 1/5 bytes are added before FCS.
* ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
+ * ts : time stamp (present only if bit 7 of tag0 is set)
* tag0 : zero-based value represents port
* (eg, 0x00=port1, 0x02=port3, 0x07=port8)
*/
@@ -219,11 +394,14 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9893, KSZ9893_NAME);
#define LAN937X_TAIL_TAG_BLOCKING_OVERRIDE BIT(11)
#define LAN937X_TAIL_TAG_LOOKUP BIT(12)
#define LAN937X_TAIL_TAG_VALID BIT(13)
+#define LAN937X_TAIL_TAG_PRIO GENMASK(10, 8)
#define LAN937X_TAIL_TAG_PORT_MASK 7
static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u8 prio = netdev_txq_to_tc(dev, queue_mapping);
struct dsa_port *dp = dsa_slave_to_port(dev);
const struct ethhdr *hdr = eth_hdr(skb);
__be16 *tag;
@@ -232,10 +410,14 @@ static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
+ ksz_xmit_timestamp(dp, skb);
+
tag = skb_put(skb, LAN937X_EGRESS_TAG_LEN);
val = BIT(dp->index);
+ val |= FIELD_PREP(LAN937X_TAIL_TAG_PRIO, prio);
+
if (is_link_local_ether_addr(hdr->h_dest))
val |= LAN937X_TAIL_TAG_BLOCKING_OVERRIDE;
@@ -244,7 +426,7 @@ static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
put_unaligned_be16(val, tag);
- return skb;
+ return ksz_defer_xmit(dp, skb);
}
static const struct dsa_device_ops lan937x_netdev_ops = {
@@ -252,7 +434,9 @@ static const struct dsa_device_ops lan937x_netdev_ops = {
.proto = DSA_TAG_PROTO_LAN937X,
.xmit = lan937x_xmit,
.rcv = ksz9477_rcv,
- .needed_tailroom = LAN937X_EGRESS_TAG_LEN,
+ .connect = ksz_connect,
+ .disconnect = ksz_disconnect,
+ .needed_tailroom = LAN937X_EGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(lan937x_netdev_ops);
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 228f13df2e18..504f954a1b28 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -7,5 +7,5 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o
ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o rss.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
- tunnels.o fec.o eeprom.o stats.o phc_vclocks.o module.o \
- pse-pd.o
+ tunnels.o fec.o eeprom.o stats.o phc_vclocks.o mm.o \
+ module.o pse-pd.o plca.o
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index c7e37130647e..61c40e889a4d 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -86,18 +86,6 @@ static int channels_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_channels_request_ops = {
- .request_cmd = ETHTOOL_MSG_CHANNELS_GET,
- .reply_cmd = ETHTOOL_MSG_CHANNELS_GET_REPLY,
- .hdr_attr = ETHTOOL_A_CHANNELS_HEADER,
- .req_info_size = sizeof(struct channels_req_info),
- .reply_data_size = sizeof(struct channels_reply_data),
-
- .prepare_data = channels_prepare_data,
- .reply_size = channels_reply_size,
- .fill_reply = channels_fill_reply,
-};
-
/* CHANNELS_SET */
const struct nla_policy ethnl_channels_set_policy[] = {
@@ -109,36 +97,28 @@ const struct nla_policy ethnl_channels_set_policy[] = {
[ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .type = NLA_U32 },
};
-int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_channels_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
+{
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ return ops->get_channels && ops->set_channels ? 1 : -EOPNOTSUPP;
+}
+
+static int
+ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info)
{
unsigned int from_channel, old_total, i;
bool mod = false, mod_combined = false;
+ struct net_device *dev = req_info->dev;
struct ethtool_channels channels = {};
- struct ethnl_req_info req_info = {};
struct nlattr **tb = info->attrs;
u32 err_attr, max_rxfh_in_use;
- const struct ethtool_ops *ops;
- struct net_device *dev;
u64 max_rxnfc_in_use;
int ret;
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_CHANNELS_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ops = dev->ethtool_ops;
- ret = -EOPNOTSUPP;
- if (!ops->get_channels || !ops->set_channels)
- goto out_dev;
-
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
- ops->get_channels(dev, &channels);
+ dev->ethtool_ops->get_channels(dev, &channels);
old_total = channels.combined_count +
max(channels.rx_count, channels.tx_count);
@@ -151,9 +131,8 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
ethnl_update_u32(&channels.combined_count,
tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT], &mod_combined);
mod |= mod_combined;
- ret = 0;
if (!mod)
- goto out_ops;
+ return 0;
/* ensure new channel counts are within limits */
if (channels.rx_count > channels.max_rx)
@@ -167,10 +146,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
else
err_attr = 0;
if (err_attr) {
- ret = -EINVAL;
NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr],
"requested channel count exceeds maximum");
- goto out_ops;
+ return -EINVAL;
}
/* ensure there is at least one RX and one TX channel */
@@ -183,10 +161,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
if (err_attr) {
if (mod_combined)
err_attr = ETHTOOL_A_CHANNELS_COMBINED_COUNT;
- ret = -EINVAL;
NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr],
"requested channel counts would result in no RX or TX channel being configured");
- goto out_ops;
+ return -EINVAL;
}
/* ensure the new Rx count fits within the configured Rx flow
@@ -198,14 +175,12 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
max_rxfh_in_use = 0;
if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) {
- ret = -EINVAL;
GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
- goto out_ops;
+ return -EINVAL;
}
if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) {
- ret = -EINVAL;
GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing ntuple filter settings");
- goto out_ops;
+ return -EINVAL;
}
/* Disabling channels, query zero-copy AF_XDP sockets */
@@ -213,21 +188,26 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
min(channels.rx_count, channels.tx_count);
for (i = from_channel; i < old_total; i++)
if (xsk_get_pool_from_qid(dev, i)) {
- ret = -EINVAL;
GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
- goto out_ops;
+ return -EINVAL;
}
ret = dev->ethtool_ops->set_channels(dev, &channels);
- if (ret < 0)
- goto out_ops;
- ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL);
-
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return ret < 0 ? ret : 1;
}
+
+const struct ethnl_request_ops ethnl_channels_request_ops = {
+ .request_cmd = ETHTOOL_MSG_CHANNELS_GET,
+ .reply_cmd = ETHTOOL_MSG_CHANNELS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_CHANNELS_HEADER,
+ .req_info_size = sizeof(struct channels_req_info),
+ .reply_data_size = sizeof(struct channels_reply_data),
+
+ .prepare_data = channels_prepare_data,
+ .reply_size = channels_reply_size,
+ .fill_reply = channels_fill_reply,
+
+ .set_validate = ethnl_set_channels_validate,
+ .set = ethnl_set_channels,
+ .set_ntf_cmd = ETHTOOL_MSG_CHANNELS_NTF,
+};
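
With this refactor the ethnl core owns the SET boilerplate: it parses the request header, takes rtnl, brackets the driver ops with ethnl_ops_begin()/ethnl_ops_complete(), and emits the notification itself; the per-command code only supplies ->set_validate and ->set. The contract, paraphrased (not the literal core code):

    ret = ops->set_validate(&req_info, info);	/* <0 error, 0 nothing to do, 1 proceed */
    if (ret == 1) {
    	rtnl_lock();
    	ret = ethnl_ops_begin(dev);
    	if (!ret) {
    		ret = ops->set(&req_info, info);	/* 1 means state changed */
    		ethnl_ops_complete(dev);
    	}
    	rtnl_unlock();
    	if (ret == 1)
    		ethtool_notify(dev, ops->set_ntf_cmd, NULL);
    }
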
diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c
index 487bdf345541..443e7e642c96 100644
--- a/net/ethtool/coalesce.c
+++ b/net/ethtool/coalesce.c
@@ -105,7 +105,10 @@ static int coalesce_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES_HIGH */
nla_total_size(sizeof(u32)) + /* _RATE_SAMPLE_INTERVAL */
nla_total_size(sizeof(u8)) + /* _USE_CQE_MODE_TX */
- nla_total_size(sizeof(u8)); /* _USE_CQE_MODE_RX */
+ nla_total_size(sizeof(u8)) + /* _USE_CQE_MODE_RX */
+ nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_BYTES */
+ nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_FRAMES */
+ nla_total_size(sizeof(u32)); /* _TX_AGGR_TIME_USECS */
}
static bool coalesce_put_u32(struct sk_buff *skb, u16 attr_type, u32 val,
@@ -180,24 +183,18 @@ static int coalesce_fill_reply(struct sk_buff *skb,
coalesce_put_bool(skb, ETHTOOL_A_COALESCE_USE_CQE_MODE_TX,
kcoal->use_cqe_mode_tx, supported) ||
coalesce_put_bool(skb, ETHTOOL_A_COALESCE_USE_CQE_MODE_RX,
- kcoal->use_cqe_mode_rx, supported))
+ kcoal->use_cqe_mode_rx, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES,
+ kcoal->tx_aggr_max_bytes, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES,
+ kcoal->tx_aggr_max_frames, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS,
+ kcoal->tx_aggr_time_usecs, supported))
return -EMSGSIZE;
return 0;
}
-const struct ethnl_request_ops ethnl_coalesce_request_ops = {
- .request_cmd = ETHTOOL_MSG_COALESCE_GET,
- .reply_cmd = ETHTOOL_MSG_COALESCE_GET_REPLY,
- .hdr_attr = ETHTOOL_A_COALESCE_HEADER,
- .req_info_size = sizeof(struct coalesce_req_info),
- .reply_data_size = sizeof(struct coalesce_reply_data),
-
- .prepare_data = coalesce_prepare_data,
- .reply_size = coalesce_reply_size,
- .fill_reply = coalesce_fill_reply,
-};
-
/* COALESCE_SET */
const struct nla_policy ethnl_coalesce_set_policy[] = {
@@ -227,51 +224,49 @@ const struct nla_policy ethnl_coalesce_set_policy[] = {
[ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL] = { .type = NLA_U32 },
[ETHTOOL_A_COALESCE_USE_CQE_MODE_TX] = NLA_POLICY_MAX(NLA_U8, 1),
[ETHTOOL_A_COALESCE_USE_CQE_MODE_RX] = NLA_POLICY_MAX(NLA_U8, 1),
+ [ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS] = { .type = NLA_U32 },
};
-int ethnl_set_coalesce(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_coalesce_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
{
- struct kernel_ethtool_coalesce kernel_coalesce = {};
- struct ethtool_coalesce coalesce = {};
- struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
struct nlattr **tb = info->attrs;
- const struct ethtool_ops *ops;
- struct net_device *dev;
u32 supported_params;
- bool mod = false;
- int ret;
u16 a;
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_COALESCE_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ops = dev->ethtool_ops;
- ret = -EOPNOTSUPP;
if (!ops->get_coalesce || !ops->set_coalesce)
- goto out_dev;
+ return -EOPNOTSUPP;
/* make sure that only supported parameters are present */
supported_params = ops->supported_coalesce_params;
for (a = ETHTOOL_A_COALESCE_RX_USECS; a < __ETHTOOL_A_COALESCE_CNT; a++)
if (tb[a] && !(supported_params & attr_to_mask(a))) {
- ret = -EINVAL;
NL_SET_ERR_MSG_ATTR(info->extack, tb[a],
"cannot modify an unsupported parameter");
- goto out_dev;
+ return -EINVAL;
}
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
- ret = ops->get_coalesce(dev, &coalesce, &kernel_coalesce,
- info->extack);
+ return 1;
+}
+
+static int
+ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct kernel_ethtool_coalesce kernel_coalesce = {};
+ struct net_device *dev = req_info->dev;
+ struct ethtool_coalesce coalesce = {};
+ struct nlattr **tb = info->attrs;
+ bool mod = false;
+ int ret;
+
+ ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce,
+ info->extack);
if (ret < 0)
- goto out_ops;
+ return ret;
ethnl_update_u32(&coalesce.rx_coalesce_usecs,
tb[ETHTOOL_A_COALESCE_RX_USECS], &mod);
@@ -321,21 +316,32 @@ int ethnl_set_coalesce(struct sk_buff *skb, struct genl_info *info)
tb[ETHTOOL_A_COALESCE_USE_CQE_MODE_TX], &mod);
ethnl_update_u8(&kernel_coalesce.use_cqe_mode_rx,
tb[ETHTOOL_A_COALESCE_USE_CQE_MODE_RX], &mod);
- ret = 0;
+ ethnl_update_u32(&kernel_coalesce.tx_aggr_max_bytes,
+ tb[ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES], &mod);
+ ethnl_update_u32(&kernel_coalesce.tx_aggr_max_frames,
+ tb[ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES], &mod);
+ ethnl_update_u32(&kernel_coalesce.tx_aggr_time_usecs,
+ tb[ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS], &mod);
if (!mod)
- goto out_ops;
+ return 0;
ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce,
info->extack);
- if (ret < 0)
- goto out_ops;
- ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF, NULL);
-
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return ret < 0 ? ret : 1;
}
+
+const struct ethnl_request_ops ethnl_coalesce_request_ops = {
+ .request_cmd = ETHTOOL_MSG_COALESCE_GET,
+ .reply_cmd = ETHTOOL_MSG_COALESCE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_COALESCE_HEADER,
+ .req_info_size = sizeof(struct coalesce_req_info),
+ .reply_data_size = sizeof(struct coalesce_reply_data),
+
+ .prepare_data = coalesce_prepare_data,
+ .reply_size = coalesce_reply_size,
+ .fill_reply = coalesce_fill_reply,
+
+ .set_validate = ethnl_set_coalesce_validate,
+ .set = ethnl_set_coalesce,
+ .set_ntf_cmd = ETHTOOL_MSG_COALESCE_NTF,
+};
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 6f399afc2ff2..5fb19050991e 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -208,6 +208,9 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(800000, DR8_2, Full),
__DEFINE_LINK_MODE_NAME(800000, SR8, Full),
__DEFINE_LINK_MODE_NAME(800000, VR8, Full),
+ __DEFINE_LINK_MODE_NAME(10, T1S, Full),
+ __DEFINE_LINK_MODE_NAME(10, T1S, Half),
+ __DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -244,6 +247,8 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
#define __LINK_MODE_LANES_X 1
#define __LINK_MODE_LANES_FX 1
#define __LINK_MODE_LANES_T1L 1
+#define __LINK_MODE_LANES_T1S 1
+#define __LINK_MODE_LANES_T1S_P2MP 1
#define __LINK_MODE_LANES_VR8 8
#define __LINK_MODE_LANES_DR8_2 8
@@ -366,6 +371,9 @@ const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(800000, DR8_2, Full),
__DEFINE_LINK_MODE_PARAMS(800000, SR8, Full),
__DEFINE_LINK_MODE_PARAMS(800000, VR8, Full),
+ __DEFINE_LINK_MODE_PARAMS(10, T1S, Full),
+ __DEFINE_LINK_MODE_PARAMS(10, T1S, Half),
+ __DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half),
};
static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS);
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index b1b9db810eca..28b8aaaf9bcb 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -54,4 +54,6 @@ int ethtool_get_module_info_call(struct net_device *dev,
int ethtool_get_module_eeprom_call(struct net_device *dev,
struct ethtool_eeprom *ee, u8 *data);
+bool __ethtool_dev_mm_supported(struct net_device *dev);
+
#endif /* _ETHTOOL_COMMON_H */
diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c
index d73888c7d19c..e4369769817e 100644
--- a/net/ethtool/debug.c
+++ b/net/ethtool/debug.c
@@ -63,18 +63,6 @@ static int debug_fill_reply(struct sk_buff *skb,
netif_msg_class_names, compact);
}
-const struct ethnl_request_ops ethnl_debug_request_ops = {
- .request_cmd = ETHTOOL_MSG_DEBUG_GET,
- .reply_cmd = ETHTOOL_MSG_DEBUG_GET_REPLY,
- .hdr_attr = ETHTOOL_A_DEBUG_HEADER,
- .req_info_size = sizeof(struct debug_req_info),
- .reply_data_size = sizeof(struct debug_reply_data),
-
- .prepare_data = debug_prepare_data,
- .reply_size = debug_reply_size,
- .fill_reply = debug_fill_reply,
-};
-
/* DEBUG_SET */
const struct nla_policy ethnl_debug_set_policy[] = {
@@ -83,46 +71,47 @@ const struct nla_policy ethnl_debug_set_policy[] = {
[ETHTOOL_A_DEBUG_MSGMASK] = { .type = NLA_NESTED },
};
-int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_debug_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
{
- struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ return ops->get_msglevel && ops->set_msglevel ? 1 : -EOPNOTSUPP;
+}
+
+static int
+ethnl_set_debug(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct net_device *dev;
bool mod = false;
u32 msg_mask;
int ret;
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_DEBUG_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ret = -EOPNOTSUPP;
- if (!dev->ethtool_ops->get_msglevel || !dev->ethtool_ops->set_msglevel)
- goto out_dev;
-
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
-
msg_mask = dev->ethtool_ops->get_msglevel(dev);
ret = ethnl_update_bitset32(&msg_mask, NETIF_MSG_CLASS_COUNT,
tb[ETHTOOL_A_DEBUG_MSGMASK],
netif_msg_class_names, info->extack, &mod);
if (ret < 0 || !mod)
- goto out_ops;
+ return ret;
dev->ethtool_ops->set_msglevel(dev, msg_mask);
- ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF, NULL);
-
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return 1;
}
+
+const struct ethnl_request_ops ethnl_debug_request_ops = {
+ .request_cmd = ETHTOOL_MSG_DEBUG_GET,
+ .reply_cmd = ETHTOOL_MSG_DEBUG_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_DEBUG_HEADER,
+ .req_info_size = sizeof(struct debug_req_info),
+ .reply_data_size = sizeof(struct debug_reply_data),
+
+ .prepare_data = debug_prepare_data,
+ .reply_size = debug_reply_size,
+ .fill_reply = debug_fill_reply,
+
+ .set_validate = ethnl_set_debug_validate,
+ .set = ethnl_set_debug,
+ .set_ntf_cmd = ETHTOOL_MSG_DEBUG_NTF,
+};
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index 45c42b2d5f17..42104bcb0e47 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -108,18 +108,6 @@ static int eee_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_eee_request_ops = {
- .request_cmd = ETHTOOL_MSG_EEE_GET,
- .reply_cmd = ETHTOOL_MSG_EEE_GET_REPLY,
- .hdr_attr = ETHTOOL_A_EEE_HEADER,
- .req_info_size = sizeof(struct eee_req_info),
- .reply_data_size = sizeof(struct eee_reply_data),
-
- .prepare_data = eee_prepare_data,
- .reply_size = eee_reply_size,
- .fill_reply = eee_fill_reply,
-};
-
/* EEE_SET */
const struct nla_policy ethnl_eee_set_policy[] = {
@@ -131,60 +119,56 @@ const struct nla_policy ethnl_eee_set_policy[] = {
[ETHTOOL_A_EEE_TX_LPI_TIMER] = { .type = NLA_U32 },
};
-int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_eee_validate(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ return ops->get_eee && ops->set_eee ? 1 : -EOPNOTSUPP;
+}
+
+static int
+ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- const struct ethtool_ops *ops;
struct ethtool_eee eee = {};
- struct net_device *dev;
bool mod = false;
int ret;
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_EEE_HEADER],
- genl_info_net(info), info->extack,
- true);
+ ret = dev->ethtool_ops->get_eee(dev, &eee);
if (ret < 0)
return ret;
- dev = req_info.dev;
- ops = dev->ethtool_ops;
- ret = -EOPNOTSUPP;
- if (!ops->get_eee || !ops->set_eee)
- goto out_dev;
-
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
- ret = ops->get_eee(dev, &eee);
- if (ret < 0)
- goto out_ops;
ret = ethnl_update_bitset32(&eee.advertised, EEE_MODES_COUNT,
tb[ETHTOOL_A_EEE_MODES_OURS],
link_mode_names, info->extack, &mod);
if (ret < 0)
- goto out_ops;
+ return ret;
ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
ethnl_update_bool32(&eee.tx_lpi_enabled,
tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
&mod);
- ret = 0;
if (!mod)
- goto out_ops;
+ return 0;
ret = dev->ethtool_ops->set_eee(dev, &eee);
- if (ret < 0)
- goto out_ops;
- ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
-
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return ret < 0 ? ret : 1;
}
+
+const struct ethnl_request_ops ethnl_eee_request_ops = {
+ .request_cmd = ETHTOOL_MSG_EEE_GET,
+ .reply_cmd = ETHTOOL_MSG_EEE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_EEE_HEADER,
+ .req_info_size = sizeof(struct eee_req_info),
+ .reply_data_size = sizeof(struct eee_reply_data),
+
+ .prepare_data = eee_prepare_data,
+ .reply_size = eee_reply_size,
+ .fill_reply = eee_fill_reply,
+
+ .set_validate = ethnl_set_eee_validate,
+ .set = ethnl_set_eee,
+ .set_ntf_cmd = ETHTOOL_MSG_EEE_NTF,
+};
diff --git a/net/ethtool/fec.c b/net/ethtool/fec.c
index 9f5a134e2e01..0d9a3d153170 100644
--- a/net/ethtool/fec.c
+++ b/net/ethtool/fec.c
@@ -217,18 +217,6 @@ static int fec_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_fec_request_ops = {
- .request_cmd = ETHTOOL_MSG_FEC_GET,
- .reply_cmd = ETHTOOL_MSG_FEC_GET_REPLY,
- .hdr_attr = ETHTOOL_A_FEC_HEADER,
- .req_info_size = sizeof(struct fec_req_info),
- .reply_data_size = sizeof(struct fec_reply_data),
-
- .prepare_data = fec_prepare_data,
- .reply_size = fec_reply_size,
- .fill_reply = fec_fill_reply,
-};
-
/* FEC_SET */
const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1] = {
@@ -237,36 +225,28 @@ const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1] = {
[ETHTOOL_A_FEC_AUTO] = NLA_POLICY_MAX(NLA_U8, 1),
};
-int ethnl_set_fec(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_fec_validate(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ return ops->get_fecparam && ops->set_fecparam ? 1 : -EOPNOTSUPP;
+}
+
+static int
+ethnl_set_fec(struct ethnl_req_info *req_info, struct genl_info *info)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(fec_link_modes) = {};
- struct ethnl_req_info req_info = {};
+ struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
struct ethtool_fecparam fec = {};
- const struct ethtool_ops *ops;
- struct net_device *dev;
bool mod = false;
u8 fec_auto;
int ret;
- ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_FEC_HEADER],
- genl_info_net(info), info->extack,
- true);
+ ret = dev->ethtool_ops->get_fecparam(dev, &fec);
if (ret < 0)
return ret;
- dev = req_info.dev;
- ops = dev->ethtool_ops;
- ret = -EOPNOTSUPP;
- if (!ops->get_fecparam || !ops->set_fecparam)
- goto out_dev;
-
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
- ret = ops->get_fecparam(dev, &fec);
- if (ret < 0)
- goto out_ops;
ethtool_fec_to_link_modes(fec.fec, fec_link_modes, &fec_auto);
@@ -275,36 +255,39 @@ int ethnl_set_fec(struct sk_buff *skb, struct genl_info *info)
tb[ETHTOOL_A_FEC_MODES],
link_mode_names, info->extack, &mod);
if (ret < 0)
- goto out_ops;
+ return ret;
ethnl_update_u8(&fec_auto, tb[ETHTOOL_A_FEC_AUTO], &mod);
-
- ret = 0;
if (!mod)
- goto out_ops;
+ return 0;
ret = ethtool_link_modes_to_fecparam(&fec, fec_link_modes, fec_auto);
if (ret) {
NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_FEC_MODES],
"invalid FEC modes requested");
- goto out_ops;
+ return ret;
}
if (!fec.fec) {
- ret = -EINVAL;
NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_FEC_MODES],
"no FEC modes set");
- goto out_ops;
+ return -EINVAL;
}
ret = dev->ethtool_ops->set_fecparam(dev, &fec);
- if (ret < 0)
- goto out_ops;
- ethtool_notify(dev, ETHTOOL_MSG_FEC_NTF, NULL);
-
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return ret < 0 ? ret : 1;
}
+
+const struct ethnl_request_ops ethnl_fec_request_ops = {
+ .request_cmd = ETHTOOL_MSG_FEC_GET,
+ .reply_cmd = ETHTOOL_MSG_FEC_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_FEC_HEADER,
+ .req_info_size = sizeof(struct fec_req_info),
+ .reply_data_size = sizeof(struct fec_reply_data),
+
+ .prepare_data = fec_prepare_data,
+ .reply_size = fec_reply_size,
+ .fill_reply = fec_fill_reply,
+
+ .set_validate = ethnl_set_fec_validate,
+ .set = ethnl_set_fec,
+ .set_ntf_cmd = ETHTOOL_MSG_FEC_NTF,
+};
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index efa0f7f48836..310dfe63292a 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -73,18 +73,6 @@ static int linkinfo_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_linkinfo_request_ops = {
- .request_cmd = ETHTOOL_MSG_LINKINFO_GET,
- .reply_cmd = ETHTOOL_MSG_LINKINFO_GET_REPLY,
- .hdr_attr = ETHTOOL_A_LINKINFO_HEADER,
- .req_info_size = sizeof(struct linkinfo_req_info),
- .reply_data_size = sizeof(struct linkinfo_reply_data),
-
- .prepare_data = linkinfo_prepare_data,
- .reply_size = linkinfo_reply_size,
- .fill_reply = linkinfo_fill_reply,
-};
-
/* LINKINFO_SET */
const struct nla_policy ethnl_linkinfo_set_policy[] = {
@@ -95,37 +83,31 @@ const struct nla_policy ethnl_linkinfo_set_policy[] = {
[ETHTOOL_A_LINKINFO_TP_MDIX_CTRL] = { .type = NLA_U8 },
};
-int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_linkinfo_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
+{
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ if (!ops->get_link_ksettings || !ops->set_link_ksettings)
+ return -EOPNOTSUPP;
+ return 1;
+}
+
+static int
+ethnl_set_linkinfo(struct ethnl_req_info *req_info, struct genl_info *info)
{
struct ethtool_link_ksettings ksettings = {};
struct ethtool_link_settings *lsettings;
- struct ethnl_req_info req_info = {};
+ struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct net_device *dev;
bool mod = false;
int ret;
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_LINKINFO_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ret = -EOPNOTSUPP;
- if (!dev->ethtool_ops->get_link_ksettings ||
- !dev->ethtool_ops->set_link_ksettings)
- goto out_dev;
-
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
-
ret = __ethtool_get_link_ksettings(dev, &ksettings);
if (ret < 0) {
GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
- goto out_ops;
+ return ret;
}
lsettings = &ksettings.base;
@@ -134,21 +116,30 @@ int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
&mod);
ethnl_update_u8(&lsettings->eth_tp_mdix_ctrl,
tb[ETHTOOL_A_LINKINFO_TP_MDIX_CTRL], &mod);
- ret = 0;
if (!mod)
- goto out_ops;
+ return 0;
ret = dev->ethtool_ops->set_link_ksettings(dev, &ksettings);
- if (ret < 0)
+ if (ret < 0) {
GENL_SET_ERR_MSG(info, "link settings update failed");
- else
- ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);
+ return ret;
+ }
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return 1;
}
+
+const struct ethnl_request_ops ethnl_linkinfo_request_ops = {
+ .request_cmd = ETHTOOL_MSG_LINKINFO_GET,
+ .reply_cmd = ETHTOOL_MSG_LINKINFO_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_LINKINFO_HEADER,
+ .req_info_size = sizeof(struct linkinfo_req_info),
+ .reply_data_size = sizeof(struct linkinfo_reply_data),
+
+ .prepare_data = linkinfo_prepare_data,
+ .reply_size = linkinfo_reply_size,
+ .fill_reply = linkinfo_fill_reply,
+
+ .set_validate = ethnl_set_linkinfo_validate,
+ .set = ethnl_set_linkinfo,
+ .set_ntf_cmd = ETHTOOL_MSG_LINKINFO_NTF,
+};
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index 126e06c713a3..fab66c169b9f 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -151,18 +151,6 @@ static int linkmodes_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_linkmodes_request_ops = {
- .request_cmd = ETHTOOL_MSG_LINKMODES_GET,
- .reply_cmd = ETHTOOL_MSG_LINKMODES_GET_REPLY,
- .hdr_attr = ETHTOOL_A_LINKMODES_HEADER,
- .req_info_size = sizeof(struct linkmodes_req_info),
- .reply_data_size = sizeof(struct linkmodes_reply_data),
-
- .prepare_data = linkmodes_prepare_data,
- .reply_size = linkmodes_reply_size,
- .fill_reply = linkmodes_fill_reply,
-};
-
/* LINKMODES_SET */
const struct nla_policy ethnl_linkmodes_set_policy[] = {
@@ -310,59 +298,64 @@ static int ethnl_update_linkmodes(struct genl_info *info, struct nlattr **tb,
return 0;
}
-int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_linkmodes_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
{
- struct ethtool_link_ksettings ksettings = {};
- struct ethnl_req_info req_info = {};
- struct nlattr **tb = info->attrs;
- struct net_device *dev;
- bool mod = false;
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
int ret;
- ret = ethnl_check_linkmodes(info, tb);
+ ret = ethnl_check_linkmodes(info, info->attrs);
if (ret < 0)
return ret;
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_LINKMODES_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ret = -EOPNOTSUPP;
- if (!dev->ethtool_ops->get_link_ksettings ||
- !dev->ethtool_ops->set_link_ksettings)
- goto out_dev;
+ if (!ops->get_link_ksettings || !ops->set_link_ksettings)
+ return -EOPNOTSUPP;
+ return 1;
+}
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
+static int
+ethnl_set_linkmodes(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct ethtool_link_ksettings ksettings = {};
+ struct net_device *dev = req_info->dev;
+ struct nlattr **tb = info->attrs;
+ bool mod = false;
+ int ret;
ret = __ethtool_get_link_ksettings(dev, &ksettings);
if (ret < 0) {
GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
- goto out_ops;
+ return ret;
}
ret = ethnl_update_linkmodes(info, tb, &ksettings, &mod, dev);
if (ret < 0)
- goto out_ops;
+ return ret;
+ if (!mod)
+ return 0;
- if (mod) {
- ret = dev->ethtool_ops->set_link_ksettings(dev, &ksettings);
- if (ret < 0)
- GENL_SET_ERR_MSG(info, "link settings update failed");
- else
- ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL);
+ ret = dev->ethtool_ops->set_link_ksettings(dev, &ksettings);
+ if (ret < 0) {
+ GENL_SET_ERR_MSG(info, "link settings update failed");
+ return ret;
}
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return 1;
}
+
+const struct ethnl_request_ops ethnl_linkmodes_request_ops = {
+ .request_cmd = ETHTOOL_MSG_LINKMODES_GET,
+ .reply_cmd = ETHTOOL_MSG_LINKMODES_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_LINKMODES_HEADER,
+ .req_info_size = sizeof(struct linkmodes_req_info),
+ .reply_data_size = sizeof(struct linkmodes_reply_data),
+
+ .prepare_data = linkmodes_prepare_data,
+ .reply_size = linkmodes_reply_size,
+ .fill_reply = linkmodes_fill_reply,
+
+ .set_validate = ethnl_set_linkmodes_validate,
+ .set = ethnl_set_linkmodes,
+ .set_ntf_cmd = ETHTOOL_MSG_LINKMODES_NTF,
+};
diff --git a/net/ethtool/mm.c b/net/ethtool/mm.c
new file mode 100644
index 000000000000..fce3cc2734f9
--- /dev/null
+++ b/net/ethtool/mm.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022-2023 NXP
+ */
+#include "common.h"
+#include "netlink.h"
+
+struct mm_req_info {
+ struct ethnl_req_info base;
+};
+
+struct mm_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_mm_state state;
+ struct ethtool_mm_stats stats;
+};
+
+#define MM_REPDATA(__reply_base) \
+ container_of(__reply_base, struct mm_reply_data, base)
+
+#define ETHTOOL_MM_STAT_CNT \
+ (__ETHTOOL_A_MM_STAT_CNT - (ETHTOOL_A_MM_STAT_PAD + 1))
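A note on the arithmetic, assuming the conventional uAPI enum layout (UNSPEC = 0, PAD = 1, the counters, then the __..._CNT sentinel):

/* UNSPEC = 0 and PAD = 1 precede six counters, so the sentinel
 * __ETHTOOL_A_MM_STAT_CNT == 8 and the macro yields
 *      8 - (1 + 1) == 6
 * one entry per mm_put_stat() call in mm_put_stats() below.
 */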
+
+const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1] = {
+ [ETHTOOL_A_MM_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_stats),
+};
+
+static int mm_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct mm_reply_data *data = MM_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ const struct ethtool_ops *ops;
+ int ret;
+
+ ops = dev->ethtool_ops;
+
+ if (!ops->get_mm)
+ return -EOPNOTSUPP;
+
+ ethtool_stats_init((u64 *)&data->stats,
+ sizeof(data->stats) / sizeof(u64));
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = ops->get_mm(dev, &data->state);
+ if (ret)
+ goto out_complete;
+
+ if (ops->get_mm_stats && (req_base->flags & ETHTOOL_FLAG_STATS))
+ ops->get_mm_stats(dev, &data->stats);
+
+out_complete:
+ ethnl_ops_complete(dev);
+
+ return ret;
+}
+
+static int mm_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ int len = 0;
+
+ len += nla_total_size(sizeof(u8)); /* _MM_PMAC_ENABLED */
+ len += nla_total_size(sizeof(u8)); /* _MM_TX_ENABLED */
+ len += nla_total_size(sizeof(u8)); /* _MM_TX_ACTIVE */
+ len += nla_total_size(sizeof(u8)); /* _MM_VERIFY_ENABLED */
+ len += nla_total_size(sizeof(u8)); /* _MM_VERIFY_STATUS */
+ len += nla_total_size(sizeof(u32)); /* _MM_VERIFY_TIME */
+ len += nla_total_size(sizeof(u32)); /* _MM_MAX_VERIFY_TIME */
+ len += nla_total_size(sizeof(u32)); /* _MM_TX_MIN_FRAG_SIZE */
+ len += nla_total_size(sizeof(u32)); /* _MM_RX_MIN_FRAG_SIZE */
+
+ if (req_base->flags & ETHTOOL_FLAG_STATS)
+ len += nla_total_size(0) + /* _MM_STATS */
+ nla_total_size_64bit(sizeof(u64)) * ETHTOOL_MM_STAT_CNT;
+
+ return len;
+}
+
+static int mm_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
+{
+ if (val == ETHTOOL_STAT_NOT_SET)
+ return 0;
+ if (nla_put_u64_64bit(skb, attrtype, val, ETHTOOL_A_MM_STAT_PAD))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int mm_put_stats(struct sk_buff *skb,
+ const struct ethtool_mm_stats *stats)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_MM_STATS);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (mm_put_stat(skb, stats->MACMergeFrameAssErrorCount,
+ ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS) ||
+ mm_put_stat(skb, stats->MACMergeFrameSmdErrorCount,
+ ETHTOOL_A_MM_STAT_SMD_ERRORS) ||
+ mm_put_stat(skb, stats->MACMergeFrameAssOkCount,
+ ETHTOOL_A_MM_STAT_REASSEMBLY_OK) ||
+ mm_put_stat(skb, stats->MACMergeFragCountRx,
+ ETHTOOL_A_MM_STAT_RX_FRAG_COUNT) ||
+ mm_put_stat(skb, stats->MACMergeFragCountTx,
+ ETHTOOL_A_MM_STAT_TX_FRAG_COUNT) ||
+ mm_put_stat(skb, stats->MACMergeHoldCount,
+ ETHTOOL_A_MM_STAT_HOLD_COUNT))
+ goto err_cancel;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int mm_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct mm_reply_data *data = MM_REPDATA(reply_base);
+ const struct ethtool_mm_state *state = &data->state;
+
+ if (nla_put_u8(skb, ETHTOOL_A_MM_TX_ENABLED, state->tx_enabled) ||
+ nla_put_u8(skb, ETHTOOL_A_MM_TX_ACTIVE, state->tx_active) ||
+ nla_put_u8(skb, ETHTOOL_A_MM_PMAC_ENABLED, state->pmac_enabled) ||
+ nla_put_u8(skb, ETHTOOL_A_MM_VERIFY_ENABLED, state->verify_enabled) ||
+ nla_put_u8(skb, ETHTOOL_A_MM_VERIFY_STATUS, state->verify_status) ||
+ nla_put_u32(skb, ETHTOOL_A_MM_VERIFY_TIME, state->verify_time) ||
+ nla_put_u32(skb, ETHTOOL_A_MM_MAX_VERIFY_TIME, state->max_verify_time) ||
+ nla_put_u32(skb, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, state->tx_min_frag_size) ||
+ nla_put_u32(skb, ETHTOOL_A_MM_RX_MIN_FRAG_SIZE, state->rx_min_frag_size))
+ return -EMSGSIZE;
+
+ if (req_base->flags & ETHTOOL_FLAG_STATS &&
+ mm_put_stats(skb, &data->stats))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1] = {
+ [ETHTOOL_A_MM_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_MM_VERIFY_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
+ [ETHTOOL_A_MM_VERIFY_TIME] = NLA_POLICY_RANGE(NLA_U32, 1, 128),
+ [ETHTOOL_A_MM_TX_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
+ [ETHTOOL_A_MM_PMAC_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
+ [ETHTOOL_A_MM_TX_MIN_FRAG_SIZE] = NLA_POLICY_RANGE(NLA_U32, 60, 252),
+};
+
+static void mm_state_to_cfg(const struct ethtool_mm_state *state,
+ struct ethtool_mm_cfg *cfg)
+{
+ /* We could also compare state->verify_status against
+ * ETHTOOL_MM_VERIFY_STATUS_DISABLED, but state->verify_enabled
+ * is more like an administrative state which should be seen in
+ * ETHTOOL_MSG_MM_GET replies. For example, a port with verification
+ * disabled might be in the ETHTOOL_MM_VERIFY_STATUS_INITIAL state
+ * if it's down.
+ */
+ cfg->verify_enabled = state->verify_enabled;
+ cfg->verify_time = state->verify_time;
+ cfg->tx_enabled = state->tx_enabled;
+ cfg->pmac_enabled = state->pmac_enabled;
+ cfg->tx_min_frag_size = state->tx_min_frag_size;
+}
+
+static int
+ethnl_set_mm_validate(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ return ops->get_mm && ops->set_mm ? 1 : -EOPNOTSUPP;
+}
+
+static int ethnl_set_mm(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct net_device *dev = req_info->dev;
+ struct ethtool_mm_state state = {};
+ struct nlattr **tb = info->attrs;
+ struct ethtool_mm_cfg cfg = {};
+ bool mod = false;
+ int ret;
+
+ ret = dev->ethtool_ops->get_mm(dev, &state);
+ if (ret)
+ return ret;
+
+ mm_state_to_cfg(&state, &cfg);
+
+ ethnl_update_bool(&cfg.verify_enabled, tb[ETHTOOL_A_MM_VERIFY_ENABLED],
+ &mod);
+ ethnl_update_u32(&cfg.verify_time, tb[ETHTOOL_A_MM_VERIFY_TIME], &mod);
+ ethnl_update_bool(&cfg.tx_enabled, tb[ETHTOOL_A_MM_TX_ENABLED], &mod);
+ ethnl_update_bool(&cfg.pmac_enabled, tb[ETHTOOL_A_MM_PMAC_ENABLED],
+ &mod);
+ ethnl_update_u32(&cfg.tx_min_frag_size,
+ tb[ETHTOOL_A_MM_TX_MIN_FRAG_SIZE], &mod);
+
+ if (!mod)
+ return 0;
+
+ if (cfg.verify_time > state.max_verify_time) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MM_VERIFY_TIME],
+ "verifyTime exceeds device maximum");
+ return -ERANGE;
+ }
+
+ ret = dev->ethtool_ops->set_mm(dev, &cfg, extack);
+ return ret < 0 ? ret : 1;
+}
+
+const struct ethnl_request_ops ethnl_mm_request_ops = {
+ .request_cmd = ETHTOOL_MSG_MM_GET,
+ .reply_cmd = ETHTOOL_MSG_MM_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_MM_HEADER,
+ .req_info_size = sizeof(struct mm_req_info),
+ .reply_data_size = sizeof(struct mm_reply_data),
+
+ .prepare_data = mm_prepare_data,
+ .reply_size = mm_reply_size,
+ .fill_reply = mm_fill_reply,
+
+ .set_validate = ethnl_set_mm_validate,
+ .set = ethnl_set_mm,
+ .set_ntf_cmd = ETHTOOL_MSG_MM_NTF,
+};
+
+/* Returns whether a given device supports the MAC merge layer
+ * (has an eMAC and a pMAC). Must be called under rtnl_lock() and
+ * ethnl_ops_begin().
+ */
+bool __ethtool_dev_mm_supported(struct net_device *dev)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_mm_state state = {};
+ int ret = -EOPNOTSUPP;
+
+ if (ops && ops->get_mm)
+ ret = ops->get_mm(dev, &state);
+
+ return !ret;
+}
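A hypothetical caller honouring the locking contract spelled out in the comment above — pause_prepare_data() later in this patch follows the same shape:

static int example_check_mm(struct net_device *dev)
{
        int ret;

        rtnl_lock();                    /* rtnl held ... */
        ret = ethnl_ops_begin(dev);     /* ... and the ops refcount taken */
        if (ret < 0)
                goto out_rtnl;

        if (!__ethtool_dev_mm_supported(dev))
                ret = -EOPNOTSUPP;

        ethnl_ops_complete(dev);
out_rtnl:
        rtnl_unlock();
        return ret;
}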
diff --git a/net/ethtool/module.c b/net/ethtool/module.c
index 898ed436b9e4..e0d539b21423 100644
--- a/net/ethtool/module.c
+++ b/net/ethtool/module.c
@@ -91,18 +91,6 @@ static int module_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_module_request_ops = {
- .request_cmd = ETHTOOL_MSG_MODULE_GET,
- .reply_cmd = ETHTOOL_MSG_MODULE_GET_REPLY,
- .hdr_attr = ETHTOOL_A_MODULE_HEADER,
- .req_info_size = sizeof(struct module_req_info),
- .reply_data_size = sizeof(struct module_reply_data),
-
- .prepare_data = module_prepare_data,
- .reply_size = module_reply_size,
- .fill_reply = module_fill_reply,
-};
-
/* MODULE_SET */
const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1] = {
@@ -112,69 +100,62 @@ const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLI
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO),
};
-static int module_set_power_mode(struct net_device *dev, struct nlattr **tb,
- bool *p_mod, struct netlink_ext_ack *extack)
+static int
+ethnl_set_module_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
{
- struct ethtool_module_power_mode_params power = {};
- struct ethtool_module_power_mode_params power_new;
- const struct ethtool_ops *ops = dev->ethtool_ops;
- int ret;
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+ struct nlattr **tb = info->attrs;
if (!tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY])
return 0;
if (!ops->get_module_power_mode || !ops->set_module_power_mode) {
- NL_SET_ERR_MSG_ATTR(extack,
+ NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY],
"Setting power mode policy is not supported by this device");
return -EOPNOTSUPP;
}
- power_new.policy = nla_get_u8(tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]);
- ret = ops->get_module_power_mode(dev, &power, extack);
- if (ret < 0)
- return ret;
-
- if (power_new.policy == power.policy)
- return 0;
- *p_mod = true;
-
- return ops->set_module_power_mode(dev, &power_new, extack);
+ return 1;
}
-int ethnl_set_module(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_module(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct ethnl_req_info req_info = {};
+ struct ethtool_module_power_mode_params power = {};
+ struct ethtool_module_power_mode_params power_new;
+ const struct ethtool_ops *ops;
+ struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct net_device *dev;
- bool mod = false;
int ret;
- ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_MODULE_HEADER],
- genl_info_net(info), info->extack,
- true);
+ ops = dev->ethtool_ops;
+
+ power_new.policy = nla_get_u8(tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]);
+ ret = ops->get_module_power_mode(dev, &power, info->extack);
if (ret < 0)
return ret;
- dev = req_info.dev;
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
+ if (power_new.policy == power.policy)
+ return 0;
- ret = module_set_power_mode(dev, tb, &mod, info->extack);
- if (ret < 0)
- goto out_ops;
+ ret = ops->set_module_power_mode(dev, &power_new, info->extack);
+ return ret < 0 ? ret : 1;
+}
- if (!mod)
- goto out_ops;
+const struct ethnl_request_ops ethnl_module_request_ops = {
+ .request_cmd = ETHTOOL_MSG_MODULE_GET,
+ .reply_cmd = ETHTOOL_MSG_MODULE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_MODULE_HEADER,
+ .req_info_size = sizeof(struct module_req_info),
+ .reply_data_size = sizeof(struct module_reply_data),
- ethtool_notify(dev, ETHTOOL_MSG_MODULE_NTF, NULL);
+ .prepare_data = module_prepare_data,
+ .reply_size = module_reply_size,
+ .fill_reply = module_fill_reply,
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
- ethnl_parse_header_dev_put(&req_info);
- return ret;
-}
+ .set_validate = ethnl_set_module_validate,
+ .set = ethnl_set_module,
+ .set_ntf_cmd = ETHTOOL_MSG_MODULE_NTF,
+};
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index aee98be6237f..08120095cc68 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -269,25 +269,43 @@ static const struct ethnl_request_ops *
ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_STRSET_GET] = &ethnl_strset_request_ops,
[ETHTOOL_MSG_LINKINFO_GET] = &ethnl_linkinfo_request_ops,
+ [ETHTOOL_MSG_LINKINFO_SET] = &ethnl_linkinfo_request_ops,
[ETHTOOL_MSG_LINKMODES_GET] = &ethnl_linkmodes_request_ops,
+ [ETHTOOL_MSG_LINKMODES_SET] = &ethnl_linkmodes_request_ops,
[ETHTOOL_MSG_LINKSTATE_GET] = &ethnl_linkstate_request_ops,
[ETHTOOL_MSG_DEBUG_GET] = &ethnl_debug_request_ops,
+ [ETHTOOL_MSG_DEBUG_SET] = &ethnl_debug_request_ops,
[ETHTOOL_MSG_WOL_GET] = &ethnl_wol_request_ops,
+ [ETHTOOL_MSG_WOL_SET] = &ethnl_wol_request_ops,
[ETHTOOL_MSG_FEATURES_GET] = &ethnl_features_request_ops,
[ETHTOOL_MSG_PRIVFLAGS_GET] = &ethnl_privflags_request_ops,
+ [ETHTOOL_MSG_PRIVFLAGS_SET] = &ethnl_privflags_request_ops,
[ETHTOOL_MSG_RINGS_GET] = &ethnl_rings_request_ops,
+ [ETHTOOL_MSG_RINGS_SET] = &ethnl_rings_request_ops,
[ETHTOOL_MSG_CHANNELS_GET] = &ethnl_channels_request_ops,
+ [ETHTOOL_MSG_CHANNELS_SET] = &ethnl_channels_request_ops,
[ETHTOOL_MSG_COALESCE_GET] = &ethnl_coalesce_request_ops,
+ [ETHTOOL_MSG_COALESCE_SET] = &ethnl_coalesce_request_ops,
[ETHTOOL_MSG_PAUSE_GET] = &ethnl_pause_request_ops,
+ [ETHTOOL_MSG_PAUSE_SET] = &ethnl_pause_request_ops,
[ETHTOOL_MSG_EEE_GET] = &ethnl_eee_request_ops,
+ [ETHTOOL_MSG_EEE_SET] = &ethnl_eee_request_ops,
[ETHTOOL_MSG_FEC_GET] = &ethnl_fec_request_ops,
+ [ETHTOOL_MSG_FEC_SET] = &ethnl_fec_request_ops,
[ETHTOOL_MSG_TSINFO_GET] = &ethnl_tsinfo_request_ops,
[ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops,
[ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops,
[ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops,
[ETHTOOL_MSG_MODULE_GET] = &ethnl_module_request_ops,
+ [ETHTOOL_MSG_MODULE_SET] = &ethnl_module_request_ops,
[ETHTOOL_MSG_PSE_GET] = &ethnl_pse_request_ops,
+ [ETHTOOL_MSG_PSE_SET] = &ethnl_pse_request_ops,
[ETHTOOL_MSG_RSS_GET] = &ethnl_rss_request_ops,
+ [ETHTOOL_MSG_PLCA_GET_CFG] = &ethnl_plca_cfg_request_ops,
+ [ETHTOOL_MSG_PLCA_SET_CFG] = &ethnl_plca_cfg_request_ops,
+ [ETHTOOL_MSG_PLCA_GET_STATUS] = &ethnl_plca_status_request_ops,
+ [ETHTOOL_MSG_MM_GET] = &ethnl_mm_request_ops,
+ [ETHTOOL_MSG_MM_SET] = &ethnl_mm_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -588,6 +606,52 @@ static int ethnl_default_done(struct netlink_callback *cb)
return 0;
}
+static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ const struct ethnl_request_ops *ops;
+ struct ethnl_req_info req_info = {};
+ const u8 cmd = info->genlhdr->cmd;
+ int ret;
+
+ ops = ethnl_default_requests[cmd];
+ if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", cmd))
+ return -EOPNOTSUPP;
+ if (GENL_REQ_ATTR_CHECK(info, ops->hdr_attr))
+ return -EINVAL;
+
+ ret = ethnl_parse_header_dev_get(&req_info, info->attrs[ops->hdr_attr],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+
+ if (ops->set_validate) {
+ ret = ops->set_validate(&req_info, info);
+ /* 0 means nothing to do */
+ if (ret <= 0)
+ goto out_dev;
+ }
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(req_info.dev);
+ if (ret < 0)
+ goto out_rtnl;
+
+ ret = ops->set(&req_info, info);
+ if (ret <= 0)
+ goto out_ops;
+ ethtool_notify(req_info.dev, ops->set_ntf_cmd, NULL);
+
+ ret = 0;
+out_ops:
+ ethnl_ops_complete(req_info.dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ ethnl_parse_header_dev_put(&req_info);
+ return ret;
+}
+
static const struct ethnl_request_ops *
ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = {
[ETHTOOL_MSG_LINKINFO_NTF] = &ethnl_linkinfo_request_ops,
@@ -603,6 +667,8 @@ ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = {
[ETHTOOL_MSG_EEE_NTF] = &ethnl_eee_request_ops,
[ETHTOOL_MSG_FEC_NTF] = &ethnl_fec_request_ops,
[ETHTOOL_MSG_MODULE_NTF] = &ethnl_module_request_ops,
+ [ETHTOOL_MSG_PLCA_NTF] = &ethnl_plca_cfg_request_ops,
+ [ETHTOOL_MSG_MM_NTF] = &ethnl_mm_request_ops,
};
/* default notification handler */
@@ -696,6 +762,8 @@ static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_EEE_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_FEC_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_MODULE_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_PLCA_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_MM_NTF] = ethnl_default_notify,
};
void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
@@ -760,7 +828,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_LINKINFO_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_linkinfo,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_linkinfo_set_policy,
.maxattr = ARRAY_SIZE(ethnl_linkinfo_set_policy) - 1,
},
@@ -776,7 +844,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_LINKMODES_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_linkmodes,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_linkmodes_set_policy,
.maxattr = ARRAY_SIZE(ethnl_linkmodes_set_policy) - 1,
},
@@ -801,7 +869,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_DEBUG_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_debug,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_debug_set_policy,
.maxattr = ARRAY_SIZE(ethnl_debug_set_policy) - 1,
},
@@ -818,7 +886,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_WOL_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_wol,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_wol_set_policy,
.maxattr = ARRAY_SIZE(ethnl_wol_set_policy) - 1,
},
@@ -850,7 +918,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_PRIVFLAGS_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_privflags,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_privflags_set_policy,
.maxattr = ARRAY_SIZE(ethnl_privflags_set_policy) - 1,
},
@@ -866,7 +934,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_RINGS_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_rings,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_rings_set_policy,
.maxattr = ARRAY_SIZE(ethnl_rings_set_policy) - 1,
},
@@ -882,7 +950,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_CHANNELS_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_channels,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_channels_set_policy,
.maxattr = ARRAY_SIZE(ethnl_channels_set_policy) - 1,
},
@@ -898,7 +966,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_COALESCE_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_coalesce,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_coalesce_set_policy,
.maxattr = ARRAY_SIZE(ethnl_coalesce_set_policy) - 1,
},
@@ -914,7 +982,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_PAUSE_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_pause,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_pause_set_policy,
.maxattr = ARRAY_SIZE(ethnl_pause_set_policy) - 1,
},
@@ -930,7 +998,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_EEE_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_eee,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_eee_set_policy,
.maxattr = ARRAY_SIZE(ethnl_eee_set_policy) - 1,
},
@@ -977,7 +1045,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_FEC_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_fec,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_fec_set_policy,
.maxattr = ARRAY_SIZE(ethnl_fec_set_policy) - 1,
},
@@ -1021,7 +1089,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_MODULE_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_module,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_module_set_policy,
.maxattr = ARRAY_SIZE(ethnl_module_set_policy) - 1,
},
@@ -1037,7 +1105,7 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_PSE_SET,
.flags = GENL_UNS_ADMIN_PERM,
- .doit = ethnl_set_pse,
+ .doit = ethnl_default_set_doit,
.policy = ethnl_pse_set_policy,
.maxattr = ARRAY_SIZE(ethnl_pse_set_policy) - 1,
},
@@ -1047,6 +1115,47 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_rss_get_policy,
.maxattr = ARRAY_SIZE(ethnl_rss_get_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_PLCA_GET_CFG,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ .policy = ethnl_plca_get_cfg_policy,
+ .maxattr = ARRAY_SIZE(ethnl_plca_get_cfg_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PLCA_SET_CFG,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_default_set_doit,
+ .policy = ethnl_plca_set_cfg_policy,
+ .maxattr = ARRAY_SIZE(ethnl_plca_set_cfg_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PLCA_GET_STATUS,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ .policy = ethnl_plca_get_status_policy,
+ .maxattr = ARRAY_SIZE(ethnl_plca_get_status_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_MM_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ .policy = ethnl_mm_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_mm_get_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_MM_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_default_set_doit,
+ .policy = ethnl_mm_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_mm_set_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 3753787ba233..f7b189ed96b2 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -138,6 +138,32 @@ static inline void ethnl_update_bool32(u32 *dst, const struct nlattr *attr,
}
/**
+ * ethnl_update_bool() - update bool from NLA_U8 attribute
+ * @dst: value to update
+ * @attr: netlink attribute with new value or null
+ * @mod: pointer to bool for modification tracking
+ *
+ * Use the u8 value from NLA_U8 netlink attribute @attr to set the bool
+ * pointed to by @dst to false (if zero) or true (if not); do nothing if @attr is
+ * null. Bool pointed to by @mod is set to true if this function changed the
+ * logical value of *dst, otherwise it is left as is.
+ */
+static inline void ethnl_update_bool(bool *dst, const struct nlattr *attr,
+ bool *mod)
+{
+ u8 val;
+
+ if (!attr)
+ return;
+ val = !!nla_get_u8(attr);
+ if (!!*dst == val)
+ return;
+
+ *dst = val;
+ *mod = true;
+}
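The helper normalises the attribute's u8 payload with !! before comparing, so *mod is only set when the logical value actually flips. A short walkthrough, where attr_u8_2 and attr_u8_0 stand for hypothetical struct nlattr pointers carrying u8 values 2 and 0:

        bool dst = true;
        bool mod = false;

        ethnl_update_bool(&dst, attr_u8_2, &mod);   /* !!2 == true == dst */
        /* dst == true, mod still false: no logical change */

        ethnl_update_bool(&dst, attr_u8_0, &mod);   /* !!0 == false != dst */
        /* dst == false, mod == true: caller knows config was touched */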
+
+/**
* ethnl_update_binary() - update binary data from NLA_BINARY attribute
* @dst: value to update
* @len: destination buffer length
@@ -258,13 +284,14 @@ int ethnl_ops_begin(struct net_device *dev);
void ethnl_ops_complete(struct net_device *dev);
/**
- * struct ethnl_request_ops - unified handling of GET requests
+ * struct ethnl_request_ops - unified handling of GET and SET requests
* @request_cmd: command id for request (GET)
* @reply_cmd: command id for reply (GET_REPLY)
* @hdr_attr: attribute type for request header
* @req_info_size: size of request info
* @reply_data_size: size of reply data
* @allow_nodev_do: allow non-dump request with no device identification
+ * @set_ntf_cmd: notification to generate on changes (SET)
* @parse_request:
* Parse request except common header (struct ethnl_req_info). Common
* header is already filled on entry, the rest up to @repdata_offset
@@ -293,6 +320,18 @@ void ethnl_ops_complete(struct net_device *dev);
* used e.g. to free any additional data structures outside the main
* structure which were allocated by ->prepare_data(). When processing
* dump requests, ->cleanup() is called for each message.
+ * @set_validate:
+ * Check if set operation is supported for a given device, and perform
+ * extra input checks. Expected return values:
+ * - 0 if the operation is a noop for the device (rare)
+ * - 1 if operation should proceed to calling @set
+ * - negative errno on errors
+ * Called without any locks, just a reference on the netdev.
+ * @set:
+ * Execute the set operation. The implementation should return
+ * - 0 if no configuration has changed
+ * - 1 if configuration changed and notification should be generated
+ * - negative errno on errors
*
* Description of variable parts of GET request handling when using the
* unified infrastructure. When used, a pointer to an instance of this
@@ -309,6 +348,7 @@ struct ethnl_request_ops {
unsigned int req_info_size;
unsigned int reply_data_size;
bool allow_nodev_do;
+ u8 set_ntf_cmd;
int (*parse_request)(struct ethnl_req_info *req_info,
struct nlattr **tb,
@@ -322,6 +362,11 @@ struct ethnl_request_ops {
const struct ethnl_req_info *req_info,
const struct ethnl_reply_data *reply_data);
void (*cleanup_data)(struct ethnl_reply_data *reply_data);
+
+ int (*set_validate)(struct ethnl_req_info *req_info,
+ struct genl_info *info);
+ int (*set)(struct ethnl_req_info *req_info,
+ struct genl_info *info);
};
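Tying the two new callbacks together: ->set_validate() runs without rtnl held, and its tri-state result (documented above) decides whether the core proceeds to ->set(). A sketch with illustrative foo names — the shape mirrors the converted handlers such as ethnl_set_eee_validate():

static int ethnl_set_foo_validate(struct ethnl_req_info *req_info,
                                  struct genl_info *info)
{
        const struct ethtool_ops *ops = req_info->dev->ethtool_ops;

        /* lockless capability check, as in the converted handlers */
        return ops->get_foo && ops->set_foo ? 1 : -EOPNOTSUPP;
}

const struct ethnl_request_ops ethnl_foo_request_ops = {
        /* GET-side members omitted */
        .set_validate   = ethnl_set_foo_validate,
        .set            = ethnl_set_foo,
        .set_ntf_cmd    = ETHTOOL_MSG_FOO_NTF,  /* hypothetical */
};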
/* request handlers */
@@ -347,6 +392,9 @@ extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops;
extern const struct ethnl_request_ops ethnl_module_request_ops;
extern const struct ethnl_request_ops ethnl_pse_request_ops;
extern const struct ethnl_request_ops ethnl_rss_request_ops;
+extern const struct ethnl_request_ops ethnl_plca_cfg_request_ops;
+extern const struct ethnl_request_ops ethnl_plca_status_request_ops;
+extern const struct ethnl_request_ops ethnl_mm_request_ops;
extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
@@ -365,12 +413,12 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_PUSH + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
extern const struct nla_policy ethnl_coalesce_set_policy[ETHTOOL_A_COALESCE_MAX + 1];
-extern const struct nla_policy ethnl_pause_get_policy[ETHTOOL_A_PAUSE_HEADER + 1];
+extern const struct nla_policy ethnl_pause_get_policy[ETHTOOL_A_PAUSE_STATS_SRC + 1];
extern const struct nla_policy ethnl_pause_set_policy[ETHTOOL_A_PAUSE_TX + 1];
extern const struct nla_policy ethnl_eee_get_policy[ETHTOOL_A_EEE_HEADER + 1];
extern const struct nla_policy ethnl_eee_set_policy[ETHTOOL_A_EEE_TX_LPI_TIMER + 1];
@@ -381,33 +429,25 @@ extern const struct nla_policy ethnl_tunnel_info_get_policy[ETHTOOL_A_TUNNEL_INF
extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1];
extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1];
extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1];
-extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1];
+extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_SRC + 1];
extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1];
extern const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1];
extern const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1];
extern const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1];
extern const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1];
extern const struct nla_policy ethnl_rss_get_policy[ETHTOOL_A_RSS_CONTEXT + 1];
+extern const struct nla_policy ethnl_plca_get_cfg_policy[ETHTOOL_A_PLCA_HEADER + 1];
+extern const struct nla_policy ethnl_plca_set_cfg_policy[ETHTOOL_A_PLCA_MAX + 1];
+extern const struct nla_policy ethnl_plca_get_status_policy[ETHTOOL_A_PLCA_HEADER + 1];
+extern const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1];
+extern const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1];
-int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_features(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_privflags(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_coalesce(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info);
int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info);
int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info);
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info);
int ethnl_tunnel_info_start(struct netlink_callback *cb);
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
-int ethnl_set_fec(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_module(struct sk_buff *skb, struct genl_info *info);
-int ethnl_set_pse(struct sk_buff *skb, struct genl_info *info);
extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN];
diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
index a8c113d244db..6657d0b888d8 100644
--- a/net/ethtool/pause.c
+++ b/net/ethtool/pause.c
@@ -5,8 +5,12 @@
struct pause_req_info {
struct ethnl_req_info base;
+ enum ethtool_mac_stats_src src;
};
+#define PAUSE_REQINFO(__req_base) \
+ container_of(__req_base, struct pause_req_info, base)
+
struct pause_reply_data {
struct ethnl_reply_data base;
struct ethtool_pauseparam pauseparam;
@@ -19,13 +23,40 @@ struct pause_reply_data {
const struct nla_policy ethnl_pause_get_policy[] = {
[ETHTOOL_A_PAUSE_HEADER] =
NLA_POLICY_NESTED(ethnl_header_policy_stats),
+ [ETHTOOL_A_PAUSE_STATS_SRC] =
+ NLA_POLICY_MAX(NLA_U32, ETHTOOL_MAC_STATS_SRC_PMAC),
};
+static int pause_parse_request(struct ethnl_req_info *req_base,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ enum ethtool_mac_stats_src src = ETHTOOL_MAC_STATS_SRC_AGGREGATE;
+ struct pause_req_info *req_info = PAUSE_REQINFO(req_base);
+
+ if (tb[ETHTOOL_A_PAUSE_STATS_SRC]) {
+ if (!(req_base->flags & ETHTOOL_FLAG_STATS)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "ETHTOOL_FLAG_STATS must be set when requesting a source of stats");
+ return -EINVAL;
+ }
+
+ src = nla_get_u32(tb[ETHTOOL_A_PAUSE_STATS_SRC]);
+ }
+
+ req_info->src = src;
+
+ return 0;
+}
+
static int pause_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
struct genl_info *info)
{
+ const struct pause_req_info *req_info = PAUSE_REQINFO(req_base);
+ struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct pause_reply_data *data = PAUSE_REPDATA(reply_base);
+ enum ethtool_mac_stats_src src = req_info->src;
struct net_device *dev = reply_base->dev;
int ret;
@@ -34,14 +65,26 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
ethtool_stats_init((u64 *)&data->pausestat,
sizeof(data->pausestat) / 8);
+ data->pausestat.src = src;
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
+
+ if ((src == ETHTOOL_MAC_STATS_SRC_EMAC ||
+ src == ETHTOOL_MAC_STATS_SRC_PMAC) &&
+ !__ethtool_dev_mm_supported(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device does not support MAC merge layer");
+ ethnl_ops_complete(dev);
+ return -EOPNOTSUPP;
+ }
+
dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
if (req_base->flags & ETHTOOL_FLAG_STATS &&
dev->ethtool_ops->get_pause_stats)
dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
+
ethnl_ops_complete(dev);
return 0;
@@ -56,6 +99,7 @@ static int pause_reply_size(const struct ethnl_req_info *req_base,
if (req_base->flags & ETHTOOL_FLAG_STATS)
n += nla_total_size(0) + /* _PAUSE_STATS */
+ nla_total_size(sizeof(u32)) + /* _PAUSE_STATS_SRC */
nla_total_size_64bit(sizeof(u64)) * ETHTOOL_PAUSE_STAT_CNT;
return n;
}
@@ -77,6 +121,9 @@ static int pause_put_stats(struct sk_buff *skb,
const u16 pad = ETHTOOL_A_PAUSE_STAT_PAD;
struct nlattr *nest;
+ if (nla_put_u32(skb, ETHTOOL_A_PAUSE_STATS_SRC, pause_stats->src))
+ return -EMSGSIZE;
+
nest = nla_nest_start(skb, ETHTOOL_A_PAUSE_STATS);
if (!nest)
return -EMSGSIZE;
@@ -114,18 +161,6 @@ static int pause_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_pause_request_ops = {
- .request_cmd = ETHTOOL_MSG_PAUSE_GET,
- .reply_cmd = ETHTOOL_MSG_PAUSE_GET_REPLY,
- .hdr_attr = ETHTOOL_A_PAUSE_HEADER,
- .req_info_size = sizeof(struct pause_req_info),
- .reply_data_size = sizeof(struct pause_reply_data),
-
- .prepare_data = pause_prepare_data,
- .reply_size = pause_reply_size,
- .fill_reply = pause_fill_reply,
-};
-
/* PAUSE_SET */
const struct nla_policy ethnl_pause_set_policy[] = {
@@ -136,51 +171,49 @@ const struct nla_policy ethnl_pause_set_policy[] = {
[ETHTOOL_A_PAUSE_TX] = { .type = NLA_U8 },
};
-int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_pause_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
{
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ return ops->get_pauseparam && ops->set_pauseparam ? 1 : -EOPNOTSUPP;
+}
+
+static int
+ethnl_set_pause(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct net_device *dev = req_info->dev;
struct ethtool_pauseparam params = {};
- struct ethnl_req_info req_info = {};
struct nlattr **tb = info->attrs;
- const struct ethtool_ops *ops;
- struct net_device *dev;
bool mod = false;
int ret;
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_PAUSE_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ops = dev->ethtool_ops;
- ret = -EOPNOTSUPP;
- if (!ops->get_pauseparam || !ops->set_pauseparam)
- goto out_dev;
-
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
- ops->get_pauseparam(dev, &params);
+ dev->ethtool_ops->get_pauseparam(dev, &params);
ethnl_update_bool32(&params.autoneg, tb[ETHTOOL_A_PAUSE_AUTONEG], &mod);
ethnl_update_bool32(&params.rx_pause, tb[ETHTOOL_A_PAUSE_RX], &mod);
ethnl_update_bool32(&params.tx_pause, tb[ETHTOOL_A_PAUSE_TX], &mod);
- ret = 0;
if (!mod)
- goto out_ops;
+ return 0;
ret = dev->ethtool_ops->set_pauseparam(dev, &params);
- if (ret < 0)
- goto out_ops;
- ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF, NULL);
-
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return ret < 0 ? ret : 1;
}
+
+const struct ethnl_request_ops ethnl_pause_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PAUSE_GET,
+ .reply_cmd = ETHTOOL_MSG_PAUSE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PAUSE_HEADER,
+ .req_info_size = sizeof(struct pause_req_info),
+ .reply_data_size = sizeof(struct pause_reply_data),
+
+ .parse_request = pause_parse_request,
+ .prepare_data = pause_prepare_data,
+ .reply_size = pause_reply_size,
+ .fill_reply = pause_fill_reply,
+
+ .set_validate = ethnl_set_pause_validate,
+ .set = ethnl_set_pause,
+ .set_ntf_cmd = ETHTOOL_MSG_PAUSE_NTF,
+};
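
The pause conversion above, like the privflags, PSE, rings and WoL conversions below, moves header parsing, rtnl locking and the ethnl_ops_begin()/complete() bracket into common code, leaving each command only a tri-state contract: .set_validate() and .set() return 1 to proceed (and send the set_ntf_cmd notification), 0 for a silent no-op, and a negative errno on failure. A minimal userspace sketch of that contract follows; the names are invented for illustration, this is not kernel API.

/* Illustrative model of the tri-state return contract used by the
 * refactored ethnl SET path. Compiles as plain C; fake_* names are
 * hypothetical stand-ins for .set_validate()/.set() and the common code.
 */
#include <errno.h>
#include <stdio.h>

struct fake_req { int have_ops; int want_change; };

static int fake_validate(const struct fake_req *req)
{
	return req->have_ops ? 1 : -EOPNOTSUPP;
}

static int fake_set(struct fake_req *req)
{
	if (!req->want_change)
		return 0;	/* nothing modified, no notification */
	/* ... apply the change ... */
	return 1;		/* modified, caller sends the NTF message */
}

static int fake_default_set(struct fake_req *req)
{
	int ret = fake_validate(req);

	if (ret <= 0)		/* error, or validate says "nothing to do" */
		return ret;
	ret = fake_set(req);
	if (ret == 1)
		printf("would send ETHTOOL_MSG_*_NTF notification\n");
	return ret < 0 ? ret : 0;
}

int main(void)
{
	struct fake_req req = { .have_ops = 1, .want_change = 1 };

	return fake_default_set(&req) < 0;
}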
diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c
new file mode 100644
index 000000000000..5a8cab4df0c9
--- /dev/null
+++ b/net/ethtool/plca.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/phy.h>
+#include <linux/ethtool_netlink.h>
+
+#include "netlink.h"
+#include "common.h"
+
+struct plca_req_info {
+ struct ethnl_req_info base;
+};
+
+struct plca_reply_data {
+ struct ethnl_reply_data base;
+ struct phy_plca_cfg plca_cfg;
+ struct phy_plca_status plca_st;
+};
+
+// Helpers ------------------------------------------------------------------ //
+
+#define PLCA_REPDATA(__reply_base) \
+ container_of(__reply_base, struct plca_reply_data, base)
+
+static void plca_update_sint(int *dst, const struct nlattr *attr,
+ bool *mod)
+{
+ if (!attr)
+ return;
+
+ *dst = nla_get_u32(attr);
+ *mod = true;
+}
+
+// PLCA get configuration message ------------------------------------------- //
+
+const struct nla_policy ethnl_plca_get_cfg_policy[] = {
+ [ETHTOOL_A_PLCA_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct plca_reply_data *data = PLCA_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ const struct ethtool_phy_ops *ops;
+ int ret;
+
+ // check that the PHY device is available and connected
+ if (!dev->phydev) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ // note: rtnl_lock is held already by ethnl_default_doit
+ ops = ethtool_phy_ops;
+ if (!ops || !ops->get_plca_cfg) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out;
+
+ memset(&data->plca_cfg, 0xff,
+ sizeof_field(struct plca_reply_data, plca_cfg));
+
+ ret = ops->get_plca_cfg(dev->phydev, &data->plca_cfg);
+ ethnl_ops_complete(dev);
+
+out:
+ return ret;
+}
+
+static int plca_get_cfg_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ return nla_total_size(sizeof(u16)) + /* _VERSION */
+ nla_total_size(sizeof(u8)) + /* _ENABLED */
+ nla_total_size(sizeof(u32)) + /* _NODE_CNT */
+ nla_total_size(sizeof(u32)) + /* _NODE_ID */
+ nla_total_size(sizeof(u32)) + /* _TO_TIMER */
+ nla_total_size(sizeof(u32)) + /* _BURST_COUNT */
+ nla_total_size(sizeof(u32)); /* _BURST_TIMER */
+}
+
+static int plca_get_cfg_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct plca_reply_data *data = PLCA_REPDATA(reply_base);
+ const struct phy_plca_cfg *plca = &data->plca_cfg;
+
+ if ((plca->version >= 0 &&
+ nla_put_u16(skb, ETHTOOL_A_PLCA_VERSION, plca->version)) ||
+ (plca->enabled >= 0 &&
+ nla_put_u8(skb, ETHTOOL_A_PLCA_ENABLED, !!plca->enabled)) ||
+ (plca->node_id >= 0 &&
+ nla_put_u32(skb, ETHTOOL_A_PLCA_NODE_ID, plca->node_id)) ||
+ (plca->node_cnt >= 0 &&
+ nla_put_u32(skb, ETHTOOL_A_PLCA_NODE_CNT, plca->node_cnt)) ||
+ (plca->to_tmr >= 0 &&
+ nla_put_u32(skb, ETHTOOL_A_PLCA_TO_TMR, plca->to_tmr)) ||
+ (plca->burst_cnt >= 0 &&
+ nla_put_u32(skb, ETHTOOL_A_PLCA_BURST_CNT, plca->burst_cnt)) ||
+ (plca->burst_tmr >= 0 &&
+ nla_put_u32(skb, ETHTOOL_A_PLCA_BURST_TMR, plca->burst_tmr)))
+ return -EMSGSIZE;
+
+ return 0;
+};
+
+// PLCA set configuration message ------------------------------------------- //
+
+const struct nla_policy ethnl_plca_set_cfg_policy[] = {
+ [ETHTOOL_A_PLCA_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_PLCA_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
+ [ETHTOOL_A_PLCA_NODE_ID] = NLA_POLICY_MAX(NLA_U32, 255),
+ [ETHTOOL_A_PLCA_NODE_CNT] = NLA_POLICY_RANGE(NLA_U32, 1, 255),
+ [ETHTOOL_A_PLCA_TO_TMR] = NLA_POLICY_MAX(NLA_U32, 255),
+ [ETHTOOL_A_PLCA_BURST_CNT] = NLA_POLICY_MAX(NLA_U32, 255),
+ [ETHTOOL_A_PLCA_BURST_TMR] = NLA_POLICY_MAX(NLA_U32, 255),
+};
+
+static int
+ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct net_device *dev = req_info->dev;
+ const struct ethtool_phy_ops *ops;
+ struct nlattr **tb = info->attrs;
+ struct phy_plca_cfg plca_cfg;
+ bool mod = false;
+ int ret;
+
+ // check that the PHY device is available and connected
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+
+ ops = ethtool_phy_ops;
+ if (!ops || !ops->set_plca_cfg)
+ return -EOPNOTSUPP;
+
+ memset(&plca_cfg, 0xff, sizeof(plca_cfg));
+ plca_update_sint(&plca_cfg.enabled, tb[ETHTOOL_A_PLCA_ENABLED], &mod);
+ plca_update_sint(&plca_cfg.node_id, tb[ETHTOOL_A_PLCA_NODE_ID], &mod);
+ plca_update_sint(&plca_cfg.node_cnt, tb[ETHTOOL_A_PLCA_NODE_CNT], &mod);
+ plca_update_sint(&plca_cfg.to_tmr, tb[ETHTOOL_A_PLCA_TO_TMR], &mod);
+ plca_update_sint(&plca_cfg.burst_cnt, tb[ETHTOOL_A_PLCA_BURST_CNT],
+ &mod);
+ plca_update_sint(&plca_cfg.burst_tmr, tb[ETHTOOL_A_PLCA_BURST_TMR],
+ &mod);
+ if (!mod)
+ return 0;
+
+ ret = ops->set_plca_cfg(dev->phydev, &plca_cfg, info->extack);
+ return ret < 0 ? ret : 1;
+}
+
+const struct ethnl_request_ops ethnl_plca_cfg_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PLCA_GET_CFG,
+ .reply_cmd = ETHTOOL_MSG_PLCA_GET_CFG_REPLY,
+ .hdr_attr = ETHTOOL_A_PLCA_HEADER,
+ .req_info_size = sizeof(struct plca_req_info),
+ .reply_data_size = sizeof(struct plca_reply_data),
+
+ .prepare_data = plca_get_cfg_prepare_data,
+ .reply_size = plca_get_cfg_reply_size,
+ .fill_reply = plca_get_cfg_fill_reply,
+
+ .set = ethnl_set_plca,
+ .set_ntf_cmd = ETHTOOL_MSG_PLCA_NTF,
+};
+
+// PLCA get status message -------------------------------------------------- //
+
+const struct nla_policy ethnl_plca_get_status_policy[] = {
+ [ETHTOOL_A_PLCA_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct plca_reply_data *data = PLCA_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ const struct ethtool_phy_ops *ops;
+ int ret;
+
+ // check that the PHY device is available and connected
+ if (!dev->phydev) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ // note: rtnl_lock is held already by ethnl_default_doit
+ ops = ethtool_phy_ops;
+ if (!ops || !ops->get_plca_status) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out;
+
+ memset(&data->plca_st, 0xff,
+ sizeof_field(struct plca_reply_data, plca_st));
+
+ ret = ops->get_plca_status(dev->phydev, &data->plca_st);
+ ethnl_ops_complete(dev);
+out:
+ return ret;
+}
+
+static int plca_get_status_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ return nla_total_size(sizeof(u8)); /* _STATUS */
+}
+
+static int plca_get_status_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct plca_reply_data *data = PLCA_REPDATA(reply_base);
+ const u8 status = data->plca_st.pst;
+
+ if (nla_put_u8(skb, ETHTOOL_A_PLCA_STATUS, !!status))
+ return -EMSGSIZE;
+
+ return 0;
+};
+
+const struct ethnl_request_ops ethnl_plca_status_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PLCA_GET_STATUS,
+ .reply_cmd = ETHTOOL_MSG_PLCA_GET_STATUS_REPLY,
+ .hdr_attr = ETHTOOL_A_PLCA_HEADER,
+ .req_info_size = sizeof(struct plca_req_info),
+ .reply_data_size = sizeof(struct plca_reply_data),
+
+ .prepare_data = plca_get_status_prepare_data,
+ .reply_size = plca_get_status_reply_size,
+ .fill_reply = plca_get_status_fill_reply,
+};
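
Both PLCA prepare_data paths lean on a sentinel convention: memset()ing a struct of signed ints with 0xff leaves every field at -1 on two's-complement machines, so only the fields the PHY driver actually filled in test as >= 0 and get emitted as attributes. A stand-in demo of the convention; demo_plca_cfg below is invented, not the kernel's phy_plca_cfg.

#include <stdio.h>
#include <string.h>

struct demo_plca_cfg {
	int version;
	int enabled;
	int node_id;
};

int main(void)
{
	struct demo_plca_cfg cfg;

	memset(&cfg, 0xff, sizeof(cfg));	/* every field becomes -1 */
	cfg.node_id = 5;			/* pretend the driver set one field */

	if (cfg.version >= 0)
		printf("version: %d\n", cfg.version);	/* skipped */
	if (cfg.node_id >= 0)
		printf("node_id: %d\n", cfg.node_id);	/* printed */
	return 0;
}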
diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c
index 4c7bfa81e4ab..23264a1ebf12 100644
--- a/net/ethtool/privflags.c
+++ b/net/ethtool/privflags.c
@@ -118,19 +118,6 @@ static void privflags_cleanup_data(struct ethnl_reply_data *reply_data)
kfree(data->priv_flag_names);
}
-const struct ethnl_request_ops ethnl_privflags_request_ops = {
- .request_cmd = ETHTOOL_MSG_PRIVFLAGS_GET,
- .reply_cmd = ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
- .hdr_attr = ETHTOOL_A_PRIVFLAGS_HEADER,
- .req_info_size = sizeof(struct privflags_req_info),
- .reply_data_size = sizeof(struct privflags_reply_data),
-
- .prepare_data = privflags_prepare_data,
- .reply_size = privflags_reply_size,
- .fill_reply = privflags_fill_reply,
- .cleanup_data = privflags_cleanup_data,
-};
-
/* PRIVFLAGS_SET */
const struct nla_policy ethnl_privflags_set_policy[] = {
@@ -139,63 +126,70 @@ const struct nla_policy ethnl_privflags_set_policy[] = {
[ETHTOOL_A_PRIVFLAGS_FLAGS] = { .type = NLA_NESTED },
};
-int ethnl_set_privflags(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_privflags_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
+{
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ if (!info->attrs[ETHTOOL_A_PRIVFLAGS_FLAGS])
+ return -EINVAL;
+
+ if (!ops->get_priv_flags || !ops->set_priv_flags ||
+ !ops->get_sset_count || !ops->get_strings)
+ return -EOPNOTSUPP;
+ return 1;
+}
+
+static int
+ethnl_set_privflags(struct ethnl_req_info *req_info, struct genl_info *info)
{
const char (*names)[ETH_GSTRING_LEN] = NULL;
- struct ethnl_req_info req_info = {};
+ struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- const struct ethtool_ops *ops;
- struct net_device *dev;
unsigned int nflags;
bool mod = false;
bool compact;
u32 flags;
int ret;
- if (!tb[ETHTOOL_A_PRIVFLAGS_FLAGS])
- return -EINVAL;
ret = ethnl_bitset_is_compact(tb[ETHTOOL_A_PRIVFLAGS_FLAGS], &compact);
if (ret < 0)
return ret;
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_PRIVFLAGS_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ops = dev->ethtool_ops;
- ret = -EOPNOTSUPP;
- if (!ops->get_priv_flags || !ops->set_priv_flags ||
- !ops->get_sset_count || !ops->get_strings)
- goto out_dev;
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
ret = ethnl_get_priv_flags_info(dev, &nflags, compact ? NULL : &names);
if (ret < 0)
- goto out_ops;
- flags = ops->get_priv_flags(dev);
+ return ret;
+ flags = dev->ethtool_ops->get_priv_flags(dev);
ret = ethnl_update_bitset32(&flags, nflags,
tb[ETHTOOL_A_PRIVFLAGS_FLAGS], names,
info->extack, &mod);
if (ret < 0 || !mod)
goto out_free;
- ret = ops->set_priv_flags(dev, flags);
+ ret = dev->ethtool_ops->set_priv_flags(dev, flags);
if (ret < 0)
goto out_free;
- ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL);
+ ret = 1;
out_free:
kfree(names);
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
return ret;
}
+
+const struct ethnl_request_ops ethnl_privflags_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PRIVFLAGS_GET,
+ .reply_cmd = ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PRIVFLAGS_HEADER,
+ .req_info_size = sizeof(struct privflags_req_info),
+ .reply_data_size = sizeof(struct privflags_reply_data),
+
+ .prepare_data = privflags_prepare_data,
+ .reply_size = privflags_reply_size,
+ .fill_reply = privflags_fill_reply,
+ .cleanup_data = privflags_cleanup_data,
+
+ .set_validate = ethnl_set_privflags_validate,
+ .set = ethnl_set_privflags,
+ .set_ntf_cmd = ETHTOOL_MSG_PRIVFLAGS_NTF,
+};
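
Private flags are a u32 bitmap whose bit names come from the driver's string set, and the "mod" flag gates the set_priv_flags() call so unchanged requests stay cheap. A toy model of that name-to-bit update; the flag names and the helper are invented for the demo.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const char *names[] = { "rx-striding", "tx-polling" };

static bool set_flag_by_name(uint32_t *flags, const char *name, bool on)
{
	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		uint32_t old = *flags;

		if (strcmp(names[i], name))
			continue;
		if (on)
			*flags |= 1u << i;
		else
			*flags &= ~(1u << i);
		return *flags != old;	/* this is the "mod" signal */
	}
	return false;	/* unknown flag name */
}

int main(void)
{
	uint32_t flags = 0;

	if (set_flag_by_name(&flags, "tx-polling", true))
		printf("changed: would call set_priv_flags(0x%x)\n", flags);
	return 0;
}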
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
index e8683e485dc9..530b8b99e6df 100644
--- a/net/ethtool/pse-pd.c
+++ b/net/ethtool/pse-pd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
//
-// ethtool interface for for Ethernet PSE (Power Sourcing Equipment)
+// ethtool interface for Ethernet PSE (Power Sourcing Equipment)
// and PD (Powered Device)
//
// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
@@ -106,18 +106,6 @@ static int pse_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_pse_request_ops = {
- .request_cmd = ETHTOOL_MSG_PSE_GET,
- .reply_cmd = ETHTOOL_MSG_PSE_GET_REPLY,
- .hdr_attr = ETHTOOL_A_PSE_HEADER,
- .req_info_size = sizeof(struct pse_req_info),
- .reply_data_size = sizeof(struct pse_reply_data),
-
- .prepare_data = pse_prepare_data,
- .reply_size = pse_reply_size,
- .fill_reply = pse_fill_reply,
-};
-
/* PSE_SET */
const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
@@ -127,59 +115,50 @@ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED),
};
-static int pse_set_pse_config(struct net_device *dev,
- struct netlink_ext_ack *extack,
- struct nlattr **tb)
+static int
+ethnl_set_pse_validate(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct phy_device *phydev = dev->phydev;
- struct pse_control_config config = {};
+ return !!info->attrs[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL];
+}
- /* Optional attribute. Do not return error if not set. */
- if (!tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL])
- return 0;
+static int
+ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct net_device *dev = req_info->dev;
+ struct pse_control_config config = {};
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
/* these values are already validated by the ethnl_pse_set_policy */
config.admin_cotrol = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]);
+ phydev = dev->phydev;
if (!phydev) {
- NL_SET_ERR_MSG(extack, "No PHY is attached");
+ NL_SET_ERR_MSG(info->extack, "No PHY is attached");
return -EOPNOTSUPP;
}
if (!phydev->psec) {
- NL_SET_ERR_MSG(extack, "No PSE is attached");
+ NL_SET_ERR_MSG(info->extack, "No PSE is attached");
return -EOPNOTSUPP;
}
- return pse_ethtool_set_config(phydev->psec, extack, &config);
+ /* Return errno directly - PSE has no notification */
+ return pse_ethtool_set_config(phydev->psec, info->extack, &config);
}
-int ethnl_set_pse(struct sk_buff *skb, struct genl_info *info)
-{
- struct ethnl_req_info req_info = {};
- struct nlattr **tb = info->attrs;
- struct net_device *dev;
- int ret;
-
- ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_PSE_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
-
- dev = req_info.dev;
-
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
-
- ret = pse_set_pse_config(dev, info->extack, tb);
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
+const struct ethnl_request_ops ethnl_pse_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PSE_GET,
+ .reply_cmd = ETHTOOL_MSG_PSE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PSE_HEADER,
+ .req_info_size = sizeof(struct pse_req_info),
+ .reply_data_size = sizeof(struct pse_reply_data),
- ethnl_parse_header_dev_put(&req_info);
+ .prepare_data = pse_prepare_data,
+ .reply_size = pse_reply_size,
+ .fill_reply = pse_fill_reply,
- return ret;
-}
+ .set_validate = ethnl_set_pse_validate,
+ .set = ethnl_set_pse,
+ /* PSE has no notification */
+};
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index fa3ec8d438f7..f358cd57d094 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -56,7 +56,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */
nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */
nla_total_size(sizeof(u32) + /* _RINGS_CQE_SIZE */
- nla_total_size(sizeof(u8))); /* _RINGS_TX_PUSH */
+ nla_total_size(sizeof(u8)) + /* _RINGS_TX_PUSH */
+ nla_total_size(sizeof(u8))); /* _RINGS_RX_PUSH */
}
static int rings_fill_reply(struct sk_buff *skb,
@@ -96,24 +97,13 @@ static int rings_fill_reply(struct sk_buff *skb,
kr->tcp_data_split))) ||
(kr->cqe_size &&
(nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
- nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
+ nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
+ nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push))
return -EMSGSIZE;
return 0;
}
-const struct ethnl_request_ops ethnl_rings_request_ops = {
- .request_cmd = ETHTOOL_MSG_RINGS_GET,
- .reply_cmd = ETHTOOL_MSG_RINGS_GET_REPLY,
- .hdr_attr = ETHTOOL_A_RINGS_HEADER,
- .req_info_size = sizeof(struct rings_req_info),
- .reply_data_size = sizeof(struct rings_reply_data),
-
- .prepare_data = rings_prepare_data,
- .reply_size = rings_reply_size,
- .fill_reply = rings_fill_reply,
-};
-
/* RINGS_SET */
const struct nla_policy ethnl_rings_set_policy[] = {
@@ -126,64 +116,64 @@ const struct nla_policy ethnl_rings_set_policy[] = {
[ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
+ [ETHTOOL_A_RINGS_RX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
};
-int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_rings_validate(struct ethnl_req_info *req_info,
+ struct genl_info *info)
{
- struct kernel_ethtool_ringparam kernel_ringparam = {};
- struct ethtool_ringparam ringparam = {};
- struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
struct nlattr **tb = info->attrs;
- const struct nlattr *err_attr;
- const struct ethtool_ops *ops;
- struct net_device *dev;
- bool mod = false;
- int ret;
-
- ret = ethnl_parse_header_dev_get(&req_info,
- tb[ETHTOOL_A_RINGS_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ops = dev->ethtool_ops;
- ret = -EOPNOTSUPP;
- if (!ops->get_ringparam || !ops->set_ringparam)
- goto out_dev;
if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
!(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
- ret = -EOPNOTSUPP;
NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
"setting rx buf len not supported");
- goto out_dev;
+ return -EOPNOTSUPP;
}
if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
!(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
- ret = -EOPNOTSUPP;
NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_RINGS_CQE_SIZE],
"setting cqe size not supported");
- goto out_dev;
+ return -EOPNOTSUPP;
}
if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
!(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
- ret = -EOPNOTSUPP;
NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_RINGS_TX_PUSH],
"setting tx push not supported");
- goto out_dev;
+ return -EOPNOTSUPP;
}
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
- ops->get_ringparam(dev, &ringparam, &kernel_ringparam, info->extack);
+ if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
+ !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_RX_PUSH],
+ "setting rx push not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
+}
+
+static int
+ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ struct kernel_ethtool_ringparam kernel_ringparam = {};
+ struct ethtool_ringparam ringparam = {};
+ struct net_device *dev = req_info->dev;
+ struct nlattr **tb = info->attrs;
+ const struct nlattr *err_attr;
+ bool mod = false;
+ int ret;
+
+ dev->ethtool_ops->get_ringparam(dev, &ringparam,
+ &kernel_ringparam, info->extack);
ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
ethnl_update_u32(&ringparam.rx_mini_pending,
@@ -197,9 +187,10 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
ethnl_update_u8(&kernel_ringparam.tx_push,
tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
- ret = 0;
+ ethnl_update_u8(&kernel_ringparam.rx_push,
+ tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
if (!mod)
- goto out_ops;
+ return 0;
/* ensure new ring parameters are within limits */
if (ringparam.rx_pending > ringparam.rx_max_pending)
@@ -213,23 +204,28 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
else
err_attr = NULL;
if (err_attr) {
- ret = -EINVAL;
NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
"requested ring size exceeds maximum");
- goto out_ops;
+ return -EINVAL;
}
ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
&kernel_ringparam, info->extack);
- if (ret < 0)
- goto out_ops;
- ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
-
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return ret < 0 ? ret : 1;
}
+
+const struct ethnl_request_ops ethnl_rings_request_ops = {
+ .request_cmd = ETHTOOL_MSG_RINGS_GET,
+ .reply_cmd = ETHTOOL_MSG_RINGS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_RINGS_HEADER,
+ .req_info_size = sizeof(struct rings_req_info),
+ .reply_data_size = sizeof(struct rings_reply_data),
+
+ .prepare_data = rings_prepare_data,
+ .reply_size = rings_reply_size,
+ .fill_reply = rings_fill_reply,
+
+ .set_validate = ethnl_set_rings_validate,
+ .set = ethnl_set_rings,
+ .set_ntf_cmd = ETHTOOL_MSG_RINGS_NTF,
+};
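
The SET handlers above funnel every optional attribute through ethnl_update_*() helpers that copy the value into the current config and raise "mod" only when something actually changed. A minimal model of that pattern, with the netlink attribute reduced to a nullable pointer; the real helpers read struct nlattr.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void demo_update_u32(uint32_t *dst, const uint32_t *attr, bool *mod)
{
	if (!attr || *dst == *attr)
		return;
	*dst = *attr;
	*mod = true;
}

int main(void)
{
	uint32_t rx_pending = 256, tx_pending = 256;
	uint32_t new_rx = 512;
	bool mod = false;

	demo_update_u32(&rx_pending, &new_rx, &mod);	/* changes, mod = true */
	demo_update_u32(&tx_pending, NULL, &mod);	/* attr absent, no-op */
	printf("rx=%u tx=%u mod=%d\n", rx_pending, tx_pending, mod);
	return 0;
}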
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index a20e0a24ff61..010ed19ccc99 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -7,6 +7,7 @@
struct stats_req_info {
struct ethnl_req_info base;
DECLARE_BITMAP(stat_mask, __ETHTOOL_STATS_CNT);
+ enum ethtool_mac_stats_src src;
};
#define STATS_REQINFO(__req_base) \
@@ -75,16 +76,19 @@ const char stats_rmon_names[__ETHTOOL_A_STATS_RMON_CNT][ETH_GSTRING_LEN] = {
[ETHTOOL_A_STATS_RMON_JABBER] = "etherStatsJabbers",
};
-const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1] = {
+const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_SRC + 1] = {
[ETHTOOL_A_STATS_HEADER] =
NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_STATS_GROUPS] = { .type = NLA_NESTED },
+ [ETHTOOL_A_STATS_SRC] =
+ NLA_POLICY_MAX(NLA_U32, ETHTOOL_MAC_STATS_SRC_PMAC),
};
static int stats_parse_request(struct ethnl_req_info *req_base,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
+ enum ethtool_mac_stats_src src = ETHTOOL_MAC_STATS_SRC_AGGREGATE;
struct stats_req_info *req_info = STATS_REQINFO(req_base);
bool mod = false;
int err;
@@ -100,6 +104,11 @@ static int stats_parse_request(struct ethnl_req_info *req_base,
return -EINVAL;
}
+ if (tb[ETHTOOL_A_STATS_SRC])
+ src = nla_get_u32(tb[ETHTOOL_A_STATS_SRC]);
+
+ req_info->src = src;
+
return 0;
}
@@ -108,7 +117,9 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
struct genl_info *info)
{
const struct stats_req_info *req_info = STATS_REQINFO(req_base);
+ struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct stats_reply_data *data = STATS_REPDATA(reply_base);
+ enum ethtool_mac_stats_src src = req_info->src;
struct net_device *dev = reply_base->dev;
int ret;
@@ -116,11 +127,25 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
if (ret < 0)
return ret;
+ if ((src == ETHTOOL_MAC_STATS_SRC_EMAC ||
+ src == ETHTOOL_MAC_STATS_SRC_PMAC) &&
+ !__ethtool_dev_mm_supported(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device does not support MAC merge layer");
+ ethnl_ops_complete(dev);
+ return -EOPNOTSUPP;
+ }
+
/* Mark all stats as unset (see ETHTOOL_STAT_NOT_SET) to prevent them
* from being reported to user space in case driver did not set them.
*/
memset(&data->stats, 0xff, sizeof(data->stats));
+ data->phy_stats.src = src;
+ data->mac_stats.src = src;
+ data->ctrl_stats.src = src;
+ data->rmon_stats.src = src;
+
if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
dev->ethtool_ops->get_eth_phy_stats)
dev->ethtool_ops->get_eth_phy_stats(dev, &data->phy_stats);
@@ -146,6 +171,8 @@ static int stats_reply_size(const struct ethnl_req_info *req_base,
unsigned int n_grps = 0, n_stats = 0;
int len = 0;
+ len += nla_total_size(sizeof(u32)); /* _STATS_SRC */
+
if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask)) {
n_stats += sizeof(struct ethtool_eth_phy_stats) / sizeof(u64);
n_grps++;
@@ -379,6 +406,9 @@ static int stats_fill_reply(struct sk_buff *skb,
const struct stats_reply_data *data = STATS_REPDATA(reply_base);
int ret = 0;
+ if (nla_put_u32(skb, ETHTOOL_A_STATS_SRC, req_info->src))
+ return -EMSGSIZE;
+
if (!ret && test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask))
ret = stats_put_stats(skb, data, ETHTOOL_STATS_ETH_PHY,
ETH_SS_STATS_ETH_PHY,
@@ -410,3 +440,130 @@ const struct ethnl_request_ops ethnl_stats_request_ops = {
.reply_size = stats_reply_size,
.fill_reply = stats_fill_reply,
};
+
+static u64 ethtool_stats_sum(u64 a, u64 b)
+{
+ if (a == ETHTOOL_STAT_NOT_SET)
+ return b;
+ if (b == ETHTOOL_STAT_NOT_SET)
+ return a;
+ return a + b;
+}
+
+/* Avoid modifying the aggregation procedure every time a new counter is added
+ * by treating the structures as an array of u64 statistics.
+ */
+static void ethtool_aggregate_stats(void *aggr_stats, const void *emac_stats,
+ const void *pmac_stats, size_t stats_size,
+ size_t stats_offset)
+{
+ size_t num_stats = stats_size / sizeof(u64);
+ const u64 *s1 = emac_stats + stats_offset;
+ const u64 *s2 = pmac_stats + stats_offset;
+ u64 *s = aggr_stats + stats_offset;
+ int i;
+
+ for (i = 0; i < num_stats; i++)
+ s[i] = ethtool_stats_sum(s1[i], s2[i]);
+}
+
+void ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_eth_mac_stats pmac, emac;
+
+ memset(&emac, 0xff, sizeof(emac));
+ memset(&pmac, 0xff, sizeof(pmac));
+ emac.src = ETHTOOL_MAC_STATS_SRC_EMAC;
+ pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC;
+
+ ops->get_eth_mac_stats(dev, &emac);
+ ops->get_eth_mac_stats(dev, &pmac);
+
+ ethtool_aggregate_stats(mac_stats, &emac, &pmac,
+ sizeof(mac_stats->stats),
+ offsetof(struct ethtool_eth_mac_stats, stats));
+}
+EXPORT_SYMBOL(ethtool_aggregate_mac_stats);
+
+void ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_eth_phy_stats pmac, emac;
+
+ memset(&emac, 0xff, sizeof(emac));
+ memset(&pmac, 0xff, sizeof(pmac));
+ emac.src = ETHTOOL_MAC_STATS_SRC_EMAC;
+ pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC;
+
+ ops->get_eth_phy_stats(dev, &emac);
+ ops->get_eth_phy_stats(dev, &pmac);
+
+ ethtool_aggregate_stats(phy_stats, &emac, &pmac,
+ sizeof(phy_stats->stats),
+ offsetof(struct ethtool_eth_phy_stats, stats));
+}
+EXPORT_SYMBOL(ethtool_aggregate_phy_stats);
+
+void ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_eth_ctrl_stats pmac, emac;
+
+ memset(&emac, 0xff, sizeof(emac));
+ memset(&pmac, 0xff, sizeof(pmac));
+ emac.src = ETHTOOL_MAC_STATS_SRC_EMAC;
+ pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC;
+
+ ops->get_eth_ctrl_stats(dev, &emac);
+ ops->get_eth_ctrl_stats(dev, &pmac);
+
+ ethtool_aggregate_stats(ctrl_stats, &emac, &pmac,
+ sizeof(ctrl_stats->stats),
+ offsetof(struct ethtool_eth_ctrl_stats, stats));
+}
+EXPORT_SYMBOL(ethtool_aggregate_ctrl_stats);
+
+void ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_pause_stats pmac, emac;
+
+ memset(&emac, 0xff, sizeof(emac));
+ memset(&pmac, 0xff, sizeof(pmac));
+ emac.src = ETHTOOL_MAC_STATS_SRC_EMAC;
+ pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC;
+
+ ops->get_pause_stats(dev, &emac);
+ ops->get_pause_stats(dev, &pmac);
+
+ ethtool_aggregate_stats(pause_stats, &emac, &pmac,
+ sizeof(pause_stats->stats),
+ offsetof(struct ethtool_pause_stats, stats));
+}
+EXPORT_SYMBOL(ethtool_aggregate_pause_stats);
+
+void ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ const struct ethtool_rmon_hist_range *dummy;
+ struct ethtool_rmon_stats pmac, emac;
+
+ memset(&emac, 0xff, sizeof(emac));
+ memset(&pmac, 0xff, sizeof(pmac));
+ emac.src = ETHTOOL_MAC_STATS_SRC_EMAC;
+ pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC;
+
+ ops->get_rmon_stats(dev, &emac, &dummy);
+ ops->get_rmon_stats(dev, &pmac, &dummy);
+
+ ethtool_aggregate_stats(rmon_stats, &emac, &pmac,
+ sizeof(rmon_stats->stats),
+ offsetof(struct ethtool_rmon_stats, stats));
+}
+EXPORT_SYMBOL(ethtool_aggregate_rmon_stats);
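
The aggregation helpers treat each stats struct as a flat array of u64 beginning at its "stats" member (the kernel skips the leading src field via offsetof()), so a newly added counter needs no new aggregation code, and ETHTOOL_STAT_NOT_SET (all ones) marks counters a driver never filled. A self-contained model of the same arithmetic; demo_stats is a stand-in without the src field.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define STAT_NOT_SET ((uint64_t)~0ULL)

struct demo_stats { uint64_t tx_frames, rx_frames; };

static uint64_t stats_sum(uint64_t a, uint64_t b)
{
	if (a == STAT_NOT_SET)
		return b;
	if (b == STAT_NOT_SET)
		return a;
	return a + b;
}

static void aggregate(struct demo_stats *aggr, const struct demo_stats *emac,
		      const struct demo_stats *pmac)
{
	const uint64_t *s1 = (const uint64_t *)emac;
	const uint64_t *s2 = (const uint64_t *)pmac;
	uint64_t *s = (uint64_t *)aggr;
	size_t i, n = sizeof(*aggr) / sizeof(uint64_t);

	for (i = 0; i < n; i++)
		s[i] = stats_sum(s1[i], s2[i]);
}

int main(void)
{
	struct demo_stats emac = { 100, STAT_NOT_SET };
	struct demo_stats pmac = { 40, 7 };
	struct demo_stats aggr;

	aggregate(&aggr, &emac, &pmac);
	printf("tx=%llu rx=%llu\n",
	       (unsigned long long)aggr.tx_frames,
	       (unsigned long long)aggr.rx_frames);	/* tx=140 rx=7 */
	return 0;
}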
diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
index 88f435e76481..a4a43d9e6e9d 100644
--- a/net/ethtool/wol.c
+++ b/net/ethtool/wol.c
@@ -82,18 +82,6 @@ static int wol_fill_reply(struct sk_buff *skb,
return 0;
}
-const struct ethnl_request_ops ethnl_wol_request_ops = {
- .request_cmd = ETHTOOL_MSG_WOL_GET,
- .reply_cmd = ETHTOOL_MSG_WOL_GET_REPLY,
- .hdr_attr = ETHTOOL_A_WOL_HEADER,
- .req_info_size = sizeof(struct wol_req_info),
- .reply_data_size = sizeof(struct wol_reply_data),
-
- .prepare_data = wol_prepare_data,
- .reply_size = wol_reply_size,
- .fill_reply = wol_fill_reply,
-};
-
/* WOL_SET */
const struct nla_policy ethnl_wol_set_policy[] = {
@@ -104,67 +92,66 @@ const struct nla_policy ethnl_wol_set_policy[] = {
.len = SOPASS_MAX },
};
-int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info)
+static int
+ethnl_set_wol_validate(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+
+ return ops->get_wol && ops->set_wol ? 1 : -EOPNOTSUPP;
+}
+
+static int
+ethnl_set_wol(struct ethnl_req_info *req_info, struct genl_info *info)
{
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
- struct ethnl_req_info req_info = {};
+ struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct net_device *dev;
bool mod = false;
int ret;
- ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_WOL_HEADER],
- genl_info_net(info), info->extack,
- true);
- if (ret < 0)
- return ret;
- dev = req_info.dev;
- ret = -EOPNOTSUPP;
- if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol)
- goto out_dev;
-
- rtnl_lock();
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- goto out_rtnl;
-
dev->ethtool_ops->get_wol(dev, &wol);
ret = ethnl_update_bitset32(&wol.wolopts, WOL_MODE_COUNT,
tb[ETHTOOL_A_WOL_MODES], wol_mode_names,
info->extack, &mod);
if (ret < 0)
- goto out_ops;
+ return ret;
if (wol.wolopts & ~wol.supported) {
NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_WOL_MODES],
"cannot enable unsupported WoL mode");
- ret = -EINVAL;
- goto out_ops;
+ return -EINVAL;
}
if (tb[ETHTOOL_A_WOL_SOPASS]) {
if (!(wol.supported & WAKE_MAGICSECURE)) {
NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_WOL_SOPASS],
"magicsecure not supported, cannot set password");
- ret = -EINVAL;
- goto out_ops;
+ return -EINVAL;
}
ethnl_update_binary(wol.sopass, sizeof(wol.sopass),
tb[ETHTOOL_A_WOL_SOPASS], &mod);
}
if (!mod)
- goto out_ops;
+ return 0;
ret = dev->ethtool_ops->set_wol(dev, &wol);
if (ret)
- goto out_ops;
+ return ret;
dev->wol_enabled = !!wol.wolopts;
- ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL);
-
-out_ops:
- ethnl_ops_complete(dev);
-out_rtnl:
- rtnl_unlock();
-out_dev:
- ethnl_parse_header_dev_put(&req_info);
- return ret;
+ return 1;
}
+
+const struct ethnl_request_ops ethnl_wol_request_ops = {
+ .request_cmd = ETHTOOL_MSG_WOL_GET,
+ .reply_cmd = ETHTOOL_MSG_WOL_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_WOL_HEADER,
+ .req_info_size = sizeof(struct wol_req_info),
+ .reply_data_size = sizeof(struct wol_reply_data),
+
+ .prepare_data = wol_prepare_data,
+ .reply_size = wol_reply_size,
+ .fill_reply = wol_fill_reply,
+
+ .set_validate = ethnl_set_wol_validate,
+ .set = ethnl_set_wol,
+ .set_ntf_cmd = ETHTOOL_MSG_WOL_NTF,
+};
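
ethnl_set_wol() rejects any request whose mode bits are not a subset of what the device reports as supported, before anything is written back to the driver. The check is a one-liner worth seeing in isolation; the mode bit values below are arbitrary demo values, not the WAKE_* constants.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t supported = 0x01 | 0x20;	/* say, PHY + magic packet */
	uint32_t requested = 0x20 | 0x40;	/* one bit is unsupported */

	if (requested & ~supported) {
		printf("rejected: unsupported mode bits 0x%x\n",
		       requested & ~supported);
		return 1;	/* the kernel path returns -EINVAL here */
	}
	printf("accepted: 0x%x\n", requested);
	return 0;
}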
diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c
index af337cf62764..35d384dfe29d 100644
--- a/net/ieee802154/header_ops.c
+++ b/net/ieee802154/header_ops.c
@@ -120,6 +120,30 @@ ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr)
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_push);
+int ieee802154_beacon_push(struct sk_buff *skb,
+ struct ieee802154_beacon_frame *beacon)
+{
+ struct ieee802154_beacon_hdr *mac_pl = &beacon->mac_pl;
+ struct ieee802154_hdr *mhr = &beacon->mhr;
+ int ret;
+
+ skb_reserve(skb, sizeof(*mhr));
+ ret = ieee802154_hdr_push(skb, mhr);
+ if (ret < 0)
+ return ret;
+
+ skb_reset_mac_header(skb);
+ skb->mac_len = ret;
+
+ skb_put_data(skb, mac_pl, sizeof(*mac_pl));
+
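+ /* Reporting pending short/extended addresses is not yet supported */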
+ if (mac_pl->pend_short_addr_count || mac_pl->pend_ext_addr_count)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ieee802154_beacon_push);
+
static int
ieee802154_hdr_get_addr(const u8 *buf, int mode, bool omit_pan,
struct ieee802154_addr *addr)
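
ieee802154_beacon_push() above follows the usual skb discipline: reserve headroom sized for the MAC header, push the header back into that headroom, then append the beacon payload behind it. A rough userspace analogue using a plain byte buffer instead of an skb; the sizes are invented for the demo.

#include <stdio.h>
#include <string.h>

#define HDR_LEN 8
#define PAYLOAD_LEN 4

int main(void)
{
	unsigned char buf[64];
	unsigned char *data = buf + HDR_LEN;	/* skb_reserve(skb, HDR_LEN) */
	size_t len = 0;

	data -= HDR_LEN;			/* skb_push(): header goes first */
	memset(data, 0xaa, HDR_LEN);
	len += HDR_LEN;

	memset(data + len, 0xbb, PAYLOAD_LEN);	/* skb_put_data(): payload */
	len += PAYLOAD_LEN;

	printf("frame is %zu bytes, header at offset 0\n", len);
	return 0;
}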
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 248ad5e46969..2215f576ee37 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -187,8 +187,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_WPAN_DEV] = { .type = NLA_U64 },
- [NL802154_ATTR_PAGE] = { .type = NLA_U8, },
- [NL802154_ATTR_CHANNEL] = { .type = NLA_U8, },
+ [NL802154_ATTR_PAGE] = NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_PAGE),
+ [NL802154_ATTR_CHANNEL] = NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_CHANNEL),
[NL802154_ATTR_TX_POWER] = { .type = NLA_S32, },
@@ -221,6 +221,20 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_COORDINATOR] = { .type = NLA_NESTED },
+ [NL802154_ATTR_SCAN_TYPE] =
+ NLA_POLICY_RANGE(NLA_U8, NL802154_SCAN_ED, NL802154_SCAN_RIT_PASSIVE),
+ [NL802154_ATTR_SCAN_CHANNELS] =
+ NLA_POLICY_MASK(NLA_U32, GENMASK(IEEE802154_MAX_CHANNEL, 0)),
+ [NL802154_ATTR_SCAN_PREAMBLE_CODES] = { .type = NLA_REJECT },
+ [NL802154_ATTR_SCAN_MEAN_PRF] = { .type = NLA_REJECT },
+ [NL802154_ATTR_SCAN_DURATION] =
+ NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_SCAN_DURATION),
+ [NL802154_ATTR_SCAN_DONE_REASON] =
+ NLA_POLICY_RANGE(NLA_U8, NL802154_SCAN_DONE_REASON_FINISHED,
+ NL802154_SCAN_DONE_REASON_ABORTED),
+ [NL802154_ATTR_BEACON_INTERVAL] =
+ NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_SCAN_DURATION),
+
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
[NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, },
[NL802154_ATTR_SEC_OUT_LEVEL] = { .type = NLA_U32, },
@@ -969,8 +983,7 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]);
/* check 802.15.4 constraints */
- if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL ||
- !(rdev->wpan_phy.supported.channels[page] & BIT(channel)))
+ if (!ieee802154_chan_is_valid(&rdev->wpan_phy, page, channel))
return -EINVAL;
return rdev_set_channel(rdev, page, channel);
@@ -1384,6 +1397,236 @@ int nl802154_scan_event(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
}
EXPORT_SYMBOL_GPL(nl802154_scan_event);
+static int nl802154_trigger_scan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct wpan_phy *wpan_phy = &rdev->wpan_phy;
+ struct cfg802154_scan_request *request;
+ u8 type;
+ int err;
+
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ NL_SET_ERR_MSG(info->extack, "Monitors are not allowed to perform scans");
+ return -EOPNOTSUPP;
+ }
+
+ if (!info->attrs[NL802154_ATTR_SCAN_TYPE]) {
+ NL_SET_ERR_MSG(info->extack, "Malformed request, missing scan type");
+ return -EINVAL;
+ }
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->wpan_dev = wpan_dev;
+ request->wpan_phy = wpan_phy;
+
+ type = nla_get_u8(info->attrs[NL802154_ATTR_SCAN_TYPE]);
+ switch (type) {
+ case NL802154_SCAN_PASSIVE:
+ request->type = type;
+ break;
+ default:
+ NL_SET_ERR_MSG_FMT(info->extack, "Unsupported scan type: %d", type);
+ err = -EINVAL;
+ goto free_request;
+ }
+
+ /* Use current page by default */
+ if (info->attrs[NL802154_ATTR_PAGE])
+ request->page = nla_get_u8(info->attrs[NL802154_ATTR_PAGE]);
+ else
+ request->page = wpan_phy->current_page;
+
+ /* Scan all supported channels by default */
+ if (info->attrs[NL802154_ATTR_SCAN_CHANNELS])
+ request->channels = nla_get_u32(info->attrs[NL802154_ATTR_SCAN_CHANNELS]);
+ else
+ request->channels = wpan_phy->supported.channels[request->page];
+
+ /* Use maximum duration order by default */
+ if (info->attrs[NL802154_ATTR_SCAN_DURATION])
+ request->duration = nla_get_u8(info->attrs[NL802154_ATTR_SCAN_DURATION]);
+ else
+ request->duration = IEEE802154_MAX_SCAN_DURATION;
+
+ err = rdev_trigger_scan(rdev, request);
+ if (err) {
+ pr_err("Failure starting scanning (%d)\n", err);
+ goto free_request;
+ }
+
+ return 0;
+
+free_request:
+ kfree(request);
+
+ return err;
+}
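
The duration attribute here is an IEEE 802.15.4 scan duration order n, not a time: each channel is listened on for aBaseSuperframeDuration * (2^n + 1) symbols, where aBaseSuperframeDuration is 960 symbols (16 slots of 60 symbols), which is why the policy caps it at IEEE802154_MAX_SCAN_DURATION. A sketch of the arithmetic, assuming the maximum order is 14 (its 802.15.4 value) and the 16 us symbol time of the 2.4 GHz O-QPSK PHY.

#include <stdio.h>

#define BASE_SUPERFRAME_DURATION (16 * 60)	/* symbols: 16 slots x 60 */
#define SYMBOL_US 16				/* 2.4 GHz O-QPSK PHY */

int main(void)
{
	for (unsigned int n = 0; n <= 14; n += 7) {	/* 14 = max order */
		unsigned long long symbols =
			BASE_SUPERFRAME_DURATION * ((1ULL << n) + 1);

		printf("order %2u -> %llu symbols (~%llu ms/channel)\n",
		       n, symbols, symbols * SYMBOL_US / 1000);
	}
	return 0;
}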
+
+static int nl802154_prep_scan_msg(struct sk_buff *msg,
+ struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, u32 portid,
+ u32 seq, int flags, u8 cmd, u8 arg)
+{
+ void *hdr;
+
+ hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx))
+ goto nla_put_failure;
+
+ if (wpan_dev->netdev &&
+ nla_put_u32(msg, NL802154_ATTR_IFINDEX, wpan_dev->netdev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV,
+ wpan_dev_id(wpan_dev), NL802154_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (cmd == NL802154_CMD_SCAN_DONE &&
+ nla_put_u8(msg, NL802154_ATTR_SCAN_DONE_REASON, arg))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+
+ return -EMSGSIZE;
+}
+
+static int nl802154_send_scan_msg(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, u8 cmd, u8 arg)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = nl802154_prep_scan_msg(msg, rdev, wpan_dev, 0, 0, 0, cmd, arg);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ return genlmsg_multicast_netns(&nl802154_fam,
+ wpan_phy_net(&rdev->wpan_phy), msg, 0,
+ NL802154_MCGRP_SCAN, GFP_KERNEL);
+}
+
+int nl802154_scan_started(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev)
+{
+ struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(wpan_phy);
+ int err;
+
+ /* Ignore errors when there are no listeners */
+ err = nl802154_send_scan_msg(rdev, wpan_dev, NL802154_CMD_TRIGGER_SCAN, 0);
+ if (err == -ESRCH)
+ err = 0;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(nl802154_scan_started);
+
+int nl802154_scan_done(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ enum nl802154_scan_done_reasons reason)
+{
+ struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(wpan_phy);
+ int err;
+
+ /* Ignore errors when there are no listeners */
+ err = nl802154_send_scan_msg(rdev, wpan_dev, NL802154_CMD_SCAN_DONE, reason);
+ if (err == -ESRCH)
+ err = 0;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(nl802154_scan_done);
+
+static int nl802154_abort_scan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+
+ /* Resources are released in the notification helper above */
+ return rdev_abort_scan(rdev, wpan_dev);
+}
+
+static int
+nl802154_send_beacons(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct wpan_phy *wpan_phy = &rdev->wpan_phy;
+ struct cfg802154_beacon_request *request;
+ int err;
+
+ if (wpan_dev->iftype != NL802154_IFTYPE_COORD) {
+ NL_SET_ERR_MSG(info->extack, "Only coordinators can send beacons");
+ return -EOPNOTSUPP;
+ }
+
+ if (wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
+ NL_SET_ERR_MSG(info->extack, "Device is not part of any PAN");
+ return -EPERM;
+ }
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->wpan_dev = wpan_dev;
+ request->wpan_phy = wpan_phy;
+
+ /* Use maximum interval order by default */
+ if (info->attrs[NL802154_ATTR_BEACON_INTERVAL])
+ request->interval = nla_get_u8(info->attrs[NL802154_ATTR_BEACON_INTERVAL]);
+ else
+ request->interval = IEEE802154_MAX_SCAN_DURATION;
+
+ err = rdev_send_beacons(rdev, request);
+ if (err) {
+ pr_err("Failure starting sending beacons (%d)\n", err);
+ goto free_request;
+ }
+
+ return 0;
+
+free_request:
+ kfree(request);
+
+ return err;
+}
+
+void nl802154_beaconing_done(struct wpan_dev *wpan_dev)
+{
+ /* NOP */
+}
+EXPORT_SYMBOL_GPL(nl802154_beaconing_done);
+
+static int
+nl802154_stop_beacons(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+
+ /* Resources are released in the notification helper above */
+ return rdev_stop_beacons(rdev, wpan_dev);
+}
+
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
static const struct nla_policy nl802154_dev_addr_policy[NL802154_DEV_ADDR_ATTR_MAX + 1] = {
[NL802154_DEV_ADDR_ATTR_PAN_ID] = { .type = NLA_U16 },
@@ -2474,6 +2717,38 @@ static const struct genl_ops nl802154_ops[] = {
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL802154_CMD_TRIGGER_SCAN,
+ .doit = nl802154_trigger_scan,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_CHECK_NETDEV_UP |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_ABORT_SCAN,
+ .doit = nl802154_abort_scan,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_CHECK_NETDEV_UP |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SEND_BEACONS,
+ .doit = nl802154_send_beacons,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_CHECK_NETDEV_UP |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_STOP_BEACONS,
+ .doit = nl802154_stop_beacons,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_CHECK_NETDEV_UP |
+ NL802154_FLAG_NEED_RTNL,
+ },
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
{
.cmd = NL802154_CMD_SET_SEC_PARAMS,
diff --git a/net/ieee802154/nl802154.h b/net/ieee802154/nl802154.h
index 89b805500032..d69d950f9a6a 100644
--- a/net/ieee802154/nl802154.h
+++ b/net/ieee802154/nl802154.h
@@ -6,5 +6,9 @@ int nl802154_init(void);
void nl802154_exit(void);
int nl802154_scan_event(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
struct ieee802154_coord_desc *desc);
+int nl802154_scan_started(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev);
+int nl802154_scan_done(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ enum nl802154_scan_done_reasons reason);
+void nl802154_beaconing_done(struct wpan_dev *wpan_dev);
#endif /* __IEEE802154_NL802154_H */
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index 598f5af49775..5eaae15c610e 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -209,6 +209,62 @@ rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
return ret;
}
+static inline int rdev_trigger_scan(struct cfg802154_registered_device *rdev,
+ struct cfg802154_scan_request *request)
+{
+ int ret;
+
+ if (!rdev->ops->trigger_scan)
+ return -EOPNOTSUPP;
+
+ trace_802154_rdev_trigger_scan(&rdev->wpan_phy, request);
+ ret = rdev->ops->trigger_scan(&rdev->wpan_phy, request);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int rdev_abort_scan(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev)
+{
+ int ret;
+
+ if (!rdev->ops->abort_scan)
+ return -EOPNOTSUPP;
+
+ trace_802154_rdev_abort_scan(&rdev->wpan_phy, wpan_dev);
+ ret = rdev->ops->abort_scan(&rdev->wpan_phy, wpan_dev);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int rdev_send_beacons(struct cfg802154_registered_device *rdev,
+ struct cfg802154_beacon_request *request)
+{
+ int ret;
+
+ if (!rdev->ops->send_beacons)
+ return -EOPNOTSUPP;
+
+ trace_802154_rdev_send_beacons(&rdev->wpan_phy, request);
+ ret = rdev->ops->send_beacons(&rdev->wpan_phy, request);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int rdev_stop_beacons(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev)
+{
+ int ret;
+
+ if (!rdev->ops->stop_beacons)
+ return -EOPNOTSUPP;
+
+ trace_802154_rdev_stop_beacons(&rdev->wpan_phy, wpan_dev);
+ ret = rdev->ops->stop_beacons(&rdev->wpan_phy, wpan_dev);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
/* TODO this is already a nl802154, so move into ieee802154 */
static inline void
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
index 19c2e5d60e76..e5d8439b9e45 100644
--- a/net/ieee802154/trace.h
+++ b/net/ieee802154/trace.h
@@ -295,6 +295,67 @@ TRACE_EVENT(802154_rdev_set_ackreq_default,
WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
);
+TRACE_EVENT(802154_rdev_trigger_scan,
+ TP_PROTO(struct wpan_phy *wpan_phy,
+ struct cfg802154_scan_request *request),
+ TP_ARGS(wpan_phy, request),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __field(u8, page)
+ __field(u32, channels)
+ __field(u8, duration)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __entry->page = request->page;
+ __entry->channels = request->channels;
+ __entry->duration = request->duration;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", scan, page: %d, channels: %x, duration %d",
+ WPAN_PHY_PR_ARG, __entry->page, __entry->channels, __entry->duration)
+);
+
+TRACE_EVENT(802154_rdev_send_beacons,
+ TP_PROTO(struct wpan_phy *wpan_phy,
+ struct cfg802154_beacon_request *request),
+ TP_ARGS(wpan_phy, request),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __field(u8, interval)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __entry->interval = request->interval;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", sending beacons (interval order: %d)",
+ WPAN_PHY_PR_ARG, __entry->interval)
+);
+
+DECLARE_EVENT_CLASS(802154_wdev_template,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev),
+ TP_ARGS(wpan_phy, wpan_dev),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT,
+ WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG)
+);
+
+DEFINE_EVENT(802154_wdev_template, 802154_rdev_abort_scan,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev),
+ TP_ARGS(wpan_phy, wpan_dev)
+);
+
+DEFINE_EVENT(802154_wdev_template, 802154_rdev_stop_beacons,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev),
+ TP_ARGS(wpan_phy, wpan_dev)
+);
+
TRACE_EVENT(802154_rdev_return_int,
TP_PROTO(struct wpan_phy *wpan_phy, int ret),
TP_ARGS(wpan_phy, ret),
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index af7d2cf490fb..880277c9fd07 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_IP_MROUTE_COMMON) += ipmr_base.o
obj-$(CONFIG_NET_IPIP) += ipip.o
gre-y := gre_demux.o
+fou-y := fou_core.o fou_nl.o
obj-$(CONFIG_NET_FOU) += fou.o
obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index cf11f10927e1..8db6747f892f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -156,7 +156,6 @@ void inet_sock_destruct(struct sock *sk)
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
- sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -357,8 +356,6 @@ lookup_protocol:
inet->mc_list = NULL;
inet->rcv_tos = 0;
- sk_refcnt_debug_inc(sk);
-
if (inet->inet_num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
@@ -1486,6 +1483,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
if (unlikely(ip_fast_csum((u8 *)iph, 5)))
goto out;
+ NAPI_GRO_CB(skb)->proto = proto;
id = ntohl(*(__be32 *)&iph->id);
flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
id >>= 16;
@@ -1619,9 +1617,9 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
- __be16 newlen = htons(skb->len - nhoff);
struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
const struct net_offload *ops;
+ __be16 totlen = iph->tot_len;
int proto = iph->protocol;
int err = -ENOSYS;
@@ -1630,8 +1628,8 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
skb_set_inner_network_header(skb, nhoff);
}
- csum_replace2(&iph->check, iph->tot_len, newlen);
- iph->tot_len = newlen;
+ iph_set_totlen(iph, skb->len - nhoff);
+ csum_replace2(&iph->check, totlen, iph->tot_len);
ops = rcu_dereference(inet_offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
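
With IPv4 big TCP, inet_gro_complete() stores the merged length via iph_set_totlen(), which leaves tot_len at 0 for frames beyond 64 KiB, and then fixes the header checksum incrementally with csum_replace2() instead of recomputing it. csum_replace2() is an instance of the RFC 1624 update HC' = ~(~HC + ~m + m'); a worked, self-contained model with made-up header words:

#include <stdint.h>
#include <stdio.h>

static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)			/* end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t csum_of(const uint16_t *words, int n)
{
	uint32_t sum = 0;

	for (int i = 0; i < n; i++)
		sum += words[i];
	return (uint16_t)~fold16(sum);
}

/* RFC 1624: HC' = ~(~HC + ~m + m') */
static uint16_t csum_update(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old + new;
	return (uint16_t)~fold16(sum);
}

int main(void)
{
	uint16_t hdr[4] = { 0x4500, 0x003c, 0x1c46, 0x4000 };
	uint16_t check = csum_of(hdr, 4);

	hdr[1] = 0x0054;		/* tot_len changes after GRO merge */
	printf("incremental: %04x full: %04x\n",
	       csum_update(check, 0x003c, 0x0054), csum_of(hdr, 4));
	return 0;
}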
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 4517d2bd186a..13fc0c185cd9 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -248,7 +248,8 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
}
static int bpf_tcp_ca_check_member(const struct btf_type *t,
- const struct btf_member *member)
+ const struct btf_member *member,
+ const struct bpf_prog *prog)
{
if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
return -ENOTSUPP;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 6cd3b6c559f0..79ae7204e8ed 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -2222,7 +2222,7 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len);
if (len_delta != 0) {
iph->ihl = 5 + (opt_len >> 2);
- iph->tot_len = htons(skb->len);
+ iph_set_totlen(iph, skb->len);
}
ip_send_check(iph);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou_core.c
index 0c3c6d0cee29..cafec9b4eee0 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou_core.c
@@ -19,6 +19,8 @@
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
+#include "fou_nl.h"
+
struct fou {
struct socket *sock;
u8 protocol;
@@ -640,20 +642,6 @@ static int fou_destroy(struct net *net, struct fou_cfg *cfg)
static struct genl_family fou_nl_family;
-static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
- [FOU_ATTR_PORT] = { .type = NLA_U16, },
- [FOU_ATTR_AF] = { .type = NLA_U8, },
- [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
- [FOU_ATTR_TYPE] = { .type = NLA_U8, },
- [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
- [FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
- [FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
- [FOU_ATTR_LOCAL_V6] = { .len = sizeof(struct in6_addr), },
- [FOU_ATTR_PEER_V6] = { .len = sizeof(struct in6_addr), },
- [FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
- [FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
-};
-
static int parse_nl_config(struct genl_info *info,
struct fou_cfg *cfg)
{
@@ -745,7 +733,7 @@ static int parse_nl_config(struct genl_info *info,
return 0;
}
-static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
+int fou_nl_add_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct fou_cfg cfg;
@@ -758,7 +746,7 @@ static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
return fou_create(net, &cfg, NULL);
}
-static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
+int fou_nl_del_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct fou_cfg cfg;
@@ -827,7 +815,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
+int fou_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct fou_net *fn = net_generic(net, fou_net_id);
@@ -874,7 +862,7 @@ out_free:
return ret;
}
-static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int fou_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct fou_net *fn = net_generic(net, fou_net_id);
@@ -897,33 +885,12 @@ static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
-static const struct genl_small_ops fou_nl_ops[] = {
- {
- .cmd = FOU_CMD_ADD,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = fou_nl_cmd_add_port,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = FOU_CMD_DEL,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = fou_nl_cmd_rm_port,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = FOU_CMD_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = fou_nl_cmd_get_port,
- .dumpit = fou_nl_dump,
- },
-};
-
static struct genl_family fou_nl_family __ro_after_init = {
.hdrsize = 0,
.name = FOU_GENL_NAME,
.version = FOU_GENL_VERSION,
.maxattr = FOU_ATTR_MAX,
- .policy = fou_nl_policy,
+ .policy = fou_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
.small_ops = fou_nl_ops,
diff --git a/net/ipv4/fou_nl.c b/net/ipv4/fou_nl.c
new file mode 100644
index 000000000000..6c3820f41dd5
--- /dev/null
+++ b/net/ipv4/fou_nl.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/fou.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "fou_nl.h"
+
+#include <linux/fou.h>
+
+/* Global operation policy for fou */
+const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = {
+ [FOU_ATTR_PORT] = { .type = NLA_U16, },
+ [FOU_ATTR_AF] = { .type = NLA_U8, },
+ [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
+ [FOU_ATTR_TYPE] = { .type = NLA_U8, },
+ [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
+ [FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
+ [FOU_ATTR_LOCAL_V6] = { .len = 16, },
+ [FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
+ [FOU_ATTR_PEER_V6] = { .len = 16, },
+ [FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
+ [FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
+};
+
+/* Ops table for fou */
+const struct genl_small_ops fou_nl_ops[3] = {
+ {
+ .cmd = FOU_CMD_ADD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = fou_nl_add_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = FOU_CMD_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = fou_nl_del_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = FOU_CMD_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = fou_nl_get_doit,
+ .dumpit = fou_nl_get_dumpit,
+ },
+};
diff --git a/net/ipv4/fou_nl.h b/net/ipv4/fou_nl.h
new file mode 100644
index 000000000000..b7a68121ce6f
--- /dev/null
+++ b/net/ipv4/fou_nl.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/fou.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_FOU_GEN_H
+#define _LINUX_FOU_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <linux/fou.h>
+
+/* Global operation policy for fou */
+extern const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1];
+
+/* Ops table for fou */
+extern const struct genl_small_ops fou_nl_ops[3];
+
+int fou_nl_add_doit(struct sk_buff *skb, struct genl_info *info);
+int fou_nl_del_doit(struct sk_buff *skb, struct genl_info *info);
+int fou_nl_get_doit(struct sk_buff *skb, struct genl_info *info);
+int fou_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+
+#endif /* _LINUX_FOU_GEN_H */
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 46aa2d65e40a..8cebb476b3ab 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -296,6 +296,7 @@ static bool icmpv4_global_allow(struct net *net, int type, int code)
if (icmp_global_allow())
return true;
+ __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
return false;
}
@@ -325,6 +326,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
if (peer)
inet_putpeer(peer);
out:
+ if (!rc)
+ __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
return rc;
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f2c43f67187d..65ad4251f6fd 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -117,7 +117,7 @@ bool inet_rcv_saddr_any(const struct sock *sk)
return !sk->sk_rcv_saddr;
}
-void inet_get_local_port_range(struct net *net, int *low, int *high)
+void inet_get_local_port_range(const struct net *net, int *low, int *high)
{
unsigned int seq;
@@ -130,6 +130,27 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
}
EXPORT_SYMBOL(inet_get_local_port_range);
+void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct net *net = sock_net(sk);
+ int lo, hi, sk_lo, sk_hi;
+
+ inet_get_local_port_range(net, &lo, &hi);
+
+ sk_lo = inet->local_port_range.lo;
+ sk_hi = inet->local_port_range.hi;
+
+ if (unlikely(lo <= sk_lo && sk_lo <= hi))
+ lo = sk_lo;
+ if (unlikely(lo <= sk_hi && sk_hi <= hi))
+ hi = sk_hi;
+
+ *low = lo;
+ *high = hi;
+}
+EXPORT_SYMBOL(inet_sk_get_local_port_range);
+
static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
@@ -316,7 +337,7 @@ inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
ports_exhausted:
attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
- inet_get_local_port_range(net, &low, &high);
+ inet_sk_get_local_port_range(sk, &low, &high);
high++; /* [32768, 60999] -> [32768, 61000[ */
if (high - low < 4)
attempt_half = 0;
@@ -1101,8 +1122,7 @@ static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
if (!icsk->icsk_ulp_ops)
return;
- if (icsk->icsk_ulp_ops->clone)
- icsk->icsk_ulp_ops->clone(req, newsk, priority);
+ icsk->icsk_ulp_ops->clone(req, newsk, priority);
}
/**
@@ -1178,8 +1198,6 @@ void inet_csk_destroy_sock(struct sock *sk)
xfrm_sk_free_policy(sk);
- sk_refcnt_debug_release(sk);
-
this_cpu_dec(*sk->sk_prot->orphan_count);
sock_put(sk);
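
Note how inet_sk_get_local_port_range() only ever narrows the netns range: a per-socket bound is honored only if it already lies inside [lo, hi], and the unset value 0 fails that test by construction. With the default netns range [32768, 60999], a socket range of [40000, 50000] becomes the effective range, while [1000, 2000] is ignored entirely. The same clamp as a standalone illustration:

static void effective_port_range(int ns_lo, int ns_hi, int sk_lo, int sk_hi,
				 int *lo, int *hi)
{
	*lo = ns_lo;
	*hi = ns_hi;
	if (ns_lo <= sk_lo && sk_lo <= ns_hi)	/* socket floor inside range? */
		*lo = sk_lo;
	if (*lo <= sk_hi && sk_hi <= ns_hi)	/* ceiling above the new floor? */
		*hi = sk_hi;
}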
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index f58d73888638..e41fdc38ce19 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -1008,17 +1008,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
u32 index;
if (port) {
- head = &hinfo->bhash[inet_bhashfn(net, port,
- hinfo->bhash_size)];
- tb = inet_csk(sk)->icsk_bind_hash;
- spin_lock_bh(&head->lock);
- if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- inet_ehash_nolisten(sk, NULL, NULL);
- spin_unlock_bh(&head->lock);
- return 0;
- }
- spin_unlock(&head->lock);
- /* No definite answer... Walk to established hash table */
+ local_bh_disable();
ret = check_established(death_row, sk, port, NULL);
local_bh_enable();
return ret;
@@ -1026,7 +1016,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
l3mdev = inet_sk_bound_l3mdev(sk);
- inet_get_local_port_range(net, &low, &high);
+ inet_sk_get_local_port_range(sk, &low, &high);
high++; /* [32768, 60999] -> [32768, 61000[ */
remaining = high - low;
if (likely(remaining > 1))
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index beed32fff484..40052414c7c7 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -77,9 +77,6 @@ void inet_twsk_free(struct inet_timewait_sock *tw)
{
struct module *owner = tw->tw_prot->owner;
twsk_destructor((struct sock *)tw);
-#ifdef SOCK_REFCNT_DEBUG
- pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
-#endif
kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
module_put(owner);
}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e880ce77322a..fe9ead9ee863 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -511,7 +511,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto csum_error;
- len = ntohs(iph->tot_len);
+ len = iph_totlen(skb, iph);
if (skb->len < len) {
drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 922c87ef1ab5..4e4e308c3230 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -100,7 +100,7 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = htons(skb->len);
+ iph_set_totlen(iph, skb->len);
ip_send_check(iph);
/* if egress device is enslaved to an L3 master device pass the
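
Both the ip_input.c and ip_output.c hunks exist because tot_len is a 16-bit field: a TSO frame longer than 64 KiB cannot encode its true length in the header, so the sender stores 0 and the receiver recovers the length from the skb itself. A sketch of the two helpers under that reading (the authoritative definitions live in include/net/ip.h):

static inline u32 iph_totlen(const struct sk_buff *skb, const struct iphdr *iph)
{
	u32 len = ntohs(iph->tot_len);

	/* 0 on a GSO TCP packet means "longer than 16 bits can express" */
	return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
	       len : skb->len - skb_network_offset(skb);
}

static inline void iph_set_totlen(struct iphdr *iph, unsigned int len)
{
	iph->tot_len = len <= IP_MAX_MTU ? htons(len) : 0;
}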
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 9f92ae35bb01..b511ff0adc0a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -923,6 +923,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
case IP_CHECKSUM:
case IP_RECVFRAGSIZE:
case IP_RECVERR_RFC4884:
+ case IP_LOCAL_PORT_RANGE:
if (optlen >= sizeof(int)) {
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
@@ -1365,6 +1366,20 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
WRITE_ONCE(inet->min_ttl, val);
break;
+ case IP_LOCAL_PORT_RANGE:
+ {
+ const __u16 lo = val;
+ const __u16 hi = val >> 16;
+
+ if (optlen != sizeof(__u32))
+ goto e_inval;
+ if (lo != 0 && hi != 0 && lo > hi)
+ goto e_inval;
+
+ inet->local_port_range.lo = lo;
+ inet->local_port_range.hi = hi;
+ break;
+ }
default:
err = -ENOPROTOOPT;
break;
@@ -1743,6 +1758,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_MINTTL:
val = inet->min_ttl;
break;
+ case IP_LOCAL_PORT_RANGE:
+ val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
+ break;
default:
sockopt_release_sock(sk);
return -ENOPROTOOPT;
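
From userspace, the option takes a single __u32 packed exactly as the get path above encodes it: the high 16 bits are the upper bound, the low 16 bits the lower bound, and 0 in either half leaves that bound unset. A minimal usage sketch; the IP_LOCAL_PORT_RANGE constant ships with this release's uapi headers, the fallback value below is an assumption for older ones:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51	/* assumed; check linux/in.h */
#endif

int main(void)
{
	uint16_t lo = 40000, hi = 40100;
	uint32_t range = ((uint32_t)hi << 16) | lo;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
				 &range, sizeof(range)) < 0) {
		perror("IP_LOCAL_PORT_RANGE");
		return 1;
	}
	/* connect()/bind(port 0) now picks a source port in [40000, 40100],
	 * provided that window overlaps the netns ip_local_port_range. */
	close(fd);
	return 0;
}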
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index aab384126f61..f71a7e9a7de6 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -259,20 +259,6 @@ config IP_NF_MANGLE
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_TARGET_CLUSTERIP
- tristate "CLUSTERIP target support"
- depends on IP_NF_MANGLE
- depends on NF_CONNTRACK
- depends on NETFILTER_ADVANCED
- select NF_CONNTRACK_MARK
- select NETFILTER_FAMILY_ARP
- help
- The CLUSTERIP target allows you to build load-balancing clusters of
- network servers without having a dedicated load-balancing
- router/server/switch.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_TARGET_ECN
tristate "ECN target support"
depends on IP_NF_MANGLE
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 93bad1184251..5a26f9de1ab9 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
# targets
-obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
deleted file mode 100644
index b3cc416ed292..000000000000
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ /dev/null
@@ -1,929 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Cluster IP hashmark target
- * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
- * based on ideas of Fabio Olive Leite <olive@unixforge.org>
- *
- * Development of this code funded by SuSE Linux AG, https://www.suse.com/
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/jhash.h>
-#include <linux/bitops.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/icmp.h>
-#include <linux/if_arp.h>
-#include <linux/seq_file.h>
-#include <linux/refcount.h>
-#include <linux/netfilter_arp.h>
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/checksum.h>
-#include <net/ip.h>
-
-#define CLUSTERIP_VERSION "0.8"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: CLUSTERIP target");
-
-struct clusterip_config {
- struct list_head list; /* list of all configs */
- refcount_t refcount; /* reference count */
- refcount_t entries; /* number of entries/rules
- * referencing us */
-
- __be32 clusterip; /* the IP address */
- u_int8_t clustermac[ETH_ALEN]; /* the MAC address */
- int ifindex; /* device ifindex */
- u_int16_t num_total_nodes; /* total number of nodes */
- unsigned long local_nodes; /* node number array */
-
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *pde; /* proc dir entry */
-#endif
- enum clusterip_hashmode hash_mode; /* which hashing mode */
- u_int32_t hash_initval; /* hash initialization */
- struct rcu_head rcu; /* for call_rcu */
- struct net *net; /* netns for pernet list */
- char ifname[IFNAMSIZ]; /* device ifname */
-};
-
-#ifdef CONFIG_PROC_FS
-static const struct proc_ops clusterip_proc_ops;
-#endif
-
-struct clusterip_net {
- struct list_head configs;
- /* lock protects the configs list */
- spinlock_t lock;
-
- bool clusterip_deprecated_warning;
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *procdir;
- /* mutex protects the config->pde*/
- struct mutex mutex;
-#endif
- unsigned int hook_users;
-};
-
-static unsigned int clusterip_arp_mangle(void *priv, struct sk_buff *skb, const struct nf_hook_state *state);
-
-static const struct nf_hook_ops cip_arp_ops = {
- .hook = clusterip_arp_mangle,
- .pf = NFPROTO_ARP,
- .hooknum = NF_ARP_OUT,
- .priority = -1
-};
-
-static unsigned int clusterip_net_id __read_mostly;
-static inline struct clusterip_net *clusterip_pernet(struct net *net)
-{
- return net_generic(net, clusterip_net_id);
-}
-
-static inline void
-clusterip_config_get(struct clusterip_config *c)
-{
- refcount_inc(&c->refcount);
-}
-
-static void clusterip_config_rcu_free(struct rcu_head *head)
-{
- struct clusterip_config *config;
- struct net_device *dev;
-
- config = container_of(head, struct clusterip_config, rcu);
- dev = dev_get_by_name(config->net, config->ifname);
- if (dev) {
- dev_mc_del(dev, config->clustermac);
- dev_put(dev);
- }
- kfree(config);
-}
-
-static inline void
-clusterip_config_put(struct clusterip_config *c)
-{
- if (refcount_dec_and_test(&c->refcount))
- call_rcu(&c->rcu, clusterip_config_rcu_free);
-}
-
-/* decrease the count of entries using/referencing this config. If last
- * entry(rule) is removed, remove the config from lists, but don't free it
- * yet, since proc-files could still be holding references */
-static inline void
-clusterip_config_entry_put(struct clusterip_config *c)
-{
- struct clusterip_net *cn = clusterip_pernet(c->net);
-
- local_bh_disable();
- if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
- list_del_rcu(&c->list);
- spin_unlock(&cn->lock);
- local_bh_enable();
- /* In case anyone still accesses the file, the open/close
- * functions are also incrementing the refcount on their own,
- * so it's safe to remove the entry even if it's in use. */
-#ifdef CONFIG_PROC_FS
- mutex_lock(&cn->mutex);
- if (cn->procdir)
- proc_remove(c->pde);
- mutex_unlock(&cn->mutex);
-#endif
- return;
- }
- local_bh_enable();
-}
-
-static struct clusterip_config *
-__clusterip_config_find(struct net *net, __be32 clusterip)
-{
- struct clusterip_config *c;
- struct clusterip_net *cn = clusterip_pernet(net);
-
- list_for_each_entry_rcu(c, &cn->configs, list) {
- if (c->clusterip == clusterip)
- return c;
- }
-
- return NULL;
-}
-
-static inline struct clusterip_config *
-clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
-{
- struct clusterip_config *c;
-
- rcu_read_lock_bh();
- c = __clusterip_config_find(net, clusterip);
- if (c) {
-#ifdef CONFIG_PROC_FS
- if (!c->pde)
- c = NULL;
- else
-#endif
- if (unlikely(!refcount_inc_not_zero(&c->refcount)))
- c = NULL;
- else if (entry) {
- if (unlikely(!refcount_inc_not_zero(&c->entries))) {
- clusterip_config_put(c);
- c = NULL;
- }
- }
- }
- rcu_read_unlock_bh();
-
- return c;
-}
-
-static void
-clusterip_config_init_nodelist(struct clusterip_config *c,
- const struct ipt_clusterip_tgt_info *i)
-{
- int n;
-
- for (n = 0; n < i->num_local_nodes; n++)
- set_bit(i->local_nodes[n] - 1, &c->local_nodes);
-}
-
-static int
-clusterip_netdev_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct net *net = dev_net(dev);
- struct clusterip_net *cn = clusterip_pernet(net);
- struct clusterip_config *c;
-
- spin_lock_bh(&cn->lock);
- list_for_each_entry_rcu(c, &cn->configs, list) {
- switch (event) {
- case NETDEV_REGISTER:
- if (!strcmp(dev->name, c->ifname)) {
- c->ifindex = dev->ifindex;
- dev_mc_add(dev, c->clustermac);
- }
- break;
- case NETDEV_UNREGISTER:
- if (dev->ifindex == c->ifindex) {
- dev_mc_del(dev, c->clustermac);
- c->ifindex = -1;
- }
- break;
- case NETDEV_CHANGENAME:
- if (!strcmp(dev->name, c->ifname)) {
- c->ifindex = dev->ifindex;
- dev_mc_add(dev, c->clustermac);
- } else if (dev->ifindex == c->ifindex) {
- dev_mc_del(dev, c->clustermac);
- c->ifindex = -1;
- }
- break;
- }
- }
- spin_unlock_bh(&cn->lock);
-
- return NOTIFY_DONE;
-}
-
-static struct clusterip_config *
-clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
- __be32 ip, const char *iniface)
-{
- struct clusterip_net *cn = clusterip_pernet(net);
- struct clusterip_config *c;
- struct net_device *dev;
- int err;
-
- if (iniface[0] == '\0') {
- pr_info("Please specify an interface name\n");
- return ERR_PTR(-EINVAL);
- }
-
- c = kzalloc(sizeof(*c), GFP_ATOMIC);
- if (!c)
- return ERR_PTR(-ENOMEM);
-
- dev = dev_get_by_name(net, iniface);
- if (!dev) {
- pr_info("no such interface %s\n", iniface);
- kfree(c);
- return ERR_PTR(-ENOENT);
- }
- c->ifindex = dev->ifindex;
- strcpy(c->ifname, dev->name);
- memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
- dev_mc_add(dev, c->clustermac);
- dev_put(dev);
-
- c->clusterip = ip;
- c->num_total_nodes = i->num_total_nodes;
- clusterip_config_init_nodelist(c, i);
- c->hash_mode = i->hash_mode;
- c->hash_initval = i->hash_initval;
- c->net = net;
- refcount_set(&c->refcount, 1);
-
- spin_lock_bh(&cn->lock);
- if (__clusterip_config_find(net, ip)) {
- err = -EBUSY;
- goto out_config_put;
- }
-
- list_add_rcu(&c->list, &cn->configs);
- spin_unlock_bh(&cn->lock);
-
-#ifdef CONFIG_PROC_FS
- {
- char buffer[16];
-
- /* create proc dir entry */
- sprintf(buffer, "%pI4", &ip);
- mutex_lock(&cn->mutex);
- c->pde = proc_create_data(buffer, 0600,
- cn->procdir,
- &clusterip_proc_ops, c);
- mutex_unlock(&cn->mutex);
- if (!c->pde) {
- err = -ENOMEM;
- goto err;
- }
- }
-#endif
-
- refcount_set(&c->entries, 1);
- return c;
-
-#ifdef CONFIG_PROC_FS
-err:
-#endif
- spin_lock_bh(&cn->lock);
- list_del_rcu(&c->list);
-out_config_put:
- spin_unlock_bh(&cn->lock);
- clusterip_config_put(c);
- return ERR_PTR(err);
-}
-
-#ifdef CONFIG_PROC_FS
-static int
-clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum)
-{
-
- if (nodenum == 0 ||
- nodenum > c->num_total_nodes)
- return 1;
-
- /* check if we already have this number in our bitfield */
- if (test_and_set_bit(nodenum - 1, &c->local_nodes))
- return 1;
-
- return 0;
-}
-
-static bool
-clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum)
-{
- if (nodenum == 0 ||
- nodenum > c->num_total_nodes)
- return true;
-
- if (test_and_clear_bit(nodenum - 1, &c->local_nodes))
- return false;
-
- return true;
-}
-#endif
-
-static inline u_int32_t
-clusterip_hashfn(const struct sk_buff *skb,
- const struct clusterip_config *config)
-{
- const struct iphdr *iph = ip_hdr(skb);
- unsigned long hashval;
- u_int16_t sport = 0, dport = 0;
- int poff;
-
- poff = proto_ports_offset(iph->protocol);
- if (poff >= 0) {
- const u_int16_t *ports;
- u16 _ports[2];
-
- ports = skb_header_pointer(skb, iph->ihl * 4 + poff, 4, _ports);
- if (ports) {
- sport = ports[0];
- dport = ports[1];
- }
- } else {
- net_info_ratelimited("unknown protocol %u\n", iph->protocol);
- }
-
- switch (config->hash_mode) {
- case CLUSTERIP_HASHMODE_SIP:
- hashval = jhash_1word(ntohl(iph->saddr),
- config->hash_initval);
- break;
- case CLUSTERIP_HASHMODE_SIP_SPT:
- hashval = jhash_2words(ntohl(iph->saddr), sport,
- config->hash_initval);
- break;
- case CLUSTERIP_HASHMODE_SIP_SPT_DPT:
- hashval = jhash_3words(ntohl(iph->saddr), sport, dport,
- config->hash_initval);
- break;
- default:
- /* to make gcc happy */
- hashval = 0;
- /* This cannot happen, unless the check function wasn't called
- * at rule load time */
- pr_info("unknown mode %u\n", config->hash_mode);
- BUG();
- break;
- }
-
- /* node numbers are 1..n, not 0..n */
- return reciprocal_scale(hashval, config->num_total_nodes) + 1;
-}
-
-static inline int
-clusterip_responsible(const struct clusterip_config *config, u_int32_t hash)
-{
- return test_bit(hash - 1, &config->local_nodes);
-}
-
-/***********************************************************************
- * IPTABLES TARGET
- ***********************************************************************/
-
-static unsigned int
-clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
- const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- u_int32_t hash;
-
- /* don't need to clusterip_config_get() here, since refcount
- * is only decremented by destroy() - and ip_tables guarantees
- * that the ->target() function isn't called after ->destroy() */
-
- ct = nf_ct_get(skb, &ctinfo);
- if (ct == NULL)
- return NF_DROP;
-
- /* special case: ICMP error handling. conntrack distinguishes between
- * error messages (RELATED) and information requests (see below) */
- if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
- (ctinfo == IP_CT_RELATED ||
- ctinfo == IP_CT_RELATED_REPLY))
- return XT_CONTINUE;
-
- /* nf_conntrack_proto_icmp guarantees us that we only have ICMP_ECHO,
- * TIMESTAMP, INFO_REQUEST or ICMP_ADDRESS type icmp packets from here
- * on, which all have an ID field [relevant for hashing]. */
-
- hash = clusterip_hashfn(skb, cipinfo->config);
-
- switch (ctinfo) {
- case IP_CT_NEW:
- WRITE_ONCE(ct->mark, hash);
- break;
- case IP_CT_RELATED:
- case IP_CT_RELATED_REPLY:
- /* FIXME: we don't handle expectations at the moment.
- * They can arrive on a different node than
- * the master connection (e.g. FTP passive mode) */
- case IP_CT_ESTABLISHED:
- case IP_CT_ESTABLISHED_REPLY:
- break;
- default: /* Prevent gcc warnings */
- break;
- }
-
-#ifdef DEBUG
- nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-#endif
- pr_debug("hash=%u ct_hash=%u ", hash, READ_ONCE(ct->mark));
- if (!clusterip_responsible(cipinfo->config, hash)) {
- pr_debug("not responsible\n");
- return NF_DROP;
- }
- pr_debug("responsible\n");
-
- /* despite being received via linklayer multicast, this is
- * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */
- skb->pkt_type = PACKET_HOST;
-
- return XT_CONTINUE;
-}
-
-static int clusterip_tg_check(const struct xt_tgchk_param *par)
-{
- struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
- struct clusterip_net *cn = clusterip_pernet(par->net);
- const struct ipt_entry *e = par->entryinfo;
- struct clusterip_config *config;
- int ret, i;
-
- if (par->nft_compat) {
- pr_err("cannot use CLUSTERIP target from nftables compat\n");
- return -EOPNOTSUPP;
- }
-
- if (cn->hook_users == UINT_MAX)
- return -EOVERFLOW;
-
- if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
- cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
- cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
- pr_info("unknown mode %u\n", cipinfo->hash_mode);
- return -EINVAL;
-
- }
- if (e->ip.dmsk.s_addr != htonl(0xffffffff) ||
- e->ip.dst.s_addr == 0) {
- pr_info("Please specify destination IP\n");
- return -EINVAL;
- }
- if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
- pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
- return -EINVAL;
- }
- for (i = 0; i < cipinfo->num_local_nodes; i++) {
- if (cipinfo->local_nodes[i] - 1 >=
- sizeof(config->local_nodes) * 8) {
- pr_info("bad local_nodes[%d] %u\n",
- i, cipinfo->local_nodes[i]);
- return -EINVAL;
- }
- }
-
- config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
- if (!config) {
- if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
- pr_info("no config found for %pI4, need 'new'\n",
- &e->ip.dst.s_addr);
- return -EINVAL;
- } else {
- config = clusterip_config_init(par->net, cipinfo,
- e->ip.dst.s_addr,
- e->ip.iniface);
- if (IS_ERR(config))
- return PTR_ERR(config);
- }
- } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN)) {
- clusterip_config_entry_put(config);
- clusterip_config_put(config);
- return -EINVAL;
- }
-
- ret = nf_ct_netns_get(par->net, par->family);
- if (ret < 0) {
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
- clusterip_config_entry_put(config);
- clusterip_config_put(config);
- return ret;
- }
-
- if (cn->hook_users == 0) {
- ret = nf_register_net_hook(par->net, &cip_arp_ops);
-
- if (ret < 0) {
- clusterip_config_entry_put(config);
- clusterip_config_put(config);
- nf_ct_netns_put(par->net, par->family);
- return ret;
- }
- }
-
- cn->hook_users++;
-
- if (!cn->clusterip_deprecated_warning) {
- pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
- "use xt_cluster instead\n");
- cn->clusterip_deprecated_warning = true;
- }
-
- cipinfo->config = config;
- return ret;
-}
-
-/* drop reference count of cluster config when rule is deleted */
-static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
-{
- const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
- struct clusterip_net *cn = clusterip_pernet(par->net);
-
- /* if no more entries are referencing the config, remove it
- * from the list and destroy the proc entry */
- clusterip_config_entry_put(cipinfo->config);
-
- clusterip_config_put(cipinfo->config);
-
- nf_ct_netns_put(par->net, par->family);
- cn->hook_users--;
-
- if (cn->hook_users == 0)
- nf_unregister_net_hook(par->net, &cip_arp_ops);
-}
-
-#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
-struct compat_ipt_clusterip_tgt_info
-{
- u_int32_t flags;
- u_int8_t clustermac[6];
- u_int16_t num_total_nodes;
- u_int16_t num_local_nodes;
- u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
- u_int32_t hash_mode;
- u_int32_t hash_initval;
- compat_uptr_t config;
-};
-#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
-
-static struct xt_target clusterip_tg_reg __read_mostly = {
- .name = "CLUSTERIP",
- .family = NFPROTO_IPV4,
- .target = clusterip_tg,
- .checkentry = clusterip_tg_check,
- .destroy = clusterip_tg_destroy,
- .targetsize = sizeof(struct ipt_clusterip_tgt_info),
- .usersize = offsetof(struct ipt_clusterip_tgt_info, config),
-#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
- .compatsize = sizeof(struct compat_ipt_clusterip_tgt_info),
-#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
- .me = THIS_MODULE
-};
-
-
-/***********************************************************************
- * ARP MANGLING CODE
- ***********************************************************************/
-
-/* hardcoded for 48bit ethernet and 32bit ipv4 addresses */
-struct arp_payload {
- u_int8_t src_hw[ETH_ALEN];
- __be32 src_ip;
- u_int8_t dst_hw[ETH_ALEN];
- __be32 dst_ip;
-} __packed;
-
-#ifdef DEBUG
-static void arp_print(struct arp_payload *payload)
-{
-#define HBUFFERLEN 30
- char hbuffer[HBUFFERLEN];
- int j, k;
-
- for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < ETH_ALEN; j++) {
- hbuffer[k++] = hex_asc_hi(payload->src_hw[j]);
- hbuffer[k++] = hex_asc_lo(payload->src_hw[j]);
- hbuffer[k++] = ':';
- }
- hbuffer[--k] = '\0';
-
- pr_debug("src %pI4@%s, dst %pI4\n",
- &payload->src_ip, hbuffer, &payload->dst_ip);
-}
-#endif
-
-static unsigned int
-clusterip_arp_mangle(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct arphdr *arp = arp_hdr(skb);
- struct arp_payload *payload;
- struct clusterip_config *c;
- struct net *net = state->net;
-
- /* we don't care about non-ethernet and non-ipv4 ARP */
- if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
- arp->ar_pro != htons(ETH_P_IP) ||
- arp->ar_pln != 4 || arp->ar_hln != ETH_ALEN)
- return NF_ACCEPT;
-
- /* we only want to mangle arp requests and replies */
- if (arp->ar_op != htons(ARPOP_REPLY) &&
- arp->ar_op != htons(ARPOP_REQUEST))
- return NF_ACCEPT;
-
- payload = (void *)(arp+1);
-
- /* if there is no clusterip configuration for the arp reply's
- * source ip, we don't want to mangle it */
- c = clusterip_config_find_get(net, payload->src_ip, 0);
- if (!c)
- return NF_ACCEPT;
-
- /* normally the linux kernel always replies to arp queries of
- * addresses on different interfacs. However, in the CLUSTERIP case
- * this wouldn't work, since we didn't subscribe the mcast group on
- * other interfaces */
- if (c->ifindex != state->out->ifindex) {
- pr_debug("not mangling arp reply on different interface: cip'%d'-skb'%d'\n",
- c->ifindex, state->out->ifindex);
- clusterip_config_put(c);
- return NF_ACCEPT;
- }
-
- /* mangle reply hardware address */
- memcpy(payload->src_hw, c->clustermac, arp->ar_hln);
-
-#ifdef DEBUG
- pr_debug("mangled arp reply: ");
- arp_print(payload);
-#endif
-
- clusterip_config_put(c);
-
- return NF_ACCEPT;
-}
-
-/***********************************************************************
- * PROC DIR HANDLING
- ***********************************************************************/
-
-#ifdef CONFIG_PROC_FS
-
-struct clusterip_seq_position {
- unsigned int pos; /* position */
- unsigned int weight; /* number of bits set == size */
- unsigned int bit; /* current bit */
- unsigned long val; /* current value */
-};
-
-static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct clusterip_config *c = s->private;
- unsigned int weight;
- u_int32_t local_nodes;
- struct clusterip_seq_position *idx;
-
- /* FIXME: possible race */
- local_nodes = c->local_nodes;
- weight = hweight32(local_nodes);
- if (*pos >= weight)
- return NULL;
-
- idx = kmalloc(sizeof(struct clusterip_seq_position), GFP_KERNEL);
- if (!idx)
- return ERR_PTR(-ENOMEM);
-
- idx->pos = *pos;
- idx->weight = weight;
- idx->bit = ffs(local_nodes);
- idx->val = local_nodes;
- clear_bit(idx->bit - 1, &idx->val);
-
- return idx;
-}
-
-static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct clusterip_seq_position *idx = v;
-
- *pos = ++idx->pos;
- if (*pos >= idx->weight) {
- kfree(v);
- return NULL;
- }
- idx->bit = ffs(idx->val);
- clear_bit(idx->bit - 1, &idx->val);
- return idx;
-}
-
-static void clusterip_seq_stop(struct seq_file *s, void *v)
-{
- if (!IS_ERR(v))
- kfree(v);
-}
-
-static int clusterip_seq_show(struct seq_file *s, void *v)
-{
- struct clusterip_seq_position *idx = v;
-
- if (idx->pos != 0)
- seq_putc(s, ',');
-
- seq_printf(s, "%u", idx->bit);
-
- if (idx->pos == idx->weight - 1)
- seq_putc(s, '\n');
-
- return 0;
-}
-
-static const struct seq_operations clusterip_seq_ops = {
- .start = clusterip_seq_start,
- .next = clusterip_seq_next,
- .stop = clusterip_seq_stop,
- .show = clusterip_seq_show,
-};
-
-static int clusterip_proc_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &clusterip_seq_ops);
-
- if (!ret) {
- struct seq_file *sf = file->private_data;
- struct clusterip_config *c = pde_data(inode);
-
- sf->private = c;
-
- clusterip_config_get(c);
- }
-
- return ret;
-}
-
-static int clusterip_proc_release(struct inode *inode, struct file *file)
-{
- struct clusterip_config *c = pde_data(inode);
- int ret;
-
- ret = seq_release(inode, file);
-
- if (!ret)
- clusterip_config_put(c);
-
- return ret;
-}
-
-static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
- size_t size, loff_t *ofs)
-{
- struct clusterip_config *c = pde_data(file_inode(file));
-#define PROC_WRITELEN 10
- char buffer[PROC_WRITELEN+1];
- unsigned long nodenum;
- int rc;
-
- if (size > PROC_WRITELEN)
- return -EIO;
- if (copy_from_user(buffer, input, size))
- return -EFAULT;
- buffer[size] = 0;
-
- if (*buffer == '+') {
- rc = kstrtoul(buffer+1, 10, &nodenum);
- if (rc)
- return rc;
- if (clusterip_add_node(c, nodenum))
- return -ENOMEM;
- } else if (*buffer == '-') {
- rc = kstrtoul(buffer+1, 10, &nodenum);
- if (rc)
- return rc;
- if (clusterip_del_node(c, nodenum))
- return -ENOENT;
- } else
- return -EIO;
-
- return size;
-}
-
-static const struct proc_ops clusterip_proc_ops = {
- .proc_open = clusterip_proc_open,
- .proc_read = seq_read,
- .proc_write = clusterip_proc_write,
- .proc_lseek = seq_lseek,
- .proc_release = clusterip_proc_release,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-static int clusterip_net_init(struct net *net)
-{
- struct clusterip_net *cn = clusterip_pernet(net);
-
- INIT_LIST_HEAD(&cn->configs);
-
- spin_lock_init(&cn->lock);
-
-#ifdef CONFIG_PROC_FS
- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
- if (!cn->procdir) {
- pr_err("Unable to proc dir entry\n");
- return -ENOMEM;
- }
- mutex_init(&cn->mutex);
-#endif /* CONFIG_PROC_FS */
-
- return 0;
-}
-
-static void clusterip_net_exit(struct net *net)
-{
-#ifdef CONFIG_PROC_FS
- struct clusterip_net *cn = clusterip_pernet(net);
-
- mutex_lock(&cn->mutex);
- proc_remove(cn->procdir);
- cn->procdir = NULL;
- mutex_unlock(&cn->mutex);
-#endif
-}
-
-static struct pernet_operations clusterip_net_ops = {
- .init = clusterip_net_init,
- .exit = clusterip_net_exit,
- .id = &clusterip_net_id,
- .size = sizeof(struct clusterip_net),
-};
-
-static struct notifier_block cip_netdev_notifier = {
- .notifier_call = clusterip_netdev_event
-};
-
-static int __init clusterip_tg_init(void)
-{
- int ret;
-
- ret = register_pernet_subsys(&clusterip_net_ops);
- if (ret < 0)
- return ret;
-
- ret = xt_register_target(&clusterip_tg_reg);
- if (ret < 0)
- goto cleanup_subsys;
-
- ret = register_netdevice_notifier(&cip_netdev_notifier);
- if (ret < 0)
- goto unregister_target;
-
- pr_info("ClusterIP Version %s loaded successfully\n",
- CLUSTERIP_VERSION);
-
- return 0;
-
-unregister_target:
- xt_unregister_target(&clusterip_tg_reg);
-cleanup_subsys:
- unregister_pernet_subsys(&clusterip_net_ops);
- return ret;
-}
-
-static void __exit clusterip_tg_exit(void)
-{
- pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
-
- unregister_netdevice_notifier(&cip_netdev_notifier);
- xt_unregister_target(&clusterip_tg_reg);
- unregister_pernet_subsys(&clusterip_net_ops);
-
- /* Wait for completion of call_rcu()'s (clusterip_config_rcu_free) */
- rcu_barrier();
-}
-
-module_init(clusterip_tg_init);
-module_exit(clusterip_tg_exit);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index d640adcaf1b1..f33aeab9424f 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -280,6 +280,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
goto free_nskb;
nf_ct_attach(nskb, oldskb);
+ nf_ct_set_closing(skb_nfct(oldskb));
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* If we use ip_local_out for bridged traffic, the MAC source on
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f88daace9de3..eaf1d3113b62 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -353,7 +353,7 @@ static void icmp_put(struct seq_file *seq)
seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors");
for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " In%s", icmpmibmap[i].name);
- seq_puts(seq, " OutMsgs OutErrors");
+ seq_puts(seq, " OutMsgs OutErrors OutRateLimitGlobal OutRateLimitHost");
for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " Out%s", icmpmibmap[i].name);
seq_printf(seq, "\nIcmp: %lu %lu %lu",
@@ -363,9 +363,11 @@ static void icmp_put(struct seq_file *seq)
for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + icmpmibmap[i].index));
- seq_printf(seq, " %lu %lu",
+ seq_printf(seq, " %lu %lu %lu %lu",
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
- snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
+ snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS),
+ snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_RATELIMITGLOBAL),
+ snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_RATELIMITHOST));
for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
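
With the header and value lines extended in lockstep, the new counters show up as two extra columns on the Icmp line pair of /proc/net/snmp (and the IPv6 counterpart on Icmp6). A small reader that matches by column name, so it keeps working if further counters are appended (illustrative userspace code, not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char hdr[1024], val[1024], *hs, *vs;
	FILE *f = fopen("/proc/net/snmp", "r");

	if (!f)
		return 1;
	/* each protocol contributes one header line and one value line */
	while (fgets(hdr, sizeof(hdr), f) && fgets(val, sizeof(val), f)) {
		char *h, *v;

		if (strncmp(hdr, "Icmp:", 5))
			continue;
		h = strtok_r(hdr, " \n", &hs);
		v = strtok_r(val, " \n", &vs);
		while (h && v) {
			if (!strcmp(h, "OutRateLimitGlobal") ||
			    !strcmp(h, "OutRateLimitHost"))
				printf("%s = %s\n", h, v);
			h = strtok_r(NULL, " \n", &hs);
			v = strtok_r(NULL, " \n", &vs);
		}
		break;
	}
	fclose(f);
	return 0;
}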
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 006c1f0ed8b4..94df935ee0c5 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -93,7 +93,7 @@ int raw_hash_sk(struct sock *sk)
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
struct hlist_nulls_head *hlist;
- hlist = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
+ hlist = &h->ht[raw_hashfunc(sock_net(sk), inet_sk(sk)->inet_num)];
spin_lock(&h->lock);
__sk_nulls_add_node_rcu(sk, hlist);
@@ -160,9 +160,9 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
* RFC 1122: SHOULD pass TOS value up to the transport layer.
* -> It does. And not only TOS, but all IP header.
*/
-static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
+static int raw_v4_input(struct net *net, struct sk_buff *skb,
+ const struct iphdr *iph, int hash)
{
- struct net *net = dev_net(skb->dev);
struct hlist_nulls_head *hlist;
struct hlist_nulls_node *hnode;
int sdif = inet_sdif(skb);
@@ -193,9 +193,10 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
int raw_local_deliver(struct sk_buff *skb, int protocol)
{
- int hash = protocol & (RAW_HTABLE_SIZE - 1);
+ struct net *net = dev_net(skb->dev);
- return raw_v4_input(skb, ip_hdr(skb), hash);
+ return raw_v4_input(net, skb, ip_hdr(skb),
+ raw_hashfunc(net, protocol));
}
static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
@@ -271,7 +272,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
struct sock *sk;
int hash;
- hash = protocol & (RAW_HTABLE_SIZE - 1);
+ hash = raw_hashfunc(net, protocol);
hlist = &raw_v4_hashinfo.ht[hash];
rcu_read_lock();
@@ -287,11 +288,13 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
+ enum skb_drop_reason reason;
+
/* Charge it to the socket. */
ipv4_pktinfo_prepare(sk, skb);
- if (sock_queue_rcv_skb(sk, skb) < 0) {
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
+ kfree_skb_reason(skb, reason);
return NET_RX_DROP;
}
@@ -302,7 +305,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY);
return NET_RX_DROP;
}
nf_reset_ct(skb);
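
The old bucket choice, protocol & (RAW_HTABLE_SIZE - 1), was identical in every network namespace, making raw-socket hash chains predictable across netns. raw_hashfunc() mixes a per-netns salt into the protocol first, so the same protocol hashes to different buckets in different namespaces. A sketch of the idea; net_hash_mix() and hash_32() are existing kernel helpers, and the exact formula in include/net/raw.h may differ:

static inline int raw_hashfunc(const struct net *net, u32 proto)
{
	return hash_32(net_hash_mix(net) ^ proto, ilog2(RAW_HTABLE_SIZE));
}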
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index d2c470524e58..146792cd26fe 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -295,7 +295,7 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
}
/* override sysctl_tcp_min_tso_segs */
-static u32 bbr_min_tso_segs(struct sock *sk)
+__bpf_kfunc static u32 bbr_min_tso_segs(struct sock *sk)
{
return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}
@@ -328,7 +328,7 @@ static void bbr_save_cwnd(struct sock *sk)
bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp));
}
-static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+__bpf_kfunc static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
@@ -1023,7 +1023,7 @@ static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
bbr_update_gains(sk);
}
-static void bbr_main(struct sock *sk, const struct rate_sample *rs)
+__bpf_kfunc static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
struct bbr *bbr = inet_csk_ca(sk);
u32 bw;
@@ -1035,7 +1035,7 @@ static void bbr_main(struct sock *sk, const struct rate_sample *rs)
bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}
-static void bbr_init(struct sock *sk)
+__bpf_kfunc static void bbr_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
@@ -1077,7 +1077,7 @@ static void bbr_init(struct sock *sk)
cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}
-static u32 bbr_sndbuf_expand(struct sock *sk)
+__bpf_kfunc static u32 bbr_sndbuf_expand(struct sock *sk)
{
/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
return 3;
@@ -1086,7 +1086,7 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
/* In theory BBR does not need to undo the cwnd since it does not
* always reduce cwnd on losses (see bbr_main()). Keep it for now.
*/
-static u32 bbr_undo_cwnd(struct sock *sk)
+__bpf_kfunc static u32 bbr_undo_cwnd(struct sock *sk)
{
struct bbr *bbr = inet_csk_ca(sk);
@@ -1097,7 +1097,7 @@ static u32 bbr_undo_cwnd(struct sock *sk)
}
/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
-static u32 bbr_ssthresh(struct sock *sk)
+__bpf_kfunc static u32 bbr_ssthresh(struct sock *sk)
{
bbr_save_cwnd(sk);
return tcp_sk(sk)->snd_ssthresh;
@@ -1125,7 +1125,7 @@ static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
return 0;
}
-static void bbr_set_state(struct sock *sk, u8 new_state)
+__bpf_kfunc static void bbr_set_state(struct sock *sk, u8 new_state)
{
struct bbr *bbr = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index d3cae40749e8..db8b4b488c31 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -403,7 +403,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
* ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
* returns the leftover acks to adjust cwnd in congestion avoidance mode.
*/
-u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
+__bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);
@@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
* for every packet that was ACKed.
*/
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
+__bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
/* If credits accumulated at a higher w, apply them gently now. */
if (tp->snd_cwnd_cnt >= w) {
@@ -443,7 +443,7 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
/* This is Jacobson's slow start and congestion avoidance.
* SIGCOMM '88, p. 328.
*/
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+__bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -462,7 +462,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
/* Slow start threshold is half the congestion window (min 2) */
-u32 tcp_reno_ssthresh(struct sock *sk)
+__bpf_kfunc u32 tcp_reno_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -470,7 +470,7 @@ u32 tcp_reno_ssthresh(struct sock *sk)
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
-u32 tcp_reno_undo_cwnd(struct sock *sk)
+__bpf_kfunc u32 tcp_reno_undo_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 768c10c1f649..0fd78ecb67e7 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -126,7 +126,7 @@ static inline void bictcp_hystart_reset(struct sock *sk)
ca->sample_cnt = 0;
}
-static void cubictcp_init(struct sock *sk)
+__bpf_kfunc static void cubictcp_init(struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
@@ -139,7 +139,7 @@ static void cubictcp_init(struct sock *sk)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
-static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+__bpf_kfunc static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
struct bictcp *ca = inet_csk_ca(sk);
@@ -321,7 +321,7 @@ tcp_friendliness:
ca->cnt = max(ca->cnt, 2U);
}
-static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+__bpf_kfunc static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
@@ -338,7 +338,7 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
-static u32 cubictcp_recalc_ssthresh(struct sock *sk)
+__bpf_kfunc static u32 cubictcp_recalc_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
@@ -355,7 +355,7 @@ static u32 cubictcp_recalc_ssthresh(struct sock *sk)
return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
}
-static void cubictcp_state(struct sock *sk, u8 new_state)
+__bpf_kfunc static void cubictcp_state(struct sock *sk, u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
@@ -445,7 +445,7 @@ static void hystart_update(struct sock *sk, u32 delay)
}
}
-static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
+__bpf_kfunc static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index e0a2ca7456ff..bb23bb5b387a 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -75,7 +75,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
ca->old_delivered_ce = tp->delivered_ce;
}
-static void dctcp_init(struct sock *sk)
+__bpf_kfunc static void dctcp_init(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -104,7 +104,7 @@ static void dctcp_init(struct sock *sk)
INET_ECN_dontxmit(sk);
}
-static u32 dctcp_ssthresh(struct sock *sk)
+__bpf_kfunc static u32 dctcp_ssthresh(struct sock *sk)
{
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -113,7 +113,7 @@ static u32 dctcp_ssthresh(struct sock *sk)
return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U);
}
-static void dctcp_update_alpha(struct sock *sk, u32 flags)
+__bpf_kfunc static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
@@ -169,7 +169,7 @@ static void dctcp_react_to_loss(struct sock *sk)
tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
-static void dctcp_state(struct sock *sk, u8 new_state)
+__bpf_kfunc static void dctcp_state(struct sock *sk, u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
new_state != inet_csk(sk)->icsk_ca_state)
@@ -179,7 +179,7 @@ static void dctcp_state(struct sock *sk, u8 new_state)
*/
}
-static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+__bpf_kfunc static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
struct dctcp *ca = inet_csk_ca(sk);
@@ -229,7 +229,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
return 0;
}
-static u32 dctcp_cwnd_undo(struct sock *sk)
+__bpf_kfunc static u32 dctcp_cwnd_undo(struct sock *sk)
{
const struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
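
The __bpf_kfunc tag on the congestion-control callbacks across tcp_bbr.c, tcp_cong.c, tcp_cubic.c and tcp_dctcp.c marks them as kernel functions that BPF programs (here, BPF congestion controls that want to chain to the built-in algorithms) may call. The tag alone is not enough; the functions are also published through a BTF id set. A hedged sketch of that registration pattern with illustrative names (the real set for these callbacks is maintained in net/ipv4/bpf_tcp_ca.c):

BTF_SET8_START(demo_cc_kfunc_ids)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_SET8_END(demo_cc_kfunc_ids)

static const struct btf_kfunc_id_set demo_cc_kfunc_set = {
	.owner	= THIS_MODULE,
	.set	= &demo_cc_kfunc_ids,
};

/* from an init path, roughly: */
/* register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &demo_cc_kfunc_set); */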
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8320d0ecb13a..ea370afa70ed 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2102,6 +2102,7 @@ process:
/* min_ttl can be changed concurrently from do_ip_setsockopt() */
if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
+ drop_reason = SKB_DROP_REASON_TCP_MINTTL;
goto discard_and_relse;
}
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9592fe3e444a..c605d171eb2d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -248,7 +248,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
int low, high, remaining;
unsigned int rand;
- inet_get_local_port_range(net, &low, &high);
+ inet_sk_get_local_port_range(sk, &low, &high);
remaining = (high - low) + 1;
rand = get_random_u32();
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 847934763868..38689bedfce7 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -239,16 +239,6 @@ lookup_protocol:
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;
- /*
- * Increment only the relevant sk_prot->socks debug field, this changes
- * the previous behaviour of incrementing both the equivalent to
- * answer->prot->socks (inet6_sock_nr) and inet_sock_nr.
- *
- * This allows better debug granularity as we'll know exactly how many
- * UDPv6, TCPv6, etc socks were allocated, not the sum of all IPv6
- * transport protocol socks. -acme
- */
- sk_refcnt_debug_inc(sk);
if (inet->inet_num) {
/* It assumes that any protocol which allows
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 9d92d51c4757..1f53f2a74480 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -183,6 +183,7 @@ static bool icmpv6_global_allow(struct net *net, int type)
if (icmp_global_allow())
return true;
+ __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
return false;
}
@@ -224,6 +225,9 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
if (peer)
inet_putpeer(peer);
}
+ if (!res)
+ __ICMP6_INC_STATS(net, ip6_dst_idev(dst),
+ ICMP6_MIB_RATELIMITHOST);
dst_release(dst);
return res;
}
@@ -328,7 +332,6 @@ static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt
{
struct ipv6hdr *iph = ipv6_hdr(skb);
struct ipv6_destopt_hao *hao;
- struct in6_addr tmp;
int off;
if (opt->dsthao) {
@@ -336,9 +339,7 @@ static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt
if (likely(off >= 0)) {
hao = (struct ipv6_destopt_hao *)
(skb_network_header(skb) + off);
- tmp = iph->saddr;
- iph->saddr = hao->addr;
- hao->addr = tmp;
+ swap(iph->saddr, hao->addr);
}
}
}
@@ -704,7 +705,7 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
}
EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);
-static void icmpv6_echo_reply(struct sk_buff *skb)
+static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
@@ -718,18 +719,19 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
struct dst_entry *dst;
struct ipcm6_cookie ipc6;
u32 mark = IP6_REPLY_MARK(net, skb->mark);
+ SKB_DR(reason);
bool acast;
u8 type;
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
net->ipv6.sysctl.icmpv6_echo_ignore_multicast)
- return;
+ return reason;
saddr = &ipv6_hdr(skb)->daddr;
acast = ipv6_anycast_destination(skb_dst(skb), saddr);
if (acast && net->ipv6.sysctl.icmpv6_echo_ignore_anycast)
- return;
+ return reason;
if (!ipv6_unicast_destination(skb) &&
!(net->ipv6.sysctl.anycast_src_echo_reply && acast))
@@ -803,6 +805,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
} else {
icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
skb->len + sizeof(struct icmp6hdr));
+ reason = SKB_CONSUMED;
}
out_dst_release:
dst_release(dst);
@@ -810,18 +813,22 @@ out:
icmpv6_xmit_unlock(sk);
out_bh_enable:
local_bh_enable();
+ return reason;
}
-void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
+enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
+ u8 code, __be32 info)
{
struct inet6_skb_parm *opt = IP6CB(skb);
+ struct net *net = dev_net(skb->dev);
const struct inet6_protocol *ipprot;
+ enum skb_drop_reason reason;
int inner_offset;
__be16 frag_off;
u8 nexthdr;
- struct net *net = dev_net(skb->dev);
- if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ reason = pskb_may_pull_reason(skb, sizeof(struct ipv6hdr));
+ if (reason != SKB_NOT_DROPPED_YET)
goto out;
seg6_icmp_srh(skb, opt);
@@ -831,14 +838,17 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
/* now skip over extension headers */
inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
&nexthdr, &frag_off);
- if (inner_offset < 0)
+ if (inner_offset < 0) {
+ SKB_DR_SET(reason, IPV6_BAD_EXTHDR);
goto out;
+ }
} else {
inner_offset = sizeof(struct ipv6hdr);
}
/* Checkin header including 8 bytes of inner protocol header. */
- if (!pskb_may_pull(skb, inner_offset+8))
+ reason = pskb_may_pull_reason(skb, inner_offset + 8);
+ if (reason != SKB_NOT_DROPPED_YET)
goto out;
/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
@@ -853,10 +863,11 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
ipprot->err_handler(skb, opt, type, code, inner_offset, info);
raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
- return;
+ return SKB_CONSUMED;
out:
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+ return reason;
}
/*
@@ -921,12 +932,12 @@ static int icmpv6_rcv(struct sk_buff *skb)
switch (type) {
case ICMPV6_ECHO_REQUEST:
if (!net->ipv6.sysctl.icmpv6_echo_ignore_all)
- icmpv6_echo_reply(skb);
+ reason = icmpv6_echo_reply(skb);
break;
case ICMPV6_EXT_ECHO_REQUEST:
if (!net->ipv6.sysctl.icmpv6_echo_ignore_all &&
READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))
- icmpv6_echo_reply(skb);
+ reason = icmpv6_echo_reply(skb);
break;
case ICMPV6_ECHO_REPLY:
@@ -952,7 +963,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
case ICMPV6_DEST_UNREACH:
case ICMPV6_TIME_EXCEED:
case ICMPV6_PARAMPROB:
- icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
+ reason = icmpv6_notify(skb, type, hdr->icmp6_code,
+ hdr->icmp6_mtu);
break;
case NDISC_ROUTER_SOLICITATION:
@@ -960,7 +972,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
case NDISC_NEIGHBOUR_SOLICITATION:
case NDISC_NEIGHBOUR_ADVERTISEMENT:
case NDISC_REDIRECT:
- ndisc_rcv(skb);
+ reason = ndisc_rcv(skb);
break;
case ICMPV6_MGM_QUERY:
@@ -994,7 +1006,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
* must pass to upper level
*/
- icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
+ reason = icmpv6_notify(skb, type, hdr->icmp6_code,
+ hdr->icmp6_mtu);
}
/* until the v6 path can be better sorted assume failure and
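
The recurring shape of these ICMPv6 conversions (continued in ndisc.c below): handlers that used to return void now report an skb_drop_reason, so the caller can hand kfree_skb_reason() something more precise than NOT_SPECIFIED. Condensed into one illustrative function (demo_parse_options() is made up for the example):

static enum skb_drop_reason demo_rcv(struct sk_buff *skb)
{
	SKB_DR(reason);		/* = SKB_DROP_REASON_NOT_SPECIFIED */

	if (skb->len < sizeof(struct ipv6hdr))
		return SKB_DROP_REASON_PKT_TOO_SMALL;
	if (!demo_parse_options(skb))		/* hypothetical parser */
		SKB_DR_SET(reason, IPV6_BAD_EXTHDR);	/* macro adds the prefix */
	else
		reason = SKB_CONSUMED;	/* kfree_skb_reason() treats this
					 * as a consume, not a drop */
	return reason;
}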
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 9ce51680290b..2917dd8d198c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -464,13 +464,6 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
__ipv6_sock_mc_close(sk);
__ipv6_sock_ac_close(sk);
- /*
- * Sock is moving from IPv6 to IPv4 (sk_prot), so
- * remove it from the refcnt debug socks count in the
- * original family...
- */
- sk_refcnt_debug_dec(sk);
-
if (sk->sk_protocol == IPPROTO_TCP) {
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -507,11 +500,6 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
inet6_cleanup_sock(sk);
- /*
- * ... and add it to the refcnt debug socks count
- * in the new family. -acme
- */
- sk_refcnt_debug_inc(sk);
module_put(THIS_MODULE);
retv = 0;
break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3a553494ff16..c4be62c99f73 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -783,7 +783,7 @@ void ndisc_update(const struct net_device *dev, struct neighbour *neigh,
ndisc_ops_update(dev, neigh, flags, icmp6_type, ndopts);
}
-static void ndisc_recv_ns(struct sk_buff *skb)
+static enum skb_drop_reason ndisc_recv_ns(struct sk_buff *skb)
{
struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
@@ -797,18 +797,17 @@ static void ndisc_recv_ns(struct sk_buff *skb)
struct inet6_dev *idev = NULL;
struct neighbour *neigh;
int dad = ipv6_addr_any(saddr);
- bool inc;
int is_router = -1;
+ SKB_DR(reason);
u64 nonce = 0;
+ bool inc;
- if (skb->len < sizeof(struct nd_msg)) {
- ND_PRINTK(2, warn, "NS: packet too short\n");
- return;
- }
+ if (skb->len < sizeof(struct nd_msg))
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
if (ipv6_addr_is_multicast(&msg->target)) {
ND_PRINTK(2, warn, "NS: multicast target address\n");
- return;
+ return reason;
}
/*
@@ -817,20 +816,18 @@ static void ndisc_recv_ns(struct sk_buff *skb)
*/
if (dad && !ipv6_addr_is_solict_mult(daddr)) {
ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n");
- return;
+ return reason;
}
- if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts)) {
- ND_PRINTK(2, warn, "NS: invalid ND options\n");
- return;
- }
+ if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts))
+ return SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS;
if (ndopts.nd_opts_src_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev);
if (!lladdr) {
ND_PRINTK(2, warn,
"NS: invalid link-layer address length\n");
- return;
+ return reason;
}
/* RFC2461 7.1.1:
@@ -841,7 +838,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
if (dad) {
ND_PRINTK(2, warn,
"NS: bad DAD packet (link-layer address option)\n");
- return;
+ return reason;
}
}
if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
@@ -869,7 +866,7 @@ have_ifp:
* so fail our DAD process
*/
addrconf_dad_failure(skb, ifp);
- return;
+ return reason;
} else {
/*
* This is not a dad solicitation.
@@ -901,7 +898,7 @@ have_ifp:
idev = in6_dev_get(dev);
if (!idev) {
/* XXX: count this drop? */
- return;
+ return reason;
}
if (ipv6_chk_acast_addr(net, dev, &msg->target) ||
@@ -924,8 +921,10 @@ have_ifp:
pneigh_enqueue(&nd_tbl, idev->nd_parms, n);
goto out;
}
- } else
+ } else {
+ SKB_DR_SET(reason, IPV6_NDISC_NS_OTHERHOST);
goto out;
+ }
}
if (is_router < 0)
@@ -958,6 +957,7 @@ have_ifp:
true, (ifp != NULL && inc), inc);
if (neigh)
neigh_release(neigh);
+ reason = SKB_CONSUMED;
}
out:
@@ -965,6 +965,7 @@ out:
in6_ifa_put(ifp);
else
in6_dev_put(idev);
+ return reason;
}
static int accept_untracked_na(struct net_device *dev, struct in6_addr *saddr)
@@ -986,7 +987,7 @@ static int accept_untracked_na(struct net_device *dev, struct in6_addr *saddr)
}
}
-static void ndisc_recv_na(struct sk_buff *skb)
+static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
{
struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
@@ -999,22 +1000,21 @@ static void ndisc_recv_na(struct sk_buff *skb)
struct inet6_dev *idev = __in6_dev_get(dev);
struct inet6_ifaddr *ifp;
struct neighbour *neigh;
+ SKB_DR(reason);
u8 new_state;
- if (skb->len < sizeof(struct nd_msg)) {
- ND_PRINTK(2, warn, "NA: packet too short\n");
- return;
- }
+ if (skb->len < sizeof(struct nd_msg))
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
if (ipv6_addr_is_multicast(&msg->target)) {
ND_PRINTK(2, warn, "NA: target address is multicast\n");
- return;
+ return reason;
}
if (ipv6_addr_is_multicast(daddr) &&
msg->icmph.icmp6_solicited) {
ND_PRINTK(2, warn, "NA: solicited NA is multicasted\n");
- return;
+ return reason;
}
/* For some 802.11 wireless deployments (and possibly other networks),
@@ -1024,18 +1024,17 @@ static void ndisc_recv_na(struct sk_buff *skb)
*/
if (!msg->icmph.icmp6_solicited && idev &&
idev->cnf.drop_unsolicited_na)
- return;
+ return reason;
+
+ if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts))
+ return SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS;
- if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts)) {
- ND_PRINTK(2, warn, "NS: invalid ND option\n");
- return;
- }
if (ndopts.nd_opts_tgt_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev);
if (!lladdr) {
ND_PRINTK(2, warn,
"NA: invalid link-layer address length\n");
- return;
+ return reason;
}
}
ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
@@ -1043,7 +1042,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
if (skb->pkt_type != PACKET_LOOPBACK
&& (ifp->flags & IFA_F_TENTATIVE)) {
addrconf_dad_failure(skb, ifp);
- return;
+ return reason;
}
/* What should we make now? The advertisement
is invalid, but ndisc specs say nothing
@@ -1059,7 +1058,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
"NA: %pM advertised our address %pI6c on %s!\n",
eth_hdr(skb)->h_source, &ifp->addr, ifp->idev->dev->name);
in6_ifa_put(ifp);
- return;
+ return reason;
}
neigh = neigh_lookup(&nd_tbl, &msg->target, dev);
@@ -1120,13 +1119,14 @@ static void ndisc_recv_na(struct sk_buff *skb)
*/
rt6_clean_tohost(dev_net(dev), saddr);
}
-
+ reason = SKB_CONSUMED;
out:
neigh_release(neigh);
}
+ return reason;
}
-static void ndisc_recv_rs(struct sk_buff *skb)
+static enum skb_drop_reason ndisc_recv_rs(struct sk_buff *skb)
{
struct rs_msg *rs_msg = (struct rs_msg *)skb_transport_header(skb);
unsigned long ndoptlen = skb->len - sizeof(*rs_msg);
@@ -1135,14 +1135,15 @@ static void ndisc_recv_rs(struct sk_buff *skb)
const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
struct ndisc_options ndopts;
u8 *lladdr = NULL;
+ SKB_DR(reason);
if (skb->len < sizeof(*rs_msg))
- return;
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
idev = __in6_dev_get(skb->dev);
if (!idev) {
ND_PRINTK(1, err, "RS: can't find in6 device\n");
- return;
+ return reason;
}
/* Don't accept RS if we're not in router mode */
@@ -1157,10 +1158,8 @@ static void ndisc_recv_rs(struct sk_buff *skb)
goto out;
/* Parse ND options */
- if (!ndisc_parse_options(skb->dev, rs_msg->opt, ndoptlen, &ndopts)) {
- ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n");
- goto out;
- }
+ if (!ndisc_parse_options(skb->dev, rs_msg->opt, ndoptlen, &ndopts))
+ return SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS;
if (ndopts.nd_opts_src_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr,
@@ -1177,9 +1176,10 @@ static void ndisc_recv_rs(struct sk_buff *skb)
NEIGH_UPDATE_F_OVERRIDE_ISROUTER,
NDISC_ROUTER_SOLICITATION, &ndopts);
neigh_release(neigh);
+ reason = SKB_CONSUMED;
}
out:
- return;
+ return reason;
}
static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
@@ -1228,20 +1228,21 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err);
}
-static void ndisc_router_discovery(struct sk_buff *skb)
+static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
{
struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
+ bool send_ifinfo_notify = false;
struct neighbour *neigh = NULL;
- struct inet6_dev *in6_dev;
+ struct ndisc_options ndopts;
struct fib6_info *rt = NULL;
+ struct inet6_dev *in6_dev;
u32 defrtr_usr_metric;
+ unsigned int pref = 0;
+ __u32 old_if_flags;
struct net *net;
+ SKB_DR(reason);
int lifetime;
- struct ndisc_options ndopts;
int optlen;
- unsigned int pref = 0;
- __u32 old_if_flags;
- bool send_ifinfo_notify = false;
__u8 *opt = (__u8 *)(ra_msg + 1);
@@ -1253,17 +1254,15 @@ static void ndisc_router_discovery(struct sk_buff *skb)
__func__, skb->dev->name);
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
ND_PRINTK(2, warn, "RA: source address is not link-local\n");
- return;
- }
- if (optlen < 0) {
- ND_PRINTK(2, warn, "RA: packet too short\n");
- return;
+ return reason;
}
+ if (optlen < 0)
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
ND_PRINTK(2, warn, "RA: from host or unauthorized router\n");
- return;
+ return reason;
}
#endif
@@ -1275,13 +1274,11 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (!in6_dev) {
ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
skb->dev->name);
- return;
+ return reason;
}
- if (!ndisc_parse_options(skb->dev, opt, optlen, &ndopts)) {
- ND_PRINTK(2, warn, "RA: invalid ND options\n");
- return;
- }
+ if (!ndisc_parse_options(skb->dev, opt, optlen, &ndopts))
+ return SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS;
if (!ipv6_accept_ra(in6_dev)) {
ND_PRINTK(2, info,
@@ -1362,7 +1359,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
"RA: %s got default router without neighbour\n",
__func__);
fib6_info_release(rt);
- return;
+ return reason;
}
}
/* Set default route metric as specified by user */
@@ -1387,7 +1384,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
ND_PRINTK(0, err,
"RA: %s failed to add default route\n",
__func__);
- return;
+ return reason;
}
neigh = ip6_neigh_lookup(&rt->fib6_nh->fib_nh_gw6,
@@ -1398,7 +1395,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
"RA: %s got default router without neighbour\n",
__func__);
fib6_info_release(rt);
- return;
+ return reason;
}
neigh->flags |= NTF_ROUTER;
} else if (rt && IPV6_EXTRACT_PREF(rt->fib6_flags) != pref) {
@@ -1485,6 +1482,7 @@ skip_linkparms:
NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
NEIGH_UPDATE_F_ISROUTER,
NDISC_ROUTER_ADVERTISEMENT, &ndopts);
+ reason = SKB_CONSUMED;
}
if (!ipv6_accept_ra(in6_dev)) {
@@ -1595,15 +1593,17 @@ out:
fib6_info_release(rt);
if (neigh)
neigh_release(neigh);
+ return reason;
}
-static void ndisc_redirect_rcv(struct sk_buff *skb)
+static enum skb_drop_reason ndisc_redirect_rcv(struct sk_buff *skb)
{
- u8 *hdr;
- struct ndisc_options ndopts;
struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb);
u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
offsetof(struct rd_msg, opt));
+ struct ndisc_options ndopts;
+ SKB_DR(reason);
+ u8 *hdr;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
switch (skb->ndisc_nodetype) {
@@ -1611,31 +1611,31 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
case NDISC_NODETYPE_NODEFAULT:
ND_PRINTK(2, warn,
"Redirect: from host or unauthorized router\n");
- return;
+ return reason;
}
#endif
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
ND_PRINTK(2, warn,
"Redirect: source address is not link-local\n");
- return;
+ return reason;
}
if (!ndisc_parse_options(skb->dev, msg->opt, ndoptlen, &ndopts))
- return;
+ return SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS;
if (!ndopts.nd_opts_rh) {
ip6_redirect_no_header(skb, dev_net(skb->dev),
skb->dev->ifindex);
- return;
+ return reason;
}
hdr = (u8 *)ndopts.nd_opts_rh;
hdr += 8;
if (!pskb_pull(skb, hdr - skb_transport_header(skb)))
- return;
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
- icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
+ return icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
}
static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
@@ -1781,8 +1781,9 @@ release:
static void pndisc_redo(struct sk_buff *skb)
{
- ndisc_recv_ns(skb);
- kfree_skb(skb);
+ enum skb_drop_reason reason = ndisc_recv_ns(skb);
+
+ kfree_skb_reason(skb, reason);
}
static int ndisc_is_multicast(const void *pkey)
@@ -1804,15 +1805,16 @@ static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
return false;
}
-int ndisc_rcv(struct sk_buff *skb)
+enum skb_drop_reason ndisc_rcv(struct sk_buff *skb)
{
struct nd_msg *msg;
+ SKB_DR(reason);
if (ndisc_suppress_frag_ndisc(skb))
- return 0;
+ return SKB_DROP_REASON_IPV6_NDISC_FRAG;
if (skb_linearize(skb))
- return 0;
+ return SKB_DROP_REASON_NOMEM;
msg = (struct nd_msg *)skb_transport_header(skb);
@@ -1821,39 +1823,39 @@ int ndisc_rcv(struct sk_buff *skb)
if (ipv6_hdr(skb)->hop_limit != 255) {
ND_PRINTK(2, warn, "NDISC: invalid hop-limit: %d\n",
ipv6_hdr(skb)->hop_limit);
- return 0;
+ return SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT;
}
if (msg->icmph.icmp6_code != 0) {
ND_PRINTK(2, warn, "NDISC: invalid ICMPv6 code: %d\n",
msg->icmph.icmp6_code);
- return 0;
+ return SKB_DROP_REASON_IPV6_NDISC_BAD_CODE;
}
switch (msg->icmph.icmp6_type) {
case NDISC_NEIGHBOUR_SOLICITATION:
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
- ndisc_recv_ns(skb);
+ reason = ndisc_recv_ns(skb);
break;
case NDISC_NEIGHBOUR_ADVERTISEMENT:
- ndisc_recv_na(skb);
+ reason = ndisc_recv_na(skb);
break;
case NDISC_ROUTER_SOLICITATION:
- ndisc_recv_rs(skb);
+ reason = ndisc_recv_rs(skb);
break;
case NDISC_ROUTER_ADVERTISEMENT:
- ndisc_router_discovery(skb);
+ reason = ndisc_router_discovery(skb);
break;
case NDISC_REDIRECT:
- ndisc_redirect_rcv(skb);
+ reason = ndisc_redirect_rcv(skb);
break;
}
- return 0;
+ return reason;
}
static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
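The ndisc conversion above follows the drop-reasons pattern used throughout
this series: receive handlers return an enum skb_drop_reason instead of
void, and the caller frees the skb with that reason so the kfree_skb
tracepoint and drop monitor can attribute the drop. SKB_DR(reason) simply
declares the variable preinitialized to SKB_DROP_REASON_NOT_SPECIFIED. A
minimal sketch of the calling convention, mirroring pndisc_redo() above:

	/* sketch: consume a drop-reason-returning handler */
	static void rx_and_free(struct sk_buff *skb)
	{
		enum skb_drop_reason reason = ndisc_rcv(skb);

		/* kfree_skb_reason() feeds the tracepoint; SKB_CONSUMED
		 * marks a normal, non-error free rather than a drop.
		 */
		kfree_skb_reason(skb, reason);
	}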
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index f61d4f18e1cf..58ccdb08c0fd 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -345,6 +345,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
nf_ct_attach(nskb, oldskb);
+ nf_ct_set_closing(skb_nfct(oldskb));
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* If we use ip6_local_out for bridged traffic, the MAC source on
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index d6306aa46bb1..e20b3705c2d2 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -94,6 +94,7 @@ static const struct snmp_mib snmp6_icmp6_list[] = {
SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS),
SNMP_MIB_ITEM("Icmp6InCsumErrors", ICMP6_MIB_CSUMERRORS),
+ SNMP_MIB_ITEM("Icmp6OutRateLimitHost", ICMP6_MIB_RATELIMITHOST),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ada087b50541..bac9ba747bde 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -152,7 +152,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
saddr = &ipv6_hdr(skb)->saddr;
daddr = saddr + 1;
- hash = nexthdr & (RAW_HTABLE_SIZE - 1);
+ hash = raw_hashfunc(net, nexthdr);
hlist = &raw_v6_hashinfo.ht[hash];
rcu_read_lock();
sk_nulls_for_each(sk, hnode, hlist) {
@@ -338,7 +338,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
struct sock *sk;
int hash;
- hash = nexthdr & (RAW_HTABLE_SIZE - 1);
+ hash = raw_hashfunc(net, nexthdr);
hlist = &raw_v6_hashinfo.ht[hash];
rcu_read_lock();
sk_nulls_for_each(sk, hnode, hlist) {
@@ -355,17 +355,19 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
+ enum skb_drop_reason reason;
+
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM);
return NET_RX_DROP;
}
/* Charge it to the socket. */
skb_dst_drop(skb);
- if (sock_queue_rcv_skb(sk, skb) < 0) {
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
+ kfree_skb_reason(skb, reason);
return NET_RX_DROP;
}
@@ -386,7 +388,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY);
return NET_RX_DROP;
}
@@ -410,7 +412,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (inet->hdrincl) {
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM);
return NET_RX_DROP;
}
}
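Both raw.c hunks switch the socket lookup from masking the protocol number
directly to raw_hashfunc(), which also mixes in the network namespace so
that hash buckets are no longer predictable across namespaces. The helper
itself is not part of this diff; a plausible sketch of its shape, assuming
the kernel's hash_32() and net_hash_mix() helpers:

	/* sketch (assumption, not shown in this diff): hash netns + proto */
	static inline int raw_hashfunc(const struct net *net, u32 proto)
	{
		return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
	}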
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e74e0361fd92..c180c2ef17c5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -91,7 +91,7 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
struct net_device *dev, int how);
-static int ip6_dst_gc(struct dst_ops *ops);
+static void ip6_dst_gc(struct dst_ops *ops);
static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -2593,9 +2593,10 @@ INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}
-struct dst_entry *ip6_route_output_flags_noref(struct net *net,
- const struct sock *sk,
- struct flowi6 *fl6, int flags)
+static struct dst_entry *ip6_route_output_flags_noref(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6,
+ int flags)
{
bool any_src;
@@ -2624,7 +2625,6 @@ struct dst_entry *ip6_route_output_flags_noref(struct net *net,
return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
-EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
struct dst_entry *ip6_route_output_flags(struct net *net,
const struct sock *sk,
@@ -3284,23 +3284,17 @@ out:
return dst;
}
-static int ip6_dst_gc(struct dst_ops *ops)
+static void ip6_dst_gc(struct dst_ops *ops)
{
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
- int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
unsigned int val;
int entries;
- entries = dst_entries_get_fast(ops);
- if (entries > rt_max_size)
- entries = dst_entries_get_slow(ops);
-
- if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
- entries <= rt_max_size)
+ if (time_after(rt_last_gc + rt_min_interval, jiffies))
goto out;
fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
@@ -3310,7 +3304,6 @@ static int ip6_dst_gc(struct dst_ops *ops)
out:
val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
- return entries > rt_max_size;
}
static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
@@ -6512,7 +6505,7 @@ static int __net_init ip6_route_net_init(struct net *net)
#endif
net->ipv6.sysctl.flush_delay = 0;
- net->ipv6.sysctl.ip6_rt_max_size = 4096;
+ net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
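The route.c hunks above are one logical change: with ip6_rt_max_size now
defaulting to INT_MAX, the entry-count trigger disappears and ip6_dst_gc()
fires purely on the rt_min_interval timer, so IPv6 relies on garbage
collection to reclaim memory the way IPv4 already does. The expire value
still decays geometrically between forced runs; a sketch of that arithmetic
(the elasticity default of 9 is an assumption, not shown in this hunk):

	/* sketch: per-pass decay done at the out: label above; with
	 * elasticity 9 each pass removes val >> 9, i.e. about 0.2%.
	 */
	static unsigned int gc_expire_decay(unsigned int val, int elasticity)
	{
		return val - (val >> elasticity);
	}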
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index ff691d9f4a04..b1c028df686e 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -13,7 +13,7 @@
#include <net/rpl.h>
struct rpl_iptunnel_encap {
- struct ipv6_rpl_sr_hdr srh[0];
+ DECLARE_FLEX_ARRAY(struct ipv6_rpl_sr_hdr, srh);
};
struct rpl_lwt {
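The rpl_iptunnel.c change replaces a zero-length array with
DECLARE_FLEX_ARRAY(). ISO C forbids a flexible array member as the sole
member of a struct, and "srh[0]" was the deprecated GNU workaround; the
macro keeps the declaration legal (in current kernels by pairing the member
with an empty anonymous struct) while the containing struct stays
zero-sized. A minimal sketch of the same situation:

	/* sketch: a struct whose only member is a flexible array */
	struct demo_encap {
		DECLARE_FLEX_ARRAY(struct ipv6_rpl_sr_hdr, srh);
	};
	/* usage is unchanged: encap->srh->hdrlen, encap->srh[0], ... */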
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 487f8e98deaa..dd433cc265c8 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -109,8 +109,15 @@ struct bpf_lwt_prog {
#define next_csid_chk_lcnode_fn_bits(flen) \
next_csid_chk_lcblock_bits(flen)
+#define SEG6_F_LOCAL_FLV_OP(flvname) BIT(SEG6_LOCAL_FLV_OP_##flvname)
+#define SEG6_F_LOCAL_FLV_PSP SEG6_F_LOCAL_FLV_OP(PSP)
+
+/* Supported RFC8986 Flavor operations are reported in this bitmask */
+#define SEG6_LOCAL_FLV8986_SUPP_OPS SEG6_F_LOCAL_FLV_PSP
+
/* Supported Flavor operations are reported in this bitmask */
-#define SEG6_LOCAL_FLV_SUPP_OPS (BIT(SEG6_LOCAL_FLV_OP_NEXT_CSID))
+#define SEG6_LOCAL_FLV_SUPP_OPS (SEG6_F_LOCAL_FLV_OP(NEXT_CSID) | \
+ SEG6_LOCAL_FLV8986_SUPP_OPS)
struct seg6_flavors_info {
/* Flavor operations */
@@ -364,6 +371,14 @@ static void seg6_next_csid_advance_arg(struct in6_addr *addr,
memset(&addr->s6_addr[16 - fnc_octects], 0x00, fnc_octects);
}
+static int input_action_end_finish(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+{
+ seg6_lookup_nexthop(skb, NULL, 0);
+
+ return dst_input(skb);
+}
+
static int input_action_end_core(struct sk_buff *skb,
struct seg6_local_lwt *slwt)
{
@@ -375,9 +390,7 @@ static int input_action_end_core(struct sk_buff *skb,
advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
- seg6_lookup_nexthop(skb, NULL, 0);
-
- return dst_input(skb);
+ return input_action_end_finish(skb, slwt);
drop:
kfree_skb(skb);
@@ -395,9 +408,7 @@ static int end_next_csid_core(struct sk_buff *skb, struct seg6_local_lwt *slwt)
/* update DA */
seg6_next_csid_advance_arg(daddr, finfo);
- seg6_lookup_nexthop(skb, NULL, 0);
-
- return dst_input(skb);
+ return input_action_end_finish(skb, slwt);
}
static bool seg6_next_csid_enabled(__u32 fops)
@@ -405,15 +416,331 @@ static bool seg6_next_csid_enabled(__u32 fops)
return fops & BIT(SEG6_LOCAL_FLV_OP_NEXT_CSID);
}
+/* We describe the packet state in relation to the absence/presence of the SRH
+ * and the Segment Left (SL) field.
+ * For our purposes, it is not necessary to record the exact value of the SL
+ * when the SID List consists of two or more segments.
+ */
+enum seg6_local_pktinfo {
+ /* the order really matters! */
+ SEG6_LOCAL_PKTINFO_NOHDR = 0,
+ SEG6_LOCAL_PKTINFO_SL_ZERO,
+ SEG6_LOCAL_PKTINFO_SL_ONE,
+ SEG6_LOCAL_PKTINFO_SL_MORE,
+ __SEG6_LOCAL_PKTINFO_MAX,
+};
+
+#define SEG6_LOCAL_PKTINFO_MAX (__SEG6_LOCAL_PKTINFO_MAX - 1)
+
+static enum seg6_local_pktinfo seg6_get_srh_pktinfo(struct ipv6_sr_hdr *srh)
+{
+ __u8 sgl;
+
+ if (!srh)
+ return SEG6_LOCAL_PKTINFO_NOHDR;
+
+ sgl = srh->segments_left;
+ if (sgl < 2)
+ return SEG6_LOCAL_PKTINFO_SL_ZERO + sgl;
+
+ return SEG6_LOCAL_PKTINFO_SL_MORE;
+}
+
+enum seg6_local_flv_action {
+ SEG6_LOCAL_FLV_ACT_UNSPEC = 0,
+ SEG6_LOCAL_FLV_ACT_END,
+ SEG6_LOCAL_FLV_ACT_PSP,
+ SEG6_LOCAL_FLV_ACT_USP,
+ SEG6_LOCAL_FLV_ACT_USD,
+ __SEG6_LOCAL_FLV_ACT_MAX
+};
+
+#define SEG6_LOCAL_FLV_ACT_MAX (__SEG6_LOCAL_FLV_ACT_MAX - 1)
+
+/* The action table for RFC8986 flavors (see the flv8986_act_tbl below)
+ * contains the actions (i.e. processing operations) to be applied on packets
+ * when flavors are configured for an End* behavior.
+ * By combining the pktinfo data with the flavors mask, the macro
+ * computes the index used to access the elements (actions) stored in the
+ * action table. The index is structured as follows:
+ *
+ * index
+ * _______________/\________________
+ * / \
+ * +----------------+----------------+
+ * | pf | afm |
+ * +----------------+----------------+
+ * ph-1 ... p1 p0 fk-1 ... f1 f0
+ * MSB LSB
+ *
+ * where:
+ * - 'afm' (adjusted flavor mask) is the mask containing a combination of the
+ * RFC8986 flavors currently supported. 'afm' corresponds to the @fm
+ * argument of the macro whose value is right-shifted by 1 bit. By doing so,
+ * we discard the SEG6_LOCAL_FLV_OP_UNSPEC flag (bit 0 in @fm) which is
+ * never used here;
+ * - 'pf' encodes the packet info (pktinfo) regarding the presence/absence of
+ * the SRH, SL = 0, etc. 'pf' is set with the value of @pf provided as
+ * argument to the macro.
+ */
+#define flv8986_act_tbl_idx(pf, fm) \
+ ((((pf) << bits_per(SEG6_LOCAL_FLV8986_SUPP_OPS)) | \
+ ((fm) & SEG6_LOCAL_FLV8986_SUPP_OPS)) >> SEG6_LOCAL_FLV_OP_PSP)
+
+/* We compute the size of the action table by considering the RFC8986 flavors
+ * actually supported by the kernel. In this way, the size is automatically
+ * adjusted when new flavors are supported.
+ */
+#define FLV8986_ACT_TBL_SIZE \
+ roundup_pow_of_two(flv8986_act_tbl_idx(SEG6_LOCAL_PKTINFO_MAX, \
+ SEG6_LOCAL_FLV8986_SUPP_OPS))
+
+/* tbl_cfg(act, pf, fm) macro is used to easily configure the action
+ * table; it accepts 3 arguments:
+ * i) @act, the suffix from SEG6_LOCAL_FLV_ACT_{act} representing
+ * the action that should be applied on the packet;
+ * ii) @pf, the suffix from SEG6_LOCAL_PKTINFO_{pf} reporting the packet
+ * info about the lack/presence of SRH, SRH with SL = 0, etc;
+ * iii) @fm, the mask of flavors.
+ */
+#define tbl_cfg(act, pf, fm) \
+ [flv8986_act_tbl_idx(SEG6_LOCAL_PKTINFO_##pf, \
+ (fm))] = SEG6_LOCAL_FLV_ACT_##act
+
+/* shorthand for improving readability */
+#define F_PSP SEG6_F_LOCAL_FLV_PSP
+
+/* The table contains, for each combination of the pktinfo data and
+ * flavors, the action that should be taken on a packet (e.g.
+ * "standard" Endpoint processing, Penultimate Segment Pop, etc).
+ *
+ * By default, table entries not explicitly configured are initialized with the
+ * SEG6_LOCAL_FLV_ACT_UNSPEC action, which generally has the effect of
+ * discarding the processed packet.
+ */
+static const u8 flv8986_act_tbl[FLV8986_ACT_TBL_SIZE] = {
+ /* PSP variant for packets carrying an SRH with SL = 1 */
+ tbl_cfg(PSP, SL_ONE, F_PSP),
+ /* plain End processing for packets carrying an SRH with SL > 1 */
+ tbl_cfg(END, SL_MORE, F_PSP),
+};
+
+#undef F_PSP
+#undef tbl_cfg
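To make the index construction concrete, a worked example (sketch, assuming
SEG6_LOCAL_FLV_OP_PSP == 1 in the uAPI enum, which this hunk does not
show): the adjusted flavor mask occupies
bits_per(SEG6_LOCAL_FLV8986_SUPP_OPS) = bits_per(0x2) = 2 bits, so:

	/*
	 * flv8986_act_tbl_idx(SEG6_LOCAL_PKTINFO_SL_ONE, F_PSP)
	 *	= ((2 << 2) | 0x2) >> 1 = 5 -> tbl[5] = SEG6_LOCAL_FLV_ACT_PSP
	 * flv8986_act_tbl_idx(SEG6_LOCAL_PKTINFO_SL_MORE, F_PSP)
	 *	= ((3 << 2) | 0x2) >> 1 = 7 -> tbl[7] = SEG6_LOCAL_FLV_ACT_END
	 *
	 * FLV8986_ACT_TBL_SIZE = roundup_pow_of_two(7) = 8 entries.
	 */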
+
+/* For each flavor defined in RFC8986 (or a combination of them) an action is
+ * performed on the packet. The specific action depends on:
+ * - info extracted from the packet (i.e. pktinfo data) regarding the
+ * lack/presence of the SRH, and if the SRH is available, on the value of
+ * Segment Left field;
+ * - the mask of flavors configured for the specific SRv6 End* behavior.
+ *
+ * The function combines the pktinfo data with the flavors mask to evaluate the
+ * corresponding action to be taken on the packet.
+ */
+static enum seg6_local_flv_action
+seg6_local_flv8986_act_lookup(enum seg6_local_pktinfo pinfo, __u32 flvmask)
+{
+ unsigned long index;
+
+ /* check if the provided mask of flavors is supported */
+ if (unlikely(flvmask & ~SEG6_LOCAL_FLV8986_SUPP_OPS))
+ return SEG6_LOCAL_FLV_ACT_UNSPEC;
+
+ index = flv8986_act_tbl_idx(pinfo, flvmask);
+ if (unlikely(index >= FLV8986_ACT_TBL_SIZE))
+ return SEG6_LOCAL_FLV_ACT_UNSPEC;
+
+ return flv8986_act_tbl[index];
+}
+
+/* skb->data must be aligned with skb->network_header */
+static bool seg6_pop_srh(struct sk_buff *skb, int srhoff)
+{
+ struct ipv6_sr_hdr *srh;
+ struct ipv6hdr *iph;
+ __u8 srh_nexthdr;
+ int thoff = -1;
+ int srhlen;
+ int nhlen;
+
+ if (unlikely(srhoff < sizeof(*iph) ||
+ !pskb_may_pull(skb, srhoff + sizeof(*srh))))
+ return false;
+
+ srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+ srhlen = ipv6_optlen(srh);
+
+ /* we are about to mangle the pkt, let's check if we can write on it */
+ if (unlikely(skb_ensure_writable(skb, srhoff + srhlen)))
+ return false;
+
+ /* skb_ensure_writable() may change skb pointers; evaluate srh again */
+ srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+ srh_nexthdr = srh->nexthdr;
+
+ if (unlikely(!skb_transport_header_was_set(skb)))
+ goto pull;
+
+ nhlen = skb_network_header_len(skb);
+ /* we have to deal with the transport header: it could be set before
+ * the SRH, after the SRH, or within it (which is considered wrong,
+ * however).
+ */
+ if (likely(nhlen <= srhoff))
+ thoff = nhlen;
+ else if (nhlen >= srhoff + srhlen)
+ /* transport_header is set after the SRH */
+ thoff = nhlen - srhlen;
+ else
+ /* transport_header falls inside the SRH; hence, we can't
+ * restore the transport_header pointer properly after
+ * SRH removing operation.
+ */
+ return false;
+pull:
+ /* we need to pop the SRH:
+ * 1) first, we pull out everything from the IPv6 header up to and
+ * including the SRH, updating the rcsum as we go;
+ * 2) we overwrite (and thereby remove) the SRH by moving the IPv6
+ * header, along with any extension headers preceding the SRH,
+ * forward over it;
+ * 3) finally, we push back the pulled headers (minus the SRH).
+ */
+ skb_pull_rcsum(skb, srhoff + srhlen);
+ memmove(skb_network_header(skb) + srhlen, skb_network_header(skb),
+ srhoff);
+ skb_push(skb, srhoff);
+
+ skb_reset_network_header(skb);
+ skb_mac_header_rebuild(skb);
+ if (likely(thoff >= 0))
+ skb_set_transport_header(skb, thoff);
+
+ iph = ipv6_hdr(skb);
+ if (iph->nexthdr == NEXTHDR_ROUTING) {
+ iph->nexthdr = srh_nexthdr;
+ } else {
+ /* we must look for the extension header (EXTH, for short) that
+ * immediately precedes the SRH we have just removed.
+ * Then, we update the value of the EXTH nexthdr with the one
+ * contained in the SRH nexthdr.
+ */
+ unsigned int off = sizeof(*iph);
+ struct ipv6_opt_hdr *hp, _hdr;
+ __u8 nexthdr = iph->nexthdr;
+
+ for (;;) {
+ if (unlikely(!ipv6_ext_hdr(nexthdr) ||
+ nexthdr == NEXTHDR_NONE))
+ return false;
+
+ hp = skb_header_pointer(skb, off, sizeof(_hdr), &_hdr);
+ if (unlikely(!hp))
+ return false;
+
+ if (hp->nexthdr == NEXTHDR_ROUTING) {
+ hp->nexthdr = srh_nexthdr;
+ break;
+ }
+
+ switch (nexthdr) {
+ case NEXTHDR_FRAGMENT:
+ fallthrough;
+ case NEXTHDR_AUTH:
+ /* we expect SRH before FRAG and AUTH */
+ return false;
+ default:
+ off += ipv6_optlen(hp);
+ break;
+ }
+
+ nexthdr = hp->nexthdr;
+ }
+ }
+
+ iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+
+ skb_postpush_rcsum(skb, iph, srhoff);
+
+ return true;
+}
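The pull/memmove/push sequence above removes a header from the middle of
the packet without touching the (potentially large) payload: everything up
to the end of the SRH is pulled, the headers that precede the SRH are slid
forward over it, and the now-shorter header block is pushed back. A
self-contained toy illustrating the same move (plain userspace C, not
kernel code):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* toy layout: [hdrs "IIII"][srh "SSS"][payload "PP"] */
		char pkt[] = "IIIISSSPP";
		size_t srhoff = 4, srhlen = 3, len = sizeof(pkt) - 1;

		/* slide the preceding headers forward over the SRH */
		memmove(pkt + srhlen, pkt, srhoff);

		/* the packet now starts srhlen bytes later: prints IIIIPP */
		printf("%.*s\n", (int)(len - srhlen), pkt + srhlen);
		return 0;
	}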
+
+/* process the packet on the basis of the RFC8986 flavors set for the given
+ * SRv6 End behavior instance.
+ */
+static int end_flv8986_core(struct sk_buff *skb, struct seg6_local_lwt *slwt)
+{
+ const struct seg6_flavors_info *finfo = &slwt->flv_info;
+ enum seg6_local_flv_action action;
+ enum seg6_local_pktinfo pinfo;
+ struct ipv6_sr_hdr *srh;
+ __u32 flvmask;
+ int srhoff;
+
+ srh = seg6_get_srh(skb, 0);
+ srhoff = srh ? ((unsigned char *)srh - skb->data) : 0;
+ pinfo = seg6_get_srh_pktinfo(srh);
+#ifdef CONFIG_IPV6_SEG6_HMAC
+ if (srh && !seg6_hmac_validate_skb(skb))
+ goto drop;
+#endif
+ flvmask = finfo->flv_ops;
+ if (unlikely(flvmask & ~SEG6_LOCAL_FLV8986_SUPP_OPS)) {
+ pr_warn_once("seg6local: invalid RFC8986 flavors\n");
+ goto drop;
+ }
+
+ /* retrieve the action triggered by the combination of pktinfo data and
+ * the flavors mask.
+ */
+ action = seg6_local_flv8986_act_lookup(pinfo, flvmask);
+ switch (action) {
+ case SEG6_LOCAL_FLV_ACT_END:
+ /* process the packet as the "standard" End behavior */
+ advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
+ break;
+ case SEG6_LOCAL_FLV_ACT_PSP:
+ advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
+
+ if (unlikely(!seg6_pop_srh(skb, srhoff)))
+ goto drop;
+ break;
+ case SEG6_LOCAL_FLV_ACT_UNSPEC:
+ fallthrough;
+ default:
+ /* by default, we drop the packet since we could not find a
+ * suitable action.
+ */
+ goto drop;
+ }
+
+ return input_action_end_finish(skb, slwt);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
/* regular endpoint function */
static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
const struct seg6_flavors_info *finfo = &slwt->flv_info;
+ __u32 fops = finfo->flv_ops;
- if (seg6_next_csid_enabled(finfo->flv_ops))
+ if (!fops)
+ return input_action_end_core(skb, slwt);
+
+ /* check for the presence of NEXT-C-SID since it applies first */
+ if (seg6_next_csid_enabled(fops))
return end_next_csid_core(skb, slwt);
- return input_action_end_core(skb, slwt);
+ /* the specific processing function to be performed on the packet
+ * depends on the combination of flavors defined in RFC8986 and some
+ * information extracted from the packet, e.g. presence/absence of SRH,
+ * Segment Left = 0, etc.
+ */
+ return end_flv8986_core(skb, slwt);
}
/* regular endpoint, and forward to specified nexthop */
@@ -2300,6 +2627,13 @@ int __init seg6_local_init(void)
BUILD_BUG_ON(next_csid_chk_lcblock_bits(SEG6_LOCAL_LCBLOCK_DBITS));
BUILD_BUG_ON(next_csid_chk_lcnode_fn_bits(SEG6_LOCAL_LCNODE_FN_DBITS));
+ /* To be memory efficient, we use 'u8' to represent the different
+ * actions related to RFC8986 flavors. If the kernel build stops here,
+ * it means that it is not possible to correctly encode these actions
+ * with the data type chosen for the action table.
+ */
+ BUILD_BUG_ON(SEG6_LOCAL_FLV_ACT_MAX > (typeof(flv8986_act_tbl[0]))~0U);
+
return lwtunnel_encap_add_ops(&seg6_local_ops,
LWTUNNEL_ENCAP_SEG6_LOCAL);
}
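Taken together, input_action_end() now dispatches in a fixed order: no
flavors configured means plain End processing, NEXT-C-SID takes precedence
when set, and any remaining mask is handed to the RFC 8986 flavor engine
above. From userspace this would be driven by iproute2's seg6local
"flavors" keyword (e.g. "action End flavors psp"), assuming a matching
iproute2 release; that tooling is not part of this kernel diff.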
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a52a4f12f146..1bf93b61aa06 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1705,8 +1705,9 @@ process:
if (static_branch_unlikely(&ip6_min_hopcount)) {
/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
- if (hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
+ if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
+ drop_reason = SKB_DROP_REASON_TCP_MINTTL;
goto discard_and_relse;
}
}
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 890a2423f559..cfe828bd7fc6 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -28,6 +28,7 @@
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>
+#include <trace/events/sock.h>
unsigned int kcm_net_id;
@@ -349,6 +350,8 @@ static void psock_data_ready(struct sock *sk)
{
struct kcm_psock *psock;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
psock = (struct kcm_psock *)sk->sk_user_data;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index db2e584c625e..f011af6601c9 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -650,54 +650,22 @@ static int pppol2tp_tunnel_mtu(const struct l2tp_tunnel *tunnel)
return mtu - PPPOL2TP_HEADER_OVERHEAD;
}
-/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
- */
-static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
- int sockaddr_len, int flags)
+static struct l2tp_tunnel *pppol2tp_tunnel_get(struct net *net,
+ const struct l2tp_connect_info *info,
+ bool *new_tunnel)
{
- struct sock *sk = sock->sk;
- struct pppox_sock *po = pppox_sk(sk);
- struct l2tp_session *session = NULL;
- struct l2tp_connect_info info;
struct l2tp_tunnel *tunnel;
- struct pppol2tp_session *ps;
- struct l2tp_session_cfg cfg = { 0, };
- bool drop_refcnt = false;
- bool drop_tunnel = false;
- bool new_session = false;
- bool new_tunnel = false;
int error;
- error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
- if (error < 0)
- return error;
+ *new_tunnel = false;
- lock_sock(sk);
-
- /* Check for already bound sockets */
- error = -EBUSY;
- if (sk->sk_state & PPPOX_CONNECTED)
- goto end;
-
- /* We don't supporting rebinding anyway */
- error = -EALREADY;
- if (sk->sk_user_data)
- goto end; /* socket is already attached */
-
- /* Don't bind if tunnel_id is 0 */
- error = -EINVAL;
- if (!info.tunnel_id)
- goto end;
-
- tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id);
- if (tunnel)
- drop_tunnel = true;
+ tunnel = l2tp_tunnel_get(net, info->tunnel_id);
/* Special case: create tunnel context if session_id and
* peer_session_id is 0. Otherwise look up tunnel using supplied
* tunnel id.
*/
- if (!info.session_id && !info.peer_session_id) {
+ if (!info->session_id && !info->peer_session_id) {
if (!tunnel) {
struct l2tp_tunnel_cfg tcfg = {
.encap = L2TP_ENCAPTYPE_UDP,
@@ -706,40 +674,82 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
/* Prevent l2tp_tunnel_register() from trying to set up
* a kernel socket.
*/
- if (info.fd < 0) {
- error = -EBADF;
- goto end;
- }
+ if (info->fd < 0)
+ return ERR_PTR(-EBADF);
- error = l2tp_tunnel_create(info.fd,
- info.version,
- info.tunnel_id,
- info.peer_tunnel_id, &tcfg,
+ error = l2tp_tunnel_create(info->fd,
+ info->version,
+ info->tunnel_id,
+ info->peer_tunnel_id, &tcfg,
&tunnel);
if (error < 0)
- goto end;
+ return ERR_PTR(error);
l2tp_tunnel_inc_refcount(tunnel);
- error = l2tp_tunnel_register(tunnel, sock_net(sk),
- &tcfg);
+ error = l2tp_tunnel_register(tunnel, net, &tcfg);
if (error < 0) {
kfree(tunnel);
- goto end;
+ return ERR_PTR(error);
}
- drop_tunnel = true;
- new_tunnel = true;
+
+ *new_tunnel = true;
}
} else {
/* Error if we can't find the tunnel */
- error = -ENOENT;
if (!tunnel)
- goto end;
+ return ERR_PTR(-ENOENT);
/* Error if socket is not prepped */
- if (!tunnel->sock)
- goto end;
+ if (!tunnel->sock) {
+ l2tp_tunnel_dec_refcount(tunnel);
+ return ERR_PTR(-ENOENT);
+ }
}
+ return tunnel;
+}
+
+/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
+ */
+static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct l2tp_session *session = NULL;
+ struct l2tp_connect_info info;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ struct l2tp_session_cfg cfg = { 0, };
+ bool drop_refcnt = false;
+ bool new_session = false;
+ bool new_tunnel = false;
+ int error;
+
+ error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
+ if (error < 0)
+ return error;
+
+ /* Don't bind if tunnel_id is 0 */
+ if (!info.tunnel_id)
+ return -EINVAL;
+
+ tunnel = pppol2tp_tunnel_get(sock_net(sk), &info, &new_tunnel);
+ if (IS_ERR(tunnel))
+ return PTR_ERR(tunnel);
+
+ lock_sock(sk);
+
+ /* Check for already bound sockets */
+ error = -EBUSY;
+ if (sk->sk_state & PPPOX_CONNECTED)
+ goto end;
+
+ /* We don't support rebinding anyway */
+ error = -EALREADY;
+ if (sk->sk_user_data)
+ goto end; /* socket is already attached */
+
if (tunnel->peer_tunnel_id == 0)
tunnel->peer_tunnel_id = info.peer_tunnel_id;
@@ -840,8 +850,7 @@ end:
}
if (drop_refcnt)
l2tp_session_dec_refcount(session);
- if (drop_tunnel)
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_dec_refcount(tunnel);
release_sock(sk);
return error;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 672eff6f5d32..8eb342300868 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1220,7 +1220,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_local *local = sdata->local;
struct beacon_data *old;
struct ieee80211_sub_if_data *vlan;
- u32 changed = BSS_CHANGED_BEACON_INT |
+ u64 changed = BSS_CHANGED_BEACON_INT |
BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_BEACON |
BSS_CHANGED_P2P_PS |
@@ -1252,6 +1252,21 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
prev_beacon_int = link_conf->beacon_int;
link_conf->beacon_int = params->beacon_interval;
+ if (params->vht_cap) {
+ link_conf->vht_su_beamformer =
+ params->vht_cap->vht_cap_info &
+ cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ link_conf->vht_su_beamformee =
+ params->vht_cap->vht_cap_info &
+ cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ link_conf->vht_mu_beamformer =
+ params->vht_cap->vht_cap_info &
+ cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+ link_conf->vht_mu_beamformee =
+ params->vht_cap->vht_cap_info &
+ cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ }
+
if (params->he_cap && params->he_oper) {
link_conf->he_support = true;
link_conf->htc_trig_based_pkt_ext =
@@ -1266,6 +1281,26 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
changed |= BSS_CHANGED_HE_BSS_COLOR;
}
+ if (params->he_cap) {
+ link_conf->he_su_beamformer =
+ params->he_cap->phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ link_conf->he_su_beamformee =
+ params->he_cap->phy_cap_info[4] &
+ IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
+ link_conf->he_mu_beamformer =
+ params->he_cap->phy_cap_info[4] &
+ IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ link_conf->he_full_ul_mumimo =
+ params->he_cap->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
+ }
+
+ if (params->eht_cap) {
+ link_conf->eht_puncturing = params->punct_bitmap;
+ changed |= BSS_CHANGED_EHT_PUNCTURING;
+ }
+
if (sdata->vif.type == NL80211_IFTYPE_AP &&
params->mbssid_config.tx_wdev) {
err = ieee80211_set_ap_mbssid_options(sdata,
@@ -2734,7 +2769,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
* If the scan has been forced (and the driver supports
* forcing), don't care about being beaconing already.
* This will create problems to the attached stations (e.g. all
- * the frames sent while scanning on other channel will be
+ * the frames sent while scanning on other channel will be
* lost)
*/
if (sdata->deflink.u.ap.beacon &&
@@ -3516,6 +3551,12 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&local->mtx);
lockdep_assert_held(&local->chanctx_mtx);
+ if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
+ sdata->vif.bss_conf.eht_puncturing =
+ sdata->vif.bss_conf.csa_punct_bitmap;
+ changed |= BSS_CHANGED_EHT_PUNCTURING;
+ }
+
/*
* using reservation isn't immediate as it may be deferred until later
* with multi-vif. once reservation is complete it will re-schedule the
@@ -3557,7 +3598,8 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
if (err)
return err;
- cfg80211_ch_switch_notify(sdata->dev, &sdata->deflink.csa_chandef, 0);
+ cfg80211_ch_switch_notify(sdata->dev, &sdata->deflink.csa_chandef, 0,
+ sdata->vif.bss_conf.eht_puncturing);
return 0;
}
@@ -3819,9 +3861,13 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
+ if (params->punct_bitmap && !sdata->vif.bss_conf.eht_support)
+ goto out;
+
sdata->deflink.csa_chandef = params->chandef;
sdata->deflink.csa_block_tx = params->block_tx;
sdata->vif.bss_conf.csa_active = true;
+ sdata->vif.bss_conf.csa_punct_bitmap = params->punct_bitmap;
if (sdata->deflink.csa_block_tx)
ieee80211_stop_vif_queues(local, sdata,
@@ -3829,7 +3875,8 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
cfg80211_ch_switch_started_notify(sdata->dev,
&sdata->deflink.csa_chandef, 0,
- params->count, params->block_tx);
+ params->count, params->block_tx,
+ sdata->vif.bss_conf.csa_punct_bitmap);
if (changed) {
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
@@ -4141,7 +4188,7 @@ static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_link_data *link;
int ret;
- u32 changed = 0;
+ u64 changed = 0;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -4622,6 +4669,19 @@ unlock:
sdata_unlock(sdata);
}
+void ieee80211_color_collision_detection_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct ieee80211_link_data *link =
+ container_of(delayed_work, struct ieee80211_link_data,
+ color_collision_detect_work);
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+
+ sdata_lock(sdata);
+ cfg80211_obss_color_collision_notify(sdata->dev, link->color_bitmap);
+ sdata_unlock(sdata);
+}
+
void ieee80211_color_change_finish(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -4632,17 +4692,27 @@ void ieee80211_color_change_finish(struct ieee80211_vif *vif)
EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);
void
-ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
u64 color_bitmap, gfp_t gfp)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link = &sdata->deflink;
if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active)
return;
- cfg80211_obss_color_collision_notify(sdata->dev, color_bitmap, gfp);
+ if (delayed_work_pending(&link->color_collision_detect_work))
+ return;
+
+ link->color_bitmap = color_bitmap;
+ /* queue the color collision detection event every 500 ms in order to
+ * avoid sending too many netlink messages to userspace.
+ */
+ ieee80211_queue_delayed_work(&sdata->local->hw,
+ &link->color_collision_detect_work,
+ msecs_to_jiffies(500));
}
-EXPORT_SYMBOL_GPL(ieeee80211_obss_color_collision_notify);
+EXPORT_SYMBOL_GPL(ieee80211_obss_color_collision_notify);
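The collision-report rework above is a plain debounce: the latest bitmap is
latched in link->color_bitmap, and a pending delayed work item suppresses
further queueing, so at most one netlink event reaches userspace per 500 ms
window. The idiom in isolation (a sketch, not mac80211 API):

	/* sketch: coalesce event bursts into one deferred report */
	static void report_debounced(struct delayed_work *dwork,
				     unsigned long window)
	{
		if (delayed_work_pending(dwork))
			return;	/* a report is already scheduled */
		schedule_delayed_work(dwork, window);
	}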
static int
ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e72cf0749d49..dbc34fbe7c8f 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1916,7 +1916,7 @@ int ieee80211_link_use_reserved_context(struct ieee80211_link_data *link)
int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
const struct cfg80211_chan_def *chandef,
- u32 *changed)
+ u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_bss_conf *link_conf = link->conf;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index c87e1137e5da..0bac9af3ca96 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -603,8 +603,6 @@ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
-IEEE80211_IF_FILE(dropped_frames_congestion,
- u.mesh.mshstats.dropped_frames_congestion, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
u.mesh.mshstats.dropped_frames_no_route, DEC);
@@ -740,7 +738,6 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
- MESHSTATS_ADD(dropped_frames_congestion);
#undef MESHSTATS_ADD
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d16606e84e22..ecc232eb1ee8 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -327,7 +327,6 @@ struct mesh_stats {
__u32 fwded_frames; /* Mesh total forwarded frames */
__u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/
__u32 dropped_frames_no_route; /* Not transmitted, no route found */
- __u32 dropped_frames_congestion;/* Not forwarded due to congestion */
};
#define PREQ_Q_F_START 0x1
@@ -974,6 +973,8 @@ struct ieee80211_link_data {
struct cfg80211_chan_def csa_chandef;
struct work_struct color_change_finalize_work;
+ struct delayed_work color_collision_detect_work;
+ u64 color_bitmap;
/* context reservation -- protected with chanctx_mtx */
struct ieee80211_chanctx *reserved_chanctx;
@@ -1929,6 +1930,7 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
/* color change handling */
void ieee80211_color_change_finalize_work(struct work_struct *work);
+void ieee80211_color_collision_detection_work(struct work_struct *work);
/* interface handling */
#define MAC80211_SUPPORTED_FEATURES_TX (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
@@ -2478,7 +2480,7 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link);
int __must_check
ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
const struct cfg80211_chan_def *chandef,
- u32 *changed);
+ u64 *changed);
void ieee80211_link_release_channel(struct ieee80211_link_data *link);
void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link);
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index d1f5a9f7c647..8c8869cc1fb4 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -39,6 +39,8 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
ieee80211_csa_finalize_work);
INIT_WORK(&link->color_change_finalize_work,
ieee80211_color_change_finalize_work);
+ INIT_DELAYED_WORK(&link->color_collision_detect_work,
+ ieee80211_color_collision_detection_work);
INIT_LIST_HEAD(&link->assigned_chanctx_list);
INIT_LIST_HEAD(&link->reserved_chanctx_list);
INIT_DELAYED_WORK(&link->dfs_cac_timer_work,
@@ -66,6 +68,7 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_mgd_stop_link(link);
+ cancel_delayed_work_sync(&link->color_collision_detect_work);
ieee80211_link_release_channel(link);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0aee2392dd29..60792dfabc9d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -8,7 +8,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
*/
#include <linux/delay.h>
@@ -89,6 +89,80 @@ MODULE_PARM_DESC(probe_wait_ms,
#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
/*
+ * Extract from the given disabled subchannel bitmap (raw format
+ * from the EHT Operation Element) the bits for the subchannel
+ * we're using right now.
+ */
+static u16
+ieee80211_extract_dis_subch_bmap(const struct ieee80211_eht_operation *eht_oper,
+ struct cfg80211_chan_def *chandef, u16 bitmap)
+{
+ struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional;
+ struct cfg80211_chan_def ap_chandef = *chandef;
+ u32 ap_center_freq, local_center_freq;
+ u32 ap_bw, local_bw;
+ int ap_start_freq, local_start_freq;
+ u16 shift, mask;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) ||
+ !(eht_oper->params &
+ IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
+ return 0;
+
+ /* set 160/320 supported to get the full AP definition */
+ ieee80211_chandef_eht_oper(eht_oper, true, true, &ap_chandef);
+ ap_center_freq = ap_chandef.center_freq1;
+ ap_bw = 20 * BIT(u8_get_bits(info->control,
+ IEEE80211_EHT_OPER_CHAN_WIDTH));
+ ap_start_freq = ap_center_freq - ap_bw / 2;
+ local_center_freq = chandef->center_freq1;
+ local_bw = 20 * BIT(ieee80211_chan_width_to_rx_bw(chandef->width));
+ local_start_freq = local_center_freq - local_bw / 2;
+ shift = (local_start_freq - ap_start_freq) / 20;
+ mask = BIT(local_bw / 20) - 1;
+
+ return (bitmap >> shift) & mask;
+}
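A worked example of the extraction (sketch): suppose the AP operates
160 MHz and our chandef is the upper 80 MHz half of it. Then:

	/*
	 * ap_bw = 20 * BIT(3) = 160, local_bw = 80,
	 * local_start_freq - ap_start_freq = 80
	 *	=> shift = 80 / 20 = 4
	 *	=> mask = BIT(80 / 20) - 1 = 0xf
	 *
	 * so the upper four bits of the AP's per-20-MHz puncturing
	 * bitmap are returned, realigned to bit 0 of the local bitmap.
	 */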
+
+/*
+ * Handle the puncturing bitmap, possibly downgrading bandwidth to get a
+ * valid bitmap.
+ */
+static void
+ieee80211_handle_puncturing_bitmap(struct ieee80211_link_data *link,
+ const struct ieee80211_eht_operation *eht_oper,
+ u16 bitmap, u64 *changed)
+{
+ struct cfg80211_chan_def *chandef = &link->conf->chandef;
+ u16 extracted;
+ u64 _changed = 0;
+
+ if (!changed)
+ changed = &_changed;
+
+ while (chandef->width > NL80211_CHAN_WIDTH_40) {
+ extracted =
+ ieee80211_extract_dis_subch_bmap(eht_oper, chandef,
+ bitmap);
+
+ if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
+ chandef))
+ break;
+ link->u.mgd.conn_flags |=
+ ieee80211_chandef_downgrade(chandef);
+ *changed |= BSS_CHANGED_BANDWIDTH;
+ }
+
+ if (chandef->width <= NL80211_CHAN_WIDTH_40)
+ extracted = 0;
+
+ if (link->conf->eht_puncturing != extracted) {
+ link->conf->eht_puncturing = extracted;
+ *changed |= BSS_CHANGED_EHT_PUNCTURING;
+ }
+}
+
+/*
* We can have multiple work items (and connection probing)
* scheduling this timer, but we need to take care to only
* reschedule it when it should fire _earlier_ than it was
@@ -413,7 +487,7 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
const struct ieee80211_s1g_oper_ie *s1g_oper,
- const u8 *bssid, u32 *changed)
+ const u8 *bssid, u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
@@ -1704,7 +1778,7 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
return;
}
- cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef, 0);
+ cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef, 0, 0);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
@@ -1914,7 +1988,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
mutex_unlock(&local->mtx);
cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, 0,
- csa_ie.count, csa_ie.mode);
+ csa_ie.count, csa_ie.mode, 0);
if (local->ops->channel_switch) {
/* use driver's channel switch callback */
@@ -4145,6 +4219,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta);
bss_conf->eht_support = link_sta->pub->eht_cap.has_eht;
+ *changed |= BSS_CHANGED_EHT_PUNCTURING;
} else {
bss_conf->eht_support = false;
}
@@ -5477,6 +5552,45 @@ static bool ieee80211_rx_our_beacon(const u8 *tx_bssid,
return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid);
}
+static bool ieee80211_config_puncturing(struct ieee80211_link_data *link,
+ const struct ieee80211_eht_operation *eht_oper,
+ u64 *changed)
+{
+ u16 bitmap = 0, extracted;
+
+ if ((eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
+ (eht_oper->params &
+ IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
+ const struct ieee80211_eht_operation_info *info =
+ (void *)eht_oper->optional;
+ const u8 *disable_subchannel_bitmap = info->optional;
+
+ bitmap = get_unaligned_le16(disable_subchannel_bitmap);
+ }
+
+ extracted = ieee80211_extract_dis_subch_bmap(eht_oper,
+ &link->conf->chandef,
+ bitmap);
+
+ /* accept if there are no changes */
+ if (!(*changed & BSS_CHANGED_BANDWIDTH) &&
+ extracted == link->conf->eht_puncturing)
+ return true;
+
+ if (!cfg80211_valid_disable_subchannel_bitmap(&bitmap,
+ &link->conf->chandef)) {
+ link_info(link,
+ "Got an invalid disable subchannel bitmap from AP %pM: bitmap = 0x%x, bw = 0x%x. disconnect\n",
+ link->u.mgd.bssid,
+ bitmap,
+ link->conf->chandef.width);
+ return false;
+ }
+
+ ieee80211_handle_puncturing_bitmap(link, eht_oper, bitmap, changed);
+ return true;
+}
+
static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
struct ieee80211_hdr *hdr, size_t len,
struct ieee80211_rx_status *rx_status)
@@ -5494,7 +5608,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
struct ieee80211_channel *chan;
struct link_sta_info *link_sta;
struct sta_info *sta;
- u32 changed = 0;
+ u64 changed = 0;
bool erp_valid;
u8 erp_value = 0;
u32 ncrc = 0;
@@ -5791,6 +5905,21 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
elems->pwr_constr_elem,
elems->cisco_dtpc_elem);
+ if (elems->eht_operation &&
+ !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
+ if (!ieee80211_config_puncturing(link, elems->eht_operation,
+ &changed)) {
+ ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+ WLAN_REASON_DEAUTH_LEAVING,
+ true, deauth_buf);
+ ieee80211_report_disconnect(sdata, deauth_buf,
+ sizeof(deauth_buf), true,
+ WLAN_REASON_DEAUTH_LEAVING,
+ false);
+ goto free;
+ }
+ }
+
ieee80211_link_info_change_notify(sdata, link, changed);
free:
kfree(elems);
@@ -6892,9 +7021,12 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
}
+ link->conf->eht_puncturing = 0;
+
rcu_read_lock();
beacon_ies = rcu_dereference(cbss->beacon_ies);
if (beacon_ies) {
+ const struct ieee80211_eht_operation *eht_oper;
const struct element *elem;
u8 dtim_count = 0;
@@ -6923,6 +7055,31 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
link->conf->ema_ap = true;
else
link->conf->ema_ap = false;
+
+ elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION,
+ beacon_ies->data, beacon_ies->len);
+ eht_oper = elem ? (const void *)(elem->data + 1) : NULL;
+
+ if (eht_oper &&
+ ieee80211_eht_oper_size_ok((const void *)eht_oper,
+ elem->datalen - 1) &&
+ (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
+ (eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
+ const struct ieee80211_eht_operation_info *info =
+ (void *)eht_oper->optional;
+ const u8 *disable_subchannel_bitmap = info->optional;
+ u16 bitmap;
+
+ bitmap = get_unaligned_le16(disable_subchannel_bitmap);
+ if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
+ &link->conf->chandef))
+ ieee80211_handle_puncturing_bitmap(link,
+ eht_oper,
+ bitmap,
+ NULL);
+ else
+ conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ }
}
rcu_read_unlock();
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c6562a6d2503..f7fdfe710951 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2720,6 +2720,174 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
}
}
+static ieee80211_rx_result
+ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_MAC80211_MESH
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
+ uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;
+ struct ieee80211_hdr hdr = {
+ .frame_control = cpu_to_le16(fc)
+ };
+ struct ieee80211_hdr *fwd_hdr;
+ struct ieee80211s_hdr *mesh_hdr;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *fwd_skb;
+ struct ethhdr *eth;
+ bool multicast;
+ int tailroom = 0;
+ int hdrlen, mesh_hdrlen;
+ u8 *qos;
+
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ return RX_CONTINUE;
+
+ if (!pskb_may_pull(skb, sizeof(*eth) + 6))
+ return RX_DROP_MONITOR;
+
+ mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth));
+ mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr);
+
+ if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen))
+ return RX_DROP_MONITOR;
+
+ eth = (struct ethhdr *)skb->data;
+ multicast = is_multicast_ether_addr(eth->h_dest);
+
+ mesh_hdr = (struct ieee80211s_hdr *)(eth + 1);
+ if (!mesh_hdr->ttl)
+ return RX_DROP_MONITOR;
+
+ /* frame is in RMC, don't forward */
+ if (is_multicast_ether_addr(eth->h_dest) &&
+ mesh_rmc_check(sdata, eth->h_source, mesh_hdr))
+ return RX_DROP_MONITOR;
+
+ /* Frame has reached destination. Don't forward */
+ if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
+ goto rx_accept;
+
+ if (!ifmsh->mshcfg.dot11MeshForwarding) {
+ if (is_multicast_ether_addr(eth->h_dest))
+ goto rx_accept;
+
+ return RX_DROP_MONITOR;
+ }
+
+ /* forward packet */
+ if (sdata->crypto_tx_tailroom_needed_cnt)
+ tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
+ if (!--mesh_hdr->ttl) {
+ if (multicast)
+ goto rx_accept;
+
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+ return RX_DROP_MONITOR;
+ }
+
+ if (mesh_hdr->flags & MESH_FLAGS_AE) {
+ struct mesh_path *mppath;
+ char *proxied_addr;
+
+ if (multicast)
+ proxied_addr = mesh_hdr->eaddr1;
+ else if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
+ /* has_a4 already checked in ieee80211_rx_mesh_check */
+ proxied_addr = mesh_hdr->eaddr2;
+ else
+ return RX_DROP_MONITOR;
+
+ rcu_read_lock();
+ mppath = mpp_path_lookup(sdata, proxied_addr);
+ if (!mppath) {
+ mpp_path_add(sdata, proxied_addr, eth->h_source);
+ } else {
+ spin_lock_bh(&mppath->state_lock);
+ if (!ether_addr_equal(mppath->mpp, eth->h_source))
+ memcpy(mppath->mpp, eth->h_source, ETH_ALEN);
+ mppath->exp_time = jiffies;
+ spin_unlock_bh(&mppath->state_lock);
+ }
+ rcu_read_unlock();
+ }
+
+ skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]);
+
+ ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control,
+ eth->h_dest, eth->h_source);
+ hdrlen = ieee80211_hdrlen(hdr.frame_control);
+ if (multicast) {
+ int extra_head = sizeof(struct ieee80211_hdr) - sizeof(*eth);
+
+ fwd_skb = skb_copy_expand(skb, local->tx_headroom + extra_head +
+ IEEE80211_ENCRYPT_HEADROOM,
+ tailroom, GFP_ATOMIC);
+ if (!fwd_skb)
+ goto rx_accept;
+ } else {
+ fwd_skb = skb;
+ skb = NULL;
+
+ if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr)))
+ return RX_DROP_UNUSABLE;
+ }
+
+ fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr));
+ memcpy(fwd_hdr, &hdr, hdrlen - 2);
+ qos = ieee80211_get_qos_ctl(fwd_hdr);
+ qos[0] = qos[1] = 0;
+
+ skb_reset_mac_header(fwd_skb);
+ hdrlen += mesh_hdrlen;
+ if (ieee80211_get_8023_tunnel_proto(fwd_skb->data + hdrlen,
+ &fwd_skb->protocol))
+ hdrlen += ETH_ALEN;
+ else
+ fwd_skb->protocol = htons(fwd_skb->len - hdrlen);
+ skb_set_network_header(fwd_skb, hdrlen);
+
+ info = IEEE80211_SKB_CB(fwd_skb);
+ memset(info, 0, sizeof(*info));
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
+ info->control.vif = &sdata->vif;
+ info->control.jiffies = jiffies;
+ if (multicast) {
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
+ memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ /* update power mode indication when forwarding */
+ ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
+ } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
+ /* mesh power mode flags updated in mesh_nexthop_lookup */
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
+ } else {
+ /* unable to resolve next hop */
+ if (sta)
+ mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
+ hdr.addr3, 0,
+ WLAN_REASON_MESH_PATH_NOFORWARD,
+ sta->sta.addr);
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
+ kfree_skb(fwd_skb);
+ goto rx_accept;
+ }
+
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
+ fwd_skb->dev = sdata->dev;
+ ieee80211_add_pending_skb(local, fwd_skb);
+
+rx_accept:
+ if (!skb)
+ return RX_QUEUED;
+
+ ieee80211_strip_8023_mesh_hdr(skb);
+#endif
+
+ return RX_CONTINUE;
+}
+
static ieee80211_rx_result debug_noinline
__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
{
@@ -2728,6 +2896,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
struct sk_buff_head frame_list;
+ ieee80211_rx_result res;
struct ethhdr ethhdr;
const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
@@ -2746,6 +2915,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
break;
case NL80211_IFTYPE_MESH_POINT:
check_sa = NULL;
+ check_da = NULL;
break;
default:
break;
@@ -2760,20 +2930,43 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
data_offset, true))
return RX_DROP_UNUSABLE;
+ if (rx->sta && rx->sta->amsdu_mesh_control < 0) {
+ bool valid_std = ieee80211_is_valid_amsdu(skb, true);
+ bool valid_nonstd = ieee80211_is_valid_amsdu(skb, false);
+
+ if (valid_std && !valid_nonstd)
+ rx->sta->amsdu_mesh_control = 1;
+ else if (valid_nonstd && !valid_std)
+ rx->sta->amsdu_mesh_control = 0;
+ }
+
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
rx->local->hw.extra_tx_headroom,
- check_da, check_sa);
+ check_da, check_sa,
+ rx->sta->amsdu_mesh_control);
while (!skb_queue_empty(&frame_list)) {
rx->skb = __skb_dequeue(&frame_list);
- if (!ieee80211_frame_allowed(rx, fc)) {
- dev_kfree_skb(rx->skb);
+ res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
+ switch (res) {
+ case RX_QUEUED:
continue;
+ case RX_CONTINUE:
+ break;
+ default:
+ goto free;
}
+ if (!ieee80211_frame_allowed(rx, fc))
+ goto free;
+
ieee80211_deliver_skb(rx);
+ continue;
+
+free:
+ dev_kfree_skb(rx->skb);
}
return RX_QUEUED;
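Two conventions in the loop above deserve a note: rx->sta->amsdu_mesh_control
starts at -1 ("unknown") and latches to 1 or 0 the first time exactly one of
the two A-MSDU subframe layouts validates, so later frames from that peer
skip the probing; and a result of RX_QUEUED means ieee80211_rx_mesh_data()
consumed the subframe (forwarded or freed it), while RX_CONTINUE hands it on
to normal delivery. The latching logic on its own (a sketch):

	/* sketch: decide the peer's A-MSDU format once it is unambiguous */
	static void latch_amsdu_format(s8 *state, bool valid_std,
				       bool valid_nonstd)
	{
		if (*state >= 0)
			return;		/* already decided */
		if (valid_std && !valid_nonstd)
			*state = 1;	/* standards-compliant layout */
		else if (valid_nonstd && !valid_std)
			*state = 0;	/* non-standard mesh layout */
	}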
@@ -2806,6 +2999,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!rx->sdata->u.mgd.use_4addr)
return RX_DROP_UNUSABLE;
break;
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
default:
return RX_DROP_UNUSABLE;
}
@@ -2834,160 +3029,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
return __ieee80211_rx_h_amsdu(rx, 0);
}
-#ifdef CONFIG_MAC80211_MESH
-static ieee80211_rx_result
-ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
-{
- struct ieee80211_hdr *fwd_hdr, *hdr;
- struct ieee80211_tx_info *info;
- struct ieee80211s_hdr *mesh_hdr;
- struct sk_buff *skb = rx->skb, *fwd_skb;
- struct ieee80211_local *local = rx->local;
- struct ieee80211_sub_if_data *sdata = rx->sdata;
- struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- u16 ac, q, hdrlen;
- int tailroom = 0;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
-
- /* make sure fixed part of mesh header is there, also checks skb len */
- if (!pskb_may_pull(rx->skb, hdrlen + 6))
- return RX_DROP_MONITOR;
-
- mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
-
- /* make sure full mesh header is there, also checks skb len */
- if (!pskb_may_pull(rx->skb,
- hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
- return RX_DROP_MONITOR;
-
- /* reload pointers */
- hdr = (struct ieee80211_hdr *) skb->data;
- mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
-
- if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) {
- int offset = hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr) +
- sizeof(rfc1042_header);
- __be16 ethertype;
-
- if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr) ||
- skb_copy_bits(rx->skb, offset, &ethertype, 2) != 0 ||
- ethertype != rx->sdata->control_port_protocol)
- return RX_DROP_MONITOR;
- }
-
- /* frame is in RMC, don't forward */
- if (ieee80211_is_data(hdr->frame_control) &&
- is_multicast_ether_addr(hdr->addr1) &&
- mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
- return RX_DROP_MONITOR;
-
- if (!ieee80211_is_data(hdr->frame_control))
- return RX_CONTINUE;
-
- if (!mesh_hdr->ttl)
- return RX_DROP_MONITOR;
-
- if (mesh_hdr->flags & MESH_FLAGS_AE) {
- struct mesh_path *mppath;
- char *proxied_addr;
- char *mpp_addr;
-
- if (is_multicast_ether_addr(hdr->addr1)) {
- mpp_addr = hdr->addr3;
- proxied_addr = mesh_hdr->eaddr1;
- } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
- MESH_FLAGS_AE_A5_A6) {
- /* has_a4 already checked in ieee80211_rx_mesh_check */
- mpp_addr = hdr->addr4;
- proxied_addr = mesh_hdr->eaddr2;
- } else {
- return RX_DROP_MONITOR;
- }
-
- rcu_read_lock();
- mppath = mpp_path_lookup(sdata, proxied_addr);
- if (!mppath) {
- mpp_path_add(sdata, proxied_addr, mpp_addr);
- } else {
- spin_lock_bh(&mppath->state_lock);
- if (!ether_addr_equal(mppath->mpp, mpp_addr))
- memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
- mppath->exp_time = jiffies;
- spin_unlock_bh(&mppath->state_lock);
- }
- rcu_read_unlock();
- }
-
- /* Frame has reached destination. Don't forward */
- if (!is_multicast_ether_addr(hdr->addr1) &&
- ether_addr_equal(sdata->vif.addr, hdr->addr3))
- return RX_CONTINUE;
-
- ac = ieee802_1d_to_ac[skb->priority];
- q = sdata->vif.hw_queue[ac];
- if (ieee80211_queue_stopped(&local->hw, q)) {
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
- return RX_DROP_MONITOR;
- }
- skb_set_queue_mapping(skb, ac);
-
- if (!--mesh_hdr->ttl) {
- if (!is_multicast_ether_addr(hdr->addr1))
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
- dropped_frames_ttl);
- goto out;
- }
-
- if (!ifmsh->mshcfg.dot11MeshForwarding)
- goto out;
-
- if (sdata->crypto_tx_tailroom_needed_cnt)
- tailroom = IEEE80211_ENCRYPT_TAILROOM;
-
- fwd_skb = skb_copy_expand(skb, local->tx_headroom +
- IEEE80211_ENCRYPT_HEADROOM,
- tailroom, GFP_ATOMIC);
- if (!fwd_skb)
- goto out;
-
- fwd_skb->dev = sdata->dev;
- fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
- fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
- info = IEEE80211_SKB_CB(fwd_skb);
- memset(info, 0, sizeof(*info));
- info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
- info->control.vif = &rx->sdata->vif;
- info->control.jiffies = jiffies;
- if (is_multicast_ether_addr(fwd_hdr->addr1)) {
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
- memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
- /* update power mode indication when forwarding */
- ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
- } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
- /* mesh power mode flags updated in mesh_nexthop_lookup */
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
- } else {
- /* unable to resolve next hop */
- mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
- fwd_hdr->addr3, 0,
- WLAN_REASON_MESH_PATH_NOFORWARD,
- fwd_hdr->addr2);
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
- kfree_skb(fwd_skb);
- return RX_DROP_MONITOR;
- }
-
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
- ieee80211_add_pending_skb(local, fwd_skb);
- out:
- if (is_multicast_ether_addr(hdr->addr1))
- return RX_CONTINUE;
- return RX_DROP_MONITOR;
-}
-#endif
-
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
{
@@ -2996,6 +3037,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
struct net_device *dev = sdata->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
__le16 fc = hdr->frame_control;
+ ieee80211_rx_result res;
bool port_control;
int err;
@@ -3022,6 +3064,10 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
if (unlikely(err))
return RX_DROP_UNUSABLE;
+ res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
+ if (res != RX_CONTINUE)
+ return res;
+
if (!ieee80211_frame_allowed(rx, fc))
return RX_DROP_MONITOR;
@@ -3219,9 +3265,9 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
color = le32_get_bits(he_oper->he_oper_params,
IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
if (color == bss_conf->he_bss_color.color)
- ieeee80211_obss_color_collision_notify(&rx->sdata->vif,
- BIT_ULL(color),
- GFP_ATOMIC);
+ ieee80211_obss_color_collision_notify(&rx->sdata->vif,
+ BIT_ULL(color),
+ GFP_ATOMIC);
}
}
@@ -3992,10 +4038,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
CALL_RXH(ieee80211_rx_h_defragment);
CALL_RXH(ieee80211_rx_h_michael_mic_verify);
/* must be after MMIC verify so header is counted in MPDU mic */
-#ifdef CONFIG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&rx->sdata->vif))
- CALL_RXH(ieee80211_rx_h_mesh_fwding);
-#endif
CALL_RXH(ieee80211_rx_h_amsdu);
CALL_RXH(ieee80211_rx_h_data);
@@ -4052,9 +4094,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
static bool
ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
{
- if (!sta->mlo)
- return false;
-
return !!(sta->valid_links & BIT(link_id));
}
@@ -4076,13 +4115,8 @@ static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
}
static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
- struct ieee80211_sta *pubsta,
- int link_id)
+ struct sta_info *sta, int link_id)
{
- struct sta_info *sta;
-
- sta = container_of(pubsta, struct sta_info, sta);
-
rx->link_id = link_id;
rx->sta = sta;
@@ -4091,6 +4125,8 @@ static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
if (!rx->sdata)
rx->sdata = sta->sdata;
rx->link_sta = &sta->deflink;
+ } else {
+ rx->link_sta = NULL;
}
if (link_id < 0)
@@ -4120,7 +4156,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
if (sta->sta.valid_links)
link_id = ffs(sta->sta.valid_links) - 1;
- if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
+ if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
return;
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
@@ -4166,7 +4202,7 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
sta = container_of(pubsta, struct sta_info, sta);
- if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
+ if (!ieee80211_rx_data_set_sta(&rx, sta, -1))
return;
rcu_read_lock();
@@ -4245,7 +4281,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
case NL80211_IFTYPE_STATION:
if (!bssid && !sdata->u.mgd.use_4addr)
return false;
- if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
+ if (ieee80211_is_first_frag(hdr->seq_ctrl) &&
+ ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
return false;
if (multicast)
return true;
@@ -4843,7 +4880,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
hdr = (struct ieee80211_hdr *)rx->skb->data;
}
- if (unlikely(rx->sta && rx->sta->sta.mlo)) {
+ if (unlikely(rx->sta && rx->sta->sta.mlo) &&
+ is_unicast_ether_addr(hdr->addr1)) {
/* translate to MLD addresses */
if (ether_addr_equal(link->conf->addr, hdr->addr1))
ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
@@ -4873,6 +4911,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_fast_rx *fast_rx;
struct ieee80211_rx_data rx;
+ struct sta_info *sta;
int link_id = -1;
memset(&rx, 0, sizeof(rx));
@@ -4900,7 +4939,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
* link_id is used only for stats purpose and updating the stats on
* the deflink is fine?
*/
- if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
+ sta = container_of(pubsta, struct sta_info, sta);
+ if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
goto drop;
fast_rx = rcu_dereference(rx.sta->fast_rx);
@@ -4940,7 +4980,7 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
link_id = status->link_id;
}
- if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
+ if (!ieee80211_rx_data_set_sta(rx, sta, link_id))
return false;
return ieee80211_prepare_and_rx_handle(rx, skb, consume);
@@ -5007,7 +5047,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
link_id = status->link_id;
if (pubsta) {
- if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
+ sta = container_of(pubsta, struct sta_info, sta);
+ if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
goto out;
/*
@@ -5044,8 +5085,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
}
rx.sdata = prev_sta->sdata;
- if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
- link_id))
+ if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
goto out;
if (!status->link_valid && prev_sta->sta.mlo)
@@ -5058,8 +5098,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (prev_sta) {
rx.sdata = prev_sta->sdata;
- if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
- link_id))
+ if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
goto out;
if (!status->link_valid && prev_sta->sta.mlo)
@@ -5194,6 +5233,15 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
status->rate_idx, status->nss))
goto drop;
break;
+ case RX_ENC_EHT:
+ if (WARN_ONCE(status->rate_idx > 15 ||
+ !status->nss ||
+ status->nss > 8 ||
+ status->eht.gi > NL80211_RATE_INFO_EHT_GI_3_2,
+ "Rate marked as an EHT rate but data is invalid: MCS:%d, NSS:%d, GI:%d\n",
+ status->rate_idx, status->nss, status->eht.gi))
+ goto drop;
+ break;
default:
WARN_ON_ONCE(1);
fallthrough;
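The mesh A-MSDU hunks above latch the peer's A-MSDU format per station: some
peers put a mesh control header inside each A-MSDU subframe and some do not,
so sta->amsdu_mesh_control records what was detected (-1: not yet known,
0: non-standard, 1: standard) the first time a frame parses unambiguously.
A minimal standalone sketch of that latch logic, with the kernel's
ieee80211_is_valid_amsdu() replaced by a stub so it compiles on its own:

#include <stdbool.h>
#include <stdio.h>

static bool is_valid_amsdu(const void *skb, bool mesh_hdr)
{
	/* stub: pretend the frame only parses with a mesh header */
	(void)skb;
	return mesh_hdr;
}

static void latch_amsdu_format(signed char *amsdu_mesh_control, const void *skb)
{
	if (*amsdu_mesh_control >= 0)
		return; /* format already decided for this peer */

	bool valid_std = is_valid_amsdu(skb, true);
	bool valid_nonstd = is_valid_amsdu(skb, false);

	/* only latch when exactly one interpretation parses cleanly */
	if (valid_std && !valid_nonstd)
		*amsdu_mesh_control = 1;	/* standard: mesh header present */
	else if (valid_nonstd && !valid_std)
		*amsdu_mesh_control = 0;	/* non-standard: no mesh header */
}

int main(void)
{
	signed char fmt = -1; /* -1: not yet known */

	latch_amsdu_format(&fmt, NULL);
	printf("detected format: %d\n", fmt); /* prints 1 with this stub */
	return 0;
}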
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 04e0f132b1d9..7d68dbc872d7 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/module.h>
@@ -594,6 +594,9 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->sta_state = IEEE80211_STA_NONE;
+ if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+ sta->amsdu_mesh_control = -1;
+
/* Mark TID as unreserved */
sta->reserved_tid = IEEE80211_TID_UNRESERVED;
@@ -2406,12 +2409,19 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate);
rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate);
break;
+ case STA_STATS_RATE_TYPE_EHT:
+ rinfo->flags = RATE_INFO_FLAGS_EHT_MCS;
+ rinfo->mcs = STA_STATS_GET(EHT_MCS, rate);
+ rinfo->nss = STA_STATS_GET(EHT_NSS, rate);
+ rinfo->eht_gi = STA_STATS_GET(EHT_GI, rate);
+ rinfo->eht_ru_alloc = STA_STATS_GET(EHT_RU, rate);
+ break;
}
}
static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
- u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
if (rate == STA_STATS_RATE_INVALID)
return -EINVAL;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 69820b551668..e8e482a82d77 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -622,6 +622,8 @@ struct link_sta_info {
* taken from HT/VHT capabilities or VHT operating mode notification
* @cparams: CoDel parameters for this station.
* @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
+ * @amsdu_mesh_control: track the mesh A-MSDU format used by the peer
+ * (-1: not yet known, 0: non-standard [without mesh header], 1: standard)
* @fast_tx: TX fastpath information
* @fast_rx: RX fastpath information
* @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
@@ -707,6 +709,7 @@ struct sta_info {
struct codel_params cparams;
u8 reserved_tid;
+ s8 amsdu_mesh_control;
struct cfg80211_chan_def tdls_chandef;
@@ -936,6 +939,7 @@ enum sta_stats_type {
STA_STATS_RATE_TYPE_VHT,
STA_STATS_RATE_TYPE_HE,
STA_STATS_RATE_TYPE_S1G,
+ STA_STATS_RATE_TYPE_EHT,
};
#define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0)
@@ -945,12 +949,16 @@ enum sta_stats_type {
#define STA_STATS_FIELD_VHT_NSS GENMASK( 7, 4)
#define STA_STATS_FIELD_HE_MCS GENMASK( 3, 0)
#define STA_STATS_FIELD_HE_NSS GENMASK( 7, 4)
-#define STA_STATS_FIELD_BW GENMASK(11, 8)
-#define STA_STATS_FIELD_SGI GENMASK(12, 12)
-#define STA_STATS_FIELD_TYPE GENMASK(15, 13)
-#define STA_STATS_FIELD_HE_RU GENMASK(18, 16)
-#define STA_STATS_FIELD_HE_GI GENMASK(20, 19)
-#define STA_STATS_FIELD_HE_DCM GENMASK(21, 21)
+#define STA_STATS_FIELD_EHT_MCS GENMASK( 3, 0)
+#define STA_STATS_FIELD_EHT_NSS GENMASK( 7, 4)
+#define STA_STATS_FIELD_BW GENMASK(12, 8)
+#define STA_STATS_FIELD_SGI GENMASK(13, 13)
+#define STA_STATS_FIELD_TYPE GENMASK(16, 14)
+#define STA_STATS_FIELD_HE_RU GENMASK(19, 17)
+#define STA_STATS_FIELD_HE_GI GENMASK(21, 20)
+#define STA_STATS_FIELD_HE_DCM GENMASK(22, 22)
+#define STA_STATS_FIELD_EHT_RU GENMASK(20, 17)
+#define STA_STATS_FIELD_EHT_GI GENMASK(22, 21)
#define STA_STATS_FIELD(_n, _v) FIELD_PREP(STA_STATS_FIELD_ ## _n, _v)
#define STA_STATS_GET(_n, _v) FIELD_GET(STA_STATS_FIELD_ ## _n, _v)
@@ -989,6 +997,13 @@ static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s)
r |= STA_STATS_FIELD(HE_RU, s->he_ru);
r |= STA_STATS_FIELD(HE_DCM, s->he_dcm);
break;
+ case RX_ENC_EHT:
+ r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_EHT);
+ r |= STA_STATS_FIELD(EHT_NSS, s->nss);
+ r |= STA_STATS_FIELD(EHT_MCS, s->rate_idx);
+ r |= STA_STATS_FIELD(EHT_GI, s->eht.gi);
+ r |= STA_STATS_FIELD(EHT_RU, s->eht.ru);
+ break;
default:
WARN_ON(1);
return STA_STATS_RATE_INVALID;
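The reworked STA_STATS_FIELD layout above moves TYPE up to bits 16:14 and
places the EHT GI at bits 22:21, so an encoded rate no longer fits in 16 bits;
that is also why sta_set_rate_info_rx() now reads last_rate into a u32. A
rough standalone illustration of the packing, assuming STA_STATS_RATE_TYPE_EHT
has the value 6 (its position in the enum) — an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

#define TYPE_EHT	6u	/* assumed enum position of STA_STATS_RATE_TYPE_EHT */

static uint32_t encode_eht(uint32_t mcs, uint32_t nss, uint32_t gi, uint32_t ru)
{
	uint32_t r = 0;

	r |= mcs << 0;		/* STA_STATS_FIELD_EHT_MCS: bits 3:0 */
	r |= nss << 4;		/* STA_STATS_FIELD_EHT_NSS: bits 7:4 */
	r |= TYPE_EHT << 14;	/* STA_STATS_FIELD_TYPE:    bits 16:14 */
	r |= ru << 17;		/* STA_STATS_FIELD_EHT_RU:  bits 20:17 */
	r |= gi << 21;		/* STA_STATS_FIELD_EHT_GI:  bits 22:21 */
	return r;
}

int main(void)
{
	/* MCS 11, 2 spatial streams, GI index 1, RU allocation 3 */
	uint32_t r = encode_eht(11, 2, 1, 3);

	/* bit 22 can be set, so the encoded rate needs more than a u16 */
	printf("encoded rate: 0x%08x\n", r);
	return 0;
}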
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index defe97a31724..7699fb410670 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -4434,7 +4434,7 @@ static void ieee80211_mlo_multicast_tx(struct net_device *dev,
u32 ctrl_flags = IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX;
if (hweight16(links) == 1) {
- ctrl_flags |= u32_encode_bits(ffs(links) - 1,
+ ctrl_flags |= u32_encode_bits(__ffs(links),
IEEE80211_TX_CTRL_MLO_LINK);
__ieee80211_subif_start_xmit(skb, sdata->dev, 0, ctrl_flags,
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 261ac667887f..1a28fe5cb614 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -832,19 +832,6 @@ static void __iterate_stations(struct ieee80211_local *local,
}
}
-void ieee80211_iterate_stations(struct ieee80211_hw *hw,
- void (*iterator)(void *data,
- struct ieee80211_sta *sta),
- void *data)
-{
- struct ieee80211_local *local = hw_to_local(hw);
-
- mutex_lock(&local->sta_mtx);
- __iterate_stations(local, iterator, data);
- mutex_unlock(&local->sta_mtx);
-}
-EXPORT_SYMBOL_GPL(ieee80211_iterate_stations);
-
void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
void (*iterator)(void *data,
struct ieee80211_sta *sta),
@@ -4033,6 +4020,19 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
/* Fill cfg80211 rate info */
switch (status->encoding) {
+ case RX_ENC_EHT:
+ ri.flags |= RATE_INFO_FLAGS_EHT_MCS;
+ ri.mcs = status->rate_idx;
+ ri.nss = status->nss;
+ ri.eht_ru_alloc = status->eht.ru;
+ if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
+ ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ /* TODO/FIXME: is this right? handle other PPDUs */
+ if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
+ mpdu_offset += 2;
+ ts += 36;
+ }
+ break;
case RX_ENC_HE:
ri.flags |= RATE_INFO_FLAGS_HE_MCS;
ri.mcs = status->rate_idx;
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 803de5881485..c1250aa47808 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -625,7 +625,7 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
enum ieee80211_sta_rx_bandwidth new_bw;
struct sta_opmode_info sta_opmode = {};
u32 changed = 0;
- u8 nss;
+ u8 nss, cur_nss;
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
@@ -636,10 +636,25 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
nss += 1;
if (link_sta->pub->rx_nss != nss) {
- link_sta->pub->rx_nss = nss;
- sta_opmode.rx_nss = nss;
- changed |= IEEE80211_RC_NSS_CHANGED;
- sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ cur_nss = link_sta->pub->rx_nss;
+ /* Reset rx_nss and call ieee80211_sta_set_rx_nss(), which
+ * recomputes it as the max NSS value derived from the peer's capabilities.
+ */
+ link_sta->pub->rx_nss = 0;
+ ieee80211_sta_set_rx_nss(link_sta);
+ /* Do not allow an NSS change to a value greater than the max NSS
+ * negotiated (and capped to the AP's capabilities) during association.
+ */
+ if (nss <= link_sta->pub->rx_nss) {
+ link_sta->pub->rx_nss = nss;
+ sta_opmode.rx_nss = nss;
+ changed |= IEEE80211_RC_NSS_CHANGED;
+ sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ } else {
+ link_sta->pub->rx_nss = cur_nss;
+ pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d\n",
+ link_sta->pub->addr, nss);
+ }
}
switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index 4059295fdbf8..43d1347b37ee 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MAC802154) += mac802154.o
mac802154-objs := main.o rx.o tx.o mac_cmd.o mib.o \
- iface.o llsec.o util.o cfg.o trace.o
+ iface.o llsec.o util.o cfg.o scan.o trace.o
CFLAGS_trace.o := -I$(src)
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index dc2d918fac68..5c3cb019f751 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -114,11 +114,15 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
wpan_phy->current_channel == channel)
return 0;
+ /* Refuse to change channels during scanning or beaconing */
+ if (mac802154_is_scanning(local) || mac802154_is_beaconing(local))
+ return -EBUSY;
+
ret = drv_set_channel(local, page, channel);
if (!ret) {
wpan_phy->current_page = page;
wpan_phy->current_channel = channel;
- ieee802154_configure_durations(wpan_phy);
+ ieee802154_configure_durations(wpan_phy, page, channel);
}
return ret;
@@ -261,6 +265,56 @@ ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
return 0;
}
+static int mac802154_trigger_scan(struct wpan_phy *wpan_phy,
+ struct cfg802154_scan_request *request)
+{
+ struct ieee802154_sub_if_data *sdata;
+
+ sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(request->wpan_dev);
+
+ ASSERT_RTNL();
+
+ return mac802154_trigger_scan_locked(sdata, request);
+}
+
+static int mac802154_abort_scan(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ struct ieee802154_sub_if_data *sdata;
+
+ sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev);
+
+ ASSERT_RTNL();
+
+ return mac802154_abort_scan_locked(local, sdata);
+}
+
+static int mac802154_send_beacons(struct wpan_phy *wpan_phy,
+ struct cfg802154_beacon_request *request)
+{
+ struct ieee802154_sub_if_data *sdata;
+
+ sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(request->wpan_dev);
+
+ ASSERT_RTNL();
+
+ return mac802154_send_beacons_locked(sdata, request);
+}
+
+static int mac802154_stop_beacons(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ struct ieee802154_sub_if_data *sdata;
+
+ sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev);
+
+ ASSERT_RTNL();
+
+ return mac802154_stop_beacons_locked(local, sdata);
+}
+
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
static void
ieee802154_get_llsec_table(struct wpan_phy *wpan_phy,
@@ -468,6 +522,10 @@ const struct cfg802154_ops mac802154_config_ops = {
.set_max_frame_retries = ieee802154_set_max_frame_retries,
.set_lbt_mode = ieee802154_set_lbt_mode,
.set_ackreq_default = ieee802154_set_ackreq_default,
+ .trigger_scan = mac802154_trigger_scan,
+ .abort_scan = mac802154_abort_scan,
+ .send_beacons = mac802154_send_beacons,
+ .stop_beacons = mac802154_stop_beacons,
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
.get_llsec_table = ieee802154_get_llsec_table,
.lock_llsec_table = ieee802154_lock_llsec_table,
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 509e0172fe82..63bab99ed368 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -21,6 +21,11 @@
#include "llsec.h"
+enum ieee802154_ongoing {
+ IEEE802154_IS_SCANNING = BIT(0),
+ IEEE802154_IS_BEACONING = BIT(1),
+};
+
/* mac802154 device private data */
struct ieee802154_local {
struct ieee802154_hw hw;
@@ -43,15 +48,32 @@ struct ieee802154_local {
struct list_head interfaces;
struct mutex iflist_mtx;
- /* This one is used for scanning and other jobs not to be interfered
- * with serial driver.
- */
+ /* Data related workqueue */
struct workqueue_struct *workqueue;
+ /* MAC commands related workqueue */
+ struct workqueue_struct *mac_wq;
struct hrtimer ifs_timer;
+ /* Scanning */
+ u8 scan_page;
+ u8 scan_channel;
+ struct cfg802154_scan_request __rcu *scan_req;
+ struct delayed_work scan_work;
+
+ /* Beaconing */
+ unsigned int beacon_interval;
+ struct ieee802154_beacon_frame beacon;
+ struct cfg802154_beacon_request __rcu *beacon_req;
+ struct delayed_work beacon_work;
+
+ /* Asynchronous tasks */
+ struct list_head rx_beacon_list;
+ struct work_struct rx_beacon_work;
+
bool started;
bool suspended;
+ unsigned long ongoing;
struct tasklet_struct tasklet;
struct sk_buff_head skb_queue;
@@ -141,10 +163,16 @@ int ieee802154_mlme_op_pre(struct ieee802154_local *local);
int ieee802154_mlme_tx(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb);
+int ieee802154_mlme_tx_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb);
void ieee802154_mlme_op_post(struct ieee802154_local *local);
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb);
+int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb);
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t
@@ -220,6 +248,33 @@ void mac802154_unlock_table(struct net_device *dev);
int mac802154_wpan_update_llsec(struct net_device *dev);
+/* PAN management handling */
+void mac802154_scan_worker(struct work_struct *work);
+int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata,
+ struct cfg802154_scan_request *request);
+int mac802154_abort_scan_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata);
+int mac802154_process_beacon(struct ieee802154_local *local,
+ struct sk_buff *skb,
+ u8 page, u8 channel);
+void mac802154_rx_beacon_worker(struct work_struct *work);
+
+static inline bool mac802154_is_scanning(struct ieee802154_local *local)
+{
+ return test_bit(IEEE802154_IS_SCANNING, &local->ongoing);
+}
+
+void mac802154_beacon_worker(struct work_struct *work);
+int mac802154_send_beacons_locked(struct ieee802154_sub_if_data *sdata,
+ struct cfg802154_beacon_request *request);
+int mac802154_stop_beacons_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata);
+
+static inline bool mac802154_is_beaconing(struct ieee802154_local *local)
+{
+ return test_bit(IEEE802154_IS_BEACONING, &local->ongoing);
+}
+
/* interface handling */
int ieee802154_iface_init(void);
void ieee802154_iface_exit(void);
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index ac0b28025fb0..c0e2da5072be 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -302,6 +302,12 @@ static int mac802154_slave_close(struct net_device *dev)
ASSERT_RTNL();
+ if (mac802154_is_scanning(local))
+ mac802154_abort_scan_locked(local, sdata);
+
+ if (mac802154_is_beaconing(local))
+ mac802154_stop_beacons_locked(local, sdata);
+
netif_stop_queue(dev);
local->open_count--;
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 55550ead2ced..8d2eabc71bbe 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -707,7 +707,10 @@ int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
hlen = ieee802154_hdr_pull(skb, &hdr);
- if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
+ /* TODO: control frames security support */
+ if (hlen < 0 ||
+ (hdr.fc.type != IEEE802154_FC_TYPE_DATA &&
+ hdr.fc.type != IEEE802154_FC_TYPE_BEACON))
return -EINVAL;
if (!hdr.fc.security_enabled ||
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 3ed31daf7b9c..ee23e234b998 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -89,6 +89,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
local->ops = ops;
INIT_LIST_HEAD(&local->interfaces);
+ INIT_LIST_HEAD(&local->rx_beacon_list);
mutex_init(&local->iflist_mtx);
tasklet_setup(&local->tasklet, ieee802154_tasklet_handler);
@@ -96,6 +97,9 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
skb_queue_head_init(&local->skb_queue);
INIT_WORK(&local->sync_tx_work, ieee802154_xmit_sync_worker);
+ INIT_DELAYED_WORK(&local->scan_work, mac802154_scan_worker);
+ INIT_WORK(&local->rx_beacon_work, mac802154_rx_beacon_worker);
+ INIT_DELAYED_WORK(&local->beacon_work, mac802154_beacon_worker);
/* init supported flags with 802.15.4 default ranges */
phy->supported.max_minbe = 8;
@@ -113,32 +117,33 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
}
EXPORT_SYMBOL(ieee802154_alloc_hw);
-void ieee802154_configure_durations(struct wpan_phy *phy)
+void ieee802154_configure_durations(struct wpan_phy *phy,
+ unsigned int page, unsigned int channel)
{
u32 duration = 0;
- switch (phy->current_page) {
+ switch (page) {
case 0:
- if (BIT(phy->current_channel) & 0x1)
+ if (BIT(channel) & 0x1)
/* 868 MHz BPSK 802.15.4-2003: 20 ksym/s */
duration = 50 * NSEC_PER_USEC;
- else if (BIT(phy->current_channel) & 0x7FE)
+ else if (BIT(channel) & 0x7FE)
/* 915 MHz BPSK 802.15.4-2003: 40 ksym/s */
duration = 25 * NSEC_PER_USEC;
- else if (BIT(phy->current_channel) & 0x7FFF800)
+ else if (BIT(channel) & 0x7FFF800)
/* 2400 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */
duration = 16 * NSEC_PER_USEC;
break;
case 2:
- if (BIT(phy->current_channel) & 0x1)
+ if (BIT(channel) & 0x1)
/* 868 MHz O-QPSK 802.15.4-2006: 25 ksym/s */
duration = 40 * NSEC_PER_USEC;
- else if (BIT(phy->current_channel) & 0x7FE)
+ else if (BIT(channel) & 0x7FE)
/* 915 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */
duration = 16 * NSEC_PER_USEC;
break;
case 3:
- if (BIT(phy->current_channel) & 0x3FFF)
+ if (BIT(channel) & 0x3FFF)
/* 2.4 GHz CSS 802.15.4a-2007: 1/6 Msym/s */
duration = 6 * NSEC_PER_USEC;
break;
@@ -184,6 +189,7 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
int ieee802154_register_hw(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
+ char mac_wq_name[IFNAMSIZ + 10] = {};
struct net_device *dev;
int rc = -ENOSYS;
@@ -194,6 +200,13 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
goto out;
}
+ snprintf(mac_wq_name, IFNAMSIZ + 10, "%s-mac-cmds", wpan_phy_name(local->phy));
+ local->mac_wq = create_singlethread_workqueue(mac_wq_name);
+ if (!local->mac_wq) {
+ rc = -ENOMEM;
+ goto out_wq;
+ }
+
hrtimer_init(&local->ifs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
local->ifs_timer.function = ieee802154_xmit_ifs_timer;
@@ -201,7 +214,8 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
ieee802154_setup_wpan_phy_pib(local->phy);
- ieee802154_configure_durations(local->phy);
+ ieee802154_configure_durations(local->phy, local->phy->current_page,
+ local->phy->current_channel);
if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) {
local->phy->supported.min_csma_backoffs = 4;
@@ -222,7 +236,7 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
rc = wpan_phy_register(local->phy);
if (rc < 0)
- goto out_wq;
+ goto out_mac_wq;
rtnl_lock();
@@ -241,6 +255,8 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
out_phy:
wpan_phy_unregister(local->phy);
+out_mac_wq:
+ destroy_workqueue(local->mac_wq);
out_wq:
destroy_workqueue(local->workqueue);
out:
@@ -261,6 +277,7 @@ void ieee802154_unregister_hw(struct ieee802154_hw *hw)
rtnl_unlock();
+ destroy_workqueue(local->mac_wq);
destroy_workqueue(local->workqueue);
wpan_phy_unregister(local->phy);
}
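In ieee802154_configure_durations() above, each page-0 channel mask selects a
PHY and therefore a symbol rate; for example the 0x7FFF800 mask covers
channels 11-26 (2.4 GHz O-QPSK at 62.5 ksym/s), giving 1/62500 s = 16 µs per
symbol, which matches the 16 * NSEC_PER_USEC constant. A small standalone
check of that arithmetic, purely illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int channel = 15;

	/* page 0, bits 11..26: 2.4 GHz O-QPSK, 62.5 ksym/s */
	if ((1u << channel) & 0x7FFF800)
		printf("channel %u: 2.4 GHz O-QPSK, symbol = %u ns\n",
		       channel, 1000000000u / 62500u); /* 16000 ns */
	return 0;
}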
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 97bb4401dd3e..da0628ee3c89 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -29,12 +29,31 @@ static int ieee802154_deliver_skb(struct sk_buff *skb)
return netif_receive_skb(skb);
}
+void mac802154_rx_beacon_worker(struct work_struct *work)
+{
+ struct ieee802154_local *local =
+ container_of(work, struct ieee802154_local, rx_beacon_work);
+ struct cfg802154_mac_pkt *mac_pkt;
+
+ mac_pkt = list_first_entry_or_null(&local->rx_beacon_list,
+ struct cfg802154_mac_pkt, node);
+ if (!mac_pkt)
+ return;
+
+ mac802154_process_beacon(local, mac_pkt->skb, mac_pkt->page, mac_pkt->channel);
+
+ list_del(&mac_pkt->node);
+ kfree_skb(mac_pkt->skb);
+ kfree(mac_pkt);
+}
+
static int
ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb, const struct ieee802154_hdr *hdr)
{
- struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct wpan_phy *wpan_phy = sdata->local->hw.phy;
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ struct cfg802154_mac_pkt *mac_pkt;
__le16 span, sshort;
int rc;
@@ -106,6 +125,21 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
switch (mac_cb(skb)->type) {
case IEEE802154_FC_TYPE_BEACON:
+ dev_dbg(&sdata->dev->dev, "BEACON received\n");
+ if (!mac802154_is_scanning(sdata->local))
+ goto fail;
+
+ mac_pkt = kzalloc(sizeof(*mac_pkt), GFP_ATOMIC);
+ if (!mac_pkt)
+ goto fail;
+
+ mac_pkt->skb = skb_get(skb);
+ mac_pkt->sdata = sdata;
+ mac_pkt->page = sdata->local->scan_page;
+ mac_pkt->channel = sdata->local->scan_channel;
+ list_add_tail(&mac_pkt->node, &sdata->local->rx_beacon_list);
+ queue_work(sdata->local->mac_wq, &sdata->local->rx_beacon_work);
+ return NET_RX_SUCCESS;
case IEEE802154_FC_TYPE_ACK:
case IEEE802154_FC_TYPE_MAC_CMD:
goto fail;
diff --git a/net/mac802154/scan.c b/net/mac802154/scan.c
new file mode 100644
index 000000000000..9b0933a185eb
--- /dev/null
+++ b/net/mac802154/scan.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE 802.15.4 scanning management
+ *
+ * Copyright (C) 2021 Qorvo US, Inc
+ * Authors:
+ * - David Girault <david.girault@qorvo.com>
+ * - Miquel Raynal <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <net/mac802154.h>
+
+#include "ieee802154_i.h"
+#include "driver-ops.h"
+#include "../ieee802154/nl802154.h"
+
+#define IEEE802154_BEACON_MHR_SZ 13
+#define IEEE802154_BEACON_PL_SZ 4
+#define IEEE802154_BEACON_SKB_SZ (IEEE802154_BEACON_MHR_SZ + \
+ IEEE802154_BEACON_PL_SZ)
+
+/* mac802154_scan_cleanup_locked() must be called upon scan completion or abort.
+ * - Completions are asynchronous, not locked by the rtnl and decided by the
+ * scan worker.
+ * - Aborts are decided by userspace, and locked by the rtnl.
+ *
+ * Concurrent modifications to the PHY, the interfaces or the hardware are in
+ * general prevented by the rtnl. So in most cases we don't need additional
+ * protection.
+ *
+ * However, the scan worker gets triggered without anybody noticing and thus we
+ * must ensure the presence of the devices as well as data consistency:
+ * - The sub-interface and device driver module get both their reference
+ * counters incremented whenever we start a scan, so they cannot disappear
+ * during operation.
+ * - Data consistency is achieved by the use of rcu protected pointers.
+ */
+static int mac802154_scan_cleanup_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ bool aborted)
+{
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ struct wpan_phy *wpan_phy = local->phy;
+ struct cfg802154_scan_request *request;
+ u8 arg;
+
+ /* Prevent any further use of the scan request */
+ clear_bit(IEEE802154_IS_SCANNING, &local->ongoing);
+ cancel_delayed_work(&local->scan_work);
+ request = rcu_replace_pointer(local->scan_req, NULL, 1);
+ if (!request)
+ return 0;
+ kfree_rcu(request);
+
+ /* Advertise first, while we know the devices cannot be removed */
+ if (aborted)
+ arg = NL802154_SCAN_DONE_REASON_ABORTED;
+ else
+ arg = NL802154_SCAN_DONE_REASON_FINISHED;
+ nl802154_scan_done(wpan_phy, wpan_dev, arg);
+
+ /* Cleanup software stack */
+ ieee802154_mlme_op_post(local);
+
+ /* Set the hardware back in its original state */
+ drv_set_channel(local, wpan_phy->current_page,
+ wpan_phy->current_channel);
+ ieee802154_configure_durations(wpan_phy, wpan_phy->current_page,
+ wpan_phy->current_channel);
+ drv_stop(local);
+ synchronize_net();
+ sdata->required_filtering = sdata->iface_default_filtering;
+ drv_start(local, sdata->required_filtering, &local->addr_filt);
+
+ return 0;
+}
+
+int mac802154_abort_scan_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata)
+{
+ ASSERT_RTNL();
+
+ if (!mac802154_is_scanning(local))
+ return -ESRCH;
+
+ return mac802154_scan_cleanup_locked(local, sdata, true);
+}
+
+static unsigned int mac802154_scan_get_channel_time(u8 duration_order,
+ u8 symbol_duration)
+{
+ u64 base_super_frame_duration = (u64)symbol_duration *
+ IEEE802154_SUPERFRAME_PERIOD * IEEE802154_SLOT_PERIOD;
+
+ return usecs_to_jiffies(base_super_frame_duration *
+ (BIT(duration_order) + 1));
+}
+
+static void mac802154_flush_queued_beacons(struct ieee802154_local *local)
+{
+ struct cfg802154_mac_pkt *mac_pkt, *tmp;
+
+ list_for_each_entry_safe(mac_pkt, tmp, &local->rx_beacon_list, node) {
+ list_del(&mac_pkt->node);
+ kfree_skb(mac_pkt->skb);
+ kfree(mac_pkt);
+ }
+}
+
+static void
+mac802154_scan_get_next_channel(struct ieee802154_local *local,
+ struct cfg802154_scan_request *scan_req,
+ u8 *channel)
+{
+ (*channel)++;
+ *channel = find_next_bit((const unsigned long *)&scan_req->channels,
+ IEEE802154_MAX_CHANNEL + 1,
+ *channel);
+}
+
+static int mac802154_scan_find_next_chan(struct ieee802154_local *local,
+ struct cfg802154_scan_request *scan_req,
+ u8 page, u8 *channel)
+{
+ mac802154_scan_get_next_channel(local, scan_req, channel);
+ if (*channel > IEEE802154_MAX_CHANNEL)
+ return -EINVAL;
+
+ return 0;
+}
+
+void mac802154_scan_worker(struct work_struct *work)
+{
+ struct ieee802154_local *local =
+ container_of(work, struct ieee802154_local, scan_work.work);
+ struct cfg802154_scan_request *scan_req;
+ struct ieee802154_sub_if_data *sdata;
+ unsigned int scan_duration = 0;
+ struct wpan_phy *wpan_phy;
+ u8 scan_req_duration;
+ u8 page, channel;
+ int ret;
+
+ /* Ensure the device receiver is turned off when changing channels
+ * because there is no atomic way to change the channel and know on
+ * which one a beacon might have been received.
+ */
+ drv_stop(local);
+ synchronize_net();
+ mac802154_flush_queued_beacons(local);
+
+ rcu_read_lock();
+ scan_req = rcu_dereference(local->scan_req);
+ if (unlikely(!scan_req)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(scan_req->wpan_dev);
+
+ /* Wait an arbitrary amount of time in case we cannot use the device */
+ if (local->suspended || !ieee802154_sdata_running(sdata)) {
+ rcu_read_unlock();
+ queue_delayed_work(local->mac_wq, &local->scan_work,
+ msecs_to_jiffies(1000));
+ return;
+ }
+
+ wpan_phy = scan_req->wpan_phy;
+ scan_req_duration = scan_req->duration;
+
+ /* Look for the next valid chan */
+ page = local->scan_page;
+ channel = local->scan_channel;
+ do {
+ ret = mac802154_scan_find_next_chan(local, scan_req, page, &channel);
+ if (ret) {
+ rcu_read_unlock();
+ goto end_scan;
+ }
+ } while (!ieee802154_chan_is_valid(scan_req->wpan_phy, page, channel));
+
+ rcu_read_unlock();
+
+ /* Bypass the stack on purpose when changing the channel */
+ rtnl_lock();
+ ret = drv_set_channel(local, page, channel);
+ rtnl_unlock();
+ if (ret) {
+ dev_err(&sdata->dev->dev,
+ "Channel change failure during scan, aborting (%d)\n", ret);
+ goto end_scan;
+ }
+
+ local->scan_page = page;
+ local->scan_channel = channel;
+
+ rtnl_lock();
+ ret = drv_start(local, IEEE802154_FILTERING_3_SCAN, &local->addr_filt);
+ rtnl_unlock();
+ if (ret) {
+ dev_err(&sdata->dev->dev,
+ "Restarting failure after channel change, aborting (%d)\n", ret);
+ goto end_scan;
+ }
+
+ ieee802154_configure_durations(wpan_phy, page, channel);
+ scan_duration = mac802154_scan_get_channel_time(scan_req_duration,
+ wpan_phy->symbol_duration);
+ dev_dbg(&sdata->dev->dev,
+ "Scan page %u channel %u for %ums\n",
+ page, channel, jiffies_to_msecs(scan_duration));
+ queue_delayed_work(local->mac_wq, &local->scan_work, scan_duration);
+ return;
+
+end_scan:
+ rtnl_lock();
+ mac802154_scan_cleanup_locked(local, sdata, false);
+ rtnl_unlock();
+}
+
+int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata,
+ struct cfg802154_scan_request *request)
+{
+ struct ieee802154_local *local = sdata->local;
+
+ ASSERT_RTNL();
+
+ if (mac802154_is_scanning(local))
+ return -EBUSY;
+
+ /* TODO: support other scanning types */
+ if (request->type != NL802154_SCAN_PASSIVE)
+ return -EOPNOTSUPP;
+
+ /* Store scanning parameters */
+ rcu_assign_pointer(local->scan_req, request);
+
+ /* Software scanning requires setting promiscuous mode, so we need to
+ * pause the Tx queue during the entire operation.
+ */
+ ieee802154_mlme_op_pre(local);
+
+ sdata->required_filtering = IEEE802154_FILTERING_3_SCAN;
+ local->scan_page = request->page;
+ local->scan_channel = -1;
+ set_bit(IEEE802154_IS_SCANNING, &local->ongoing);
+
+ nl802154_scan_started(request->wpan_phy, request->wpan_dev);
+
+ queue_delayed_work(local->mac_wq, &local->scan_work, 0);
+
+ return 0;
+}
+
+int mac802154_process_beacon(struct ieee802154_local *local,
+ struct sk_buff *skb,
+ u8 page, u8 channel)
+{
+ struct ieee802154_beacon_hdr *bh = (void *)skb->data;
+ struct ieee802154_addr *src = &mac_cb(skb)->source;
+ struct cfg802154_scan_request *scan_req;
+ struct ieee802154_coord_desc desc;
+
+ if (skb->len != sizeof(*bh))
+ return -EINVAL;
+
+ if (unlikely(src->mode == IEEE802154_ADDR_NONE))
+ return -EINVAL;
+
+ dev_dbg(&skb->dev->dev,
+ "BEACON received on page %u channel %u\n",
+ page, channel);
+
+ memcpy(&desc.addr, src, sizeof(desc.addr));
+ desc.page = page;
+ desc.channel = channel;
+ desc.link_quality = mac_cb(skb)->lqi;
+ desc.superframe_spec = get_unaligned_le16(skb->data);
+ desc.gts_permit = bh->gts_permit;
+
+ trace_802154_scan_event(&desc);
+
+ rcu_read_lock();
+ scan_req = rcu_dereference(local->scan_req);
+ if (likely(scan_req))
+ nl802154_scan_event(scan_req->wpan_phy, scan_req->wpan_dev, &desc);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int mac802154_transmit_beacon(struct ieee802154_local *local,
+ struct wpan_dev *wpan_dev)
+{
+ struct cfg802154_beacon_request *beacon_req;
+ struct ieee802154_sub_if_data *sdata;
+ struct sk_buff *skb;
+ int ret;
+
+ /* Update the sequence number */
+ local->beacon.mhr.seq = atomic_inc_return(&wpan_dev->bsn) & 0xFF;
+
+ skb = alloc_skb(IEEE802154_BEACON_SKB_SZ, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ rcu_read_lock();
+ beacon_req = rcu_dereference(local->beacon_req);
+ if (unlikely(!beacon_req)) {
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(beacon_req->wpan_dev);
+ skb->dev = sdata->dev;
+
+ rcu_read_unlock();
+
+ ret = ieee802154_beacon_push(skb, &local->beacon);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ /* Using the MLME transmission helper for sending beacons is a bit
+ * overkill because we do not really care about the final outcome.
+ *
+ * That said, going through the whole net stack with a regular
+ * dev_queue_xmit() is not appropriate either, because we want beacons to
+ * be sent "now" rather than be subject to the net stack's scheduling
+ * (qdisc & co).
+ *
+ * Finally, using ieee802154_subif_start_xmit() would only be an option
+ * if we had a generic transmit helper which would acquire the
+ * HARD_TX_LOCK() to prevent buffer handling conflicts with regular
+ * packets.
+ *
+ * So for now we keep it simple and send beacons with our MLME helper,
+ * even if it stops the ieee802154 queue entirely during these
+ * transmissions, which anyway does not have a huge impact on
+ * performance given the current design of the stack.
+ */
+ return ieee802154_mlme_tx(local, sdata, skb);
+}
+
+void mac802154_beacon_worker(struct work_struct *work)
+{
+ struct ieee802154_local *local =
+ container_of(work, struct ieee802154_local, beacon_work.work);
+ struct cfg802154_beacon_request *beacon_req;
+ struct ieee802154_sub_if_data *sdata;
+ struct wpan_dev *wpan_dev;
+ int ret;
+
+ rcu_read_lock();
+ beacon_req = rcu_dereference(local->beacon_req);
+ if (unlikely(!beacon_req)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(beacon_req->wpan_dev);
+
+ /* Wait an arbitrary amount of time in case we cannot use the device */
+ if (local->suspended || !ieee802154_sdata_running(sdata)) {
+ rcu_read_unlock();
+ queue_delayed_work(local->mac_wq, &local->beacon_work,
+ msecs_to_jiffies(1000));
+ return;
+ }
+
+ wpan_dev = beacon_req->wpan_dev;
+
+ rcu_read_unlock();
+
+ dev_dbg(&sdata->dev->dev, "Sending beacon\n");
+ ret = mac802154_transmit_beacon(local, wpan_dev);
+ if (ret)
+ dev_err(&sdata->dev->dev,
+ "Beacon could not be transmitted (%d)\n", ret);
+
+ queue_delayed_work(local->mac_wq, &local->beacon_work,
+ local->beacon_interval);
+}
+
+int mac802154_stop_beacons_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata)
+{
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ struct cfg802154_beacon_request *request;
+
+ ASSERT_RTNL();
+
+ if (!mac802154_is_beaconing(local))
+ return -ESRCH;
+
+ clear_bit(IEEE802154_IS_BEACONING, &local->ongoing);
+ cancel_delayed_work(&local->beacon_work);
+ request = rcu_replace_pointer(local->beacon_req, NULL, 1);
+ if (!request)
+ return 0;
+ kfree_rcu(request);
+
+ nl802154_beaconing_done(wpan_dev);
+
+ return 0;
+}
+
+int mac802154_send_beacons_locked(struct ieee802154_sub_if_data *sdata,
+ struct cfg802154_beacon_request *request)
+{
+ struct ieee802154_local *local = sdata->local;
+
+ ASSERT_RTNL();
+
+ if (mac802154_is_beaconing(local))
+ mac802154_stop_beacons_locked(local, sdata);
+
+ /* Store beaconing parameters */
+ rcu_assign_pointer(local->beacon_req, request);
+
+ set_bit(IEEE802154_IS_BEACONING, &local->ongoing);
+
+ memset(&local->beacon, 0, sizeof(local->beacon));
+ local->beacon.mhr.fc.type = IEEE802154_FC_TYPE_BEACON;
+ local->beacon.mhr.fc.security_enabled = 0;
+ local->beacon.mhr.fc.frame_pending = 0;
+ local->beacon.mhr.fc.ack_request = 0;
+ local->beacon.mhr.fc.intra_pan = 0;
+ local->beacon.mhr.fc.dest_addr_mode = IEEE802154_NO_ADDRESSING;
+ local->beacon.mhr.fc.version = IEEE802154_2003_STD;
+ local->beacon.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING;
+ atomic_set(&request->wpan_dev->bsn, -1);
+ local->beacon.mhr.source.mode = IEEE802154_ADDR_LONG;
+ local->beacon.mhr.source.pan_id = request->wpan_dev->pan_id;
+ local->beacon.mhr.source.extended_addr = request->wpan_dev->extended_addr;
+ local->beacon.mac_pl.beacon_order = request->interval;
+ local->beacon.mac_pl.superframe_order = request->interval;
+ local->beacon.mac_pl.final_cap_slot = 0xf;
+ local->beacon.mac_pl.battery_life_ext = 0;
+ /* TODO: Fill this field depending on the coordinator capacity */
+ local->beacon.mac_pl.pan_coordinator = 1;
+ local->beacon.mac_pl.assoc_permit = 1;
+
+ /* Start the beacon work */
+ local->beacon_interval =
+ mac802154_scan_get_channel_time(request->interval,
+ request->wpan_phy->symbol_duration);
+ queue_delayed_work(local->mac_wq, &local->beacon_work, 0);
+
+ return 0;
+}
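For mac802154_scan_get_channel_time() in the new scan.c, the per-channel dwell
time follows the 802.15.4 formula aBaseSuperframeDuration * (2^duration + 1).
A worked example, assuming the usual constants of 16 superframe slots of 60
symbols each and a 16 µs symbol; the units here are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define SUPERFRAME_PERIOD	16	/* aNumSuperframeSlots (assumed) */
#define SLOT_PERIOD		60	/* aBaseSlotDuration in symbols (assumed) */

int main(void)
{
	uint64_t symbol_us = 16;	/* 2.4 GHz O-QPSK symbol duration */
	uint8_t duration_order = 5;	/* scan request "duration" field */
	uint64_t base = symbol_us * SUPERFRAME_PERIOD * SLOT_PERIOD;
	uint64_t dwell = base * ((1ull << duration_order) + 1);

	/* 15360 us * 33 = 506880 us, i.e. roughly 507 ms per channel */
	printf("dwell time: %llu us\n", (unsigned long long)dwell);
	return 0;
}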
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 9d8d43cf1e64..2a6f1ed763c9 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -137,34 +137,37 @@ int ieee802154_mlme_op_pre(struct ieee802154_local *local)
return ieee802154_sync_and_hold_queue(local);
}
-int ieee802154_mlme_tx(struct ieee802154_local *local,
- struct ieee802154_sub_if_data *sdata,
- struct sk_buff *skb)
+int ieee802154_mlme_tx_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb)
{
- int ret;
-
/* Avoid possible calls to ->ndo_stop() when we asynchronously perform
* MLME transmissions.
*/
- rtnl_lock();
+ ASSERT_RTNL();
/* Ensure the device was not stopped, otherwise error out */
- if (!local->open_count) {
- rtnl_unlock();
+ if (!local->open_count)
return -ENETDOWN;
- }
/* Warn if the ieee802154 core thinks MLME frames can be sent while the
* net interface expects this cannot happen.
*/
- if (WARN_ON_ONCE(!netif_running(sdata->dev))) {
- rtnl_unlock();
+ if (WARN_ON_ONCE(!netif_running(sdata->dev)))
return -ENETDOWN;
- }
ieee802154_tx(local, skb);
- ret = ieee802154_sync_queue(local);
+ return ieee802154_sync_queue(local);
+}
+
+int ieee802154_mlme_tx(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ int ret;
+ rtnl_lock();
+ ret = ieee802154_mlme_tx_locked(local, sdata, skb);
rtnl_unlock();
return ret;
@@ -188,6 +191,19 @@ int ieee802154_mlme_tx_one(struct ieee802154_local *local,
return ret;
}
+int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ ieee802154_mlme_op_pre(local);
+ ret = ieee802154_mlme_tx_locked(local, sdata, skb);
+ ieee802154_mlme_op_post(local);
+
+ return ret;
+}
+
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
{
return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
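The split of ieee802154_mlme_tx() above follows the common *_locked()
convention: the core path asserts the lock it needs via ASSERT_RTNL(), and a
thin wrapper takes the rtnl for callers that do not already hold it, so
helpers such as mac802154_trigger_scan_locked() can transmit while the rtnl is
held. A generic userspace sketch of the same pattern, with a pthread mutex
standing in for the rtnl:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_tx_locked(void)
{
	/* caller must hold 'lock'; the kernel code asserts this with
	 * ASSERT_RTNL() instead of taking the lock a second time */
	return 0;
}

static int do_tx(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = do_tx_locked();
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("do_tx() -> %d\n", do_tx());
	return 0;
}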
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 5ded85e2c374..b30cea2fbf3f 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1594,8 +1594,7 @@ mp_rst:
TCPOLEN_MPTCP_PRIO,
opts->backup, TCPOPT_NOP);
- MPTCP_INC_STATS(sock_net((const struct sock *)tp),
- MPTCP_MIB_MPPRIOTX);
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPPRIOTX);
}
mp_capable_done:
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 10fe9771a852..56628b52d100 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -152,7 +152,6 @@ static struct mptcp_pm_addr_entry *
select_local_address(const struct pm_nl_pernet *pernet,
const struct mptcp_sock *msk)
{
- const struct sock *sk = (const struct sock *)msk;
struct mptcp_pm_addr_entry *entry, *ret = NULL;
msk_owned_by_me(msk);
@@ -165,16 +164,6 @@ select_local_address(const struct pm_nl_pernet *pernet,
if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
continue;
- if (entry->addr.family != sk->sk_family) {
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- if ((entry->addr.family == AF_INET &&
- !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
- (sk->sk_family == AF_INET &&
- !ipv6_addr_v4mapped(&entry->addr.addr6)))
-#endif
- continue;
- }
-
ret = entry;
break;
}
@@ -423,7 +412,9 @@ static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned
/* Fill all the remote addresses into the array addrs[],
* and return the array size.
*/
-static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullmesh,
+static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
+ struct mptcp_addr_info *local,
+ bool fullmesh,
struct mptcp_addr_info *addrs)
{
bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
@@ -443,6 +434,9 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullm
if (deny_id0)
return 0;
+ if (!mptcp_pm_addr_families_match(sk, local, &remote))
+ return 0;
+
msk->pm.subflows++;
addrs[i++] = remote;
} else {
@@ -453,6 +447,9 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullm
if (deny_id0 && !addrs[i].id)
continue;
+ if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
+ continue;
+
if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
msk->pm.subflows < subflows_max) {
msk->pm.subflows++;
@@ -603,9 +600,11 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
msk->pm.local_addr_used++;
- nr = fill_remote_addresses_vec(msk, fullmesh, addrs);
- if (nr)
- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+ __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+ nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
+ if (nr == 0)
+ continue;
+
spin_unlock_bh(&msk->pm.lock);
for (i = 0; i < nr; i++)
__mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
@@ -628,11 +627,11 @@ static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
* and return the array size.
*/
static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ struct mptcp_addr_info *remote,
struct mptcp_addr_info *addrs)
{
struct sock *sk = (struct sock *)msk;
struct mptcp_pm_addr_entry *entry;
- struct mptcp_addr_info local;
struct pm_nl_pernet *pernet;
unsigned int subflows_max;
int i = 0;
@@ -645,15 +644,8 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
continue;
- if (entry->addr.family != sk->sk_family) {
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- if ((entry->addr.family == AF_INET &&
- !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
- (sk->sk_family == AF_INET &&
- !ipv6_addr_v4mapped(&entry->addr.addr6)))
-#endif
- continue;
- }
+ if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote))
+ continue;
if (msk->pm.subflows < subflows_max) {
msk->pm.subflows++;
@@ -666,8 +658,18 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
* 'IPADDRANY' local address
*/
if (!i) {
+ struct mptcp_addr_info local;
+
memset(&local, 0, sizeof(local));
- local.family = msk->pm.remote.family;
+ local.family =
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ remote->family == AF_INET6 &&
+ ipv6_addr_v4mapped(&remote->addr6) ? AF_INET :
+#endif
+ remote->family;
+
+ if (!mptcp_pm_addr_families_match(sk, &local, remote))
+ return 0;
msk->pm.subflows++;
addrs[i++] = local;
@@ -706,7 +708,9 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
/* connect to the specified remote address, using whatever
* local address the routing configuration will pick.
*/
- nr = fill_local_addresses_vec(msk, addrs);
+ nr = fill_local_addresses_vec(msk, &remote, addrs);
+ if (nr == 0)
+ return;
msk->pm.add_addr_accepted++;
if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
@@ -1145,7 +1149,7 @@ void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ss
if (!tcp_rtx_and_write_queues_empty(ssk)) {
subflow->stale = 1;
__mptcp_retransmit_pending_data(sk);
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_SUBFLOWSTALE);
+ MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE);
}
unlock_sock_fast(ssk, slow);
@@ -1905,8 +1909,7 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
}
if (token)
- return mptcp_userspace_pm_set_flags(sock_net(skb->sk),
- token, &addr, &remote, bkup);
+ return mptcp_userspace_pm_set_flags(net, token, &addr, &remote, bkup);
spin_lock_bh(&pernet->lock);
entry = __lookup_addr(pernet, &addr.addr, lookup_by_id);
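Both pm_netlink.c hunks above replace the open-coded family checks with
mptcp_pm_addr_families_match(); its body is not part of this diff, so the
sketch below only approximates the intended semantics (a v4-mapped IPv6
address counts as IPv4, and a boolean such as loc_is_v4 would be derived from
family == AF_INET || ipv6_addr_v4mapped() on the real address structs):

#include <stdbool.h>
#include <stdio.h>

static bool families_match(bool sk_is_v4, bool sk_v6only,
			   bool loc_is_v4, bool rem_is_v4)
{
	/* an IPv4 socket can only pair IPv4 (or v4-mapped) endpoints */
	if (sk_is_v4)
		return loc_is_v4 && rem_is_v4;

	/* an IPV6_V6ONLY socket refuses anything IPv4-ish */
	if (sk_v6only)
		return !loc_is_v4 && !rem_is_v4;

	/* dual-stack socket: both endpoints just need to agree */
	return loc_is_v4 == rem_is_v4;
}

int main(void)
{
	/* IPv4 connection, plain-IPv6 local candidate: must be skipped */
	printf("%d\n", families_match(true, false, false, true)); /* 0 */
	return 0;
}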
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index ea6ad9da7493..a02d3cbf2a1b 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -59,8 +59,8 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
*/
e = sock_kmalloc(sk, sizeof(*e), GFP_ATOMIC);
if (!e) {
- spin_unlock_bh(&msk->pm.lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto append_err;
}
*e = *entry;
@@ -74,6 +74,7 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
ret = entry->addr.id;
}
+append_err:
spin_unlock_bh(&msk->pm.lock);
return ret;
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index bc6c1f62a690..3ad9c46202fc 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -923,9 +923,8 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow;
- struct sock *sk = (struct sock *)msk;
- sock_owned_by_me(sk);
+ msk_owned_by_me(msk);
mptcp_for_each_subflow(msk, subflow) {
if (READ_ONCE(subflow->data_avail))
@@ -1408,7 +1407,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
u64 linger_time;
long tout = 0;
- sock_owned_by_me(sk);
+ msk_owned_by_me(msk);
if (__mptcp_check_fallback(msk)) {
if (!msk->first)
@@ -1890,7 +1889,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
u32 time, advmss = 1;
u64 rtt_us, mstamp;
- sock_owned_by_me(sk);
+ msk_owned_by_me(msk);
if (copied <= 0)
return;
@@ -2217,7 +2216,7 @@ static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
struct mptcp_subflow_context *subflow;
int min_stale_count = INT_MAX;
- sock_owned_by_me((const struct sock *)msk);
+ msk_owned_by_me(msk);
if (__mptcp_check_fallback(msk))
return NULL;
@@ -2724,8 +2723,8 @@ static int mptcp_init_sock(struct sock *sk)
mptcp_ca_reset(sk);
sk_sockets_allocated_inc(sk);
- sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
- sk->sk_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
+ sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);
+ sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]);
return 0;
}
@@ -2876,7 +2875,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
- sk_refcnt_debug_release(sk);
sock_put(sk);
}
@@ -2892,6 +2890,12 @@ static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
return EPOLLIN | EPOLLRDNORM;
}
+static void mptcp_listen_inuse_dec(struct sock *sk)
+{
+ if (inet_sk_state_load(sk) == TCP_LISTEN)
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+}
+
bool __mptcp_close(struct sock *sk, long timeout)
{
struct mptcp_subflow_context *subflow;
@@ -2902,6 +2906,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
sk->sk_shutdown = SHUTDOWN_MASK;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
+ mptcp_listen_inuse_dec(sk);
inet_sk_state_store(sk, TCP_CLOSE);
goto cleanup;
}
@@ -3010,6 +3015,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
if (msk->fastopening)
return 0;
+ mptcp_listen_inuse_dec(sk);
inet_sk_state_store(sk, TCP_CLOSE);
mptcp_stop_timer(sk);
@@ -3648,12 +3654,13 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
static int mptcp_listen(struct socket *sock, int backlog)
{
struct mptcp_sock *msk = mptcp_sk(sock->sk);
+ struct sock *sk = sock->sk;
struct socket *ssock;
int err;
pr_debug("msk=%p", msk);
- lock_sock(sock->sk);
+ lock_sock(sk);
ssock = __mptcp_nmpc_socket(msk);
if (!ssock) {
err = -EINVAL;
@@ -3661,18 +3668,20 @@ static int mptcp_listen(struct socket *sock, int backlog)
}
mptcp_token_destroy(msk);
- inet_sk_state_store(sock->sk, TCP_LISTEN);
- sock_set_flag(sock->sk, SOCK_RCU_FREE);
+ inet_sk_state_store(sk, TCP_LISTEN);
+ sock_set_flag(sk, SOCK_RCU_FREE);
err = ssock->ops->listen(ssock, backlog);
- inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
- if (!err)
- mptcp_copy_inaddrs(sock->sk, ssock->sk);
+ inet_sk_state_store(sk, inet_sk_state_load(ssock->sk));
+ if (!err) {
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ mptcp_copy_inaddrs(sk, ssock->sk);
+ }
mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED);
unlock:
- release_sock(sock->sk);
+ release_sock(sk);
return err;
}
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 601469249da8..61fd8eabfca2 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -755,7 +755,7 @@ static inline void mptcp_token_init_request(struct request_sock *req)
int mptcp_token_new_request(struct request_sock *req);
void mptcp_token_destroy_request(struct request_sock *req);
-int mptcp_token_new_connect(struct sock *sk);
+int mptcp_token_new_connect(struct sock *ssk);
void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
struct mptcp_sock *msk);
bool mptcp_token_exists(u32 token);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 7f2c3727ab23..8a9656248b0f 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -18,7 +18,7 @@
static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
- sock_owned_by_me((const struct sock *)msk);
+ msk_owned_by_me(msk);
if (likely(!__mptcp_check_fallback(msk)))
return NULL;
@@ -1262,6 +1262,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
ssk->sk_priority = sk->sk_priority;
ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
+ ssk->sk_ipv6only = sk->sk_ipv6only;
__ip_sock_set_tos(ssk, inet_sk(sk)->tos);
if (sk->sk_userlocks & tx_rx_locks) {
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 32904c76c6a1..4ae1a7304cf0 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -26,6 +26,7 @@
#include "mib.h"
#include <trace/events/mptcp.h>
+#include <trace/events/sock.h>
static void mptcp_subflow_ops_undo_override(struct sock *ssk);
@@ -1446,6 +1447,8 @@ static void subflow_data_ready(struct sock *sk)
struct sock *parent = subflow->conn;
struct mptcp_sock *msk;
+ trace_sk_data_ready(sk);
+
msk = mptcp_sk(parent);
if (state & TCPF_LISTEN) {
/* MPJ subflows are removed from the accept queue before reaching here,
diff --git a/net/mptcp/token.c b/net/mptcp/token.c
index 65430f314a68..5bb924534387 100644
--- a/net/mptcp/token.c
+++ b/net/mptcp/token.c
@@ -134,7 +134,7 @@ int mptcp_token_new_request(struct request_sock *req)
/**
* mptcp_token_new_connect - create new key/idsn/token for subflow
- * @sk: the socket that will initiate a connection
+ * @ssk: the socket that will initiate a connection
*
* This function is called when a new outgoing mptcp connection is
* initiated.
@@ -148,11 +148,12 @@ int mptcp_token_new_request(struct request_sock *req)
*
* returns 0 on success.
*/
-int mptcp_token_new_connect(struct sock *sk)
+int mptcp_token_new_connect(struct sock *ssk)
{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
int retries = MPTCP_TOKEN_MAX_RETRIES;
+ struct sock *sk = subflow->conn;
struct token_bucket *bucket;
again:
@@ -169,12 +170,13 @@ again:
}
pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
- sk, subflow->local_key, subflow->token, subflow->idsn);
+ ssk, subflow->local_key, subflow->token, subflow->idsn);
WRITE_ONCE(msk->token, subflow->token);
__sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
bucket->chain_len++;
spin_unlock_bh(&bucket->lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
@@ -190,8 +192,10 @@ void mptcp_token_accept(struct mptcp_subflow_request_sock *req,
struct mptcp_sock *msk)
{
struct mptcp_subflow_request_sock *pos;
+ struct sock *sk = (struct sock *)msk;
struct token_bucket *bucket;
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
bucket = token_bucket(req->token);
spin_lock_bh(&bucket->lock);
@@ -370,12 +374,14 @@ void mptcp_token_destroy_request(struct request_sock *req)
*/
void mptcp_token_destroy(struct mptcp_sock *msk)
{
+ struct sock *sk = (struct sock *)msk;
struct token_bucket *bucket;
struct mptcp_sock *pos;
if (sk_unhashed((struct sock *)msk))
return;
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
bucket = token_bucket(msk->token);
spin_lock_bh(&bucket->lock);
pos = __token_lookup_msk(bucket, msk->token);
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
index 5d984bec1cd8..0758865ab658 100644
--- a/net/mptcp/token_test.c
+++ b/net/mptcp/token_test.c
@@ -57,6 +57,9 @@ static struct mptcp_sock *build_msk(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
sock_net_set((struct sock *)msk, &init_net);
+
+ /* be sure the token helpers can dereference sk->sk_prot */
+ ((struct sock *)msk)->sk_prot = &tcp_prot;
return msk;
}
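mptcp_token_new_connect() above keeps its bounded-retry allocation loop (the again: label with MPTCP_TOKEN_MAX_RETRIES attempts) while gaining the inuse accounting. A rough userspace sketch of the retry-on-collision pattern; the constants and failure code are made up for the sketch:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_RETRIES 4                   /* stands in for MPTCP_TOKEN_MAX_RETRIES */
#define BUCKETS 16

static bool bucket_used[BUCKETS];

static int token_new(unsigned int *token_out)
{
	int retries = MAX_RETRIES;
	unsigned int token;

again:
	token = (unsigned int)rand();   /* the kernel derives key/token/idsn */
	if (bucket_used[token % BUCKETS]) {     /* collision in the hash */
		if (--retries > 0)
			goto again;
		return -1;              /* give up after bounded retries */
	}
	bucket_used[token % BUCKETS] = true;    /* __sk_nulls_add_node_rcu() */
	*token_out = token;
	return 0;
}

int main(void)
{
	unsigned int tok;

	for (int i = 0; i < 8; i++)
		if (token_new(&tok) == 0)
			printf("token %u -> bucket %u\n", tok, tok % BUCKETS);
	return 0;
}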
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f71b41c7ce2f..4d6737160857 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -189,6 +189,9 @@ config NF_CONNTRACK_LABELS
to connection tracking entries. It can be used with xtables connlabel
match and the nftables ct expression.
+config NF_CONNTRACK_OVS
+ bool
+
config NF_CT_PROTO_DCCP
bool 'DCCP protocol connection tracking support'
depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 3754eb06fb41..5ffef1cd6143 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -11,6 +11,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_OVS) += nf_conntrack_ovs.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
@@ -98,6 +99,12 @@ nf_tables-objs += nft_set_pipapo_avx2.o
endif
endif
+ifdef CONFIG_NFT_CT
+ifdef CONFIG_RETPOLINE
+nf_tables-objs += nft_ct_fast.o
+endif
+endif
+
obj-$(CONFIG_NF_TABLES) += nf_tables.o
obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 5a6705a0e4ec..b2fdbbed2b4b 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -702,6 +702,22 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
}
EXPORT_SYMBOL(nf_conntrack_destroy);
+void nf_ct_set_closing(struct nf_conntrack *nfct)
+{
+ const struct nf_ct_hook *ct_hook;
+
+ if (!nfct)
+ return;
+
+ rcu_read_lock();
+ ct_hook = rcu_dereference(nf_ct_hook);
+ if (ct_hook)
+ ct_hook->set_closing(nfct);
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_ct_set_closing);
+
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
{
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
index 3c273483df23..b1ea054bb82c 100644
--- a/net/netfilter/ipset/Kconfig
+++ b/net/netfilter/ipset/Kconfig
@@ -30,7 +30,7 @@ config IP_SET_BITMAP_IP
depends on IP_SET
help
This option adds the bitmap:ip set type support, by which one
- can store IPv4 addresses (or network addresse) from a range.
+ can store IPv4 addresses (or network addresses) from a range.
To compile it as a module, choose M here. If unsure, say N.
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index ce2a1549b304..c5970ba416ae 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -549,7 +549,7 @@ void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
__set_bit(row, kd->avail);
if (!kd->tick_len[row]) {
RCU_INIT_POINTER(kd->ticks[row], NULL);
- kfree_rcu(td);
+ kfree_rcu(td, rcu_head);
}
kd->est_count--;
if (kd->est_count) {
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 029171379884..80448885c3d7 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -994,7 +994,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
old_dsfield = ipv4_get_dsfield(old_iph);
*ttl = old_iph->ttl;
if (payload_len)
- *payload_len = ntohs(old_iph->tot_len);
+ *payload_len = skb_ip_totlen(skb);
}
/* Implement full-functionality option for ECN encapsulation */
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index 24002bc61e07..34913521c385 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -249,7 +249,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
-struct nf_conn___init *
+__bpf_kfunc struct nf_conn___init *
bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
@@ -283,7 +283,7 @@ bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
-struct nf_conn *
+__bpf_kfunc struct nf_conn *
bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
@@ -316,7 +316,7 @@ bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
-struct nf_conn___init *
+__bpf_kfunc struct nf_conn___init *
bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
@@ -351,7 +351,7 @@ bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
-struct nf_conn *
+__bpf_kfunc struct nf_conn *
bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
@@ -376,7 +376,7 @@ bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
* @nfct - Pointer to referenced nf_conn___init object, obtained
* using bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
*/
-struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
+__bpf_kfunc struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
{
struct nf_conn *nfct = (struct nf_conn *)nfct_i;
int err;
@@ -400,7 +400,7 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
* @nf_conn - Pointer to referenced nf_conn object, obtained using
* bpf_xdp_ct_lookup or bpf_skb_ct_lookup.
*/
-void bpf_ct_release(struct nf_conn *nfct)
+__bpf_kfunc void bpf_ct_release(struct nf_conn *nfct)
{
if (!nfct)
return;
@@ -417,7 +417,7 @@ void bpf_ct_release(struct nf_conn *nfct)
* bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
* @timeout - Timeout in msecs.
*/
-void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
+__bpf_kfunc void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
{
__nf_ct_set_timeout((struct nf_conn *)nfct, msecs_to_jiffies(timeout));
}
@@ -432,7 +432,7 @@ void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
* bpf_ct_insert_entry, bpf_xdp_ct_lookup, or bpf_skb_ct_lookup.
* @timeout - New timeout in msecs.
*/
-int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
+__bpf_kfunc int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
{
return __nf_ct_change_timeout(nfct, msecs_to_jiffies(timeout));
}
@@ -447,7 +447,7 @@ int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
* bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
* @status - New status value.
*/
-int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
+__bpf_kfunc int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
{
return nf_ct_change_status_common((struct nf_conn *)nfct, status);
}
@@ -462,7 +462,7 @@ int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
* bpf_ct_insert_entry, bpf_xdp_ct_lookup or bpf_skb_ct_lookup.
* @status - New status value.
*/
-int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
+__bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
{
return nf_ct_change_status_common(nfct, status);
}
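Every conntrack kfunc above gains the __bpf_kfunc tag, which keeps the function as a real out-of-line symbol and prevents the compiler from discarding it as unused even though nothing in the kernel calls it directly. A userspace approximation of what such an annotation buys; the macro body here is an assumption for the sketch, not the kernel's definition:

#include <stdio.h>

#define my_kfunc __attribute__((used, noinline))

my_kfunc int demo_change_timeout(int timeout_ms)
{
	return timeout_ms * 1000;       /* stand-in logic: msecs -> usecs */
}

int main(void)
{
	/* Without used/noinline, a helper with no obvious caller could
	 * be inlined away or dropped at -O2, leaving no symbol for
	 * external machinery (for kfuncs: the BPF verifier) to resolve.
	 */
	printf("%d\n", demo_change_timeout(30));
	return 0;
}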
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 496c4920505b..70c4f892174e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -514,7 +514,6 @@ EXPORT_SYMBOL_GPL(nf_ct_get_id);
static void
clean_from_lists(struct nf_conn *ct)
{
- pr_debug("clean_from_lists(%p)\n", ct);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
@@ -582,7 +581,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
- pr_debug("%s(%p)\n", __func__, ct);
WARN_ON(refcount_read(&nfct->use) != 0);
if (unlikely(nf_ct_is_template(ct))) {
@@ -603,7 +601,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
if (ct->master)
nf_ct_put(ct->master);
- pr_debug("%s: returning ct=%p to slab\n", __func__, ct);
nf_conntrack_free(ct);
}
EXPORT_SYMBOL(nf_ct_destroy);
@@ -786,8 +783,6 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
- rcu_read_lock();
-
h = ____nf_conntrack_find(net, zone, tuple, hash);
if (h) {
/* We have a candidate that matches the tuple we're interested
@@ -799,7 +794,7 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
smp_acquire__after_ctrl_dep();
if (likely(nf_ct_key_equal(h, tuple, zone, net)))
- goto found;
+ return h;
/* TYPESAFE_BY_RCU recycled the candidate */
nf_ct_put(ct);
@@ -807,8 +802,6 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
h = NULL;
}
-found:
- rcu_read_unlock();
return h;
}
@@ -820,16 +813,21 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
struct nf_conntrack_tuple_hash *thash;
+ rcu_read_lock();
+
thash = __nf_conntrack_find_get(net, zone, tuple,
hash_conntrack_raw(tuple, zone_id, net));
if (thash)
- return thash;
+ goto out_unlock;
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
if (rid != zone_id)
- return __nf_conntrack_find_get(net, zone, tuple,
- hash_conntrack_raw(tuple, rid, net));
+ thash = __nf_conntrack_find_get(net, zone, tuple,
+ hash_conntrack_raw(tuple, rid, net));
+
+out_unlock:
+ rcu_read_unlock();
return thash;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
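The refactor above hoists rcu_read_lock() out of __nf_conntrack_find_get() into nf_conntrack_find_get(), so the lookup in the original zone and the fallback lookup in the reply zone run inside one read-side critical section instead of two. A structural sketch with stand-in no-op RCU macros:

#include <stdio.h>

#define rcu_read_lock()   (void)0       /* stand-in: enters an RCU section */
#define rcu_read_unlock() (void)0

static const char *lookup_one(int id)   /* caller must hold the read lock */
{
	return id == 1 ? "entry-1" : NULL;
}

static const char *find_get(int zone_id, int reply_id)
{
	const char *h;

	rcu_read_lock();
	h = lookup_one(zone_id);
	if (!h && reply_id != zone_id)
		h = lookup_one(reply_id);       /* same read-side section */
	rcu_read_unlock();
	return h;
}

int main(void)
{
	const char *h = find_get(2, 1);

	printf("%s\n", h ? h : "none");
	return 0;
}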
@@ -1210,7 +1208,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
goto dying;
}
- pr_debug("Confirming conntrack %p\n", ct);
/* We have to check the DYING flag after unlink to prevent
* a race against nf_ct_get_next_corpse() possibly called from
* user context, else we insert an already 'dead' hash, blocking
@@ -1374,9 +1371,6 @@ static unsigned int early_drop_list(struct net *net,
hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
- if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
- continue;
-
if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
continue;
@@ -1446,11 +1440,14 @@ static bool gc_worker_skip_ct(const struct nf_conn *ct)
static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
+ u8 protonum = nf_ct_protonum(ct);
+ if (test_bit(IPS_OFFLOAD_BIT, &ct->status) && protonum != IPPROTO_UDP)
+ return false;
if (!test_bit(IPS_ASSURED_BIT, &ct->status))
return true;
- l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(protonum);
if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
return true;
@@ -1507,7 +1504,8 @@ static void gc_worker(struct work_struct *work)
if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
nf_ct_offload_timeout(tmp);
- continue;
+ if (!nf_conntrack_max95)
+ continue;
}
if (expired_count > GC_SCAN_EXPIRED_MAX) {
@@ -1721,10 +1719,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_zone tmp;
struct nf_conntrack_net *cnet;
- if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
- pr_debug("Can't invert tuple.\n");
+ if (!nf_ct_invert_tuple(&repl_tuple, tuple))
return NULL;
- }
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
@@ -1764,8 +1760,6 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
spin_lock_bh(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple);
if (exp) {
- pr_debug("expectation arrives ct=%p exp=%p\n",
- ct, exp);
/* Welcome, Mr. Bond. We've been expecting you... */
__set_bit(IPS_EXPECTED_BIT, &ct->status);
/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
@@ -1829,10 +1823,8 @@ resolve_normal_ct(struct nf_conn *tmpl,
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
- &tuple)) {
- pr_debug("Can't get tuple\n");
+ &tuple))
return 0;
- }
/* look for tuple match */
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
@@ -1864,17 +1856,15 @@ resolve_normal_ct(struct nf_conn *tmpl,
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
ctinfo = IP_CT_ESTABLISHED_REPLY;
} else {
+ unsigned long status = READ_ONCE(ct->status);
+
/* Once we've had two way comms, always ESTABLISHED. */
- if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
- pr_debug("normal packet for %p\n", ct);
+ if (likely(status & IPS_SEEN_REPLY))
ctinfo = IP_CT_ESTABLISHED;
- } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
- pr_debug("related packet for %p\n", ct);
+ else if (status & IPS_EXPECTED)
ctinfo = IP_CT_RELATED;
- } else {
- pr_debug("new packet for %p\n", ct);
+ else
ctinfo = IP_CT_NEW;
- }
}
nf_ct_set(skb, ct, ctinfo);
return 0;
@@ -1988,7 +1978,6 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
/* rcu_read_lock()ed by nf_hook_thresh */
dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
if (dataoff <= 0) {
- pr_debug("not prepared to track yet or error occurred\n");
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
@@ -2027,7 +2016,6 @@ repeat:
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
- pr_debug("nf_conntrack_in: Can't track with proto module\n");
nf_ct_put(ct);
skb->_nfct = 0;
/* Special case: TCP tracker reports an attempt to reopen a
@@ -2066,7 +2054,6 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
/* Should be unconfirmed, so not in hash table yet */
WARN_ON(nf_ct_is_confirmed(ct));
- pr_debug("Altering reply tuple of %p to ", ct);
nf_ct_dump_tuple(newreply);
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
@@ -2761,11 +2748,23 @@ err_cachep:
return ret;
}
+static void nf_conntrack_set_closing(struct nf_conntrack *nfct)
+{
+ struct nf_conn *ct = nf_ct_to_nf_conn(nfct);
+
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ nf_conntrack_tcp_set_closing(ct);
+ break;
+ }
+}
+
static const struct nf_ct_hook nf_conntrack_hook = {
.update = nf_conntrack_update,
.destroy = nf_ct_destroy,
.get_tuple_skb = nf_conntrack_get_tuple_skb,
.attach = nf_conntrack_attach,
+ .set_closing = nf_conntrack_set_closing,
};
void nf_conntrack_init_end(void)
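nf_ct_set_closing() resolves the global nf_ct_hook under RCU and invokes the new ->set_closing callback, which conntrack implements by dispatching on the L4 protocol (only TCP does anything so far). An illustrative userspace model of that hook shape, with a plain pointer standing in for the RCU-protected one:

#include <stdio.h>

struct conn { int protonum; };

struct ct_hook {
	void (*set_closing)(struct conn *c);
};

static void tcp_set_closing(struct conn *c)
{
	printf("proto %d: -> CLOSE, shrink timeout\n", c->protonum);
}

static void hook_set_closing(struct conn *c)
{
	switch (c->protonum) {
	case 6:                         /* IPPROTO_TCP */
		tcp_set_closing(c);
		break;                  /* other protocols: nothing to do */
	}
}

static const struct ct_hook hook = { .set_closing = hook_set_closing };
static const struct ct_hook *ct_hook = &hook;   /* NULL if module unloaded */

static void ct_set_closing(struct conn *c)
{
	if (!c)
		return;
	if (ct_hook)                    /* rcu_dereference() in the kernel */
		ct_hook->set_closing(c);
}

int main(void)
{
	struct conn c = { .protonum = 6 };

	ct_set_closing(&c);
	return 0;
}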
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 48ea6d0264b5..0c4db2f2ac43 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -242,104 +242,6 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
}
EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper);
-/* 'skb' should already be pulled to nh_ofs. */
-int nf_ct_helper(struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo, u16 proto)
-{
- const struct nf_conntrack_helper *helper;
- const struct nf_conn_help *help;
- unsigned int protoff;
- int err;
-
- if (ctinfo == IP_CT_RELATED_REPLY)
- return NF_ACCEPT;
-
- help = nfct_help(ct);
- if (!help)
- return NF_ACCEPT;
-
- helper = rcu_dereference(help->helper);
- if (!helper)
- return NF_ACCEPT;
-
- if (helper->tuple.src.l3num != NFPROTO_UNSPEC &&
- helper->tuple.src.l3num != proto)
- return NF_ACCEPT;
-
- switch (proto) {
- case NFPROTO_IPV4:
- protoff = ip_hdrlen(skb);
- proto = ip_hdr(skb)->protocol;
- break;
- case NFPROTO_IPV6: {
- u8 nexthdr = ipv6_hdr(skb)->nexthdr;
- __be16 frag_off;
- int ofs;
-
- ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
- &frag_off);
- if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
- pr_debug("proto header not found\n");
- return NF_ACCEPT;
- }
- protoff = ofs;
- proto = nexthdr;
- break;
- }
- default:
- WARN_ONCE(1, "helper invoked on non-IP family!");
- return NF_DROP;
- }
-
- if (helper->tuple.dst.protonum != proto)
- return NF_ACCEPT;
-
- err = helper->help(skb, protoff, ct, ctinfo);
- if (err != NF_ACCEPT)
- return err;
-
- /* Adjust seqs after helper. This is needed due to some helpers (e.g.,
- * FTP with NAT) adusting the TCP payload size when mangling IP
- * addresses and/or port numbers in the text-based control connection.
- */
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
- !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
- return NF_DROP;
- return NF_ACCEPT;
-}
-EXPORT_SYMBOL_GPL(nf_ct_helper);
-
-int nf_ct_add_helper(struct nf_conn *ct, const char *name, u8 family,
- u8 proto, bool nat, struct nf_conntrack_helper **hp)
-{
- struct nf_conntrack_helper *helper;
- struct nf_conn_help *help;
- int ret = 0;
-
- helper = nf_conntrack_helper_try_module_get(name, family, proto);
- if (!helper)
- return -EINVAL;
-
- help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
- if (!help) {
- nf_conntrack_helper_put(helper);
- return -ENOMEM;
- }
-#if IS_ENABLED(CONFIG_NF_NAT)
- if (nat) {
- ret = nf_nat_helper_try_module_get(name, family, proto);
- if (ret) {
- nf_conntrack_helper_put(helper);
- return ret;
- }
- }
-#endif
- rcu_assign_pointer(help->helper, helper);
- *hp = helper;
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_add_helper);
-
/* appropriate ct lock protecting must be taken by caller */
static int unhelp(struct nf_conn *ct, void *me)
{
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1286ae7d4609..308fc0023c7e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3866,7 +3866,7 @@ static int __init ctnetlink_init(void)
{
int ret;
- BUILD_BUG_ON(sizeof(struct ctnetlink_list_dump_ctx) > sizeof_field(struct netlink_callback, ctx));
+ NL_ASSERT_DUMP_CTX_FITS(struct ctnetlink_list_dump_ctx);
ret = nfnetlink_subsys_register(&ctnl_subsys);
if (ret < 0) {
diff --git a/net/netfilter/nf_conntrack_ovs.c b/net/netfilter/nf_conntrack_ovs.c
new file mode 100644
index 000000000000..52b776bdf526
--- /dev/null
+++ b/net/netfilter/nf_conntrack_ovs.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Conntrack helper functions for openvswitch, shared by OVS and TC conntrack. */
+
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#include <net/ipv6_frag.h>
+#include <net/ip.h>
+
+/* 'skb' should already be pulled to nh_ofs. */
+int nf_ct_helper(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, u16 proto)
+{
+ const struct nf_conntrack_helper *helper;
+ const struct nf_conn_help *help;
+ unsigned int protoff;
+ int err;
+
+ if (ctinfo == IP_CT_RELATED_REPLY)
+ return NF_ACCEPT;
+
+ help = nfct_help(ct);
+ if (!help)
+ return NF_ACCEPT;
+
+ helper = rcu_dereference(help->helper);
+ if (!helper)
+ return NF_ACCEPT;
+
+ if (helper->tuple.src.l3num != NFPROTO_UNSPEC &&
+ helper->tuple.src.l3num != proto)
+ return NF_ACCEPT;
+
+ switch (proto) {
+ case NFPROTO_IPV4:
+ protoff = ip_hdrlen(skb);
+ proto = ip_hdr(skb)->protocol;
+ break;
+ case NFPROTO_IPV6: {
+ u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+ __be16 frag_off;
+ int ofs;
+
+ ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+ &frag_off);
+ if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
+ pr_debug("proto header not found\n");
+ return NF_ACCEPT;
+ }
+ protoff = ofs;
+ proto = nexthdr;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "helper invoked on non-IP family!");
+ return NF_DROP;
+ }
+
+ if (helper->tuple.dst.protonum != proto)
+ return NF_ACCEPT;
+
+ err = helper->help(skb, protoff, ct, ctinfo);
+ if (err != NF_ACCEPT)
+ return err;
+
+ /* Adjust seqs after helper. This is needed due to some helpers (e.g.,
+ * FTP with NAT) adjusting the TCP payload size when mangling IP
+ * addresses and/or port numbers in the text-based control connection.
+ */
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
+ return NF_DROP;
+ return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper);
+
+int nf_ct_add_helper(struct nf_conn *ct, const char *name, u8 family,
+ u8 proto, bool nat, struct nf_conntrack_helper **hp)
+{
+ struct nf_conntrack_helper *helper;
+ struct nf_conn_help *help;
+ int ret = 0;
+
+ helper = nf_conntrack_helper_try_module_get(name, family, proto);
+ if (!helper)
+ return -EINVAL;
+
+ help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
+ if (!help) {
+ nf_conntrack_helper_put(helper);
+ return -ENOMEM;
+ }
+#if IS_ENABLED(CONFIG_NF_NAT)
+ if (nat) {
+ ret = nf_nat_helper_try_module_get(name, family, proto);
+ if (ret) {
+ nf_conntrack_helper_put(helper);
+ return ret;
+ }
+ }
+#endif
+ rcu_assign_pointer(help->helper, helper);
+ *hp = helper;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_ct_add_helper);
+
+/* Trim the skb to the length specified by the IP/IPv6 header,
+ * removing any trailing lower-layer padding. This prepares the skb
+ * for higher-layer processing that assumes skb->len excludes padding
+ * (such as nf_ip_checksum). The caller needs to pull the skb to the
+ * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
+ */
+int nf_ct_skb_network_trim(struct sk_buff *skb, int family)
+{
+ unsigned int len;
+
+ switch (family) {
+ case NFPROTO_IPV4:
+ len = skb_ip_totlen(skb);
+ break;
+ case NFPROTO_IPV6:
+ len = sizeof(struct ipv6hdr)
+ + ntohs(ipv6_hdr(skb)->payload_len);
+ break;
+ default:
+ len = skb->len;
+ }
+
+ return pskb_trim_rcsum(skb, len);
+}
+EXPORT_SYMBOL_GPL(nf_ct_skb_network_trim);
+
+/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
+ * value if 'skb' is freed.
+ */
+int nf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ u16 zone, u8 family, u8 *proto, u16 *mru)
+{
+ int err;
+
+ if (family == NFPROTO_IPV4) {
+ enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
+
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ local_bh_disable();
+ err = ip_defrag(net, skb, user);
+ local_bh_enable();
+ if (err)
+ return err;
+
+ *mru = IPCB(skb)->frag_max_size;
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ } else if (family == NFPROTO_IPV6) {
+ enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+
+ memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+ err = nf_ct_frag6_gather(net, skb, user);
+ if (err) {
+ if (err != -EINPROGRESS)
+ kfree_skb(skb);
+ return err;
+ }
+
+ *proto = ipv6_hdr(skb)->nexthdr;
+ *mru = IP6CB(skb)->frag_max_size;
+#endif
+ } else {
+ kfree_skb(skb);
+ return -EPFNOSUPPORT;
+ }
+
+ skb_clear_hash(skb);
+ skb->ignore_df = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_handle_fragments);
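nf_ct_skb_network_trim() above cuts the skb back to the length the IP header itself claims, discarding trailing link-layer padding before checksum-sensitive processing; for IPv4 it goes through skb_ip_totlen() so frames with a special-cased tot_len (IPv4 big TCP) are still handled. A userspace model of the IPv4 case, with a plain buffer standing in for the skb:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pkt {
	uint8_t data[128];
	size_t  len;                    /* models skb->len */
};

static int network_trim_v4(struct pkt *p)
{
	uint16_t tot_len;

	if (p->len < 20)                /* shorter than a minimal IPv4 header */
		return -1;
	memcpy(&tot_len, p->data + 2, sizeof(tot_len)); /* iph->tot_len */
	tot_len = ntohs(tot_len);
	if (tot_len > p->len)
		return -1;              /* header claims more than we have */
	p->len = tot_len;               /* drop the trailing padding */
	return 0;
}

int main(void)
{
	struct pkt p = { .len = 60 };   /* IP says 46 bytes, 14 bytes of pad */
	uint16_t tot = htons(46);

	memcpy(p.data + 2, &tot, sizeof(tot));
	network_trim_v4(&p);
	printf("len after trim: %zu\n", p.len);         /* 46 */
	return 0;
}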
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ccef340be575..c928ff63b10e 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -284,16 +284,11 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
/* We only do TCP and SCTP at the moment: is there a better way? */
if (tuple.dst.protonum != IPPROTO_TCP &&
- tuple.dst.protonum != IPPROTO_SCTP) {
- pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
+ tuple.dst.protonum != IPPROTO_SCTP)
return -ENOPROTOOPT;
- }
- if ((unsigned int)*len < sizeof(struct sockaddr_in)) {
- pr_debug("SO_ORIGINAL_DST: len %d not %zu\n",
- *len, sizeof(struct sockaddr_in));
+ if ((unsigned int)*len < sizeof(struct sockaddr_in))
return -EINVAL;
- }
h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (h) {
@@ -307,17 +302,12 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
.tuple.dst.u3.ip;
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
- pr_debug("SO_ORIGINAL_DST: %pI4 %u\n",
- &sin.sin_addr.s_addr, ntohs(sin.sin_port));
nf_ct_put(ct);
if (copy_to_user(user, &sin, sizeof(sin)) != 0)
return -EFAULT;
else
return 0;
}
- pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n",
- &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port),
- &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port));
return -ENOENT;
}
@@ -360,12 +350,8 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
return -EINVAL;
h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
- if (!h) {
- pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
- &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
- &tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port));
+ if (!h)
return -ENOENT;
- }
ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 011d414038ea..91eacc9b0b98 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -153,7 +153,8 @@ for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0; \
static int do_basic_checks(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- unsigned long *map)
+ unsigned long *map,
+ const struct nf_hook_state *state)
{
u_int32_t offset, count;
struct sctp_chunkhdr _sch, *sch;
@@ -162,8 +163,6 @@ static int do_basic_checks(struct nf_conn *ct,
flag = 0;
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
- pr_debug("Chunk Num: %d Type: %d\n", count, sch->type);
-
if (sch->type == SCTP_CID_INIT ||
sch->type == SCTP_CID_INIT_ACK ||
sch->type == SCTP_CID_SHUTDOWN_COMPLETE)
@@ -178,7 +177,9 @@ static int do_basic_checks(struct nf_conn *ct,
sch->type == SCTP_CID_COOKIE_ECHO ||
flag) &&
count != 0) || !sch->length) {
- pr_debug("Basic checks failed\n");
+ nf_ct_l4proto_log_invalid(skb, ct, state,
+ "%s failed. chunk num %d, type %d, len %d flag %d\n",
+ __func__, count, sch->type, sch->length, flag);
return 1;
}
@@ -186,7 +187,6 @@ static int do_basic_checks(struct nf_conn *ct,
set_bit(sch->type, map);
}
- pr_debug("Basic checks passed\n");
return count == 0;
}
@@ -196,64 +196,47 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
{
int i;
- pr_debug("Chunk type: %d\n", chunk_type);
-
switch (chunk_type) {
case SCTP_CID_INIT:
- pr_debug("SCTP_CID_INIT\n");
i = 0;
break;
case SCTP_CID_INIT_ACK:
- pr_debug("SCTP_CID_INIT_ACK\n");
i = 1;
break;
case SCTP_CID_ABORT:
- pr_debug("SCTP_CID_ABORT\n");
i = 2;
break;
case SCTP_CID_SHUTDOWN:
- pr_debug("SCTP_CID_SHUTDOWN\n");
i = 3;
break;
case SCTP_CID_SHUTDOWN_ACK:
- pr_debug("SCTP_CID_SHUTDOWN_ACK\n");
i = 4;
break;
case SCTP_CID_ERROR:
- pr_debug("SCTP_CID_ERROR\n");
i = 5;
break;
case SCTP_CID_COOKIE_ECHO:
- pr_debug("SCTP_CID_COOKIE_ECHO\n");
i = 6;
break;
case SCTP_CID_COOKIE_ACK:
- pr_debug("SCTP_CID_COOKIE_ACK\n");
i = 7;
break;
case SCTP_CID_SHUTDOWN_COMPLETE:
- pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
i = 8;
break;
case SCTP_CID_HEARTBEAT:
- pr_debug("SCTP_CID_HEARTBEAT");
i = 9;
break;
case SCTP_CID_HEARTBEAT_ACK:
- pr_debug("SCTP_CID_HEARTBEAT_ACK");
i = 10;
break;
default:
/* Other chunks like DATA or SACK do not change the state */
- pr_debug("Unknown chunk type, Will stay in %s\n",
- sctp_conntrack_names[cur_state]);
+ pr_debug("Unknown chunk type %d, Will stay in %s\n",
+ chunk_type, sctp_conntrack_names[cur_state]);
return cur_state;
}
- pr_debug("dir: %d cur_state: %s chunk_type: %d new_state: %s\n",
- dir, sctp_conntrack_names[cur_state], chunk_type,
- sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]);
-
return sctp_conntracks[dir][i][cur_state];
}
@@ -370,7 +353,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
if (sh == NULL)
goto out;
- if (do_basic_checks(ct, skb, dataoff, map) != 0)
+ if (do_basic_checks(ct, skb, dataoff, map, state) != 0)
goto out;
if (!nf_ct_is_confirmed(ct)) {
@@ -393,7 +376,9 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
!test_bit(SCTP_CID_HEARTBEAT, map) &&
!test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
sh->vtag != ct->proto.sctp.vtag[dir]) {
- pr_debug("Verification tag check failed\n");
+ nf_ct_l4proto_log_invalid(skb, ct, state,
+ "verification tag check failed %x vs %x for dir %d",
+ sh->vtag, ct->proto.sctp.vtag[dir], dir);
goto out;
}
@@ -468,9 +453,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
/* Invalid */
if (new_state == SCTP_CONNTRACK_MAX) {
- pr_debug("nf_conntrack_sctp: Invalid dir=%i ctype=%u "
- "conntrack=%u\n",
- dir, sch->type, old_state);
+ nf_ct_l4proto_log_invalid(skb, ct, state,
+ "Invalid, old_state %d, dir %d, type %d",
+ old_state, dir, sch->type);
+
goto out_unlock;
}
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3ac1af6f59fc..4018acb1d674 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -911,6 +911,41 @@ static bool tcp_can_early_drop(const struct nf_conn *ct)
return false;
}
+void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
+{
+ enum tcp_conntrack old_state;
+ const unsigned int *timeouts;
+ u32 timeout;
+
+ if (!nf_ct_is_confirmed(ct))
+ return;
+
+ spin_lock_bh(&ct->lock);
+ old_state = ct->proto.tcp.state;
+ ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
+
+ if (old_state == TCP_CONNTRACK_CLOSE ||
+ test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
+ spin_unlock_bh(&ct->lock);
+ return;
+ }
+
+ timeouts = nf_ct_timeout_lookup(ct);
+ if (!timeouts) {
+ const struct nf_tcp_net *tn;
+
+ tn = nf_tcp_pernet(nf_ct_net(ct));
+ timeouts = tn->timeouts;
+ }
+
+ timeout = timeouts[TCP_CONNTRACK_CLOSE];
+ WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
+
+ spin_unlock_bh(&ct->lock);
+
+ nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+}
+
static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
{
state->td_end = 0;
@@ -930,7 +965,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
- struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
enum nf_ct_tcp_action res;
@@ -954,7 +988,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
dir = CTINFO2DIR(ctinfo);
index = get_conntrack_index(th);
new_state = tcp_conntracks[dir][index][old_state];
- tuple = &ct->tuplehash[dir].tuple;
switch (new_state) {
case TCP_CONNTRACK_SYN_SENT:
@@ -1232,13 +1265,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
- pr_debug("tcp_conntracks: ");
- nf_ct_dump_tuple(tuple);
- pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
- (th->syn ? 1 : 0), (th->ack ? 1 : 0),
- (th->fin ? 1 : 0), (th->rst ? 1 : 0),
- old_state, new_state);
-
ct->proto.tcp.state = new_state;
if (old_state != new_state
&& new_state == TCP_CONNTRACK_FIN_WAIT)
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 3b516cffc779..0030fbe8885c 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -88,6 +88,7 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
const struct nf_hook_state *state)
{
unsigned int *timeouts;
+ unsigned long status;
if (udp_error(skb, dataoff, state))
return -NF_ACCEPT;
@@ -96,26 +97,27 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
if (!timeouts)
timeouts = udp_get_timeouts(nf_ct_net(ct));
- if (!nf_ct_is_confirmed(ct))
+ status = READ_ONCE(ct->status);
+ if ((status & IPS_CONFIRMED) == 0)
ct->proto.udp.stream_ts = 2 * HZ + jiffies;
/* If we've seen traffic both ways, this is some kind of UDP
* stream. Set Assured.
*/
- if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ if (status & IPS_SEEN_REPLY) {
unsigned long extra = timeouts[UDP_CT_UNREPLIED];
bool stream = false;
/* Still active after two seconds? Extend timeout. */
if (time_after(jiffies, ct->proto.udp.stream_ts)) {
extra = timeouts[UDP_CT_REPLIED];
- stream = true;
+ stream = (status & IPS_ASSURED) == 0;
}
nf_ct_refresh_acct(ct, ctinfo, skb, extra);
/* never set ASSURED for IPS_NAT_CLASH, they time out soon */
- if (unlikely((ct->status & IPS_NAT_CLASH)))
+ if (unlikely((status & IPS_NAT_CLASH)))
return NF_ACCEPT;
/* Also, more likely to be important, and not a probe */
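The UDP tracker above switches to a single READ_ONCE() snapshot of ct->status and derives every decision (confirmed, seen-reply, NAT clash, newly assured) from that one value, so a concurrently updated flag word cannot be observed half-changed across checks, and an already-ASSURED stream is not re-flagged. A C11-atomics model of the pattern; the flag values are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IPS_SEEN_REPLY  (1u << 1)       /* illustrative bit positions */
#define IPS_ASSURED     (1u << 2)

static _Atomic unsigned long ct_status;

static void handle_packet(void)
{
	/* one snapshot, like READ_ONCE(ct->status) */
	unsigned long status = atomic_load_explicit(&ct_status,
						    memory_order_relaxed);

	if (status & IPS_SEEN_REPLY) {
		/* only report a new stream if ASSURED was not already
		 * set in the same snapshot we decide from
		 */
		bool stream = (status & IPS_ASSURED) == 0;

		printf("replied%s\n", stream ? ", newly assured" : "");
	}
}

int main(void)
{
	atomic_store(&ct_status, IPS_SEEN_REPLY);
	handle_packet();                /* replied, newly assured */
	atomic_store(&ct_status, IPS_SEEN_REPLY | IPS_ASSURED);
	handle_packet();                /* replied */
	return 0;
}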
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 460294bd4b60..57f6724c99a7 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -275,7 +275,7 @@ static const char* l4proto_name(u16 proto)
return "unknown";
}
-static unsigned int
+static void
seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
{
struct nf_conn_acct *acct;
@@ -283,14 +283,12 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
acct = nf_conn_acct_find(ct);
if (!acct)
- return 0;
+ return;
counter = acct->counter;
seq_printf(s, "packets=%llu bytes=%llu ",
(unsigned long long)atomic64_read(&counter[dir].packets),
(unsigned long long)atomic64_read(&counter[dir].bytes));
-
- return 0;
}
/* return 0 on success, 1 in case of error */
@@ -342,8 +340,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
if (seq_has_overflowed(s))
goto release;
- if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
- goto release;
+ seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL);
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
seq_puts(s, "[UNREPLIED] ");
@@ -352,8 +349,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
- if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
- goto release;
+ seq_print_acct(s, ct, IP_CT_DIR_REPLY);
if (test_bit(IPS_HW_OFFLOAD_BIT, &ct->status))
seq_puts(s, "[HW_OFFLOAD] ");
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 81c26a96c30b..04bd0ed4d2ae 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -193,8 +193,11 @@ static void flow_offload_fixup_ct(struct nf_conn *ct)
timeout -= tn->offload_timeout;
} else if (l4num == IPPROTO_UDP) {
struct nf_udp_net *tn = nf_udp_pernet(net);
+ enum udp_conntrack state =
+ test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
+ UDP_CT_REPLIED : UDP_CT_UNREPLIED;
- timeout = tn->timeouts[UDP_CT_REPLIED];
+ timeout = tn->timeouts[state];
timeout -= tn->offload_timeout;
} else {
return;
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 0ccabf3fa6aa..9505f9d188ff 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -39,7 +39,7 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
}
static int nf_flow_rule_route_inet(struct net *net,
- const struct flow_offload *flow,
+ struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 4d9b99abe37d..1c26f03fc661 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -679,7 +679,7 @@ nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
return 0;
}
-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
@@ -704,7 +704,7 @@ int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);
-int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
@@ -735,7 +735,7 @@ nf_flow_offload_rule_alloc(struct net *net,
{
const struct nf_flowtable *flowtable = offload->flowtable;
const struct flow_offload_tuple *tuple, *other_tuple;
- const struct flow_offload *flow = offload->flow;
+ struct flow_offload *flow = offload->flow;
struct dst_entry *other_dst = NULL;
struct nf_flow_rule *flow_rule;
int err = -ENOMEM;
@@ -895,8 +895,9 @@ static int flow_offload_rule_add(struct flow_offload_work *offload,
ok_count += flow_offload_tuple_add(offload, flow_rule[0],
FLOW_OFFLOAD_DIR_ORIGINAL);
- ok_count += flow_offload_tuple_add(offload, flow_rule[1],
- FLOW_OFFLOAD_DIR_REPLY);
+ if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
+ ok_count += flow_offload_tuple_add(offload, flow_rule[1],
+ FLOW_OFFLOAD_DIR_REPLY);
if (ok_count == 0)
return -ENOENT;
@@ -926,7 +927,8 @@ static void flow_offload_work_del(struct flow_offload_work *offload)
{
clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
- flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+ if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
+ flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}
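The offload-work changes gate every reply-direction operation (rule add, delete, stats poll) on a per-flow NF_FLOW_HW_BIDIRECTIONAL flag, so unidirectional flows program the hardware only once. A compact model of that gating, with plain C standing in for the kernel's flag helpers:

#include <stdio.h>

enum { DIR_ORIGINAL, DIR_REPLY };
#define FLOW_HW_BIDIRECTIONAL (1u << 0)

struct flow { unsigned int flags; };

static int tuple_add(const struct flow *f, int dir)
{
	printf("program dir %d\n", dir);
	return 1;                       /* one rule installed */
}

static int rule_add(const struct flow *f)
{
	int ok_count = 0;

	ok_count += tuple_add(f, DIR_ORIGINAL);
	if (f->flags & FLOW_HW_BIDIRECTIONAL)
		ok_count += tuple_add(f, DIR_REPLY);
	return ok_count ? 0 : -1;       /* -ENOENT in the kernel */
}

int main(void)
{
	struct flow uni = { 0 };
	struct flow bi = { FLOW_HW_BIDIRECTIONAL };

	rule_add(&uni);                 /* original direction only */
	rule_add(&bi);                  /* both directions */
	return 0;
}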
@@ -946,7 +948,9 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
u64 lastused;
flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
- flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);
+ if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
+ flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY,
+ &stats[1]);
lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
offload->flow->timeout = max_t(u64, offload->flow->timeout,
diff --git a/net/netfilter/nf_log_syslog.c b/net/netfilter/nf_log_syslog.c
index cb894f0d63e9..c66689ad2b49 100644
--- a/net/netfilter/nf_log_syslog.c
+++ b/net/netfilter/nf_log_syslog.c
@@ -322,7 +322,7 @@ dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
/* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
nf_log_buf_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
- ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
+ iph_totlen(skb, ih), ih->tos & IPTOS_TOS_MASK,
ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
/* Max length: 6 "CE DF MF " */
diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c
index 0fa5a0bbb0ff..141ee7783223 100644
--- a/net/netfilter/nf_nat_bpf.c
+++ b/net/netfilter/nf_nat_bpf.c
@@ -30,9 +30,9 @@ __diag_ignore_all("-Wmissing-prototypes",
* interpreted as select a random port.
* @manip - NF_NAT_MANIP_SRC or NF_NAT_MANIP_DST
*/
-int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
- union nf_inet_addr *addr, int port,
- enum nf_nat_manip_type manip)
+__bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
+ union nf_inet_addr *addr, int port,
+ enum nf_nat_manip_type manip)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
u16 proto = nf_ct_l3num(ct);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 8c09e4d12ac1..d73edbd4eec4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1401,6 +1401,10 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
}
if (IS_ERR(table)) {
+ if (PTR_ERR(table) == -ENOENT &&
+ NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYTABLE)
+ return 0;
+
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(table);
}
@@ -2639,6 +2643,10 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
chain = nft_chain_lookup(net, table, attr, genmask);
}
if (IS_ERR(chain)) {
+ if (PTR_ERR(chain) == -ENOENT &&
+ NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYCHAIN)
+ return 0;
+
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(chain);
}
@@ -3716,6 +3724,10 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
genmask);
if (IS_ERR(chain)) {
+ if (PTR_ERR(chain) == -ENOENT &&
+ NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYRULE)
+ return 0;
+
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
@@ -3729,6 +3741,10 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
if (nla[NFTA_RULE_HANDLE]) {
rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
if (IS_ERR(rule)) {
+ if (PTR_ERR(rule) == -ENOENT &&
+ NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYRULE)
+ return 0;
+
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return PTR_ERR(rule);
}
@@ -4808,6 +4824,10 @@ static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info,
}
if (IS_ERR(set)) {
+ if (PTR_ERR(set) == -ENOENT &&
+ NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYSET)
+ return 0;
+
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(set);
}
@@ -6690,6 +6710,10 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_del_setelem(&ctx, set, attr);
+ if (err == -ENOENT &&
+ NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYSETELEM)
+ continue;
+
if (err < 0) {
NL_SET_BAD_ATTR(extack, attr);
break;
@@ -6999,6 +7023,9 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
return -EOPNOTSUPP;
type = __nft_obj_type_get(objtype);
+ if (WARN_ON_ONCE(!type))
+ return -ENOENT;
+
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
@@ -7334,6 +7361,10 @@ static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
}
if (IS_ERR(obj)) {
+ if (PTR_ERR(obj) == -ENOENT &&
+ NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYOBJ)
+ return 0;
+
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(obj);
}
@@ -7964,6 +7995,10 @@ static int nf_tables_delflowtable(struct sk_buff *skb,
}
if (IS_ERR(flowtable)) {
+ if (PTR_ERR(flowtable) == -ENOENT &&
+ NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYFLOWTABLE)
+ return 0;
+
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(flowtable);
}
@@ -8373,6 +8408,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_TABLE_MAX,
.policy = nft_table_policy,
},
+ [NFT_MSG_DESTROYTABLE] = {
+ .call = nf_tables_deltable,
+ .type = NFNL_CB_BATCH,
+ .attr_count = NFTA_TABLE_MAX,
+ .policy = nft_table_policy,
+ },
[NFT_MSG_NEWCHAIN] = {
.call = nf_tables_newchain,
.type = NFNL_CB_BATCH,
@@ -8391,6 +8432,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_CHAIN_MAX,
.policy = nft_chain_policy,
},
+ [NFT_MSG_DESTROYCHAIN] = {
+ .call = nf_tables_delchain,
+ .type = NFNL_CB_BATCH,
+ .attr_count = NFTA_CHAIN_MAX,
+ .policy = nft_chain_policy,
+ },
[NFT_MSG_NEWRULE] = {
.call = nf_tables_newrule,
.type = NFNL_CB_BATCH,
@@ -8415,6 +8462,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_RULE_MAX,
.policy = nft_rule_policy,
},
+ [NFT_MSG_DESTROYRULE] = {
+ .call = nf_tables_delrule,
+ .type = NFNL_CB_BATCH,
+ .attr_count = NFTA_RULE_MAX,
+ .policy = nft_rule_policy,
+ },
[NFT_MSG_NEWSET] = {
.call = nf_tables_newset,
.type = NFNL_CB_BATCH,
@@ -8433,6 +8486,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_SET_MAX,
.policy = nft_set_policy,
},
+ [NFT_MSG_DESTROYSET] = {
+ .call = nf_tables_delset,
+ .type = NFNL_CB_BATCH,
+ .attr_count = NFTA_SET_MAX,
+ .policy = nft_set_policy,
+ },
[NFT_MSG_NEWSETELEM] = {
.call = nf_tables_newsetelem,
.type = NFNL_CB_BATCH,
@@ -8451,6 +8510,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_SET_ELEM_LIST_MAX,
.policy = nft_set_elem_list_policy,
},
+ [NFT_MSG_DESTROYSETELEM] = {
+ .call = nf_tables_delsetelem,
+ .type = NFNL_CB_BATCH,
+ .attr_count = NFTA_SET_ELEM_LIST_MAX,
+ .policy = nft_set_elem_list_policy,
+ },
[NFT_MSG_GETGEN] = {
.call = nf_tables_getgen,
.type = NFNL_CB_RCU,
@@ -8473,6 +8538,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
},
+ [NFT_MSG_DESTROYOBJ] = {
+ .call = nf_tables_delobj,
+ .type = NFNL_CB_BATCH,
+ .attr_count = NFTA_OBJ_MAX,
+ .policy = nft_obj_policy,
+ },
[NFT_MSG_GETOBJ_RESET] = {
.call = nf_tables_getobj,
.type = NFNL_CB_RCU,
@@ -8497,6 +8568,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_FLOWTABLE_MAX,
.policy = nft_flowtable_policy,
},
+ [NFT_MSG_DESTROYFLOWTABLE] = {
+ .call = nf_tables_delflowtable,
+ .type = NFNL_CB_BATCH,
+ .attr_count = NFTA_FLOWTABLE_MAX,
+ .policy = nft_flowtable_policy,
+ },
};
static int nf_tables_validate(struct net *net)
@@ -8590,6 +8667,7 @@ static void nft_commit_release(struct nft_trans *trans)
{
switch (trans->msg_type) {
case NFT_MSG_DELTABLE:
+ case NFT_MSG_DESTROYTABLE:
nf_tables_table_destroy(&trans->ctx);
break;
case NFT_MSG_NEWCHAIN:
@@ -8597,23 +8675,29 @@ static void nft_commit_release(struct nft_trans *trans)
kfree(nft_trans_chain_name(trans));
break;
case NFT_MSG_DELCHAIN:
+ case NFT_MSG_DESTROYCHAIN:
nf_tables_chain_destroy(&trans->ctx);
break;
case NFT_MSG_DELRULE:
+ case NFT_MSG_DESTROYRULE:
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_DELSET:
+ case NFT_MSG_DESTROYSET:
nft_set_destroy(&trans->ctx, nft_trans_set(trans));
break;
case NFT_MSG_DELSETELEM:
+ case NFT_MSG_DESTROYSETELEM:
nf_tables_set_elem_destroy(&trans->ctx,
nft_trans_elem_set(trans),
nft_trans_elem(trans).priv);
break;
case NFT_MSG_DELOBJ:
+ case NFT_MSG_DESTROYOBJ:
nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
break;
case NFT_MSG_DELFLOWTABLE:
+ case NFT_MSG_DESTROYFLOWTABLE:
if (nft_trans_flowtable_update(trans))
nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
else
@@ -9065,8 +9149,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_trans_destroy(trans);
break;
case NFT_MSG_DELTABLE:
+ case NFT_MSG_DESTROYTABLE:
list_del_rcu(&trans->ctx.table->list);
- nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
+ nf_tables_table_notify(&trans->ctx, trans->msg_type);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
@@ -9081,8 +9166,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
}
break;
case NFT_MSG_DELCHAIN:
+ case NFT_MSG_DESTROYCHAIN:
nft_chain_del(trans->ctx.chain);
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
+ nf_tables_chain_notify(&trans->ctx, trans->msg_type);
nf_tables_unregister_hook(trans->ctx.net,
trans->ctx.table,
trans->ctx.chain);
@@ -9098,10 +9184,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_trans_destroy(trans);
break;
case NFT_MSG_DELRULE:
+ case NFT_MSG_DESTROYRULE:
list_del_rcu(&nft_trans_rule(trans)->list);
nf_tables_rule_notify(&trans->ctx,
nft_trans_rule(trans),
- NFT_MSG_DELRULE);
+ trans->msg_type);
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_COMMIT);
@@ -9129,9 +9216,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSET:
+ case NFT_MSG_DESTROYSET:
list_del_rcu(&nft_trans_set(trans)->list);
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
- NFT_MSG_DELSET, GFP_KERNEL);
+ trans->msg_type, GFP_KERNEL);
break;
case NFT_MSG_NEWSETELEM:
te = (struct nft_trans_elem *)trans->data;
@@ -9143,11 +9231,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSETELEM:
+ case NFT_MSG_DESTROYSETELEM:
te = (struct nft_trans_elem *)trans->data;
nf_tables_setelem_notify(&trans->ctx, te->set,
&te->elem,
- NFT_MSG_DELSETELEM);
+ trans->msg_type);
nft_setelem_remove(net, te->set, &te->elem);
if (!nft_setelem_is_catchall(te->set, &te->elem)) {
atomic_dec(&te->set->nelems);
@@ -9169,9 +9258,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
}
break;
case NFT_MSG_DELOBJ:
+ case NFT_MSG_DESTROYOBJ:
nft_obj_del(nft_trans_obj(trans));
nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
- NFT_MSG_DELOBJ);
+ trans->msg_type);
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
@@ -9193,11 +9283,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_trans_destroy(trans);
break;
case NFT_MSG_DELFLOWTABLE:
+ case NFT_MSG_DESTROYFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
- NFT_MSG_DELFLOWTABLE);
+ trans->msg_type);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable_hooks(trans));
} else {
@@ -9205,7 +9296,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable(trans)->hook_list,
- NFT_MSG_DELFLOWTABLE);
+ trans->msg_type);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable(trans)->hook_list);
}
@@ -9301,6 +9392,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
}
break;
case NFT_MSG_DELTABLE:
+ case NFT_MSG_DESTROYTABLE:
nft_clear(trans->ctx.net, trans->ctx.table);
nft_trans_destroy(trans);
break;
@@ -9322,6 +9414,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
}
break;
case NFT_MSG_DELCHAIN:
+ case NFT_MSG_DESTROYCHAIN:
trans->ctx.table->use++;
nft_clear(trans->ctx.net, trans->ctx.chain);
nft_trans_destroy(trans);
@@ -9336,6 +9429,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
+ case NFT_MSG_DESTROYRULE:
trans->ctx.chain->use++;
nft_clear(trans->ctx.net, nft_trans_rule(trans));
nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
@@ -9357,6 +9451,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
+ case NFT_MSG_DESTROYSET:
trans->ctx.table->use++;
nft_clear(trans->ctx.net, nft_trans_set(trans));
nft_trans_destroy(trans);
@@ -9372,6 +9467,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
atomic_dec(&te->set->nelems);
break;
case NFT_MSG_DELSETELEM:
+ case NFT_MSG_DESTROYSETELEM:
te = (struct nft_trans_elem *)trans->data;
nft_setelem_data_activate(net, te->set, &te->elem);
@@ -9391,6 +9487,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
}
break;
case NFT_MSG_DELOBJ:
+ case NFT_MSG_DESTROYOBJ:
trans->ctx.table->use++;
nft_clear(trans->ctx.net, nft_trans_obj(trans));
nft_trans_destroy(trans);
@@ -9407,6 +9504,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
}
break;
case NFT_MSG_DELFLOWTABLE:
+ case NFT_MSG_DESTROYFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
list_splice(&nft_trans_flowtable_hooks(trans),
&nft_trans_flowtable(trans)->hook_list);
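The new NFT_MSG_DESTROY* commands registered above reuse the delete handlers verbatim; the only behavioral difference, repeated in each del path, is that -ENOENT on lookup is mapped to success instead of being propagated. A small errno-style model of that policy split:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int lookup(const char *name)
{
	return name[0] == 'x' ? 0 : -ENOENT;    /* pretend only "x*" exists */
}

static int del_common(const char *name, bool destroy)
{
	int err = lookup(name);

	if (err == -ENOENT && destroy)
		return 0;               /* destroy: absence is not an error */
	if (err)
		return err;             /* delete: propagate -ENOENT */
	printf("removed %s\n", name);
	return 0;
}

int main(void)
{
	printf("delete missing:  %d\n", del_common("nope", false));     /* -2 */
	printf("destroy missing: %d\n", del_common("nope", true));      /*  0 */
	del_common("xtable", true);
	return 0;
}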
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 709a736c301c..6ecd0ba2e546 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -21,6 +21,26 @@
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nft_meta.h>
+#if defined(CONFIG_RETPOLINE) && defined(CONFIG_X86)
+
+static struct static_key_false nf_tables_skip_direct_calls;
+
+static bool nf_skip_indirect_calls(void)
+{
+ return static_branch_likely(&nf_tables_skip_direct_calls);
+}
+
+static void __init nf_skip_indirect_calls_enable(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_RETPOLINE))
+ static_branch_enable(&nf_tables_skip_direct_calls);
+}
+#else
+static inline bool nf_skip_indirect_calls(void) { return false; }
+
+static inline void nf_skip_indirect_calls_enable(void) { }
+#endif
+
static noinline void __nft_trace_packet(struct nft_traceinfo *info,
const struct nft_chain *chain,
enum nft_trace_types type)
@@ -193,7 +213,12 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
struct nft_pktinfo *pkt)
{
#ifdef CONFIG_RETPOLINE
- unsigned long e = (unsigned long)expr->ops->eval;
+ unsigned long e;
+
+ if (nf_skip_indirect_calls())
+ goto indirect_call;
+
+ e = (unsigned long)expr->ops->eval;
#define X(e, fun) \
do { if ((e) == (unsigned long)(fun)) \
return fun(expr, regs, pkt); } while (0)
@@ -203,13 +228,19 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
X(e, nft_counter_eval);
X(e, nft_meta_get_eval);
X(e, nft_lookup_eval);
+#if IS_ENABLED(CONFIG_NFT_CT)
+ X(e, nft_ct_get_fast_eval);
+#endif
X(e, nft_range_eval);
X(e, nft_immediate_eval);
X(e, nft_byteorder_eval);
X(e, nft_dynset_eval);
X(e, nft_rt_get_eval);
X(e, nft_bitwise_eval);
+ X(e, nft_objref_eval);
+ X(e, nft_objref_map_eval);
#undef X
+indirect_call:
#endif /* CONFIG_RETPOLINE */
expr->ops->eval(expr, regs, pkt);
}
@@ -369,6 +400,8 @@ int __init nf_tables_core_module_init(void)
goto err;
}
+ nf_skip_indirect_calls_enable();
+
return 0;
err:
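expr_call_ops_eval() grows a static key so the direct-call table is skipped entirely on CPUs that do not need retpolines, and the X() list gains the new ct-fast and objref evaluators. A userspace model of the compare-and-call-directly dispatch pattern; the handlers and key are stand-ins:

#include <stdio.h>

typedef void (*eval_fn)(int);

static void eval_a(int v) { printf("a %d\n", v); }
static void eval_b(int v) { printf("b %d\n", v); }

static int skip_direct_calls;           /* models the static key */

static void call_eval(eval_fn fn, int v)
{
	if (skip_direct_calls)
		goto indirect_call;

/* compare the stored pointer against known handlers; on a match,
 * make a direct (predictable, retpoline-free) call
 */
#define X(fn, known) do { if ((fn) == (known)) { known(v); return; } } while (0)
	X(fn, eval_a);
	X(fn, eval_b);
#undef X

indirect_call:
	fn(v);                          /* fallback: plain indirect call */
}

int main(void)
{
	call_eval(eval_a, 1);           /* hits the direct path */
	skip_direct_calls = 1;          /* e.g. CPU needs no retpolines */
	call_eval(eval_a, 2);           /* plain indirect call */
	return 0;
}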
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index c68e2151defe..b9c84499438b 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -12,7 +12,7 @@
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_tuple.h>
@@ -23,16 +23,6 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
-struct nft_ct {
- enum nft_ct_keys key:8;
- enum ip_conntrack_dir dir:8;
- u8 len;
- union {
- u8 dreg;
- u8 sreg;
- };
-};
-
struct nft_ct_helper_obj {
struct nf_conntrack_helper *helper4;
struct nf_conntrack_helper *helper6;
@@ -759,6 +749,18 @@ static bool nft_ct_set_reduce(struct nft_regs_track *track,
return false;
}
+#ifdef CONFIG_RETPOLINE
+static const struct nft_expr_ops nft_ct_get_fast_ops = {
+ .type = &nft_ct_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
+ .eval = nft_ct_get_fast_eval,
+ .init = nft_ct_get_init,
+ .destroy = nft_ct_get_destroy,
+ .dump = nft_ct_get_dump,
+ .reduce = nft_ct_set_reduce,
+};
+#endif
+
static const struct nft_expr_ops nft_ct_set_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
@@ -791,8 +793,21 @@ nft_ct_select_ops(const struct nft_ctx *ctx,
if (tb[NFTA_CT_DREG] && tb[NFTA_CT_SREG])
return ERR_PTR(-EINVAL);
- if (tb[NFTA_CT_DREG])
+ if (tb[NFTA_CT_DREG]) {
+#ifdef CONFIG_RETPOLINE
+ u32 k = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
+
+ switch (k) {
+ case NFT_CT_STATE:
+ case NFT_CT_DIRECTION:
+ case NFT_CT_STATUS:
+ case NFT_CT_MARK:
+ case NFT_CT_SECMARK:
+ return &nft_ct_get_fast_ops;
+ }
+#endif
return &nft_ct_get_ops;
+ }
if (tb[NFTA_CT_SREG]) {
#ifdef CONFIG_NF_CONNTRACK_ZONES
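
nft_ct_select_ops() makes the fast-path decision once, at rule insertion, rather than per packet: only conntrack keys whose evaluation is cheap and side-effect-free are given the eval hook that expr_call_ops_eval() can direct-call. A hedged userspace sketch of selecting an ops vector at init time (names and key numbering hypothetical):

#include <stdio.h>

struct ops { void (*eval)(int); };

static void eval_fast(int k)    { printf("fast %d\n", k); }
static void eval_generic(int k) { printf("generic %d\n", k); }

static const struct ops fast_ops    = { .eval = eval_fast };
static const struct ops generic_ops = { .eval = eval_generic };

/* Decide once, at insertion time, which ops vector the per-packet path
 * will use, based on the key the rule asked for. */
static const struct ops *select_ops(int key)
{
        switch (key) {
        case 1: case 2: case 3:         /* cheap, side-effect-free keys */
                return &fast_ops;
        default:
                return &generic_ops;
        }
}

int main(void)
{
        select_ops(2)->eval(2);
        select_ops(9)->eval(9);
        return 0;
}
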
diff --git a/net/netfilter/nft_ct_fast.c b/net/netfilter/nft_ct_fast.c
new file mode 100644
index 000000000000..89983b0613fa
--- /dev/null
+++ b/net/netfilter/nft_ct_fast.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#if IS_ENABLED(CONFIG_NFT_CT)
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_conntrack.h>
+
+void nft_ct_get_fast_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_ct *priv = nft_expr_priv(expr);
+ u32 *dest = &regs->data[priv->dreg];
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn *ct;
+ unsigned int state;
+
+ ct = nf_ct_get(pkt->skb, &ctinfo);
+ if (!ct) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
+ switch (priv->key) {
+ case NFT_CT_STATE:
+ if (ct)
+ state = NF_CT_STATE_BIT(ctinfo);
+ else if (ctinfo == IP_CT_UNTRACKED)
+ state = NF_CT_STATE_UNTRACKED_BIT;
+ else
+ state = NF_CT_STATE_INVALID_BIT;
+ *dest = state;
+ return;
+ case NFT_CT_DIRECTION:
+ nft_reg_store8(dest, CTINFO2DIR(ctinfo));
+ return;
+ case NFT_CT_STATUS:
+ *dest = ct->status;
+ return;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ case NFT_CT_MARK:
+ *dest = ct->mark;
+ return;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ case NFT_CT_SECMARK:
+ *dest = ct->secmark;
+ return;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ regs->verdict.code = NFT_BREAK;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(nft_ct_get_fast_eval);
+#endif
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 7b01aa2ef653..cb37169608ba 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -13,9 +13,9 @@
#define nft_objref_priv(expr) *((struct nft_object **)nft_expr_priv(expr))
-static void nft_objref_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_objref_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
struct nft_object *obj = nft_objref_priv(expr);
@@ -100,9 +100,9 @@ struct nft_objref_map {
struct nft_set_binding binding;
};
-static void nft_objref_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_objref_map_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
struct nft_objref_map *priv = nft_expr_priv(expr);
const struct nft_set *set = priv->set;
diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
index 1873da3a945a..b3d623a52885 100644
--- a/net/netfilter/xt_length.c
+++ b/net/netfilter/xt_length.c
@@ -21,7 +21,7 @@ static bool
length_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_length_info *info = par->matchinfo;
- u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len);
+ u32 pktlen = skb_ip_totlen(skb);
return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
}
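
skb_ip_totlen() is needed because IPv4 BIG TCP frames exceed what the 16-bit tot_len field can express; such frames carry tot_len == 0 and the true length comes from skb state, which is also why pktlen widens to u32. A rough userspace model of the fallback (semantics assumed from the BIG TCP series):

#include <arpa/inet.h>

/* If the 16-bit IPv4 total-length field is zero (BIG TCP), fall back
 * to the out-of-band length carried alongside the packet. */
static unsigned int ip_totlen(unsigned short tot_len_be, unsigned int skb_len)
{
        unsigned int len = ntohs(tot_len_be);

        return len ? len : skb_len;
}
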
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 600993c80050..04c4036bf406 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -13,7 +13,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
-#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
@@ -457,7 +457,7 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
if (WARN_ON(grp->name[0] == '\0'))
return -EINVAL;
- if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
+ if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
return -EINVAL;
}
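
string_is_terminated() gives the open-coded memchr() test a name that reads as a predicate at the call site; its behaviour should match this one-line model:

#include <stdbool.h>
#include <string.h>

/* True if buf contains a NUL terminator within its first len bytes. */
static bool string_is_terminated(const char *buf, int len)
{
        return memchr(buf, '\0', len) ? true : false;
}
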
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 747d537a3f06..29a7081858cd 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -15,6 +15,7 @@ config OPENVSWITCH
select NET_MPLS_GSO
select DST_CACHE
select NET_NSH
+ select NF_CONNTRACK_OVS if NF_CONNTRACK
select NF_NAT_OVS if NF_NAT
help
Open vSwitch is a multilayer Ethernet switch targeted at virtualized
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c8b137649ca4..331730fd3580 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -9,6 +9,7 @@
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
+#include <linux/string_helpers.h>
#include <net/ip.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -434,52 +435,21 @@ static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
return 0;
}
-/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
- * value if 'skb' is freed.
- */
-static int handle_fragments(struct net *net, struct sw_flow_key *key,
- u16 zone, struct sk_buff *skb)
+static int ovs_ct_handle_fragments(struct net *net, struct sw_flow_key *key,
+ u16 zone, int family, struct sk_buff *skb)
{
struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
int err;
- if (key->eth.type == htons(ETH_P_IP)) {
- enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
-
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
- err = ip_defrag(net, skb, user);
- if (err)
- return err;
-
- ovs_cb.mru = IPCB(skb)->frag_max_size;
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
- } else if (key->eth.type == htons(ETH_P_IPV6)) {
- enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
-
- memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
- err = nf_ct_frag6_gather(net, skb, user);
- if (err) {
- if (err != -EINPROGRESS)
- kfree_skb(skb);
- return err;
- }
-
- key->ip.proto = ipv6_hdr(skb)->nexthdr;
- ovs_cb.mru = IP6CB(skb)->frag_max_size;
-#endif
- } else {
- kfree_skb(skb);
- return -EPFNOSUPPORT;
- }
+ err = nf_ct_handle_fragments(net, skb, zone, family, &key->ip.proto, &ovs_cb.mru);
+ if (err)
+ return err;
/* The key extracted from the fragment that completed this datagram
* likely didn't have an L4 header, so regenerate it.
*/
ovs_flow_key_update_l3l4(skb, key);
-
key->ip.frag = OVS_FRAG_TYPE_NONE;
- skb_clear_hash(skb);
- skb->ignore_df = 1;
*OVS_CB(skb) = ovs_cb;
return 0;
@@ -1090,36 +1060,6 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
return 0;
}
-/* Trim the skb to the length specified by the IP/IPv6 header,
- * removing any trailing lower-layer padding. This prepares the skb
- * for higher-layer processing that assumes skb->len excludes padding
- * (such as nf_ip_checksum). The caller needs to pull the skb to the
- * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
- */
-static int ovs_skb_network_trim(struct sk_buff *skb)
-{
- unsigned int len;
- int err;
-
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- len = ntohs(ip_hdr(skb)->tot_len);
- break;
- case htons(ETH_P_IPV6):
- len = sizeof(struct ipv6hdr)
- + ntohs(ipv6_hdr(skb)->payload_len);
- break;
- default:
- len = skb->len;
- }
-
- err = pskb_trim_rcsum(skb, len);
- if (err)
- kfree_skb(skb);
-
- return err;
-}
-
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
* value if 'skb' is freed.
*/
@@ -1134,12 +1074,15 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
nh_ofs = skb_network_offset(skb);
skb_pull_rcsum(skb, nh_ofs);
- err = ovs_skb_network_trim(skb);
- if (err)
+ err = nf_ct_skb_network_trim(skb, info->family);
+ if (err) {
+ kfree_skb(skb);
return err;
+ }
if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
- err = handle_fragments(net, key, info->zone.id, skb);
+ err = ovs_ct_handle_fragments(net, key, info->zone.id,
+ info->family, skb);
if (err)
return err;
}
@@ -1383,7 +1326,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
#endif
case OVS_CT_ATTR_HELPER:
*helper = nla_data(a);
- if (!memchr(*helper, '\0', nla_len(a))) {
+ if (!string_is_terminated(*helper, nla_len(a))) {
OVS_NLERR(log, "Invalid conntrack helper");
return -EINVAL;
}
@@ -1404,7 +1347,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
case OVS_CT_ATTR_TIMEOUT:
memcpy(info->timeout, nla_data(a), nla_len(a));
- if (!memchr(info->timeout, '\0', nla_len(a))) {
+ if (!string_is_terminated(info->timeout, nla_len(a))) {
OVS_NLERR(log, "Invalid conntrack timeout");
return -EINVAL;
}
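
The removed ovs_skb_network_trim() shows the technique that now lives in the shared NF_CONNTRACK_OVS code as nf_ct_skb_network_trim(): trim the skb to the length its L3 header declares so that trailing link-layer padding cannot confuse later checksumming; note that freeing the skb on error moves to the caller. A userspace model of the length computation, following the removed code (constants defined locally for the sketch):

#include <arpa/inet.h>

#define ET_IPV4         0x0800
#define ET_IPV6         0x86DD
#define IPV6_HDRLEN     40

/* Length the packet should have per its L3 header; bytes beyond this
 * are lower-layer padding and get trimmed away. */
static unsigned int l3_declared_len(unsigned int ethertype,
                                    unsigned short len_field_be,
                                    unsigned int skb_len)
{
        switch (ethertype) {
        case ET_IPV4:
                return ntohs(len_field_be);               /* tot_len */
        case ET_IPV6:
                return IPV6_HDRLEN + ntohs(len_field_be); /* payload_len */
        default:
                return skb_len;                           /* nothing to trim */
        }
}
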
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index e20d1a973417..33b21a0c0548 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -107,7 +107,8 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
rcu_assign_pointer(flow->stats[cpu],
new_stats);
- cpumask_set_cpu(cpu, &flow->cpu_used_mask);
+ cpumask_set_cpu(cpu,
+ flow->cpu_used_mask);
goto unlock;
}
}
@@ -135,7 +136,8 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
memset(ovs_stats, 0, sizeof(*ovs_stats));
/* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+ for (cpu = 0; cpu < nr_cpu_ids;
+ cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
@@ -159,7 +161,8 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
int cpu;
/* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+ for (cpu = 0; cpu < nr_cpu_ids;
+ cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
@@ -1038,7 +1041,8 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
if (tc_skb_ext_tc_enabled()) {
tc_ext = skb_ext_find(skb, TC_SKB_EXT);
- key->recirc_id = tc_ext ? tc_ext->chain : 0;
+ key->recirc_id = tc_ext && !tc_ext->act_miss ?
+ tc_ext->chain : 0;
OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
post_ct = tc_ext ? tc_ext->post_ct : false;
post_ct_snat = post_ct ? tc_ext->post_ct_snat : false;
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 073ab73ffeaa..b5711aff6e76 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -229,7 +229,7 @@ struct sw_flow {
*/
struct sw_flow_key key;
struct sw_flow_id id;
- struct cpumask cpu_used_mask;
+ struct cpumask *cpu_used_mask;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 0a0e4c283f02..791504b7f42b 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -79,6 +79,7 @@ struct sw_flow *ovs_flow_alloc(void)
return ERR_PTR(-ENOMEM);
flow->stats_last_writer = -1;
+ flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];
/* Initialize the default stat node. */
stats = kmem_cache_alloc_node(flow_stats_cache,
@@ -91,7 +92,7 @@ struct sw_flow *ovs_flow_alloc(void)
RCU_INIT_POINTER(flow->stats[0], stats);
- cpumask_set_cpu(0, &flow->cpu_used_mask);
+ cpumask_set_cpu(0, flow->cpu_used_mask);
return flow;
err:
@@ -115,7 +116,7 @@ static void flow_free(struct sw_flow *flow)
flow->sf_acts);
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids;
- cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+ cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
(struct sw_flow_stats __force *)flow->stats[cpu]);
@@ -1196,7 +1197,8 @@ int ovs_flow_init(void)
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+ (nr_cpu_ids
- * sizeof(struct sw_flow_stats *)),
+ * sizeof(struct sw_flow_stats *))
+ + cpumask_size(),
0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;
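
The sw_flow change turns the embedded struct cpumask into a pointer aimed just past the stats[] flexible array, with the kmem_cache grown by cpumask_size() to make room. One allocation still covers everything, but the mask now occupies only cpumask_size() bytes, which scales with nr_cpu_ids, instead of a full NR_CPUS-sized struct cpumask. A small userspace sketch of the tail co-allocation pattern:

#include <stdlib.h>

struct flow {
        int             ncpus;
        unsigned long   *used_mask;     /* points into this same allocation */
        void            *stats[];       /* one slot per possible CPU */
};

static struct flow *flow_alloc(int ncpus, size_t mask_bytes)
{
        /* struct + per-CPU pointer array + bitmap, in one allocation */
        struct flow *f = calloc(1, sizeof(*f) +
                                ncpus * sizeof(void *) + mask_bytes);

        if (!f)
                return NULL;
        f->ncpus = ncpus;
        f->used_mask = (unsigned long *)&f->stats[ncpus];
        return f;
}
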
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b5ab98ca2511..d4e76e2ae153 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1335,8 +1335,6 @@ static void packet_sock_destruct(struct sock *sk)
pr_err("Attempt to release alive packet socket: %p\n", sk);
return;
}
-
- sk_refcnt_debug_dec(sk);
}
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
@@ -2296,6 +2294,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
else if (skb->pkt_type != PACKET_OUTGOING &&
skb_csum_unnecessary(skb))
status |= TP_STATUS_CSUM_VALID;
+ if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
+ status |= TP_STATUS_GSO_TCP;
if (snaplen > res)
snaplen = res;
@@ -3172,7 +3172,6 @@ static int packet_release(struct socket *sock)
skb_queue_purge(&sk->sk_receive_queue);
packet_free_pending(po);
- sk_refcnt_debug_release(sk);
sock_put(sk);
return 0;
@@ -3362,7 +3361,6 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
packet_cached_dev_reset(po);
sk->sk_destruct = packet_sock_destruct;
- sk_refcnt_debug_inc(sk);
/*
* Attach a protocol block
@@ -3522,6 +3520,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
else if (skb->pkt_type != PACKET_OUTGOING &&
skb_csum_unnecessary(skb))
aux.tp_status |= TP_STATUS_CSUM_VALID;
+ if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
+ aux.tp_status |= TP_STATUS_GSO_TCP;
aux.tp_len = origlen;
aux.tp_snaplen = skb->len;
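
With TP_STATUS_GSO_TCP, AF_PACKET receivers can tell that a frame is an unsegmented GSO TCP super-packet rather than a malformed over-MTU frame. A hedged usage sketch, assuming the auxdata was obtained the usual way via a PACKET_AUXDATA control message:

#include <linux/if_packet.h>
#include <stdio.h>

/* Inspect tp_status from a struct tpacket_auxdata delivered as a cmsg;
 * TP_STATUS_GSO_TCP marks a frame that may legitimately exceed the MTU. */
static void show_status(const struct tpacket_auxdata *aux)
{
        if (aux->tp_status & TP_STATUS_CSUM_VALID)
                puts("checksum validated");
#ifdef TP_STATUS_GSO_TCP
        if (aux->tp_status & TP_STATUS_GSO_TCP)
                puts("GSO TCP frame");
#endif
}
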
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 1f5df0432d37..7f68d8662cfb 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -19,6 +19,8 @@
#include <net/tcp_states.h>
#include <net/phonet/gprs.h>
+#include <trace/events/sock.h>
+
#define GPRS_DEFAULT_MTU 1400
struct gprs_dev {
@@ -138,6 +140,8 @@ static void gprs_data_ready(struct sock *sk)
struct gprs_dev *gp = sk->sk_user_data;
struct sk_buff *skb;
+ trace_sk_data_ready(sk);
+
while ((skb = pep_read(sk)) != NULL) {
skb_orphan(skb);
gprs_recv(gp, skb);
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index e595079c2caf..722936f7dd98 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -12,6 +12,7 @@
#include "qrtr.h"
+#include <trace/events/sock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/qrtr.h>
@@ -755,6 +756,8 @@ static void qrtr_ns_worker(struct work_struct *work)
static void qrtr_ns_data_ready(struct sock *sk)
{
+ trace_sk_data_ready(sk);
+
queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
}
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cfbf0e129cba..e53b7f266bd7 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -31,6 +31,7 @@
*
*/
#include <linux/kernel.h>
+#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/net/rds/message.c b/net/rds/message.c
index c19c93561227..7af59d2443e5 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -118,7 +118,7 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
ck = &info->zcookies;
memset(ck, 0, sizeof(*ck));
WARN_ON(!rds_zcookie_add(info, cookie));
- list_add_tail(&q->zcookie_head, &info->rs_zcookie_next);
+ list_add_tail(&info->rs_zcookie_next, &q->zcookie_head);
spin_unlock_irqrestore(&q->lock, flags);
/* caller invokes rds_wake_sk_sleep() */
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 5b426dc3634d..c71b923764fd 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -35,6 +35,7 @@
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
+#include <linux/sched/clock.h>
#include <linux/time.h>
#include <linux/rds.h>
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 7edf2e69d3fe..014fa24418c1 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -34,6 +34,7 @@
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>
+#include <trace/events/sock.h>
#include "rds.h"
#include "tcp.h"
@@ -234,6 +235,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)
{
void (*ready)(struct sock *sk);
+ trace_sk_data_ready(sk);
rdsdebug("listen data ready sk %p\n", sk);
read_lock_bh(&sk->sk_callback_lock);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index f4ee13da90c7..c00f04a1a534 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>
+#include <trace/events/sock.h>
#include "rds.h"
#include "tcp.h"
@@ -309,6 +310,7 @@ void rds_tcp_data_ready(struct sock *sk)
struct rds_conn_path *cp;
struct rds_tcp_connection *tc;
+ trace_sk_data_ready(sk);
rdsdebug("data ready sk %p\n", sk);
read_lock_bh(&sk->sk_callback_lock);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index b390ff245d5e..01fca7a10b4b 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -685,7 +685,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%s\n", rfkill->name);
+ return sysfs_emit(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);
@@ -694,7 +694,7 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
+ return sysfs_emit(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);
@@ -703,7 +703,7 @@ static ssize_t index_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", rfkill->idx);
+ return sysfs_emit(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);
@@ -712,7 +712,7 @@ static ssize_t persistent_show(struct device *dev,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", rfkill->persistent);
+ return sysfs_emit(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);
@@ -721,7 +721,7 @@ static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
+ return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);
@@ -730,7 +730,7 @@ static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
+ return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}
static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
@@ -764,7 +764,7 @@ static ssize_t hard_block_reasons_show(struct device *dev,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "0x%lx\n", rfkill->hard_block_reasons);
+ return sysfs_emit(buf, "0x%lx\n", rfkill->hard_block_reasons);
}
static DEVICE_ATTR_RO(hard_block_reasons);
@@ -783,7 +783,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
+ return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index f5afc9bcdee6..786dbfdad772 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -75,6 +75,8 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill;
struct gpio_desc *gpio;
+ const char *name_property;
+ const char *type_property;
const char *type_name;
int ret;
@@ -82,8 +84,15 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
if (!rfkill)
return -ENOMEM;
- device_property_read_string(&pdev->dev, "name", &rfkill->name);
- device_property_read_string(&pdev->dev, "type", &type_name);
+ if (dev_of_node(&pdev->dev)) {
+ name_property = "label";
+ type_property = "radio-type";
+ } else {
+ name_property = "name";
+ type_property = "type";
+ }
+ device_property_read_string(&pdev->dev, name_property, &rfkill->name);
+ device_property_read_string(&pdev->dev, type_property, &type_name);
if (!rfkill->name)
rfkill->name = dev_name(&pdev->dev);
@@ -157,12 +166,19 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match);
#endif
+static const struct of_device_id rfkill_of_match[] __maybe_unused = {
+ { .compatible = "rfkill-gpio", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rfkill_of_match);
+
static struct platform_driver rfkill_gpio_driver = {
.probe = rfkill_gpio_probe,
.remove = rfkill_gpio_remove,
.driver = {
.name = "rfkill_gpio",
.acpi_match_table = ACPI_PTR(rfkill_acpi_match),
+ .of_match_table = of_match_ptr(rfkill_of_match),
},
};
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index 7ae023b37a83..a20986806fea 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -36,6 +36,15 @@ config AF_RXRPC_INJECT_LOSS
Say Y here to inject packet loss by discarding some received and some
transmitted packets.
+config AF_RXRPC_INJECT_RX_DELAY
+ bool "Inject delay into packet reception"
+ depends on SYSCTL
+ help
+ Say Y here to inject a delay into packet reception, allowing an
+ extended RTT time to be modelled. The delay can be configured using
+ /proc/sys/net/rxrpc/inject_rx_delay, setting a number of
+ milliseconds up to 0.5s (note that the granularity is actually in
+ jiffies).
config AF_RXRPC_DEBUG
bool "RxRPC dynamic debugging"
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index ebbd4a1c3f86..102f5cbff91a 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -786,7 +786,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
INIT_LIST_HEAD(&rx->sock_calls);
INIT_LIST_HEAD(&rx->to_be_accepted);
INIT_LIST_HEAD(&rx->recvmsg_q);
- rwlock_init(&rx->recvmsg_lock);
+ spin_lock_init(&rx->recvmsg_lock);
rwlock_init(&rx->call_lock);
memset(&rx->srx, 0, sizeof(rx->srx));
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 3931a4bba8af..67b0a894162d 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -149,7 +149,7 @@ struct rxrpc_sock {
struct list_head sock_calls; /* List of calls owned by this socket */
struct list_head to_be_accepted; /* calls awaiting acceptance */
struct list_head recvmsg_q; /* Calls awaiting recvmsg's attention */
- rwlock_t recvmsg_lock; /* Lock for recvmsg_q */
+ spinlock_t recvmsg_lock; /* Lock for recvmsg_q */
struct key *key; /* security for this socket */
struct key *securities; /* list of server security descriptors */
struct rb_root calls; /* User ID -> call mapping */
@@ -284,7 +284,9 @@ struct rxrpc_local {
struct task_struct *io_thread;
struct completion io_thread_ready; /* Indication that the I/O thread started */
struct rxrpc_sock *service; /* Service(s) listening on this endpoint */
- struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ struct sk_buff_head rx_delay_queue; /* Delay injection queue */
+#endif
struct sk_buff_head rx_queue; /* Received packets */
struct list_head conn_attend_q; /* Conns requiring immediate attention */
struct list_head call_attend_q; /* Calls requiring immediate attention */
@@ -688,9 +690,11 @@ struct rxrpc_call {
/* Receive-phase ACK management (ACKs we send). */
u8 ackr_reason; /* reason to ACK */
+ u16 ackr_sack_base; /* Starting slot in SACK table ring */
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
- atomic64_t ackr_window; /* Base (in LSW) and top (in MSW) of SACK window */
- atomic_t ackr_nr_unacked; /* Number of unacked packets */
+ rxrpc_seq_t ackr_window; /* Base of SACK window */
+ rxrpc_seq_t ackr_wtop; /* Top of SACK window */
+ unsigned int ackr_nr_unacked; /* Number of unacked packets */
atomic_t ackr_nr_consumed; /* Number of packets needing hard ACK */
struct {
#define RXRPC_SACK_SIZE 256
@@ -1109,6 +1113,9 @@ extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+extern unsigned long rxrpc_inject_rx_delay;
+#endif
/*
* net_ns.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 3e8689fdc437..0f5a1d77b890 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -195,7 +195,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
tail = b->peer_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_peer *peer = b->peer_backlog[tail];
- rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_conn);
+ rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_peer);
kfree(peer);
tail = (tail + 1) & (size - 1);
}
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 1abdef15debc..e363f21a2014 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -498,9 +498,18 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
rxrpc_propose_ack_rx_idle);
- if (atomic_read(&call->ackr_nr_unacked) > 2)
- rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
- rxrpc_propose_ack_input_data);
+ if (call->ackr_nr_unacked > 2) {
+ if (call->peer->rtt_count < 3)
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_rtt);
+ else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
+ ktime_get_real()))
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_old_rtt);
+ else
+ rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
+ rxrpc_propose_ack_input_data);
+ }
/* Make sure the timer is restarted */
if (!__rxrpc_call_is_complete(call)) {
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index f3c9f0201c15..e9f1f49d18c2 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -54,12 +54,14 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
spin_lock_bh(&local->lock);
busy = !list_empty(&call->attend_link);
trace_rxrpc_poke_call(call, busy, what);
+ if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke))
+ busy = true;
if (!busy) {
- rxrpc_get_call(call, rxrpc_call_get_poke);
list_add_tail(&call->attend_link, &local->call_attend_q);
}
spin_unlock_bh(&local->lock);
- rxrpc_wake_up_io_thread(local);
+ if (!busy)
+ rxrpc_wake_up_io_thread(local);
}
}
@@ -167,7 +169,8 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
call->tx_total_len = -1;
call->next_rx_timo = 20 * HZ;
call->next_req_timo = 1 * HZ;
- atomic64_set(&call->ackr_window, 0x100000001ULL);
+ call->ackr_window = 1;
+ call->ackr_wtop = 1;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
@@ -560,7 +563,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
rxrpc_put_call_slot(call);
/* Make sure we don't get any more notifications */
- write_lock(&rx->recvmsg_lock);
+ spin_lock(&rx->recvmsg_lock);
if (!list_empty(&call->recvmsg_link)) {
_debug("unlinking once-pending call %p { e=%lx f=%lx }",
@@ -573,7 +576,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
call->recvmsg_link.next = NULL;
call->recvmsg_link.prev = NULL;
- write_unlock(&rx->recvmsg_lock);
+ spin_unlock(&rx->recvmsg_lock);
if (put)
rxrpc_put_call(call, rxrpc_call_put_unnotify);
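
The rxrpc_poke_call() hunk above closes a race: the call is queued for the I/O thread only if a reference can still be taken, so a call whose refcount has already dropped to zero is treated as busy instead of being resurrected. A userspace model of the try-get idiom (mirroring refcount_inc_not_zero()):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
        atomic_int ref;
};

/* Take a reference only if the object is still live (refcount > 0);
 * queue work on the object only when this succeeds. */
static bool obj_tryget(struct obj *o)
{
        int r = atomic_load(&o->ref);

        while (r > 0) {
                if (atomic_compare_exchange_weak(&o->ref, &r, r + 1))
                        return true;
        }
        return false;
}
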
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 44414e724415..95f4bc206b3d 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -163,7 +163,7 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
ntohl(pkt.ack.firstPacket),
ntohl(pkt.ack.serial),
- pkt.ack.reason, 0);
+ pkt.ack.reason, 0, rxrpc_rx_window_size);
break;
default:
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index f30323de82bd..89ac05a711a4 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -8,11 +8,6 @@
#include <linux/slab.h>
#include "ar-internal.h"
-static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
- .ref = REFCOUNT_INIT(1),
- .debug_id = UINT_MAX,
-};
-
/*
* Find a service connection under RCU conditions.
*
@@ -132,8 +127,6 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
*/
conn->state = RXRPC_CONN_SERVICE_PREALLOC;
refcount_set(&conn->ref, 2);
- conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle,
- rxrpc_bundle_get_service_conn);
atomic_inc(&rxnet->nr_conns);
write_lock(&rxnet->conn_lock);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 367927a99881..030d64f282f3 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -338,7 +338,8 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
rxrpc_seq_t window, rxrpc_seq_t wtop)
{
- atomic64_set_release(&call->ackr_window, ((u64)wtop) << 32 | window);
+ call->ackr_window = window;
+ call->ackr_wtop = wtop;
}
/*
@@ -367,9 +368,9 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct sk_buff *oos;
rxrpc_serial_t serial = sp->hdr.serial;
- u64 win = atomic64_read(&call->ackr_window);
- rxrpc_seq_t window = lower_32_bits(win);
- rxrpc_seq_t wtop = upper_32_bits(win);
+ unsigned int sack = call->ackr_sack_base;
+ rxrpc_seq_t window = call->ackr_window;
+ rxrpc_seq_t wtop = call->ackr_wtop;
rxrpc_seq_t wlimit = window + call->rx_winsize - 1;
rxrpc_seq_t seq = sp->hdr.seq;
bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
@@ -410,20 +411,23 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
/* Queue the packet. */
if (seq == window) {
- rxrpc_seq_t reset_from;
- bool reset_sack = false;
-
if (sp->hdr.flags & RXRPC_REQUEST_ACK)
ack_reason = RXRPC_ACK_REQUESTED;
/* Send an immediate ACK if we fill in a hole */
else if (!skb_queue_empty(&call->rx_oos_queue))
ack_reason = RXRPC_ACK_DELAY;
else
- atomic_inc_return(&call->ackr_nr_unacked);
+ call->ackr_nr_unacked++;
window++;
- if (after(window, wtop))
+ if (after(window, wtop)) {
+ trace_rxrpc_sack(call, seq, sack, rxrpc_sack_none);
wtop = window;
+ } else {
+ trace_rxrpc_sack(call, seq, sack, rxrpc_sack_advance);
+ sack = (sack + 1) % RXRPC_SACK_SIZE;
+ }
+
rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg);
@@ -440,43 +444,39 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
__skb_unlink(oos, &call->rx_oos_queue);
last = osp->hdr.flags & RXRPC_LAST_PACKET;
seq = osp->hdr.seq;
- if (!reset_sack) {
- reset_from = seq;
- reset_sack = true;
- }
+ call->ackr_sack_table[sack] = 0;
+ trace_rxrpc_sack(call, seq, sack, rxrpc_sack_fill);
+ sack = (sack + 1) % RXRPC_SACK_SIZE;
window++;
rxrpc_input_queue_data(call, oos, window, wtop,
- rxrpc_receive_queue_oos);
+ rxrpc_receive_queue_oos);
}
spin_unlock(&call->recvmsg_queue.lock);
- if (reset_sack) {
- do {
- call->ackr_sack_table[reset_from % RXRPC_SACK_SIZE] = 0;
- } while (reset_from++, before(reset_from, window));
- }
+ call->ackr_sack_base = sack;
} else {
- bool keep = false;
+ unsigned int slot;
ack_reason = RXRPC_ACK_OUT_OF_SEQUENCE;
- if (!call->ackr_sack_table[seq % RXRPC_SACK_SIZE]) {
- call->ackr_sack_table[seq % RXRPC_SACK_SIZE] = 1;
- keep = 1;
+ slot = seq - window;
+ sack = (sack + slot) % RXRPC_SACK_SIZE;
+
+ if (call->ackr_sack_table[sack % RXRPC_SACK_SIZE]) {
+ ack_reason = RXRPC_ACK_DUPLICATE;
+ goto send_ack;
}
+ call->ackr_sack_table[sack % RXRPC_SACK_SIZE] |= 1;
+ trace_rxrpc_sack(call, seq, sack, rxrpc_sack_oos);
+
if (after(seq + 1, wtop)) {
wtop = seq + 1;
rxrpc_input_update_ack_window(call, window, wtop);
}
- if (!keep) {
- ack_reason = RXRPC_ACK_DUPLICATE;
- goto send_ack;
- }
-
skb_queue_walk(&call->rx_oos_queue, oos) {
struct rxrpc_skb_priv *osp = rxrpc_skb(oos);
@@ -567,8 +567,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_serial_t serial = sp->hdr.serial;
rxrpc_seq_t seq0 = sp->hdr.seq;
- _enter("{%llx,%x},{%u,%x}",
- atomic64_read(&call->ackr_window), call->rx_highest_seq,
+ _enter("{%x,%x,%x},{%u,%x}",
+ call->ackr_window, call->ackr_wtop, call->rx_highest_seq,
skb->len, seq0);
if (__rxrpc_call_is_complete(call))
@@ -606,7 +606,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo);
goto out_notify;
}
- skb = NULL;
+ return;
out_notify:
trace_rxrpc_notify_socket(call->debug_id, serial);
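
The rewritten input path drops the packed atomic64 in favour of plain ackr_window/ackr_wtop fields, now only touched from one context, plus ackr_sack_base, which anchors a byte ring so that slot (base + seq - window) % RXRPC_SACK_SIZE always corresponds to sequence seq; a slot found already set on insertion is a duplicate. A simplified userspace model of the ring arithmetic, ignoring sequence-number wrap:

#define SACK_SIZE 256

struct sack_ring {
        unsigned int    base;           /* ring slot holding 'window' */
        unsigned int    window, wtop;   /* base and top of the window */
        unsigned char   table[SACK_SIZE];
};

/* Mark an out-of-order sequence number as soft-ACK'd; returns -1 if it
 * was already marked (a duplicate), 0 otherwise. */
static int sack_mark(struct sack_ring *r, unsigned int seq)
{
        unsigned int slot = (r->base + (seq - r->window)) % SACK_SIZE;

        if (r->table[slot])
                return -1;
        r->table[slot] = 1;
        if (seq + 1 > r->wtop)
                r->wtop = seq + 1;
        return 0;
}
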
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 9e9dfb2fc559..4a3a08a0e2cd 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -25,6 +25,7 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
*/
int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
{
+ struct sk_buff_head *rx_queue;
struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
if (unlikely(!local)) {
@@ -36,7 +37,16 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
skb->mark = RXRPC_SKB_MARK_PACKET;
rxrpc_new_skb(skb, rxrpc_skb_new_encap_rcv);
- skb_queue_tail(&local->rx_queue, skb);
+ rx_queue = &local->rx_queue;
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ if (rxrpc_inject_rx_delay ||
+ !skb_queue_empty(&local->rx_delay_queue)) {
+ skb->tstamp = ktime_add_ms(skb->tstamp, rxrpc_inject_rx_delay);
+ rx_queue = &local->rx_delay_queue;
+ }
+#endif
+
+ skb_queue_tail(rx_queue, skb);
rxrpc_wake_up_io_thread(local);
return 0;
}
@@ -407,6 +417,9 @@ int rxrpc_io_thread(void *data)
struct rxrpc_local *local = data;
struct rxrpc_call *call;
struct sk_buff *skb;
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ ktime_t now;
+#endif
bool should_stop;
complete(&local->io_thread_ready);
@@ -481,6 +494,17 @@ int rxrpc_io_thread(void *data)
continue;
}
+ /* Inject a delay into packets if requested. */
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ now = ktime_get_real();
+ while ((skb = skb_peek(&local->rx_delay_queue))) {
+ if (ktime_before(now, skb->tstamp))
+ break;
+ skb = skb_dequeue(&local->rx_delay_queue);
+ skb_queue_tail(&local->rx_queue, skb);
+ }
+#endif
+
if (!skb_queue_empty(&local->rx_queue)) {
spin_lock_irq(&local->rx_queue.lock);
skb_queue_splice_tail_init(&local->rx_queue, &rx_queue);
@@ -502,6 +526,28 @@ int rxrpc_io_thread(void *data)
if (should_stop)
break;
+
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ skb = skb_peek(&local->rx_delay_queue);
+ if (skb) {
+ unsigned long timeout;
+ ktime_t tstamp = skb->tstamp;
+ ktime_t now = ktime_get_real();
+ s64 delay_ns = ktime_to_ns(ktime_sub(tstamp, now));
+
+ if (delay_ns <= 0) {
+ __set_current_state(TASK_RUNNING);
+ continue;
+ }
+
+ timeout = nsecs_to_jiffies(delay_ns);
+ timeout = max(timeout, 1UL);
+ schedule_timeout(timeout);
+ __set_current_state(TASK_RUNNING);
+ continue;
+ }
+#endif
+
schedule();
}
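
The injection mechanism stamps each arriving skb with its arrival time plus the configured delay and parks it on rx_delay_queue; the I/O thread promotes packets to the real rx_queue once their timestamp falls due, otherwise sleeping just long enough for the head packet to mature. A minimal userspace model of the release-when-due check (field names hypothetical):

#include <time.h>

struct pkt {
        long long due_ns;       /* arrival time + injected delay */
};

/* True once the head packet's due time has passed and it may move from
 * the delay queue to the real receive queue. */
static int pkt_due(const struct pkt *p)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec >= p->due_ns;
}
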
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b8eaca5d9f22..7d910aee4f8c 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -108,8 +108,10 @@ static struct rxrpc_local *rxrpc_alloc_local(struct net *net,
local->net = net;
local->rxnet = rxrpc_net(net);
INIT_HLIST_NODE(&local->link);
- init_rwsem(&local->defrag_sem);
init_completion(&local->io_thread_ready);
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ skb_queue_head_init(&local->rx_delay_queue);
+#endif
skb_queue_head_init(&local->rx_queue);
INIT_LIST_HEAD(&local->conn_attend_q);
INIT_LIST_HEAD(&local->call_attend_q);
@@ -434,6 +436,9 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
/* At this point, there should be no more packets coming in to the
* local endpoint.
*/
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ rxrpc_purge_queue(&local->rx_delay_queue);
+#endif
rxrpc_purge_queue(&local->rx_queue);
rxrpc_purge_client_connections(local);
}
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 056c428d8bf3..825b81183046 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -53,3 +53,10 @@ unsigned int rxrpc_rx_mtu = 5692;
* sender that we're willing to handle.
*/
unsigned int rxrpc_rx_jumbo_max = 4;
+
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+/*
+ * The delay to inject into packet reception.
+ */
+unsigned long rxrpc_inject_rx_delay;
+#endif
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index a9746be29634..5e53429c6922 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -80,62 +80,40 @@ static void rxrpc_set_keepalive(struct rxrpc_call *call)
*/
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
struct rxrpc_call *call,
- struct rxrpc_txbuf *txb)
+ struct rxrpc_txbuf *txb,
+ u16 *_rwind)
{
struct rxrpc_ackinfo ackinfo;
- unsigned int qsize;
- rxrpc_seq_t window, wtop, wrap_point, ix, first;
+ unsigned int qsize, sack, wrap, to;
+ rxrpc_seq_t window, wtop;
int rsize;
- u64 wtmp;
u32 mtu, jmax;
u8 *ackp = txb->acks;
- u8 sack_buffer[sizeof(call->ackr_sack_table)] __aligned(8);
- atomic_set(&call->ackr_nr_unacked, 0);
+ call->ackr_nr_unacked = 0;
atomic_set(&call->ackr_nr_consumed, 0);
rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);
+ clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
- /* Barrier against rxrpc_input_data(). */
-retry:
- wtmp = atomic64_read_acquire(&call->ackr_window);
- window = lower_32_bits(wtmp);
- wtop = upper_32_bits(wtmp);
+ window = call->ackr_window;
+ wtop = call->ackr_wtop;
+ sack = call->ackr_sack_base % RXRPC_SACK_SIZE;
txb->ack.firstPacket = htonl(window);
- txb->ack.nAcks = 0;
+ txb->ack.nAcks = wtop - window;
if (after(wtop, window)) {
- /* Try to copy the SACK ring locklessly. We can use the copy,
- * only if the now-current top of the window didn't go past the
- * previously read base - otherwise we can't know whether we
- * have old data or new data.
- */
- memcpy(sack_buffer, call->ackr_sack_table, sizeof(sack_buffer));
- wrap_point = window + RXRPC_SACK_SIZE - 1;
- wtmp = atomic64_read_acquire(&call->ackr_window);
- window = lower_32_bits(wtmp);
- wtop = upper_32_bits(wtmp);
- if (after(wtop, wrap_point)) {
- cond_resched();
- goto retry;
- }
-
- /* The buffer is maintained as a ring with an invariant mapping
- * between bit position and sequence number, so we'll probably
- * need to rotate it.
- */
- txb->ack.nAcks = wtop - window;
- ix = window % RXRPC_SACK_SIZE;
- first = sizeof(sack_buffer) - ix;
+ wrap = RXRPC_SACK_SIZE - sack;
+ to = min_t(unsigned int, txb->ack.nAcks, RXRPC_SACK_SIZE);
- if (ix + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
- memcpy(txb->acks, sack_buffer + ix, txb->ack.nAcks);
+ if (sack + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
+ memcpy(txb->acks, call->ackr_sack_table + sack, txb->ack.nAcks);
} else {
- memcpy(txb->acks, sack_buffer + ix, first);
- memcpy(txb->acks + first, sack_buffer,
- txb->ack.nAcks - first);
+ memcpy(txb->acks, call->ackr_sack_table + sack, wrap);
+ memcpy(txb->acks + wrap, call->ackr_sack_table,
+ to - wrap);
}
- ackp += txb->ack.nAcks;
+ ackp += to;
} else if (before(wtop, window)) {
pr_warn("ack window backward %x %x", window, wtop);
} else if (txb->ack.reason == RXRPC_ACK_DELAY) {
@@ -147,6 +125,7 @@ retry:
jmax = rxrpc_rx_jumbo_max;
qsize = (window - 1) - call->rx_consumed;
rsize = max_t(int, call->rx_winsize - qsize, 0);
+ *_rwind = rsize;
ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
ackinfo.maxMTU = htonl(mtu);
ackinfo.rwind = htonl(rsize);
@@ -213,6 +192,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
rxrpc_serial_t serial;
size_t len, n;
int ret, rtt_slot = -1;
+ u16 rwind;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
return -ECONNRESET;
@@ -228,7 +208,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
if (txb->ack.reason == RXRPC_ACK_PING)
txb->wire.flags |= RXRPC_REQUEST_ACK;
- n = rxrpc_fill_out_ack(conn, call, txb);
+ n = rxrpc_fill_out_ack(conn, call, txb, &rwind);
if (n == 0)
return 0;
@@ -240,7 +220,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
txb->wire.serial = htonl(serial);
trace_rxrpc_tx_ack(call->debug_id, serial,
ntohl(txb->ack.firstPacket),
- ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks);
+ ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks,
+ rwind);
if (txb->ack.reason == RXRPC_ACK_PING)
rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
@@ -253,12 +234,15 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
call->peer->last_tx_at = ktime_get_seconds();
- if (ret < 0)
+ if (ret < 0) {
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_ack);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
rxrpc_tx_point_call_ack);
+ if (txb->wire.flags & RXRPC_REQUEST_ACK)
+ call->peer->rtt_last_req = ktime_get_real();
+ }
rxrpc_tx_backoff(call, ret);
if (!__rxrpc_call_is_complete(call)) {
@@ -429,8 +413,6 @@ dont_set_request_ack:
if (txb->len >= call->peer->maxdata)
goto send_fragmentable;
- down_read(&conn->local->defrag_sem);
-
txb->last_sent = ktime_get_real();
if (txb->wire.flags & RXRPC_REQUEST_ACK)
rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
@@ -445,7 +427,6 @@ dont_set_request_ack:
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
conn->peer->last_tx_at = ktime_get_seconds();
- up_read(&conn->local->defrag_sem);
if (ret < 0) {
rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
@@ -506,8 +487,6 @@ send_fragmentable:
/* attempt to send this message with fragmentation enabled */
_debug("send fragment");
- down_write(&conn->local->defrag_sem);
-
txb->last_sent = ktime_get_real();
if (txb->wire.flags & RXRPC_REQUEST_ACK)
rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
@@ -539,8 +518,6 @@ send_fragmentable:
rxrpc_tx_point_call_data_frag);
}
rxrpc_tx_backoff(call, ret);
-
- up_write(&conn->local->defrag_sem);
goto done;
}
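
With the window fields now maintained from a single context, rxrpc_fill_out_ack() no longer needs the retrying lockless snapshot of the SACK table; it linearizes the ring into the ACK packet with at most two memcpy() calls. The wrap-around copy in isolation:

#include <string.h>

/* Copy n ring entries starting at slot 'start' into a linear buffer,
 * splitting into two memcpy() calls when the span wraps the ring end. */
static void ring_copy(unsigned char *dst, const unsigned char *ring,
                      unsigned int ring_size, unsigned int start,
                      unsigned int n)
{
        unsigned int first = ring_size - start;

        if (n <= first) {
                memcpy(dst, ring + start, n);
        } else {
                memcpy(dst, ring + start, first);
                memcpy(dst + first, ring, n - first);
        }
}
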
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 750158a085cd..682636d3b060 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -55,7 +55,6 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
unsigned long timeout = 0;
rxrpc_seq_t acks_hard_ack;
char lbuff[50], rbuff[50];
- u64 wtmp;
if (v == &rxnet->calls) {
seq_puts(seq,
@@ -83,7 +82,6 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
}
acks_hard_ack = READ_ONCE(call->acks_hard_ack);
- wtmp = atomic64_read_acquire(&call->ackr_window);
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
" %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n",
@@ -98,7 +96,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call->abort_code,
call->debug_id,
acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
- lower_32_bits(wtmp), upper_32_bits(wtmp) - lower_32_bits(wtmp),
+ call->ackr_window, call->ackr_wtop - call->ackr_window,
call->rx_serial,
call->cong_cwnd,
timeout);
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index dd54ceee7bcc..a482f88c5fc5 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -40,12 +40,12 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
call->notify_rx(sk, call, call->user_call_ID);
spin_unlock(&call->notify_lock);
} else {
- write_lock(&rx->recvmsg_lock);
+ spin_lock(&rx->recvmsg_lock);
if (list_empty(&call->recvmsg_link)) {
rxrpc_get_call(call, rxrpc_call_get_notify_socket);
list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
}
- write_unlock(&rx->recvmsg_lock);
+ spin_unlock(&rx->recvmsg_lock);
if (!sock_flag(sk, SOCK_DEAD)) {
_debug("call %ps", sk->sk_data_ready);
@@ -95,7 +95,7 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
}
trace_rxrpc_recvdata(call, rxrpc_recvmsg_terminal,
- lower_32_bits(atomic64_read(&call->ackr_window)) - 1,
+ call->ackr_window - 1,
call->rx_pkt_offset, call->rx_pkt_len, ret);
return ret;
}
@@ -137,7 +137,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
/* Check to see if there's an ACK that needs sending. */
acked = atomic_add_return(call->rx_consumed - old_consumed,
&call->ackr_nr_consumed);
- if (acked > 2 &&
+ if (acked > 8 &&
!test_and_set_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
rxrpc_poke_call(call, rxrpc_call_poke_idle);
}
@@ -175,13 +175,13 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
rx_pkt_len = call->rx_pkt_len;
if (rxrpc_call_has_failed(call)) {
- seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
+ seq = call->ackr_window - 1;
ret = -EIO;
goto done;
}
if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) {
- seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
+ seq = call->ackr_window - 1;
ret = 1;
goto done;
}
@@ -334,15 +334,28 @@ try_again:
/* Find the next call and dequeue it if we're not just peeking. If we
* do dequeue it, that comes with a ref that we will need to release.
+ * We also want to weed out calls that got requeued whilst we were
+ * shovelling data out.
*/
- write_lock(&rx->recvmsg_lock);
+ spin_lock(&rx->recvmsg_lock);
l = rx->recvmsg_q.next;
call = list_entry(l, struct rxrpc_call, recvmsg_link);
+
+ if (!rxrpc_call_is_complete(call) &&
+ skb_queue_empty(&call->recvmsg_queue)) {
+ list_del_init(&call->recvmsg_link);
+ spin_unlock(&rx->recvmsg_lock);
+ release_sock(&rx->sk);
+ trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_unqueue, 0);
+ rxrpc_put_call(call, rxrpc_call_put_recvmsg);
+ goto try_again;
+ }
+
if (!(flags & MSG_PEEK))
list_del_init(&call->recvmsg_link);
else
rxrpc_get_call(call, rxrpc_call_get_recvmsg);
- write_unlock(&rx->recvmsg_lock);
+ spin_unlock(&rx->recvmsg_lock);
call_debug_id = call->debug_id;
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_dequeue, 0);
@@ -402,7 +415,8 @@ try_again:
if (rxrpc_call_has_failed(call))
goto call_failed;
- rxrpc_notify_socket(call);
+ if (!skb_queue_empty(&call->recvmsg_queue))
+ rxrpc_notify_socket(call);
goto not_yet_complete;
call_failed:
@@ -431,9 +445,9 @@ error_unlock_call:
error_requeue_call:
if (!(flags & MSG_PEEK)) {
- write_lock(&rx->recvmsg_lock);
+ spin_lock(&rx->recvmsg_lock);
list_add(&call->recvmsg_link, &rx->recvmsg_q);
- write_unlock(&rx->recvmsg_lock);
+ spin_unlock(&rx->recvmsg_lock);
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
} else {
rxrpc_put_call(call, rxrpc_call_put_recvmsg);
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index ebe0c75e7b07..3bcd6ee80396 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -63,7 +63,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
if (skb) {
int n = atomic_dec_return(select_skb_count(skb));
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
- kfree_skb(skb);
+ consume_skb(skb);
}
}
@@ -78,6 +78,6 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
int n = atomic_dec_return(select_skb_count(skb));
trace_rxrpc_skb(skb, refcount_read(&skb->users), n,
rxrpc_skb_put_purge);
- kfree_skb(skb);
+ consume_skb(skb);
}
}
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index cde3224a5cd2..ecaeb4ecfb58 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -17,6 +17,9 @@ static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = 255;
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+static const unsigned long max_500 = 500;
+#endif
/*
* RxRPC operating parameters.
@@ -63,6 +66,19 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.extra2 = (void *)&max_jiffies,
},
+ /* Values used in milliseconds */
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ {
+ .procname = "inject_rx_delay",
+ .data = &rxrpc_inject_rx_delay,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)SYSCTL_LONG_ZERO,
+ .extra2 = (void *)&max_500,
+ },
+#endif
+
/* Non-time values */
{
.procname = "reap_client_conns",
@@ -109,7 +125,6 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.extra1 = (void *)SYSCTL_ONE,
.extra2 = (void *)&four,
},
-
{ }
};
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index d2cf2aac3adb..d43be8512386 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -110,12 +110,8 @@ void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
_enter("%x/%x/%x", call->tx_bottom, call->acks_hard_ack, call->tx_top);
- for (;;) {
- spin_lock(&call->tx_lock);
- txb = list_first_entry_or_null(&call->tx_buffer,
- struct rxrpc_txbuf, call_link);
- if (!txb)
- break;
+ while ((txb = list_first_entry_or_null(&call->tx_buffer,
+ struct rxrpc_txbuf, call_link))) {
hard_ack = smp_load_acquire(&call->acks_hard_ack);
if (before(hard_ack, txb->seq))
break;
@@ -128,15 +124,11 @@ void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
trace_rxrpc_txqueue(call, rxrpc_txqueue_dequeue);
- spin_unlock(&call->tx_lock);
-
rxrpc_put_txbuf(txb, rxrpc_txbuf_put_rotated);
if (after(call->acks_hard_ack, call->tx_bottom + 128))
wake = true;
}
- spin_unlock(&call->tx_lock);
-
if (wake)
wake_up(&call->waitq);
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 777d6b50505c..4b95cb1ac435 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -45,23 +45,6 @@ if NET_SCHED
comment "Queueing/Scheduling"
-config NET_SCH_CBQ
- tristate "Class Based Queueing (CBQ)"
- help
- Say Y here if you want to use the Class-Based Queueing (CBQ) packet
- scheduling algorithm. This algorithm classifies the waiting packets
- into a tree-like hierarchy of classes; the leaves of this tree are
- in turn scheduled by separate algorithms.
-
- See the top of <file:net/sched/sch_cbq.c> for more details.
-
- CBQ is a commonly used scheduler, so if you're unsure, you should
- say Y here. Then say Y to all the queueing algorithms below that you
- want to use as leaf disciplines.
-
- To compile this code as a module, choose M here: the
- module will be called sch_cbq.
-
config NET_SCH_HTB
tristate "Hierarchical Token Bucket (HTB)"
help
@@ -85,20 +68,6 @@ config NET_SCH_HFSC
To compile this code as a module, choose M here: the
module will be called sch_hfsc.
-config NET_SCH_ATM
- tristate "ATM Virtual Circuits (ATM)"
- depends on ATM
- help
- Say Y here if you want to use the ATM pseudo-scheduler. This
- provides a framework for invoking classifiers, which in turn
- select classes of this queuing discipline. Each class maps
- the flow(s) it is handling to a given virtual circuit.
-
- See the top of <file:net/sched/sch_atm.c> for more details.
-
- To compile this code as a module, choose M here: the
- module will be called sch_atm.
-
config NET_SCH_PRIO
tristate "Multi Band Priority Queueing (PRIO)"
help
@@ -195,8 +164,14 @@ config NET_SCH_ETF
To compile this code as a module, choose M here: the
module will be called sch_etf.
+config NET_SCH_MQPRIO_LIB
+ tristate
+ help
+ Common library for manipulating mqprio queue configurations.
+
config NET_SCH_TAPRIO
tristate "Time Aware Priority (taprio) Scheduler"
+ select NET_SCH_MQPRIO_LIB
help
Say Y here if you want to use the Time Aware Priority (taprio) packet
scheduling algorithm.
@@ -217,17 +192,6 @@ config NET_SCH_GRED
To compile this code as a module, choose M here: the
module will be called sch_gred.
-config NET_SCH_DSMARK
- tristate "Differentiated Services marker (DSMARK)"
- help
- Say Y if you want to schedule packets according to the
- Differentiated Services architecture proposed in RFC 2475.
- Technical information on this method, with pointers to associated
- RFCs, is available at <http://www.gta.ufrj.br/diffserv/>.
-
- To compile this code as a module, choose M here: the
- module will be called sch_dsmark.
-
config NET_SCH_NETEM
tristate "Network emulator (NETEM)"
help
@@ -253,6 +217,7 @@ config NET_SCH_DRR
config NET_SCH_MQPRIO
tristate "Multi-queue priority scheduler (MQPRIO)"
+ select NET_SCH_MQPRIO_LIB
help
Say Y here if you want to use the Multi-queue Priority scheduler.
This scheduler allows QOS to be offloaded on NICs that have support
@@ -337,7 +302,7 @@ config NET_SCH_FQ
Say Y here if you want to use the FQ packet scheduling algorithm.
FQ does flow separation, and is able to respect pacing requirements
- set by TCP stack into sk->sk_pacing_rate (for localy generated
+ set by TCP stack into sk->sk_pacing_rate (for locally generated
traffic)
To compile this driver as a module, choose M here: the module
@@ -503,17 +468,6 @@ config NET_CLS_BASIC
To compile this code as a module, choose M here: the
module will be called cls_basic.
-config NET_CLS_TCINDEX
- tristate "Traffic-Control Index (TCINDEX)"
- select NET_CLS
- help
- Say Y here if you want to be able to classify packets based on
- traffic control indices. You will want this feature if you want
- to implement Differentiated Services together with DSMARK.
-
- To compile this code as a module, choose M here: the
- module will be called cls_tcindex.
-
config NET_CLS_ROUTE4
tristate "Routing decision (ROUTE)"
depends on INET
@@ -559,34 +513,6 @@ config CLS_U32_MARK
help
Say Y here to be able to use netfilter marks as u32 key.
-config NET_CLS_RSVP
- tristate "IPv4 Resource Reservation Protocol (RSVP)"
- select NET_CLS
- help
- The Resource Reservation Protocol (RSVP) permits end systems to
- request a minimum and maximum data flow rate for a connection; this
- is important for real time data such as streaming sound or video.
-
- Say Y here if you want to be able to classify outgoing packets based
- on their RSVP requests.
-
- To compile this code as a module, choose M here: the
- module will be called cls_rsvp.
-
-config NET_CLS_RSVP6
- tristate "IPv6 Resource Reservation Protocol (RSVP6)"
- select NET_CLS
- help
- The Resource Reservation Protocol (RSVP) permits end systems to
- request a minimum and maximum data flow rate for a connection; this
- is important for real time data such as streaming sound or video.
-
- Say Y here if you want to be able to classify outgoing packets based
- on their RSVP requests and you are using the IPv6 protocol.
-
- To compile this code as a module, choose M here: the
- module will be called cls_rsvp6.
-
config NET_CLS_FLOW
tristate "Flow classifier"
select NET_CLS
@@ -977,6 +903,7 @@ config NET_ACT_TUNNEL_KEY
config NET_ACT_CT
tristate "connection tracking tc action"
depends on NET_CLS_ACT && NF_CONNTRACK && (!NF_NAT || NF_NAT) && NF_FLOW_TABLE
+ select NF_CONNTRACK_OVS
select NF_NAT_OVS if NF_NAT
help
Say Y here to allow sending the packets to conntrack module.
diff --git a/net/sched/Makefile b/net/sched/Makefile
index dd14ef413fda..b5fd49641d91 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -33,25 +33,23 @@ obj-$(CONFIG_NET_ACT_TUNNEL_KEY)+= act_tunnel_key.o
obj-$(CONFIG_NET_ACT_CT) += act_ct.o
obj-$(CONFIG_NET_ACT_GATE) += act_gate.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
-obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o
obj-$(CONFIG_NET_SCH_RED) += sch_red.o
obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o
obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
-obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
-obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
obj-$(CONFIG_NET_SCH_ETS) += sch_ets.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
+obj-$(CONFIG_NET_SCH_MQPRIO_LIB) += sch_mqprio_lib.o
obj-$(CONFIG_NET_SCH_SKBPRIO) += sch_skbprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
@@ -69,9 +67,6 @@ obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
obj-$(CONFIG_NET_CLS_FW) += cls_fw.o
-obj-$(CONFIG_NET_CLS_RSVP) += cls_rsvp.o
-obj-$(CONFIG_NET_CLS_TCINDEX) += cls_tcindex.o
-obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o
obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o
obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o
obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5b3c0ac495be..fce522886099 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -125,7 +125,7 @@ static void free_tcf(struct tc_action *p)
free_percpu(p->cpu_bstats_hw);
free_percpu(p->cpu_qstats);
- tcf_set_action_cookie(&p->act_cookie, NULL);
+ tcf_set_action_cookie(&p->user_cookie, NULL);
if (chain)
tcf_chain_put_by_act(chain);
@@ -169,11 +169,6 @@ static bool tc_act_skip_sw(u32 flags)
return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
}
-static bool tc_act_in_hw(struct tc_action *act)
-{
- return !!act->in_hw_count;
-}
-
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static bool tc_act_flags_valid(u32 flags)
{
@@ -192,6 +187,7 @@ static int offload_action_init(struct flow_offload_action *fl_action,
fl_action->extack = extack;
fl_action->command = cmd;
fl_action->index = act->tcfa_index;
+ fl_action->cookie = (unsigned long)act;
if (act->ops->offload_act_setup) {
spin_lock_bh(&act->tcfa_lock);
@@ -272,7 +268,7 @@ static int tcf_action_offload_add_ex(struct tc_action *action,
if (err)
goto fl_err;
- err = tc_setup_action(&fl_action->action, actions, extack);
+ err = tc_setup_action(&fl_action->action, actions, 0, extack);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to setup tc actions for offload");
@@ -307,9 +303,6 @@ int tcf_action_update_hw_stats(struct tc_action *action)
struct flow_offload_action fl_act = {};
int err;
- if (!tc_act_in_hw(action))
- return -EOPNOTSUPP;
-
err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
if (err)
return err;
@@ -438,14 +431,14 @@ EXPORT_SYMBOL(tcf_idr_release);
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
- struct tc_cookie *act_cookie;
+ struct tc_cookie *user_cookie;
u32 cookie_len = 0;
rcu_read_lock();
- act_cookie = rcu_dereference(act->act_cookie);
+ user_cookie = rcu_dereference(act->user_cookie);
- if (act_cookie)
- cookie_len = nla_total_size(act_cookie->len);
+ if (user_cookie)
+ cookie_len = nla_total_size(user_cookie->len);
rcu_read_unlock();
return nla_total_size(0) /* action number nested */
@@ -495,7 +488,7 @@ tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
goto nla_put_failure;
rcu_read_lock();
- cookie = rcu_dereference(a->act_cookie);
+ cookie = rcu_dereference(a->user_cookie);
if (cookie) {
if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
rcu_read_unlock();
@@ -539,6 +532,8 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
(unsigned long)p->tcfa_tm.lastuse))
continue;
+ tcf_action_update_hw_stats(p);
+
nest = nla_nest_start_noflag(skb, n_i);
if (!nest) {
index--;
@@ -1367,9 +1362,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
{
bool police = flags & TCA_ACT_FLAGS_POLICE;
struct nla_bitfield32 userflags = { 0, 0 };
+ struct tc_cookie *user_cookie = NULL;
u8 hw_stats = TCA_ACT_HW_STATS_ANY;
struct nlattr *tb[TCA_ACT_MAX + 1];
- struct tc_cookie *cookie = NULL;
struct tc_action *a;
int err;
@@ -1380,8 +1375,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
if (err < 0)
return ERR_PTR(err);
if (tb[TCA_ACT_COOKIE]) {
- cookie = nla_memdup_cookie(tb);
- if (!cookie) {
+ user_cookie = nla_memdup_cookie(tb);
+ if (!user_cookie) {
NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
err = -ENOMEM;
goto err_out;
@@ -1407,7 +1402,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
*init_res = err;
if (!police && tb[TCA_ACT_COOKIE])
- tcf_set_action_cookie(&a->act_cookie, cookie);
+ tcf_set_action_cookie(&a->user_cookie, user_cookie);
if (!police)
a->hw_stats = hw_stats;
@@ -1415,9 +1410,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
return a;
err_out:
- if (cookie) {
- kfree(cookie->data);
- kfree(cookie);
+ if (user_cookie) {
+ kfree(user_cookie->data);
+ kfree(user_cookie);
}
return ERR_PTR(err);
}
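
The act_cookie -> user_cookie rename in this hunk separates two identities the action used to conflate: the opaque blob user space attached (only ever echoed back on dump) and a kernel-internal handle that drivers report offload events against. A minimal userspace sketch of the split; the struct and field names here are illustrative stand-ins, not the kernel's types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct user_cookie {
        size_t len;
        unsigned char data[16];
};

struct action {
        struct user_cookie user_cookie; /* what `tc` attached, dumped verbatim */
        unsigned long drv_cookie;       /* identity of the kernel object itself */
};

int main(void)
{
        struct action act = { .user_cookie = { .len = 4 } };

        memcpy(act.user_cookie.data, "\xca\xfe\xba\xbe", 4);
        act.drv_cookie = (unsigned long)&act;   /* as in offload_action_init() */
        printf("user cookie: %zu bytes, driver cookie: %#lx\n",
               act.user_cookie.len, act.drv_cookie);
        return 0;
}

The driver-facing value mirrors the fl_action->cookie = (unsigned long)act assignment added to offload_action_init() above, letting drivers report stats and misses without parsing the user blob.
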
@@ -1539,9 +1534,6 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
if (p == NULL)
goto errout;
- /* update hw stats for this action */
- tcf_action_update_hw_stats(p);
-
/* compat_mode being true specifies a call that is supposed
* to add additional backward compatibility statistic TLVs.
*/
@@ -1582,7 +1574,7 @@ errout:
static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
u32 portid, u32 seq, u16 flags, int event, int bind,
- int ref)
+ int ref, struct netlink_ext_ack *extack)
{
struct tcamsg *t;
struct nlmsghdr *nlh;
@@ -1606,7 +1598,12 @@ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
nla_nest_end(skb, nest);
+ if (extack && extack->_msg &&
+ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
+ goto out_nlmsg_trim;
+
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+
return skb->len;
out_nlmsg_trim:
@@ -1625,7 +1622,7 @@ tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
if (!skb)
return -ENOBUFS;
if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
- 0, 1) <= 0) {
+ 0, 1, NULL) <= 0) {
NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
kfree_skb(skb);
return -EINVAL;
@@ -1799,7 +1796,7 @@ tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
if (!skb)
return -ENOBUFS;
- if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
+ if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -1886,7 +1883,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
return -ENOBUFS;
if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
- 0, 2) <= 0) {
+ 0, 2, extack) <= 0) {
NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
kfree_skb(skb);
return -EINVAL;
@@ -1965,7 +1962,7 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
return -ENOBUFS;
if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
- RTM_NEWACTION, 0, 0) <= 0) {
+ RTM_NEWACTION, 0, 0, extack) <= 0) {
NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
kfree_skb(skb);
return -EINVAL;
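
tca_get_fill() growing a netlink_ext_ack parameter lets a command that succeeded with a warning carry that warning to user space as a TCA_EXT_WARN_MSG string attribute appended to the notification. A toy TLV writer showing the shape of such an append; the type value and helper are hypothetical, and real netlink attributes are padded to 4-byte alignment, which this sketch omits:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXT_WARN_MSG 0x39       /* arbitrary stand-in attribute type */

struct tlv {
        uint16_t len;
        uint16_t type;
};

static size_t put_string(unsigned char *buf, uint16_t type, const char *s)
{
        size_t payload = strlen(s) + 1;
        struct tlv t = { .len = (uint16_t)(sizeof(t) + payload), .type = type };

        memcpy(buf, &t, sizeof(t));
        memcpy(buf + sizeof(t), s, payload);
        return t.len;
}

int main(void)
{
        unsigned char msg[128];
        size_t off = 0; /* ...regular attributes would be written first... */

        off += put_string(msg + off, EXT_WARN_MSG,
                          "offload skipped: not supported");
        printf("appended %zu bytes of warning attribute\n", off);
        return 0;
}
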
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 7e63ff7e3ed7..8dabfb52ea3d 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -36,13 +36,15 @@ TC_INDIRECT_SCOPE int tcf_connmark_act(struct sk_buff *skb,
struct nf_conntrack_tuple tuple;
enum ip_conntrack_info ctinfo;
struct tcf_connmark_info *ca = to_connmark(a);
+ struct tcf_connmark_parms *parms;
struct nf_conntrack_zone zone;
struct nf_conn *c;
int proto;
- spin_lock(&ca->tcf_lock);
tcf_lastuse_update(&ca->tcf_tm);
- bstats_update(&ca->tcf_bstats, skb);
+ tcf_action_update_bstats(&ca->common, skb);
+
+ parms = rcu_dereference_bh(ca->parms);
switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
@@ -64,31 +66,29 @@ TC_INDIRECT_SCOPE int tcf_connmark_act(struct sk_buff *skb,
c = nf_ct_get(skb, &ctinfo);
if (c) {
skb->mark = READ_ONCE(c->mark);
- /* using overlimits stats to count how many packets marked */
- ca->tcf_qstats.overlimits++;
- goto out;
+ goto count;
}
- if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
- proto, ca->net, &tuple))
+ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, parms->net,
+ &tuple))
goto out;
- zone.id = ca->zone;
+ zone.id = parms->zone;
zone.dir = NF_CT_DEFAULT_ZONE_DIR;
- thash = nf_conntrack_find_get(ca->net, &zone, &tuple);
+ thash = nf_conntrack_find_get(parms->net, &zone, &tuple);
if (!thash)
goto out;
c = nf_ct_tuplehash_to_ctrack(thash);
- /* using overlimits stats to count how many packets marked */
- ca->tcf_qstats.overlimits++;
skb->mark = READ_ONCE(c->mark);
nf_ct_put(c);
+count:
+ /* using overlimits stats to count how many packets marked */
+ tcf_action_inc_overlimit_qstats(&ca->common);
out:
- spin_unlock(&ca->tcf_lock);
- return ca->tcf_action;
+ return READ_ONCE(ca->tcf_action);
}
static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
@@ -101,6 +101,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id);
+ struct tcf_connmark_parms *nparms, *oparms;
struct nlattr *tb[TCA_CONNMARK_MAX + 1];
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct tcf_chain *goto_ch = NULL;
@@ -120,52 +121,66 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
if (!tb[TCA_CONNMARK_PARMS])
return -EINVAL;
+ nparms = kzalloc(sizeof(*nparms), GFP_KERNEL);
+ if (!nparms)
+ return -ENOMEM;
+
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
index = parm->index;
ret = tcf_idr_check_alloc(tn, &index, a, bind);
if (!ret) {
- ret = tcf_idr_create(tn, index, est, a,
- &act_connmark_ops, bind, false, flags);
+ ret = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_connmark_ops, bind, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
- return ret;
+ err = ret;
+ goto out_free;
}
ci = to_connmark(*a);
- err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
- extack);
- if (err < 0)
- goto release_idr;
- tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- ci->net = net;
- ci->zone = parm->zone;
+
+ nparms->net = net;
+ nparms->zone = parm->zone;
ret = ACT_P_CREATED;
} else if (ret > 0) {
ci = to_connmark(*a);
- if (bind)
- return 0;
- if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
- tcf_idr_release(*a, bind);
- return -EEXIST;
+ if (bind) {
+ err = 0;
+ goto out_free;
}
- err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
- extack);
- if (err < 0)
+ if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
+ err = -EEXIST;
goto release_idr;
- /* replacing action and zone */
- spin_lock_bh(&ci->tcf_lock);
- goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- ci->zone = parm->zone;
- spin_unlock_bh(&ci->tcf_lock);
- if (goto_ch)
- tcf_chain_put_by_act(goto_ch);
+ }
+
+ nparms->net = rtnl_dereference(ci->parms)->net;
+ nparms->zone = parm->zone;
+
ret = 0;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
+
+ spin_lock_bh(&ci->tcf_lock);
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+ oparms = rcu_replace_pointer(ci->parms, nparms, lockdep_is_held(&ci->tcf_lock));
+ spin_unlock_bh(&ci->tcf_lock);
+
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+ if (oparms)
+ kfree_rcu(oparms, rcu);
+
return ret;
+
release_idr:
tcf_idr_release(*a, bind);
+out_free:
+ kfree(nparms);
return err;
}
@@ -179,11 +194,14 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
.refcnt = refcount_read(&ci->tcf_refcnt) - ref,
.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
};
+ struct tcf_connmark_parms *parms;
struct tcf_t t;
spin_lock_bh(&ci->tcf_lock);
+ parms = rcu_dereference_protected(ci->parms, lockdep_is_held(&ci->tcf_lock));
+
opt.action = ci->tcf_action;
- opt.zone = ci->zone;
+ opt.zone = parms->zone;
if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -201,6 +219,16 @@ nla_put_failure:
return -1;
}
+static void tcf_connmark_cleanup(struct tc_action *a)
+{
+ struct tcf_connmark_info *ci = to_connmark(a);
+ struct tcf_connmark_parms *parms;
+
+ parms = rcu_dereference_protected(ci->parms, 1);
+ if (parms)
+ kfree_rcu(parms, rcu);
+}
+
static struct tc_action_ops act_connmark_ops = {
.kind = "connmark",
.id = TCA_ID_CONNMARK,
@@ -208,6 +236,7 @@ static struct tc_action_ops act_connmark_ops = {
.act = tcf_connmark_act,
.dump = tcf_connmark_dump,
.init = tcf_connmark_init,
+ .cleanup = tcf_connmark_cleanup,
.size = sizeof(struct tcf_connmark_info),
};
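
act_connmark's conversion is the same recipe act_nat and act_pedit follow later in the series: tunables move into a separately allocated parms struct, the datapath fetches it with rcu_dereference_bh() and never takes tcf_lock, and the control path publishes a replacement with rcu_replace_pointer() and retires the old copy with kfree_rcu() (plus a final free in the new .cleanup callback). A compressed userspace sketch of that publish/retire pattern, assuming C11 atomics in place of RCU and an immediate free() where the kernel must first wait out a grace period:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct parms {
        int zone;
};

static _Atomic(struct parms *) cur_parms;

/* datapath: lockless snapshot, like rcu_dereference_bh(ca->parms) */
static int read_zone(void)
{
        struct parms *p = atomic_load_explicit(&cur_parms, memory_order_acquire);

        return p ? p->zone : 0;
}

/* control path: build, publish, retire - like rcu_replace_pointer() */
static int update_zone(int zone)
{
        struct parms *n = malloc(sizeof(*n));
        struct parms *o;

        if (!n)
                return -1;
        n->zone = zone;
        o = atomic_exchange_explicit(&cur_parms, n, memory_order_acq_rel);
        free(o);        /* kernel: kfree_rcu(), after a grace period */
        return 0;
}

int main(void)
{
        update_zone(5);
        printf("zone=%d\n", read_zone());
        update_zone(7);
        printf("zone=%d\n", read_zone());
        return 0;
}
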
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 0ca2bb8ed026..9cc0bc7c71ed 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -170,11 +170,11 @@ tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
enum ip_conntrack_dir dir,
+ enum ip_conntrack_info ctinfo,
struct flow_action *action)
{
struct nf_conn_labels *ct_labels;
struct flow_action_entry *entry;
- enum ip_conntrack_info ctinfo;
u32 *act_ct_labels;
entry = tcf_ct_flow_table_flow_action_get_next(action);
@@ -182,8 +182,6 @@ static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
- ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
- IP_CT_ESTABLISHED_REPLY;
/* aligns with the CT reference on the SKB nf_ct_set */
entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;
@@ -237,22 +235,28 @@ static int tcf_ct_flow_table_add_action_nat(struct net *net,
}
static int tcf_ct_flow_table_fill_actions(struct net *net,
- const struct flow_offload *flow,
+ struct flow_offload *flow,
enum flow_offload_tuple_dir tdir,
struct nf_flow_rule *flow_rule)
{
struct flow_action *action = &flow_rule->rule->action;
int num_entries = action->num_entries;
struct nf_conn *ct = flow->ct;
+ enum ip_conntrack_info ctinfo;
enum ip_conntrack_dir dir;
int i, err;
switch (tdir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
dir = IP_CT_DIR_ORIGINAL;
+ ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
+ IP_CT_ESTABLISHED : IP_CT_NEW;
+ if (ctinfo == IP_CT_ESTABLISHED)
+ set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
break;
case FLOW_OFFLOAD_DIR_REPLY:
dir = IP_CT_DIR_REPLY;
+ ctinfo = IP_CT_ESTABLISHED_REPLY;
break;
default:
return -EOPNOTSUPP;
@@ -262,7 +266,7 @@ static int tcf_ct_flow_table_fill_actions(struct net *net,
if (err)
goto err_nat;
- tcf_ct_flow_table_add_action_meta(ct, dir, action);
+ tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action);
return 0;
err_nat:
@@ -365,7 +369,7 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
struct nf_conn *ct,
- bool tcp)
+ bool tcp, bool bidirectional)
{
struct nf_conn_act_ct_ext *act_ct_ext;
struct flow_offload *entry;
@@ -384,6 +388,8 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
}
+ if (bidirectional)
+ __set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags);
act_ct_ext = nf_conn_act_ct_ext_find(ct);
if (act_ct_ext) {
@@ -407,26 +413,34 @@ static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
- bool tcp = false;
-
- if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) ||
- !test_bit(IPS_ASSURED_BIT, &ct->status))
- return;
+ bool tcp = false, bidirectional = true;
switch (nf_ct_protonum(ct)) {
case IPPROTO_TCP:
- tcp = true;
- if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+ if ((ctinfo != IP_CT_ESTABLISHED &&
+ ctinfo != IP_CT_ESTABLISHED_REPLY) ||
+ !test_bit(IPS_ASSURED_BIT, &ct->status) ||
+ ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
return;
+
+ tcp = true;
break;
case IPPROTO_UDP:
+ if (!nf_ct_is_confirmed(ct))
+ return;
+ if (!test_bit(IPS_ASSURED_BIT, &ct->status))
+ bidirectional = false;
break;
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE: {
struct nf_conntrack_tuple *tuple;
- if (ct->status & IPS_NAT_MASK)
+ if ((ctinfo != IP_CT_ESTABLISHED &&
+ ctinfo != IP_CT_ESTABLISHED_REPLY) ||
+ !test_bit(IPS_ASSURED_BIT, &ct->status) ||
+ ct->status & IPS_NAT_MASK)
return;
+
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
/* No support for GRE v1 */
if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
@@ -442,7 +456,7 @@ static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
ct->status & IPS_SEQ_ADJUST)
return;
- tcf_ct_flow_table_add(ct_ft, ct, tcp);
+ tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional);
}
static bool
@@ -621,13 +635,30 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
ct = flow->ct;
+ if (dir == FLOW_OFFLOAD_DIR_REPLY &&
+ !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) {
+ /* Only offload reply direction after connection became
+ * assured.
+ */
+ if (test_bit(IPS_ASSURED_BIT, &ct->status))
+ set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
+ else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags))
+ /* If flow_table flow has already been updated to the
+ * established state, then don't refresh.
+ */
+ return false;
+ }
+
if (tcph && (unlikely(tcph->fin || tcph->rst))) {
flow_offload_teardown(flow);
return false;
}
- ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
- IP_CT_ESTABLISHED_REPLY;
+ if (dir == FLOW_OFFLOAD_DIR_ORIGINAL)
+ ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
+ IP_CT_ESTABLISHED : IP_CT_NEW;
+ else
+ ctinfo = IP_CT_ESTABLISHED_REPLY;
flow_offload_refresh(nf_ft, flow);
nf_conntrack_get(&ct->ct_general);
@@ -695,31 +726,6 @@ drop_ct:
return false;
}
-/* Trim the skb to the length specified by the IP/IPv6 header,
- * removing any trailing lower-layer padding. This prepares the skb
- * for higher-layer processing that assumes skb->len excludes padding
- * (such as nf_ip_checksum). The caller needs to pull the skb to the
- * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
- */
-static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
-{
- unsigned int len;
-
- switch (family) {
- case NFPROTO_IPV4:
- len = ntohs(ip_hdr(skb)->tot_len);
- break;
- case NFPROTO_IPV6:
- len = sizeof(struct ipv6hdr)
- + ntohs(ipv6_hdr(skb)->payload_len);
- break;
- default:
- len = skb->len;
- }
-
- return pskb_trim_rcsum(skb, len);
-}
-
static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
u8 family = NFPROTO_UNSPEC;
@@ -779,6 +785,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
struct nf_conn *ct;
int err = 0;
bool frag;
+ u8 proto;
u16 mru;
/* Previously seen (loopback)? Ignore. */
@@ -794,50 +801,14 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
return err;
skb_get(skb);
- mru = tc_skb_cb(skb)->mru;
-
- if (family == NFPROTO_IPV4) {
- enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
-
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
- local_bh_disable();
- err = ip_defrag(net, skb, user);
- local_bh_enable();
- if (err && err != -EINPROGRESS)
- return err;
-
- if (!err) {
- *defrag = true;
- mru = IPCB(skb)->frag_max_size;
- }
- } else { /* NFPROTO_IPV6 */
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
- enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
-
- memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
- err = nf_ct_frag6_gather(net, skb, user);
- if (err && err != -EINPROGRESS)
- goto out_free;
-
- if (!err) {
- *defrag = true;
- mru = IP6CB(skb)->frag_max_size;
- }
-#else
- err = -EOPNOTSUPP;
- goto out_free;
-#endif
- }
+ err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
+ if (err)
+ return err;
- if (err != -EINPROGRESS)
- tc_skb_cb(skb)->mru = mru;
- skb_clear_hash(skb);
- skb->ignore_df = 1;
- return err;
+ *defrag = true;
+ tc_skb_cb(skb)->mru = mru;
-out_free:
- kfree_skb(skb);
- return err;
+ return 0;
}
static void tcf_ct_params_free(struct tcf_ct_params *params)
@@ -980,7 +951,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
if (err)
goto drop;
- err = tcf_ct_skb_network_trim(skb, family);
+ err = nf_ct_skb_network_trim(skb, family);
if (err)
goto drop;
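
The act_ct hunks above relax when a connection may enter the flowtable: TCP still demands an assured, fully established connection, but a confirmed UDP flow can now be offloaded right away, unidirectionally until it becomes assured (the GRE case, which additionally rejects NAT and keyed tunnels, is left out here). A reduced decision function capturing those two cases; the fields are descriptive stand-ins for the conntrack status bits:

#include <stdbool.h>
#include <stdio.h>

enum l4proto { L4_TCP, L4_UDP };

struct conn_state {
        enum l4proto proto;
        bool confirmed;         /* nf_ct_is_confirmed() */
        bool assured;           /* IPS_ASSURED_BIT */
        bool established;       /* ctinfo is (REPLY-)ESTABLISHED */
};

static bool offloadable(const struct conn_state *c, bool *bidirectional)
{
        *bidirectional = true;

        switch (c->proto) {
        case L4_TCP:
                /* unchanged: only assured, fully established TCP */
                return c->established && c->assured;
        case L4_UDP:
                if (!c->confirmed)
                        return false;
                if (!c->assured)
                        *bidirectional = false; /* original direction only */
                return true;
        }
        return false;
}

int main(void)
{
        struct conn_state udp = { L4_UDP, true, false, false };
        bool bidir;

        printf("udp: offload=%d bidir=%d\n", offloadable(&udp, &bidir), bidir);
        return 0;
}
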
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 9b8def0be41e..c9a811f4c7ee 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -119,35 +119,37 @@ TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb,
struct tcf_result *res)
{
struct tcf_gate *gact = to_gate(a);
-
- spin_lock(&gact->tcf_lock);
+ int action = READ_ONCE(gact->tcf_action);
tcf_lastuse_update(&gact->tcf_tm);
- bstats_update(&gact->tcf_bstats, skb);
+ tcf_action_update_bstats(&gact->common, skb);
+ spin_lock(&gact->tcf_lock);
if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
spin_unlock(&gact->tcf_lock);
- return gact->tcf_action;
+ return action;
}
- if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN))
+ if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN)) {
+ spin_unlock(&gact->tcf_lock);
goto drop;
+ }
if (gact->current_max_octets >= 0) {
gact->current_entry_octets += qdisc_pkt_len(skb);
if (gact->current_entry_octets > gact->current_max_octets) {
- gact->tcf_qstats.overlimits++;
- goto drop;
+ spin_unlock(&gact->tcf_lock);
+ goto overlimit;
}
}
-
spin_unlock(&gact->tcf_lock);
- return gact->tcf_action;
-drop:
- gact->tcf_qstats.drops++;
- spin_unlock(&gact->tcf_lock);
+ return action;
+overlimit:
+ tcf_action_inc_overlimit_qstats(&gact->common);
+drop:
+ tcf_action_inc_drop_qstats(&gact->common);
return TC_ACT_SHOT;
}
@@ -357,8 +359,8 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
return 0;
if (!err) {
- ret = tcf_idr_create(tn, index, est, a,
- &act_gate_ops, bind, false, flags);
+ ret = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_gate_ops, bind, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
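
Creating the gate action with tcf_idr_create_from_flags() opts it into per-CPU statistics, which is what allows tcf_gate_act() above to account bytes, drops and overlimits without extending the tcf_lock critical section. A sketch of the idea with thread-local storage standing in for per-CPU counters (single-threaded, so the dump-time summation is trivial):

#include <stdio.h>

static _Thread_local unsigned long drops, overlimits;

static void inc_drop(void)      { drops++; }      /* tcf_action_inc_drop_qstats() */
static void inc_overlimit(void) { overlimits++; } /* tcf_action_inc_overlimit_qstats() */

int main(void)
{
        inc_overlimit();
        inc_drop();
        printf("drops=%lu overlimits=%lu\n", drops, overlimits);
        return 0;
}
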
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 7284bcea7b0b..8037ec9b1d31 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -29,8 +29,8 @@
static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);
-#define MIRRED_RECURSION_LIMIT 4
-static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
+#define MIRRED_NEST_LIMIT 4
+static DEFINE_PER_CPU(unsigned int, mirred_nest_level);
static bool tcf_mirred_is_act_redirect(int action)
{
@@ -206,12 +206,19 @@ release_idr:
return err;
}
+static bool is_mirred_nested(void)
+{
+ return unlikely(__this_cpu_read(mirred_nest_level) > 1);
+}
+
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
int err;
if (!want_ingress)
err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
+ else if (is_mirred_nested())
+ err = netif_rx(skb);
else
err = netif_receive_skb(skb);
@@ -226,7 +233,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
struct sk_buff *skb2 = skb;
bool m_mac_header_xmit;
struct net_device *dev;
- unsigned int rec_level;
+ unsigned int nest_level;
int retval, err = 0;
bool use_reinsert;
bool want_ingress;
@@ -237,11 +244,11 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
int mac_len;
bool at_nh;
- rec_level = __this_cpu_inc_return(mirred_rec_level);
- if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
+ nest_level = __this_cpu_inc_return(mirred_nest_level);
+ if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
netdev_name(skb->dev));
- __this_cpu_dec(mirred_rec_level);
+ __this_cpu_dec(mirred_nest_level);
return TC_ACT_SHOT;
}
@@ -310,7 +317,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
err = tcf_mirred_forward(want_ingress, skb);
if (err)
tcf_action_inc_overlimit_qstats(&m->common);
- __this_cpu_dec(mirred_rec_level);
+ __this_cpu_dec(mirred_nest_level);
return TC_ACT_CONSUMED;
}
}
@@ -322,7 +329,7 @@ out:
if (tcf_mirred_is_act_redirect(m_eaction))
retval = TC_ACT_SHOT;
}
- __this_cpu_dec(mirred_rec_level);
+ __this_cpu_dec(mirred_nest_level);
return retval;
}
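
The recursion -> nesting rename describes what the per-CPU counter actually guards: how many mirred invocations already sit on this CPU's stack. Under the limit, a nested ingress redirect is now queued via netif_rx() instead of delivered inline through netif_receive_skb(), bounding stack growth. A toy model, with thread-local state in place of the per-CPU variable:

#include <stdio.h>

#define NEST_LIMIT 4

static _Thread_local unsigned int nest_level;

static int deliver(int depth);

static int forward(int depth)
{
        int ret;

        if (++nest_level > NEST_LIMIT) {
                nest_level--;
                return -1;      /* TC_ACT_SHOT: give up, too deep */
        }
        if (nest_level > 1)
                ret = 0;        /* queue for later, like netif_rx() */
        else
                ret = deliver(depth);   /* inline, like netif_receive_skb() */
        nest_level--;
        return ret;
}

/* a delivered packet may hit another mirred action and re-enter */
static int deliver(int depth)
{
        return depth ? forward(depth - 1) : 0;
}

int main(void)
{
        printf("result: %d\n", forward(10));    /* deferred, not dropped */
        return 0;
}
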
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 74c74be33048..4184af5abbf3 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -38,6 +38,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
{
struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
+ struct tcf_nat_parms *nparm, *oparm;
struct nlattr *tb[TCA_NAT_MAX + 1];
struct tcf_chain *goto_ch = NULL;
struct tc_nat *parm;
@@ -59,8 +60,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, index, est, a,
- &act_nat_ops, bind, false, flags);
+ ret = tcf_idr_create_from_flags(tn, index, est, a, &act_nat_ops,
+ bind, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -79,19 +80,31 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (err < 0)
goto release_idr;
+
+ nparm = kzalloc(sizeof(*nparm), GFP_KERNEL);
+ if (!nparm) {
+ err = -ENOMEM;
+ goto release_idr;
+ }
+
+ nparm->old_addr = parm->old_addr;
+ nparm->new_addr = parm->new_addr;
+ nparm->mask = parm->mask;
+ nparm->flags = parm->flags;
+
p = to_tcf_nat(*a);
spin_lock_bh(&p->tcf_lock);
- p->old_addr = parm->old_addr;
- p->new_addr = parm->new_addr;
- p->mask = parm->mask;
- p->flags = parm->flags;
-
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+ oparm = rcu_replace_pointer(p->parms, nparm, lockdep_is_held(&p->tcf_lock));
spin_unlock_bh(&p->tcf_lock);
+
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
+ if (oparm)
+ kfree_rcu(oparm, rcu);
+
return ret;
release_idr:
tcf_idr_release(*a, bind);
@@ -103,6 +116,7 @@ TC_INDIRECT_SCOPE int tcf_nat_act(struct sk_buff *skb,
struct tcf_result *res)
{
struct tcf_nat *p = to_tcf_nat(a);
+ struct tcf_nat_parms *parms;
struct iphdr *iph;
__be32 old_addr;
__be32 new_addr;
@@ -113,18 +127,16 @@ TC_INDIRECT_SCOPE int tcf_nat_act(struct sk_buff *skb,
int ihl;
int noff;
- spin_lock(&p->tcf_lock);
-
tcf_lastuse_update(&p->tcf_tm);
- old_addr = p->old_addr;
- new_addr = p->new_addr;
- mask = p->mask;
- egress = p->flags & TCA_NAT_FLAG_EGRESS;
- action = p->tcf_action;
+ tcf_action_update_bstats(&p->common, skb);
- bstats_update(&p->tcf_bstats, skb);
+ action = READ_ONCE(p->tcf_action);
- spin_unlock(&p->tcf_lock);
+ parms = rcu_dereference_bh(p->parms);
+ old_addr = parms->old_addr;
+ new_addr = parms->new_addr;
+ mask = parms->mask;
+ egress = parms->flags & TCA_NAT_FLAG_EGRESS;
if (unlikely(action == TC_ACT_SHOT))
goto drop;
@@ -248,9 +260,7 @@ out:
return action;
drop:
- spin_lock(&p->tcf_lock);
- p->tcf_qstats.drops++;
- spin_unlock(&p->tcf_lock);
+ tcf_action_inc_drop_qstats(&p->common);
return TC_ACT_SHOT;
}
@@ -264,15 +274,20 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
.refcnt = refcount_read(&p->tcf_refcnt) - ref,
.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
};
+ struct tcf_nat_parms *parms;
struct tcf_t t;
spin_lock_bh(&p->tcf_lock);
- opt.old_addr = p->old_addr;
- opt.new_addr = p->new_addr;
- opt.mask = p->mask;
- opt.flags = p->flags;
+
opt.action = p->tcf_action;
+ parms = rcu_dereference_protected(p->parms, lockdep_is_held(&p->tcf_lock));
+
+ opt.old_addr = parms->old_addr;
+ opt.new_addr = parms->new_addr;
+ opt.mask = parms->mask;
+ opt.flags = parms->flags;
+
if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -289,6 +304,16 @@ nla_put_failure:
return -1;
}
+static void tcf_nat_cleanup(struct tc_action *a)
+{
+ struct tcf_nat *p = to_tcf_nat(a);
+ struct tcf_nat_parms *parms;
+
+ parms = rcu_dereference_protected(p->parms, 1);
+ if (parms)
+ kfree_rcu(parms, rcu);
+}
+
static struct tc_action_ops act_nat_ops = {
.kind = "nat",
.id = TCA_ID_NAT,
@@ -296,6 +321,7 @@ static struct tc_action_ops act_nat_ops = {
.act = tcf_nat_act,
.dump = tcf_nat_dump,
.init = tcf_nat_init,
+ .cleanup = tcf_nat_cleanup,
.size = sizeof(struct tcf_nat),
};
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0378e9f0121..77d288d384ae 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -134,6 +134,17 @@ nla_failure:
return -EINVAL;
}
+static void tcf_pedit_cleanup_rcu(struct rcu_head *head)
+{
+ struct tcf_pedit_parms *parms =
+ container_of(head, struct tcf_pedit_parms, rcu);
+
+ kfree(parms->tcfp_keys_ex);
+ kfree(parms->tcfp_keys);
+
+ kfree(parms);
+}
+
static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
struct tcf_proto *tp, u32 flags,
@@ -141,10 +152,9 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
{
struct tc_action_net *tn = net_generic(net, act_pedit_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
- struct nlattr *tb[TCA_PEDIT_MAX + 1];
struct tcf_chain *goto_ch = NULL;
- struct tc_pedit_key *keys = NULL;
- struct tcf_pedit_key_ex *keys_ex;
+ struct tcf_pedit_parms *oparms, *nparms;
+ struct nlattr *tb[TCA_PEDIT_MAX + 1];
struct tc_pedit *parm;
struct nlattr *pattr;
struct tcf_pedit *p;
@@ -181,18 +191,25 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
return -EINVAL;
}
- keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
- if (IS_ERR(keys_ex))
- return PTR_ERR(keys_ex);
+ nparms = kzalloc(sizeof(*nparms), GFP_KERNEL);
+ if (!nparms)
+ return -ENOMEM;
+
+ nparms->tcfp_keys_ex =
+ tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
+ if (IS_ERR(nparms->tcfp_keys_ex)) {
+ ret = PTR_ERR(nparms->tcfp_keys_ex);
+ goto out_free;
+ }
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, index, est, a,
- &act_pedit_ops, bind, false, flags);
+ ret = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_pedit_ops, bind, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
- goto out_free;
+ goto out_free_ex;
}
ret = ACT_P_CREATED;
} else if (err > 0) {
@@ -204,7 +221,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
}
} else {
ret = err;
- goto out_free;
+ goto out_free_ex;
}
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
@@ -212,48 +229,50 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
ret = err;
goto out_release;
}
- p = to_pedit(*a);
- spin_lock_bh(&p->tcf_lock);
- if (ret == ACT_P_CREATED ||
- (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys)) {
- keys = kmalloc(ksize, GFP_ATOMIC);
- if (!keys) {
- spin_unlock_bh(&p->tcf_lock);
- ret = -ENOMEM;
- goto put_chain;
- }
- kfree(p->tcfp_keys);
- p->tcfp_keys = keys;
- p->tcfp_nkeys = parm->nkeys;
+ nparms->tcfp_off_max_hint = 0;
+ nparms->tcfp_flags = parm->flags;
+ nparms->tcfp_nkeys = parm->nkeys;
+
+ nparms->tcfp_keys = kmalloc(ksize, GFP_KERNEL);
+ if (!nparms->tcfp_keys) {
+ ret = -ENOMEM;
+ goto put_chain;
}
- memcpy(p->tcfp_keys, parm->keys, ksize);
- p->tcfp_off_max_hint = 0;
- for (i = 0; i < p->tcfp_nkeys; ++i) {
- u32 cur = p->tcfp_keys[i].off;
+
+ memcpy(nparms->tcfp_keys, parm->keys, ksize);
+
+ for (i = 0; i < nparms->tcfp_nkeys; ++i) {
+ u32 cur = nparms->tcfp_keys[i].off;
/* sanitize the shift value for any later use */
- p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
- p->tcfp_keys[i].shift);
+ nparms->tcfp_keys[i].shift = min_t(size_t,
+ BITS_PER_TYPE(int) - 1,
+ nparms->tcfp_keys[i].shift);
/* The AT option can read a single byte, we can bound the actual
* value with uchar max.
*/
- cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift;
+ cur += (0xff & nparms->tcfp_keys[i].offmask) >> nparms->tcfp_keys[i].shift;
/* Each key touches 4 bytes starting from the computed offset */
- p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4);
+ nparms->tcfp_off_max_hint =
+ max(nparms->tcfp_off_max_hint, cur + 4);
}
- p->tcfp_flags = parm->flags;
+ p = to_pedit(*a);
+
+ spin_lock_bh(&p->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+ oparms = rcu_replace_pointer(p->parms, nparms, 1);
+ spin_unlock_bh(&p->tcf_lock);
- kfree(p->tcfp_keys_ex);
- p->tcfp_keys_ex = keys_ex;
+ if (oparms)
+ call_rcu(&oparms->rcu, tcf_pedit_cleanup_rcu);
- spin_unlock_bh(&p->tcf_lock);
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
+
return ret;
put_chain:
@@ -261,19 +280,22 @@ put_chain:
tcf_chain_put_by_act(goto_ch);
out_release:
tcf_idr_release(*a, bind);
+out_free_ex:
+ kfree(nparms->tcfp_keys_ex);
out_free:
- kfree(keys_ex);
+ kfree(nparms);
return ret;
-
}
static void tcf_pedit_cleanup(struct tc_action *a)
{
struct tcf_pedit *p = to_pedit(a);
- struct tc_pedit_key *keys = p->tcfp_keys;
+ struct tcf_pedit_parms *parms;
- kfree(keys);
- kfree(p->tcfp_keys_ex);
+ parms = rcu_dereference_protected(p->parms, 1);
+
+ if (parms)
+ call_rcu(&parms->rcu, tcf_pedit_cleanup_rcu);
}
static bool offset_valid(struct sk_buff *skb, int offset)
@@ -324,109 +346,105 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
const struct tc_action *a,
struct tcf_result *res)
{
+ enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
struct tcf_pedit *p = to_pedit(a);
+ struct tcf_pedit_key_ex *tkey_ex;
+ struct tcf_pedit_parms *parms;
+ struct tc_pedit_key *tkey;
u32 max_offset;
int i;
- spin_lock(&p->tcf_lock);
+ parms = rcu_dereference_bh(p->parms);
max_offset = (skb_transport_header_was_set(skb) ?
skb_transport_offset(skb) :
skb_network_offset(skb)) +
- p->tcfp_off_max_hint;
+ parms->tcfp_off_max_hint;
if (skb_ensure_writable(skb, min(skb->len, max_offset)))
- goto unlock;
+ goto done;
tcf_lastuse_update(&p->tcf_tm);
+ tcf_action_update_bstats(&p->common, skb);
- if (p->tcfp_nkeys > 0) {
- struct tc_pedit_key *tkey = p->tcfp_keys;
- struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex;
- enum pedit_header_type htype =
- TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
- enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
-
- for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
- u32 *ptr, hdata;
- int offset = tkey->off;
- int hoffset;
- u32 val;
- int rc;
-
- if (tkey_ex) {
- htype = tkey_ex->htype;
- cmd = tkey_ex->cmd;
-
- tkey_ex++;
- }
+ tkey = parms->tcfp_keys;
+ tkey_ex = parms->tcfp_keys_ex;
- rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
- if (rc) {
- pr_info("tc action pedit bad header type specified (0x%x)\n",
- htype);
- goto bad;
- }
+ for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
+ int offset = tkey->off;
+ u32 *ptr, hdata;
+ int hoffset;
+ u32 val;
+ int rc;
- if (tkey->offmask) {
- u8 *d, _d;
-
- if (!offset_valid(skb, hoffset + tkey->at)) {
- pr_info("tc action pedit 'at' offset %d out of bounds\n",
- hoffset + tkey->at);
- goto bad;
- }
- d = skb_header_pointer(skb, hoffset + tkey->at,
- sizeof(_d), &_d);
- if (!d)
- goto bad;
- offset += (*d & tkey->offmask) >> tkey->shift;
- }
+ if (tkey_ex) {
+ htype = tkey_ex->htype;
+ cmd = tkey_ex->cmd;
- if (offset % 4) {
- pr_info("tc action pedit offset must be on 32 bit boundaries\n");
- goto bad;
- }
+ tkey_ex++;
+ }
- if (!offset_valid(skb, hoffset + offset)) {
- pr_info("tc action pedit offset %d out of bounds\n",
- hoffset + offset);
- goto bad;
- }
+ rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
+ if (rc) {
+ pr_info("tc action pedit bad header type specified (0x%x)\n",
+ htype);
+ goto bad;
+ }
- ptr = skb_header_pointer(skb, hoffset + offset,
- sizeof(hdata), &hdata);
- if (!ptr)
- goto bad;
- /* just do it, baby */
- switch (cmd) {
- case TCA_PEDIT_KEY_EX_CMD_SET:
- val = tkey->val;
- break;
- case TCA_PEDIT_KEY_EX_CMD_ADD:
- val = (*ptr + tkey->val) & ~tkey->mask;
- break;
- default:
- pr_info("tc action pedit bad command (%d)\n",
- cmd);
+ if (tkey->offmask) {
+ u8 *d, _d;
+
+ if (!offset_valid(skb, hoffset + tkey->at)) {
+ pr_info("tc action pedit 'at' offset %d out of bounds\n",
+ hoffset + tkey->at);
goto bad;
}
+ d = skb_header_pointer(skb, hoffset + tkey->at,
+ sizeof(_d), &_d);
+ if (!d)
+ goto bad;
+ offset += (*d & tkey->offmask) >> tkey->shift;
+ }
- *ptr = ((*ptr & tkey->mask) ^ val);
- if (ptr == &hdata)
- skb_store_bits(skb, hoffset + offset, ptr, 4);
+ if (offset % 4) {
+ pr_info("tc action pedit offset must be on 32 bit boundaries\n");
+ goto bad;
}
- goto done;
- } else {
- WARN(1, "pedit BUG: index %d\n", p->tcf_index);
+ if (!offset_valid(skb, hoffset + offset)) {
+ pr_info("tc action pedit offset %d out of bounds\n",
+ hoffset + offset);
+ goto bad;
+ }
+
+ ptr = skb_header_pointer(skb, hoffset + offset,
+ sizeof(hdata), &hdata);
+ if (!ptr)
+ goto bad;
+ /* just do it, baby */
+ switch (cmd) {
+ case TCA_PEDIT_KEY_EX_CMD_SET:
+ val = tkey->val;
+ break;
+ case TCA_PEDIT_KEY_EX_CMD_ADD:
+ val = (*ptr + tkey->val) & ~tkey->mask;
+ break;
+ default:
+ pr_info("tc action pedit bad command (%d)\n",
+ cmd);
+ goto bad;
+ }
+
+ *ptr = ((*ptr & tkey->mask) ^ val);
+ if (ptr == &hdata)
+ skb_store_bits(skb, hoffset + offset, ptr, 4);
}
+ goto done;
+
bad:
- p->tcf_qstats.overlimits++;
+ tcf_action_inc_overlimit_qstats(&p->common);
done:
- bstats_update(&p->tcf_bstats, skb);
-unlock:
- spin_unlock(&p->tcf_lock);
return p->tcf_action;
}
@@ -445,30 +463,33 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_pedit *p = to_pedit(a);
+ struct tcf_pedit_parms *parms;
struct tc_pedit *opt;
struct tcf_t t;
int s;
- s = struct_size(opt, keys, p->tcfp_nkeys);
+ spin_lock_bh(&p->tcf_lock);
+ parms = rcu_dereference_protected(p->parms, 1);
+ s = struct_size(opt, keys, parms->tcfp_nkeys);
- /* netlink spinlocks held above us - must use ATOMIC */
opt = kzalloc(s, GFP_ATOMIC);
- if (unlikely(!opt))
+ if (unlikely(!opt)) {
+ spin_unlock_bh(&p->tcf_lock);
return -ENOBUFS;
+ }
- spin_lock_bh(&p->tcf_lock);
- memcpy(opt->keys, p->tcfp_keys, flex_array_size(opt, keys, p->tcfp_nkeys));
+ memcpy(opt->keys, parms->tcfp_keys,
+ flex_array_size(opt, keys, parms->tcfp_nkeys));
opt->index = p->tcf_index;
- opt->nkeys = p->tcfp_nkeys;
- opt->flags = p->tcfp_flags;
+ opt->nkeys = parms->tcfp_nkeys;
+ opt->flags = parms->tcfp_flags;
opt->action = p->tcf_action;
opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
- if (p->tcfp_keys_ex) {
- if (tcf_pedit_key_ex_dump(skb,
- p->tcfp_keys_ex,
- p->tcfp_nkeys))
+ if (parms->tcfp_keys_ex) {
+ if (tcf_pedit_key_ex_dump(skb, parms->tcfp_keys_ex,
+ parms->tcfp_nkeys))
goto nla_put_failure;
if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
@@ -522,7 +543,28 @@ static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
}
*index_inc = k;
} else {
- return -EOPNOTSUPP;
+ struct flow_offload_action *fl_action = entry_data;
+ u32 cmd = tcf_pedit_cmd(act, 0);
+ int k;
+
+ switch (cmd) {
+ case TCA_PEDIT_KEY_EX_CMD_SET:
+ fl_action->id = FLOW_ACTION_MANGLE;
+ break;
+ case TCA_PEDIT_KEY_EX_CMD_ADD:
+ fl_action->id = FLOW_ACTION_ADD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit command offload");
+ return -EOPNOTSUPP;
+ }
+
+ for (k = 1; k < tcf_pedit_nkeys(act); k++) {
+ if (cmd != tcf_pedit_cmd(act, k)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit command offload");
+ return -EOPNOTSUPP;
+ }
+ }
}
return 0;
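
With pedit's keys now behind RCU, the per-key bookkeeping runs once at configure time: each shift is clamped and a worst-case offset hint is precomputed so tcf_pedit_act() needs only a single skb_ensure_writable() up front. The hint arithmetic extracted into a standalone sketch (reduced key struct; the kernel additionally clamps shift to BITS_PER_TYPE(int) - 1):

#include <stdint.h>
#include <stdio.h>

struct pedit_key {
        uint32_t off;           /* static offset of the 32-bit word */
        uint8_t offmask;        /* mask applied to the 'at' byte */
        uint8_t shift;          /* shift applied after masking */
};

static uint32_t off_max_hint(const struct pedit_key *k, int nkeys)
{
        uint32_t hint = 0;

        for (int i = 0; i < nkeys; i++) {
                uint32_t cur = k[i].off;

                /* the AT option reads a single byte, so 0xff bounds it */
                cur += (uint32_t)(0xff & k[i].offmask) >> k[i].shift;
                /* each key touches 4 bytes from the computed offset */
                if (cur + 4 > hint)
                        hint = cur + 4;
        }
        return hint;
}

int main(void)
{
        struct pedit_key k[] = { { 12, 0x00, 0 }, { 20, 0xf0, 4 } };

        /* 20 + (0xf0 >> 4) + 4 = 39 */
        printf("max offset hint: %u\n", off_max_hint(k, 2));
        return 0;
}
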
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 668130f08903..3569e2c3660c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -22,6 +22,7 @@
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
+#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -50,6 +51,109 @@ static LIST_HEAD(tcf_proto_base);
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);
+static struct xarray tcf_exts_miss_cookies_xa;
+struct tcf_exts_miss_cookie_node {
+ const struct tcf_chain *chain;
+ const struct tcf_proto *tp;
+ const struct tcf_exts *exts;
+ u32 chain_index;
+ u32 tp_prio;
+ u32 handle;
+ u32 miss_cookie_base;
+ struct rcu_head rcu;
+};
+
+/* Each tc action entry cookie will be comprised of 32bit miss_cookie_base +
+ * action index in the exts tc actions array.
+ */
+union tcf_exts_miss_cookie {
+ struct {
+ u32 miss_cookie_base;
+ u32 act_index;
+ };
+ u64 miss_cookie;
+};
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static int
+tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
+ u32 handle)
+{
+ struct tcf_exts_miss_cookie_node *n;
+ static u32 next;
+ int err;
+
+ if (WARN_ON(!handle || !tp->ops->get_exts))
+ return -EINVAL;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->chain_index = tp->chain->index;
+ n->chain = tp->chain;
+ n->tp_prio = tp->prio;
+ n->tp = tp;
+ n->exts = exts;
+ n->handle = handle;
+
+ err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
+ n, xa_limit_32b, &next, GFP_KERNEL);
+ if (err)
+ goto err_xa_alloc;
+
+ exts->miss_cookie_node = n;
+ return 0;
+
+err_xa_alloc:
+ kfree(n);
+ return err;
+}
+
+static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
+{
+ struct tcf_exts_miss_cookie_node *n;
+
+ if (!exts->miss_cookie_node)
+ return;
+
+ n = exts->miss_cookie_node;
+ xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
+ kfree_rcu(n, rcu);
+}
+
+static struct tcf_exts_miss_cookie_node *
+tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
+{
+ union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };
+
+ *act_index = mc.act_index;
+ return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
+}
+#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
+static int
+tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
+ u32 handle)
+{
+ return 0;
+}
+
+static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
+{
+}
+#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
+
+static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
+{
+ union tcf_exts_miss_cookie mc = { .act_index = act_index, };
+
+ if (!miss_cookie_base)
+ return 0;
+
+ mc.miss_cookie_base = miss_cookie_base;
+ return mc.miss_cookie;
+}
+
#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);
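
The encode half of union tcf_exts_miss_cookie above can be exercised on its own: a 32-bit xarray-allocated base naming (chain, tp, exts) is packed with the index of the next action to execute into the one 64-bit value programmed into hardware. A standalone demonstration with shortened field names:

#include <stdint.h>
#include <stdio.h>

union miss_cookie {
        struct {
                uint32_t base;          /* xarray id for (chain, tp, exts) */
                uint32_t act_index;     /* next action to execute */
        };
        uint64_t cookie;
};

int main(void)
{
        union miss_cookie enc = { .base = 42, .act_index = 3 };
        uint64_t wire = enc.cookie;     /* value reported back by hardware */
        union miss_cookie dec = { .cookie = wire };

        printf("base=%u act_index=%u\n", dec.base, dec.act_index);
        return 0;
}
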
@@ -488,7 +592,8 @@ static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
#endif
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
- u32 seq, u16 flags, int event, bool unicast);
+ u32 seq, u16 flags, int event, bool unicast,
+ struct netlink_ext_ack *extack);
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
u32 chain_index, bool create,
@@ -521,7 +626,7 @@ static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
*/
if (is_first_reference && !by_act)
tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
- RTM_NEWCHAIN, false);
+ RTM_NEWCHAIN, false, NULL);
return chain;
@@ -1548,6 +1653,8 @@ static inline int __tcf_classify(struct sk_buff *skb,
const struct tcf_proto *orig_tp,
struct tcf_result *res,
bool compat_mode,
+ struct tcf_exts_miss_cookie_node *n,
+ int act_index,
u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
@@ -1559,13 +1666,36 @@ reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
__be16 protocol = skb_protocol(skb, false);
- int err;
+ int err = 0;
- if (tp->protocol != protocol &&
- tp->protocol != htons(ETH_P_ALL))
- continue;
+ if (n) {
+ struct tcf_exts *exts;
+
+ if (n->tp_prio != tp->prio)
+ continue;
+
+ /* We re-lookup the tp and chain based on index instead
+ * of having hard refs and locks to them, so do a sanity
+ * check if any of tp, chain, exts was replaced by the
+ * time we got here with a cookie from hardware.
+ */
+ if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
+ !tp->ops->get_exts))
+ return TC_ACT_SHOT;
+
+ exts = tp->ops->get_exts(tp, n->handle);
+ if (unlikely(!exts || n->exts != exts))
+ return TC_ACT_SHOT;
- err = tc_classify(skb, tp, res);
+ n = NULL;
+ err = tcf_exts_exec_ex(skb, exts, act_index, res);
+ } else {
+ if (tp->protocol != protocol &&
+ tp->protocol != htons(ETH_P_ALL))
+ continue;
+
+ err = tc_classify(skb, tp, res);
+ }
#ifdef CONFIG_NET_CLS_ACT
if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
first_tp = orig_tp;
@@ -1581,6 +1711,9 @@ reclassify:
return err;
}
+ if (unlikely(n))
+ return TC_ACT_SHOT;
+
return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
@@ -1605,21 +1738,35 @@ int tcf_classify(struct sk_buff *skb,
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 last_executed_chain = 0;
- return __tcf_classify(skb, tp, tp, res, compat_mode,
+ return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
&last_executed_chain);
#else
u32 last_executed_chain = tp ? tp->chain->index : 0;
+ struct tcf_exts_miss_cookie_node *n = NULL;
const struct tcf_proto *orig_tp = tp;
struct tc_skb_ext *ext;
+ int act_index = 0;
int ret;
if (block) {
ext = skb_ext_find(skb, TC_SKB_EXT);
- if (ext && ext->chain) {
+ if (ext && (ext->chain || ext->act_miss)) {
struct tcf_chain *fchain;
+ u32 chain;
+
+ if (ext->act_miss) {
+ n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
+ &act_index);
+ if (!n)
+ return TC_ACT_SHOT;
- fchain = tcf_chain_lookup_rcu(block, ext->chain);
+ chain = n->chain_index;
+ } else {
+ chain = ext->chain;
+ }
+
+ fchain = tcf_chain_lookup_rcu(block, chain);
if (!fchain)
return TC_ACT_SHOT;
@@ -1631,7 +1778,7 @@ int tcf_classify(struct sk_buff *skb,
}
}
- ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
+ ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
&last_executed_chain);
if (tc_skb_ext_tc_enabled()) {
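
Resuming from a hardware miss is therefore a continuation, not a reclassification: the cookie's base recovers the exts, and its act_index says which action hardware stopped before, so software picks up mid-chain via tcf_exts_exec_ex(). A toy continuation model; the strings stand in for tc_action entries:

#include <stdio.h>

static const char * const acts[] = { "tunnel_key", "pedit", "mirred" };
#define NR_ACTS (int)(sizeof(acts) / sizeof(acts[0]))

static int exec_from(int act_index)
{
        for (int i = act_index; i < NR_ACTS; i++)
                printf("sw exec: %s\n", acts[i]);
        return 0;
}

int main(void)
{
        return exec_from(1);    /* hw already ran acts[0] */
}
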
@@ -1817,7 +1964,8 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
struct tcf_proto *tp, struct tcf_block *block,
struct Qdisc *q, u32 parent, void *fh,
u32 portid, u32 seq, u16 flags, int event,
- bool terse_dump, bool rtnl_held)
+ bool terse_dump, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
@@ -1857,7 +2005,13 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
goto nla_put_failure;
}
+
+ if (extack && extack->_msg &&
+ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
+ goto nla_put_failure;
+
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+
return skb->len;
out_nlmsg_trim:
@@ -1871,7 +2025,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct tcf_proto *tp,
struct tcf_block *block, struct Qdisc *q,
u32 parent, void *fh, int event, bool unicast,
- bool rtnl_held)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -1883,7 +2037,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
n->nlmsg_seq, n->nlmsg_flags, event,
- false, rtnl_held) <= 0) {
+ false, rtnl_held, extack) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -1912,7 +2066,7 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
- false, rtnl_held) <= 0) {
+ false, rtnl_held, extack) <= 0) {
NL_SET_ERR_MSG(extack, "Failed to build del event notification");
kfree_skb(skb);
return -EINVAL;
@@ -1938,14 +2092,15 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
struct tcf_block *block, struct Qdisc *q,
u32 parent, struct nlmsghdr *n,
- struct tcf_chain *chain, int event)
+ struct tcf_chain *chain, int event,
+ struct netlink_ext_ack *extack)
{
struct tcf_proto *tp;
for (tp = tcf_get_next_proto(chain, NULL);
tp; tp = tcf_get_next_proto(chain, tp))
- tfilter_notify(net, oskb, n, tp, block,
- q, parent, NULL, event, false, true);
+ tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
+ event, false, true, extack);
}
static void tfilter_put(struct tcf_proto *tp, void *fh)
@@ -2156,7 +2311,7 @@ replay:
flags, extack);
if (err == 0) {
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
- RTM_NEWTFILTER, false, rtnl_held);
+ RTM_NEWTFILTER, false, rtnl_held, extack);
tfilter_put(tp, fh);
/* q pointer is NULL for shared blocks */
if (q)
@@ -2284,7 +2439,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
if (prio == 0) {
tfilter_notify_chain(net, skb, block, q, parent, n,
- chain, RTM_DELTFILTER);
+ chain, RTM_DELTFILTER, extack);
tcf_chain_flush(chain, rtnl_held);
err = 0;
goto errout;
@@ -2308,7 +2463,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
tcf_proto_put(tp, rtnl_held, NULL);
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
- RTM_DELTFILTER, false, rtnl_held);
+ RTM_DELTFILTER, false, rtnl_held, extack);
err = 0;
goto errout;
}
@@ -2452,7 +2607,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
err = -ENOENT;
} else {
err = tfilter_notify(net, skb, n, tp, block, q, parent,
- fh, RTM_NEWTFILTER, true, rtnl_held);
+ fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
if (err < 0)
NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
}
@@ -2490,7 +2645,7 @@ static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
n, NETLINK_CB(a->cb->skb).portid,
a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWTFILTER, a->terse_dump, true);
+ RTM_NEWTFILTER, a->terse_dump, true, NULL);
}
static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
@@ -2524,7 +2679,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWTFILTER, false, true) <= 0)
+ RTM_NEWTFILTER, false, true, NULL) <= 0)
goto errout;
cb->args[1] = 1;
}
@@ -2667,7 +2822,8 @@ static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
void *tmplt_priv, u32 chain_index,
struct net *net, struct sk_buff *skb,
struct tcf_block *block,
- u32 portid, u32 seq, u16 flags, int event)
+ u32 portid, u32 seq, u16 flags, int event,
+ struct netlink_ext_ack *extack)
{
unsigned char *b = skb_tail_pointer(skb);
const struct tcf_proto_ops *ops;
@@ -2704,7 +2860,12 @@ static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
goto nla_put_failure;
}
+ if (extack && extack->_msg &&
+ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
+ goto out_nlmsg_trim;
+
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+
return skb->len;
out_nlmsg_trim:
@@ -2714,7 +2875,8 @@ nla_put_failure:
}
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
- u32 seq, u16 flags, int event, bool unicast)
+ u32 seq, u16 flags, int event, bool unicast,
+ struct netlink_ext_ack *extack)
{
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
struct tcf_block *block = chain->block;
@@ -2728,7 +2890,7 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
chain->index, net, skb, block, portid,
- seq, flags, event) <= 0) {
+ seq, flags, event, extack) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -2756,7 +2918,7 @@ static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
return -ENOBUFS;
if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
- block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
+ block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -2908,11 +3070,11 @@ replay:
}
tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
- RTM_NEWCHAIN, false);
+ RTM_NEWCHAIN, false, extack);
break;
case RTM_DELCHAIN:
tfilter_notify_chain(net, skb, block, q, parent, n,
- chain, RTM_DELTFILTER);
+ chain, RTM_DELTFILTER, extack);
/* Flush the chain first as the user requested chain removal. */
tcf_chain_flush(chain, true);
/* In case the chain was successfully deleted, put a reference
@@ -2922,7 +3084,7 @@ replay:
break;
case RTM_GETCHAIN:
err = tc_chain_notify(chain, skb, n->nlmsg_seq,
- n->nlmsg_flags, n->nlmsg_type, true);
+ n->nlmsg_flags, n->nlmsg_type, true, extack);
if (err < 0)
NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
break;
@@ -3022,7 +3184,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
chain->index, net, skb, block,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWCHAIN);
+ RTM_NEWCHAIN, NULL);
if (err <= 0)
break;
index++;
@@ -3040,9 +3202,48 @@ out:
return skb->len;
}
+int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
+ int police, struct tcf_proto *tp, u32 handle,
+ bool use_action_miss)
+{
+ int err = 0;
+
+#ifdef CONFIG_NET_CLS_ACT
+ exts->type = 0;
+ exts->nr_actions = 0;
+ /* Note: we do not yet own a reference on net.
+ * This reference might be taken later from tcf_exts_get_net().
+ */
+ exts->net = net;
+ exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
+ GFP_KERNEL);
+ if (!exts->actions)
+ return -ENOMEM;
+#endif
+
+ exts->action = action;
+ exts->police = police;
+
+ if (!use_action_miss)
+ return 0;
+
+ err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
+ if (err)
+ goto err_miss_alloc;
+
+ return 0;
+
+err_miss_alloc:
+ tcf_exts_destroy(exts);
+ return err;
+}
+EXPORT_SYMBOL(tcf_exts_init_ex);
+
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
+ tcf_exts_miss_cookie_base_destroy(exts);
+
if (exts->actions) {
tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
kfree(exts->actions);
@@ -3474,28 +3675,28 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);
-static int tcf_act_get_cookie(struct flow_action_entry *entry,
- const struct tc_action *act)
+static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
+ const struct tc_action *act)
{
- struct tc_cookie *cookie;
+ struct tc_cookie *user_cookie;
int err = 0;
rcu_read_lock();
- cookie = rcu_dereference(act->act_cookie);
- if (cookie) {
- entry->cookie = flow_action_cookie_create(cookie->data,
- cookie->len,
- GFP_ATOMIC);
- if (!entry->cookie)
+ user_cookie = rcu_dereference(act->user_cookie);
+ if (user_cookie) {
+ entry->user_cookie = flow_action_cookie_create(user_cookie->data,
+ user_cookie->len,
+ GFP_ATOMIC);
+ if (!entry->user_cookie)
err = -ENOMEM;
}
rcu_read_unlock();
return err;
}
-static void tcf_act_put_cookie(struct flow_action_entry *entry)
+static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
{
- flow_action_cookie_destroy(entry->cookie);
+ flow_action_cookie_destroy(entry->user_cookie);
}
void tc_cleanup_offload_action(struct flow_action *flow_action)
@@ -3504,7 +3705,7 @@ void tc_cleanup_offload_action(struct flow_action *flow_action)
int i;
flow_action_for_each(i, entry, flow_action) {
- tcf_act_put_cookie(entry);
+ tcf_act_put_user_cookie(entry);
if (entry->destructor)
entry->destructor(entry->destructor_priv);
}
@@ -3531,6 +3732,7 @@ static int tc_setup_offload_act(struct tc_action *act,
int tc_setup_action(struct flow_action *flow_action,
struct tc_action *actions[],
+ u32 miss_cookie_base,
struct netlink_ext_ack *extack)
{
int i, j, k, index, err = 0;
@@ -3549,7 +3751,7 @@ int tc_setup_action(struct flow_action *flow_action,
entry = &flow_action->entries[j];
spin_lock_bh(&act->tcfa_lock);
- err = tcf_act_get_cookie(entry, act);
+ err = tcf_act_get_user_cookie(entry, act);
if (err)
goto err_out_locked;
@@ -3561,6 +3763,9 @@ int tc_setup_action(struct flow_action *flow_action,
for (k = 0; k < index ; k++) {
entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
entry[k].hw_index = act->tcfa_index;
+ entry[k].cookie = (unsigned long)act;
+ entry[k].miss_cookie =
+ tcf_exts_miss_cookie_get(miss_cookie_base, i);
}
j += index;
@@ -3583,10 +3788,15 @@ int tc_setup_offload_action(struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
+ u32 miss_cookie_base;
+
if (!exts)
return 0;
- return tc_setup_action(flow_action, exts->actions, extack);
+ miss_cookie_base = exts->miss_cookie_node ?
+ exts->miss_cookie_node->miss_cookie_base : 0;
+ return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
+ extack);
#else
return 0;
#endif
@@ -3754,6 +3964,8 @@ static int __init tc_filter_init(void)
if (err)
goto err_register_pernet_subsys;
+ xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
+
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
RTNL_FLAG_DOIT_UNLOCKED);
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 0b15698b3531..e960a46b0520 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -502,12 +502,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
rtnl_held);
- tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
- cls_flower.stats.pkts,
- cls_flower.stats.drops,
- cls_flower.stats.lastused,
- cls_flower.stats.used_hw_stats,
- cls_flower.stats.used_hw_stats_valid);
+ tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
}
static void __fl_put(struct cls_fl_filter *f)
@@ -534,6 +529,15 @@ static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
return f;
}
+static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
+{
+ struct cls_fl_head *head = rcu_dereference_bh(tp->root);
+ struct cls_fl_filter *f;
+
+ f = idr_find(&head->handle_idr, handle);
+ return f ? &f->exts : NULL;
+}
+
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
bool *last, bool rtnl_held,
struct netlink_ext_ack *extack)
@@ -2192,10 +2196,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
INIT_LIST_HEAD(&fnew->hw_list);
refcount_set(&fnew->refcnt, 1);
- err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
- if (err < 0)
- goto errout;
-
if (tb[TCA_FLOWER_FLAGS]) {
fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
@@ -2205,15 +2205,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
}
+ if (!fold) {
+ spin_lock(&tp->lock);
+ if (!handle) {
+ handle = 1;
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ INT_MAX, GFP_ATOMIC);
+ } else {
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ handle, GFP_ATOMIC);
+
+ /* Filter with specified handle was concurrently
+ * inserted after initial check in cls_api. This is not
+ * necessarily an error if NLM_F_EXCL is not set in
+ * message flags. Returning EAGAIN will cause cls_api to
+ * try to update concurrently inserted rule.
+ */
+ if (err == -ENOSPC)
+ err = -EAGAIN;
+ }
+ spin_unlock(&tp->lock);
+
+ if (err)
+ goto errout;
+ }
+ fnew->handle = handle;
+
+ err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
+ !tc_skip_hw(fnew->flags));
+ if (err < 0)
+ goto errout_idr;
+
err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
tp->chain->tmplt_priv, flags, fnew->flags,
extack);
if (err)
- goto errout;
+ goto errout_idr;
err = fl_check_assign_mask(head, fnew, fold, mask);
if (err)
- goto errout;
+ goto errout_idr;
err = fl_ht_insert_unique(fnew, fold, &in_ht);
if (err)
@@ -2279,29 +2310,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
refcount_dec(&fold->refcnt);
__fl_put(fold);
} else {
- if (handle) {
- /* user specifies a handle and it doesn't exist */
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
- handle, GFP_ATOMIC);
-
- /* Filter with specified handle was concurrently
- * inserted after initial check in cls_api. This is not
- * necessarily an error if NLM_F_EXCL is not set in
- * message flags. Returning EAGAIN will cause cls_api to
- * try to update the concurrently inserted rule.
- */
- if (err == -ENOSPC)
- err = -EAGAIN;
- } else {
- handle = 1;
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
- INT_MAX, GFP_ATOMIC);
- }
- if (err)
- goto errout_hw;
+ idr_replace(&head->handle_idr, fnew, fnew->handle);
refcount_inc(&fnew->refcnt);
- fnew->handle = handle;
list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
spin_unlock(&tp->lock);
}
@@ -2324,6 +2335,8 @@ errout_hw:
fnew->mask->filter_ht_params);
errout_mask:
fl_mask_put(head, fnew->mask);
+errout_idr:
+ idr_remove(&head->handle_idr, fnew->handle);
errout:
__fl_put(fnew);
errout_tb:
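
Taken together, the fl_change() hunks above switch cls_flower to a reserve-early pattern: the handle is taken in the IDR before extensions and offload are set up, and every later failure unwinds through the new errout_idr label. A condensed sketch under that reading; fl_build() is a hypothetical stand-in for the tcf_exts_init_ex()/fl_set_parms()/mask/offload steps, and only the auto-allocated handle path is shown:

static int fl_insert_sketch(struct cls_fl_head *head, struct tcf_proto *tp,
			    struct cls_fl_filter *fnew)
{
	u32 handle = 1;	/* auto-allocated handle path shown */
	int err;

	/* 1) Reserve the handle up front so concurrent inserts see it. */
	spin_lock(&tp->lock);
	err = idr_alloc_u32(&head->handle_idr, fnew, &handle, INT_MAX,
			    GFP_ATOMIC);
	spin_unlock(&tp->lock);
	if (err)
		return err;
	fnew->handle = handle;

	/* 2) Build the filter; fl_build() is a hypothetical stand-in for
	 *    exts init, parameter parsing, mask assignment and offload.
	 */
	err = fl_build(fnew);
	if (err)
		goto errout_idr;

	/* 3) Publish the fully built filter under the reserved handle. */
	idr_replace(&head->handle_idr, fnew, fnew->handle);
	return 0;

errout_idr:
	idr_remove(&head->handle_idr, fnew->handle);
	return err;
}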
@@ -3441,6 +3454,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.tmplt_create = fl_tmplt_create,
.tmplt_destroy = fl_tmplt_destroy,
.tmplt_dump = fl_tmplt_dump,
+ .get_exts = fl_get_exts,
.owner = THIS_MODULE,
.flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
};
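
The new get_exts op gives core code a classifier-agnostic way to map a handle back to its extensions, for example when matching a hardware miss report to a filter. A minimal caller sketch (the helper name is hypothetical); fl_get_exts() above dereferences tp->root under the BH RCU read lock, so a caller must hold it across any use of the result:

/* Hypothetical helper; the op itself is the diff's addition. */
static struct tcf_exts *tcf_exts_of(const struct tcf_proto *tp, u32 handle)
{
	if (!tp->ops->get_exts)
		return NULL;	/* classifier does not implement the op */
	return tp->ops->get_exts(tp, handle);
}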
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 705f63da2c21..fa3bbd187eb9 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -331,11 +331,7 @@ static void mall_stats_hw_filter(struct tcf_proto *tp,
tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);
- tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes,
- cls_mall.stats.pkts, cls_mall.stats.drops,
- cls_mall.stats.lastused,
- cls_mall.stats.used_hw_stats,
- cls_mall.stats.used_hw_stats_valid);
+ tcf_exts_hw_stats_update(&head->exts, &cls_mall.stats, cls_mall.use_act_stats);
}
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
diff --git a/net/sched/cls_rsvp.c b/net/sched/cls_rsvp.c
deleted file mode 100644
index 03d8619bd9c6..000000000000
--- a/net/sched/cls_rsvp.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * net/sched/cls_rsvp.c Special RSVP packet classifier for IPv4.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <net/ip.h>
-#include <net/netlink.h>
-#include <net/act_api.h>
-#include <net/pkt_cls.h>
-#include <net/tc_wrapper.h>
-
-#define RSVP_DST_LEN 1
-#define RSVP_ID "rsvp"
-#define RSVP_OPS cls_rsvp_ops
-#define RSVP_CLS rsvp_classify
-
-#include "cls_rsvp.h"
-MODULE_LICENSE("GPL");
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
deleted file mode 100644
index 869efba9f834..000000000000
--- a/net/sched/cls_rsvp.h
+++ /dev/null
@@ -1,764 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * net/sched/cls_rsvp.h Template file for RSVPv[46] classifiers.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-/*
- Compared to the general packet classification problem,
- RSVP needs only a few relatively simple rules:
-
- * (dst, protocol) are always specified,
- so that we are able to hash them.
- * src may be exact or may be a wildcard, so that
- we can keep a hash table plus one wildcard entry.
- * source port (or flow label) is important only if src is given.
-
- IMPLEMENTATION.
-
- We use a two-level hash table: the top level is keyed by
- destination address and protocol ID; every bucket contains a list
- of "rsvp sessions", identified by destination address, protocol and
- DPI(="Destination Port ID"): triple (key, mask, offset).
-
- Every bucket has a smaller hash table keyed by source address
- (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
- Every bucket is again a list of "RSVP flows", selected by
- source address and SPI(="Source Port ID" here rather than
- "security parameter index"): triple (key, mask, offset).
-
-
- NOTE 1. All the packets with IPv6 extension headers (except AH and ESP)
- and all fragmented packets go to the best-effort traffic class.
-
-
- NOTE 2. Two "port id"s seem to be redundant; rfc2207 requires
- only one "Generalized Port Identifier". So for classic
- ah, esp (and udp, tcp) both *pi should coincide, or one of them
- should be a wildcard.
-
- At first sight, this redundancy is just a waste of CPU
- resources. But DPI and SPI add the possibility to assign different
- priorities to GPIs. Look also at note 4 about tunnels below.
-
-
- NOTE 3. One complication is the case of tunneled packets.
- We implement it as follows: if the first lookup
- matches a special session with "tunnelhdr" value not zero,
- flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
- In this case, we pull tunnelhdr bytes and restart lookup
- with tunnel ID added to the list of keys. Simple and stupid 8)8)
- It's enough for PIMREG and IPIP.
-
-
- NOTE 4. Two GPIs make it possible to parse even GRE packets.
- E.g. DPI can select ETH_P_IP (and the flags necessary to make
- tunnelhdr correct) in the GRE protocol field while SPI matches
- the GRE key. Is it not nice? 8)8)
-
-
- Well, as a result, despite its simplicity, we get a pretty
- powerful classification engine. */
-
-
-struct rsvp_head {
- u32 tmap[256/32];
- u32 hgenerator;
- u8 tgenerator;
- struct rsvp_session __rcu *ht[256];
- struct rcu_head rcu;
-};
-
-struct rsvp_session {
- struct rsvp_session __rcu *next;
- __be32 dst[RSVP_DST_LEN];
- struct tc_rsvp_gpi dpi;
- u8 protocol;
- u8 tunnelid;
- /* 16 (src,sport) hash slots, and one wildcard source slot */
- struct rsvp_filter __rcu *ht[16 + 1];
- struct rcu_head rcu;
-};
-
-
-struct rsvp_filter {
- struct rsvp_filter __rcu *next;
- __be32 src[RSVP_DST_LEN];
- struct tc_rsvp_gpi spi;
- u8 tunnelhdr;
-
- struct tcf_result res;
- struct tcf_exts exts;
-
- u32 handle;
- struct rsvp_session *sess;
- struct rcu_work rwork;
-};
-
-static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
-{
- unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
-
- h ^= h>>16;
- h ^= h>>8;
- return (h ^ protocol ^ tunnelid) & 0xFF;
-}
-
-static inline unsigned int hash_src(__be32 *src)
-{
- unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
-
- h ^= h>>16;
- h ^= h>>8;
- h ^= h>>4;
- return h & 0xF;
-}
-
-#define RSVP_APPLY_RESULT() \
-{ \
- int r = tcf_exts_exec(skb, &f->exts, res); \
- if (r < 0) \
- continue; \
- else if (r > 0) \
- return r; \
-}
-
-TC_INDIRECT_SCOPE int RSVP_CLS(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res)
-{
- struct rsvp_head *head = rcu_dereference_bh(tp->root);
- struct rsvp_session *s;
- struct rsvp_filter *f;
- unsigned int h1, h2;
- __be32 *dst, *src;
- u8 protocol;
- u8 tunnelid = 0;
- u8 *xprt;
-#if RSVP_DST_LEN == 4
- struct ipv6hdr *nhptr;
-
- if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
- return -1;
- nhptr = ipv6_hdr(skb);
-#else
- struct iphdr *nhptr;
-
- if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
- return -1;
- nhptr = ip_hdr(skb);
-#endif
-restart:
-
-#if RSVP_DST_LEN == 4
- src = &nhptr->saddr.s6_addr32[0];
- dst = &nhptr->daddr.s6_addr32[0];
- protocol = nhptr->nexthdr;
- xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
-#else
- src = &nhptr->saddr;
- dst = &nhptr->daddr;
- protocol = nhptr->protocol;
- xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
- if (ip_is_fragment(nhptr))
- return -1;
-#endif
-
- h1 = hash_dst(dst, protocol, tunnelid);
- h2 = hash_src(src);
-
- for (s = rcu_dereference_bh(head->ht[h1]); s;
- s = rcu_dereference_bh(s->next)) {
- if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
- protocol == s->protocol &&
- !(s->dpi.mask &
- (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
-#if RSVP_DST_LEN == 4
- dst[0] == s->dst[0] &&
- dst[1] == s->dst[1] &&
- dst[2] == s->dst[2] &&
-#endif
- tunnelid == s->tunnelid) {
-
- for (f = rcu_dereference_bh(s->ht[h2]); f;
- f = rcu_dereference_bh(f->next)) {
- if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
- !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
-#if RSVP_DST_LEN == 4
- &&
- src[0] == f->src[0] &&
- src[1] == f->src[1] &&
- src[2] == f->src[2]
-#endif
- ) {
- *res = f->res;
- RSVP_APPLY_RESULT();
-
-matched:
- if (f->tunnelhdr == 0)
- return 0;
-
- tunnelid = f->res.classid;
- nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
- goto restart;
- }
- }
-
- /* And wildcard bucket... */
- for (f = rcu_dereference_bh(s->ht[16]); f;
- f = rcu_dereference_bh(f->next)) {
- *res = f->res;
- RSVP_APPLY_RESULT();
- goto matched;
- }
- return -1;
- }
- }
- return -1;
-}
-
-static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
-{
- struct rsvp_head *head = rtnl_dereference(tp->root);
- struct rsvp_session *s;
- struct rsvp_filter __rcu **ins;
- struct rsvp_filter *pins;
- unsigned int h1 = h & 0xFF;
- unsigned int h2 = (h >> 8) & 0xFF;
-
- for (s = rtnl_dereference(head->ht[h1]); s;
- s = rtnl_dereference(s->next)) {
- for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
- ins = &pins->next, pins = rtnl_dereference(*ins)) {
- if (pins->handle == h) {
- RCU_INIT_POINTER(n->next, pins->next);
- rcu_assign_pointer(*ins, n);
- return;
- }
- }
- }
-
- /* Something went wrong if we are trying to replace a non-existent
- * node. Might as well halt instead of silently failing.
- */
- BUG_ON(1);
-}
-
-static void *rsvp_get(struct tcf_proto *tp, u32 handle)
-{
- struct rsvp_head *head = rtnl_dereference(tp->root);
- struct rsvp_session *s;
- struct rsvp_filter *f;
- unsigned int h1 = handle & 0xFF;
- unsigned int h2 = (handle >> 8) & 0xFF;
-
- if (h2 > 16)
- return NULL;
-
- for (s = rtnl_dereference(head->ht[h1]); s;
- s = rtnl_dereference(s->next)) {
- for (f = rtnl_dereference(s->ht[h2]); f;
- f = rtnl_dereference(f->next)) {
- if (f->handle == handle)
- return f;
- }
- }
- return NULL;
-}
-
-static int rsvp_init(struct tcf_proto *tp)
-{
- struct rsvp_head *data;
-
- data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
- if (data) {
- rcu_assign_pointer(tp->root, data);
- return 0;
- }
- return -ENOBUFS;
-}
-
-static void __rsvp_delete_filter(struct rsvp_filter *f)
-{
- tcf_exts_destroy(&f->exts);
- tcf_exts_put_net(&f->exts);
- kfree(f);
-}
-
-static void rsvp_delete_filter_work(struct work_struct *work)
-{
- struct rsvp_filter *f = container_of(to_rcu_work(work),
- struct rsvp_filter,
- rwork);
- rtnl_lock();
- __rsvp_delete_filter(f);
- rtnl_unlock();
-}
-
-static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
-{
- tcf_unbind_filter(tp, &f->res);
- /* all classifiers are required to call tcf_exts_destroy() after an
- * RCU grace period, since converted-to-rcu actions rely on that
- * in their cleanup() callback
- */
- if (tcf_exts_get_net(&f->exts))
- tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
- else
- __rsvp_delete_filter(f);
-}
-
-static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
- struct netlink_ext_ack *extack)
-{
- struct rsvp_head *data = rtnl_dereference(tp->root);
- int h1, h2;
-
- if (data == NULL)
- return;
-
- for (h1 = 0; h1 < 256; h1++) {
- struct rsvp_session *s;
-
- while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
- RCU_INIT_POINTER(data->ht[h1], s->next);
-
- for (h2 = 0; h2 <= 16; h2++) {
- struct rsvp_filter *f;
-
- while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
- rcu_assign_pointer(s->ht[h2], f->next);
- rsvp_delete_filter(tp, f);
- }
- }
- kfree_rcu(s, rcu);
- }
- }
- kfree_rcu(data, rcu);
-}
-
-static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
- bool rtnl_held, struct netlink_ext_ack *extack)
-{
- struct rsvp_head *head = rtnl_dereference(tp->root);
- struct rsvp_filter *nfp, *f = arg;
- struct rsvp_filter __rcu **fp;
- unsigned int h = f->handle;
- struct rsvp_session __rcu **sp;
- struct rsvp_session *nsp, *s = f->sess;
- int i, h1;
-
- fp = &s->ht[(h >> 8) & 0xFF];
- for (nfp = rtnl_dereference(*fp); nfp;
- fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
- if (nfp == f) {
- RCU_INIT_POINTER(*fp, f->next);
- rsvp_delete_filter(tp, f);
-
- /* Strip tree */
-
- for (i = 0; i <= 16; i++)
- if (s->ht[i])
- goto out;
-
- /* OK, session has no flows */
- sp = &head->ht[h & 0xFF];
- for (nsp = rtnl_dereference(*sp); nsp;
- sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
- if (nsp == s) {
- RCU_INIT_POINTER(*sp, s->next);
- kfree_rcu(s, rcu);
- goto out;
- }
- }
-
- break;
- }
- }
-
-out:
- *last = true;
- for (h1 = 0; h1 < 256; h1++) {
- if (rcu_access_pointer(head->ht[h1])) {
- *last = false;
- break;
- }
- }
-
- return 0;
-}
-
-static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
-{
- struct rsvp_head *data = rtnl_dereference(tp->root);
- int i = 0xFFFF;
-
- while (i-- > 0) {
- u32 h;
-
- if ((data->hgenerator += 0x10000) == 0)
- data->hgenerator = 0x10000;
- h = data->hgenerator|salt;
- if (!rsvp_get(tp, h))
- return h;
- }
- return 0;
-}
-
-static int tunnel_bts(struct rsvp_head *data)
-{
- int n = data->tgenerator >> 5;
- u32 b = 1 << (data->tgenerator & 0x1F);
-
- if (data->tmap[n] & b)
- return 0;
- data->tmap[n] |= b;
- return 1;
-}
-
-static void tunnel_recycle(struct rsvp_head *data)
-{
- struct rsvp_session __rcu **sht = data->ht;
- u32 tmap[256/32];
- int h1, h2;
-
- memset(tmap, 0, sizeof(tmap));
-
- for (h1 = 0; h1 < 256; h1++) {
- struct rsvp_session *s;
- for (s = rtnl_dereference(sht[h1]); s;
- s = rtnl_dereference(s->next)) {
- for (h2 = 0; h2 <= 16; h2++) {
- struct rsvp_filter *f;
-
- for (f = rtnl_dereference(s->ht[h2]); f;
- f = rtnl_dereference(f->next)) {
- if (f->tunnelhdr == 0)
- continue;
- data->tgenerator = f->res.classid;
- tunnel_bts(data);
- }
- }
- }
- }
-
- memcpy(data->tmap, tmap, sizeof(tmap));
-}
-
-static u32 gen_tunnel(struct rsvp_head *data)
-{
- int i, k;
-
- for (k = 0; k < 2; k++) {
- for (i = 255; i > 0; i--) {
- if (++data->tgenerator == 0)
- data->tgenerator = 1;
- if (tunnel_bts(data))
- return data->tgenerator;
- }
- tunnel_recycle(data);
- }
- return 0;
-}
-
-static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
- [TCA_RSVP_CLASSID] = { .type = NLA_U32 },
- [TCA_RSVP_DST] = { .len = RSVP_DST_LEN * sizeof(u32) },
- [TCA_RSVP_SRC] = { .len = RSVP_DST_LEN * sizeof(u32) },
- [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
-};
-
-static int rsvp_change(struct net *net, struct sk_buff *in_skb,
- struct tcf_proto *tp, unsigned long base,
- u32 handle, struct nlattr **tca,
- void **arg, u32 flags,
- struct netlink_ext_ack *extack)
-{
- struct rsvp_head *data = rtnl_dereference(tp->root);
- struct rsvp_filter *f, *nfp;
- struct rsvp_filter __rcu **fp;
- struct rsvp_session *nsp, *s;
- struct rsvp_session __rcu **sp;
- struct tc_rsvp_pinfo *pinfo = NULL;
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_RSVP_MAX + 1];
- struct tcf_exts e;
- unsigned int h1, h2;
- __be32 *dst;
- int err;
-
- if (opt == NULL)
- return handle ? -EINVAL : 0;
-
- err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
- NULL);
- if (err < 0)
- return err;
-
- err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
- if (err < 0)
- return err;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, flags,
- extack);
- if (err < 0)
- goto errout2;
-
- f = *arg;
- if (f) {
- /* Node exists: adjust only classid */
- struct rsvp_filter *n;
-
- if (f->handle != handle && handle)
- goto errout2;
-
- n = kmemdup(f, sizeof(*f), GFP_KERNEL);
- if (!n) {
- err = -ENOMEM;
- goto errout2;
- }
-
- err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
- TCA_RSVP_POLICE);
- if (err < 0) {
- kfree(n);
- goto errout2;
- }
-
- if (tb[TCA_RSVP_CLASSID]) {
- n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
- tcf_bind_filter(tp, &n->res, base);
- }
-
- tcf_exts_change(&n->exts, &e);
- rsvp_replace(tp, n, handle);
- return 0;
- }
-
- /* Now more serious part... */
- err = -EINVAL;
- if (handle)
- goto errout2;
- if (tb[TCA_RSVP_DST] == NULL)
- goto errout2;
-
- err = -ENOBUFS;
- f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
- if (f == NULL)
- goto errout2;
-
- err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
- if (err < 0)
- goto errout;
- h2 = 16;
- if (tb[TCA_RSVP_SRC]) {
- memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
- h2 = hash_src(f->src);
- }
- if (tb[TCA_RSVP_PINFO]) {
- pinfo = nla_data(tb[TCA_RSVP_PINFO]);
- f->spi = pinfo->spi;
- f->tunnelhdr = pinfo->tunnelhdr;
- }
- if (tb[TCA_RSVP_CLASSID])
- f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
-
- dst = nla_data(tb[TCA_RSVP_DST]);
- h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
-
- err = -ENOMEM;
- if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
- goto errout;
-
- if (f->tunnelhdr) {
- err = -EINVAL;
- if (f->res.classid > 255)
- goto errout;
-
- err = -ENOMEM;
- if (f->res.classid == 0 &&
- (f->res.classid = gen_tunnel(data)) == 0)
- goto errout;
- }
-
- for (sp = &data->ht[h1];
- (s = rtnl_dereference(*sp)) != NULL;
- sp = &s->next) {
- if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
- pinfo && pinfo->protocol == s->protocol &&
- memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
-#if RSVP_DST_LEN == 4
- dst[0] == s->dst[0] &&
- dst[1] == s->dst[1] &&
- dst[2] == s->dst[2] &&
-#endif
- pinfo->tunnelid == s->tunnelid) {
-
-insert:
- /* OK, we found an appropriate session */
-
- fp = &s->ht[h2];
-
- f->sess = s;
- if (f->tunnelhdr == 0)
- tcf_bind_filter(tp, &f->res, base);
-
- tcf_exts_change(&f->exts, &e);
-
- fp = &s->ht[h2];
- for (nfp = rtnl_dereference(*fp); nfp;
- fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
- __u32 mask = nfp->spi.mask & f->spi.mask;
-
- if (mask != f->spi.mask)
- break;
- }
- RCU_INIT_POINTER(f->next, nfp);
- rcu_assign_pointer(*fp, f);
-
- *arg = f;
- return 0;
- }
- }
-
- /* No session found. Create new one. */
-
- err = -ENOBUFS;
- s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
- if (s == NULL)
- goto errout;
- memcpy(s->dst, dst, sizeof(s->dst));
-
- if (pinfo) {
- s->dpi = pinfo->dpi;
- s->protocol = pinfo->protocol;
- s->tunnelid = pinfo->tunnelid;
- }
- sp = &data->ht[h1];
- for (nsp = rtnl_dereference(*sp); nsp;
- sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
- if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
- break;
- }
- RCU_INIT_POINTER(s->next, nsp);
- rcu_assign_pointer(*sp, s);
-
- goto insert;
-
-errout:
- tcf_exts_destroy(&f->exts);
- kfree(f);
-errout2:
- tcf_exts_destroy(&e);
- return err;
-}
-
-static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
- bool rtnl_held)
-{
- struct rsvp_head *head = rtnl_dereference(tp->root);
- unsigned int h, h1;
-
- if (arg->stop)
- return;
-
- for (h = 0; h < 256; h++) {
- struct rsvp_session *s;
-
- for (s = rtnl_dereference(head->ht[h]); s;
- s = rtnl_dereference(s->next)) {
- for (h1 = 0; h1 <= 16; h1++) {
- struct rsvp_filter *f;
-
- for (f = rtnl_dereference(s->ht[h1]); f;
- f = rtnl_dereference(f->next)) {
- if (!tc_cls_stats_dump(tp, arg, f))
- return;
- }
- }
- }
- }
-}
-
-static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
-{
- struct rsvp_filter *f = fh;
- struct rsvp_session *s;
- struct nlattr *nest;
- struct tc_rsvp_pinfo pinfo;
-
- if (f == NULL)
- return skb->len;
- s = f->sess;
-
- t->tcm_handle = f->handle;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
-
- if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
- goto nla_put_failure;
- pinfo.dpi = s->dpi;
- pinfo.spi = f->spi;
- pinfo.protocol = s->protocol;
- pinfo.tunnelid = s->tunnelid;
- pinfo.tunnelhdr = f->tunnelhdr;
- pinfo.pad = 0;
- if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
- goto nla_put_failure;
- if (f->res.classid &&
- nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
- goto nla_put_failure;
- if (((f->handle >> 8) & 0xFF) != 16 &&
- nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
- goto nla_put_failure;
-
- if (tcf_exts_dump(skb, &f->exts) < 0)
- goto nla_put_failure;
-
- nla_nest_end(skb, nest);
-
- if (tcf_exts_dump_stats(skb, &f->exts) < 0)
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
- unsigned long base)
-{
- struct rsvp_filter *f = fh;
-
- tc_cls_bind_class(classid, cl, q, &f->res, base);
-}
-
-static struct tcf_proto_ops RSVP_OPS __read_mostly = {
- .kind = RSVP_ID,
- .classify = RSVP_CLS,
- .init = rsvp_init,
- .destroy = rsvp_destroy,
- .get = rsvp_get,
- .change = rsvp_change,
- .delete = rsvp_delete,
- .walk = rsvp_walk,
- .dump = rsvp_dump,
- .bind_class = rsvp_bind_class,
- .owner = THIS_MODULE,
-};
-
-static int __init init_rsvp(void)
-{
- return register_tcf_proto_ops(&RSVP_OPS);
-}
-
-static void __exit exit_rsvp(void)
-{
- unregister_tcf_proto_ops(&RSVP_OPS);
-}
-
-module_init(init_rsvp)
-module_exit(exit_rsvp)
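
For readers skimming the removed template: the two-level lookup described in the header comment boils down to the two small hash functions above. A standalone reproduction for the IPv4 case (RSVP_DST_LEN == 1), with arbitrary sample values:

#include <stdint.h>
#include <stdio.h>

/* Standalone copy of the retired bucket hashing (IPv4 flavour). */
static unsigned int hash_dst(uint32_t dst, uint8_t protocol, uint8_t tunnelid)
{
	unsigned int h = dst;

	h ^= h >> 16;
	h ^= h >> 8;
	return (h ^ protocol ^ tunnelid) & 0xFF;	/* 256 session buckets */
}

static unsigned int hash_src(uint32_t src)
{
	unsigned int h = src;

	h ^= h >> 16;
	h ^= h >> 8;
	h ^= h >> 4;
	return h & 0xF;		/* 16 flow buckets plus one wildcard slot */
}

int main(void)
{
	/* 10.0.0.1 / 10.0.0.2 in network byte order on a little-endian box */
	printf("session bucket %u, flow bucket %u\n",
	       hash_dst(0x0100000au, 46 /* IPPROTO_RSVP */, 0),
	       hash_src(0x0200000au));
	return 0;
}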
diff --git a/net/sched/cls_rsvp6.c b/net/sched/cls_rsvp6.c
deleted file mode 100644
index e627cc32d633..000000000000
--- a/net/sched/cls_rsvp6.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * net/sched/cls_rsvp6.c Special RSVP packet classifier for IPv6.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ipv6.h>
-#include <linux/skbuff.h>
-#include <net/act_api.h>
-#include <net/pkt_cls.h>
-#include <net/netlink.h>
-#include <net/tc_wrapper.h>
-
-#define RSVP_DST_LEN 4
-#define RSVP_ID "rsvp6"
-#define RSVP_OPS cls_rsvp6_ops
-#define RSVP_CLS rsvp6_classify
-
-#include "cls_rsvp.h"
-MODULE_LICENSE("GPL");
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
deleted file mode 100644
index 6640e75eaa02..000000000000
--- a/net/sched/cls_tcindex.c
+++ /dev/null
@@ -1,742 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * net/sched/cls_tcindex.c Packet classifier for skb->tc_index
- *
- * Written 1998,1999 by Werner Almesberger, EPFL ICA
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/refcount.h>
-#include <linux/rcupdate.h>
-#include <net/act_api.h>
-#include <net/netlink.h>
-#include <net/pkt_cls.h>
-#include <net/sch_generic.h>
-#include <net/tc_wrapper.h>
-
-/*
- * Passing parameters to the root seems to be done more awkwardly than really
- * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
- * verified. FIXME.
- */
-
-#define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */
-#define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
-
-
-struct tcindex_data;
-
-struct tcindex_filter_result {
- struct tcf_exts exts;
- struct tcf_result res;
- struct tcindex_data *p;
- struct rcu_work rwork;
-};
-
-struct tcindex_filter {
- u16 key;
- struct tcindex_filter_result result;
- struct tcindex_filter __rcu *next;
- struct rcu_work rwork;
-};
-
-
-struct tcindex_data {
- struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
- struct tcindex_filter __rcu **h; /* imperfect hash; */
- struct tcf_proto *tp;
- u16 mask; /* AND key with mask */
- u32 shift; /* shift ANDed key to the right */
- u32 hash; /* hash table size; 0 if undefined */
- u32 alloc_hash; /* allocated size */
- u32 fall_through; /* 0: only classify if explicit match */
- refcount_t refcnt; /* a temporary refcnt for perfect hash */
- struct rcu_work rwork;
-};
-
-static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
-{
- return tcf_exts_has_actions(&r->exts) || r->res.classid;
-}
-
-static void tcindex_data_get(struct tcindex_data *p)
-{
- refcount_inc(&p->refcnt);
-}
-
-static void tcindex_data_put(struct tcindex_data *p)
-{
- if (refcount_dec_and_test(&p->refcnt)) {
- kfree(p->perfect);
- kfree(p->h);
- kfree(p);
- }
-}
-
-static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
- u16 key)
-{
- if (p->perfect) {
- struct tcindex_filter_result *f = p->perfect + key;
-
- return tcindex_filter_is_set(f) ? f : NULL;
- } else if (p->h) {
- struct tcindex_filter __rcu **fp;
- struct tcindex_filter *f;
-
- fp = &p->h[key % p->hash];
- for (f = rcu_dereference_bh_rtnl(*fp);
- f;
- fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
- if (f->key == key)
- return &f->result;
- }
-
- return NULL;
-}
-
-TC_INDIRECT_SCOPE int tcindex_classify(struct sk_buff *skb,
- const struct tcf_proto *tp,
- struct tcf_result *res)
-{
- struct tcindex_data *p = rcu_dereference_bh(tp->root);
- struct tcindex_filter_result *f;
- int key = (skb->tc_index & p->mask) >> p->shift;
-
- pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
- skb, tp, res, p);
-
- f = tcindex_lookup(p, key);
- if (!f) {
- struct Qdisc *q = tcf_block_q(tp->chain->block);
-
- if (!p->fall_through)
- return -1;
- res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
- res->class = 0;
- pr_debug("alg 0x%x\n", res->classid);
- return 0;
- }
- *res = f->res;
- pr_debug("map 0x%x\n", res->classid);
-
- return tcf_exts_exec(skb, &f->exts, res);
-}
-
-
-static void *tcindex_get(struct tcf_proto *tp, u32 handle)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter_result *r;
-
- pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
- if (p->perfect && handle >= p->alloc_hash)
- return NULL;
- r = tcindex_lookup(p, handle);
- return r && tcindex_filter_is_set(r) ? r : NULL;
-}
-
-static int tcindex_init(struct tcf_proto *tp)
-{
- struct tcindex_data *p;
-
- pr_debug("tcindex_init(tp %p)\n", tp);
- p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->mask = 0xffff;
- p->hash = DEFAULT_HASH_SIZE;
- p->fall_through = 1;
- refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
-
- rcu_assign_pointer(tp->root, p);
- return 0;
-}
-
-static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
-{
- tcf_exts_destroy(&r->exts);
- tcf_exts_put_net(&r->exts);
- tcindex_data_put(r->p);
-}
-
-static void tcindex_destroy_rexts_work(struct work_struct *work)
-{
- struct tcindex_filter_result *r;
-
- r = container_of(to_rcu_work(work),
- struct tcindex_filter_result,
- rwork);
- rtnl_lock();
- __tcindex_destroy_rexts(r);
- rtnl_unlock();
-}
-
-static void __tcindex_destroy_fexts(struct tcindex_filter *f)
-{
- tcf_exts_destroy(&f->result.exts);
- tcf_exts_put_net(&f->result.exts);
- kfree(f);
-}
-
-static void tcindex_destroy_fexts_work(struct work_struct *work)
-{
- struct tcindex_filter *f = container_of(to_rcu_work(work),
- struct tcindex_filter,
- rwork);
-
- rtnl_lock();
- __tcindex_destroy_fexts(f);
- rtnl_unlock();
-}
-
-static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
- bool rtnl_held, struct netlink_ext_ack *extack)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter_result *r = arg;
- struct tcindex_filter __rcu **walk;
- struct tcindex_filter *f = NULL;
-
- pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
- if (p->perfect) {
- if (!r->res.class)
- return -ENOENT;
- } else {
- int i;
-
- for (i = 0; i < p->hash; i++) {
- walk = p->h + i;
- for (f = rtnl_dereference(*walk); f;
- walk = &f->next, f = rtnl_dereference(*walk)) {
- if (&f->result == r)
- goto found;
- }
- }
- return -ENOENT;
-
-found:
- rcu_assign_pointer(*walk, rtnl_dereference(f->next));
- }
- tcf_unbind_filter(tp, &r->res);
- /* all classifiers are required to call tcf_exts_destroy() after an
- * RCU grace period, since converted-to-rcu actions rely on that
- * in their cleanup() callback
- */
- if (f) {
- if (tcf_exts_get_net(&f->result.exts))
- tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
- else
- __tcindex_destroy_fexts(f);
- } else {
- tcindex_data_get(p);
-
- if (tcf_exts_get_net(&r->exts))
- tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
- else
- __tcindex_destroy_rexts(r);
- }
-
- *last = false;
- return 0;
-}
-
-static void tcindex_destroy_work(struct work_struct *work)
-{
- struct tcindex_data *p = container_of(to_rcu_work(work),
- struct tcindex_data,
- rwork);
-
- tcindex_data_put(p);
-}
-
-static inline int
-valid_perfect_hash(struct tcindex_data *p)
-{
- return p->hash > (p->mask >> p->shift);
-}
-
-static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
- [TCA_TCINDEX_HASH] = { .type = NLA_U32 },
- [TCA_TCINDEX_MASK] = { .type = NLA_U16 },
- [TCA_TCINDEX_SHIFT] = { .type = NLA_U32 },
- [TCA_TCINDEX_FALL_THROUGH] = { .type = NLA_U32 },
- [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
-};
-
-static int tcindex_filter_result_init(struct tcindex_filter_result *r,
- struct tcindex_data *p,
- struct net *net)
-{
- memset(r, 0, sizeof(*r));
- r->p = p;
- return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
- TCA_TCINDEX_POLICE);
-}
-
-static void tcindex_free_perfect_hash(struct tcindex_data *cp);
-
-static void tcindex_partial_destroy_work(struct work_struct *work)
-{
- struct tcindex_data *p = container_of(to_rcu_work(work),
- struct tcindex_data,
- rwork);
-
- rtnl_lock();
- if (p->perfect)
- tcindex_free_perfect_hash(p);
- kfree(p);
- rtnl_unlock();
-}
-
-static void tcindex_free_perfect_hash(struct tcindex_data *cp)
-{
- int i;
-
- for (i = 0; i < cp->hash; i++)
- tcf_exts_destroy(&cp->perfect[i].exts);
- kfree(cp->perfect);
-}
-
-static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
-{
- int i, err = 0;
-
- cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
- GFP_KERNEL | __GFP_NOWARN);
- if (!cp->perfect)
- return -ENOMEM;
-
- for (i = 0; i < cp->hash; i++) {
- err = tcf_exts_init(&cp->perfect[i].exts, net,
- TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
- if (err < 0)
- goto errout;
- cp->perfect[i].p = cp;
- }
-
- return 0;
-
-errout:
- tcindex_free_perfect_hash(cp);
- return err;
-}
-
-static int
-tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
- u32 handle, struct tcindex_data *p,
- struct tcindex_filter_result *r, struct nlattr **tb,
- struct nlattr *est, u32 flags, struct netlink_ext_ack *extack)
-{
- struct tcindex_filter_result new_filter_result;
- struct tcindex_data *cp = NULL, *oldp;
- struct tcindex_filter *f = NULL; /* make gcc behave */
- struct tcf_result cr = {};
- int err, balloc = 0;
- struct tcf_exts e;
- bool update_h = false;
-
- err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
- if (err < 0)
- return err;
- err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
- if (err < 0)
- goto errout;
-
- err = -ENOMEM;
- /* tcindex_data attributes must look atomic to classifier/lookup, so
- * allocate new tcindex data and RCU-assign it onto the root, keeping
- * the perfect hash and hash pointers from the old data.
- */
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp)
- goto errout;
-
- cp->mask = p->mask;
- cp->shift = p->shift;
- cp->hash = p->hash;
- cp->alloc_hash = p->alloc_hash;
- cp->fall_through = p->fall_through;
- cp->tp = tp;
- refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
-
- if (tb[TCA_TCINDEX_HASH])
- cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
-
- if (tb[TCA_TCINDEX_MASK])
- cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
-
- if (tb[TCA_TCINDEX_SHIFT]) {
- cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
- if (cp->shift > 16) {
- err = -EINVAL;
- goto errout;
- }
- }
- if (!cp->hash) {
- /* Hash not specified, use perfect hash if the upper limit
- * of the hashing index is below the threshold.
- */
- if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
- cp->hash = (cp->mask >> cp->shift) + 1;
- else
- cp->hash = DEFAULT_HASH_SIZE;
- }
-
- if (p->perfect) {
- int i;
-
- if (tcindex_alloc_perfect_hash(net, cp) < 0)
- goto errout;
- cp->alloc_hash = cp->hash;
- for (i = 0; i < min(cp->hash, p->hash); i++)
- cp->perfect[i].res = p->perfect[i].res;
- balloc = 1;
- }
- cp->h = p->h;
-
- err = tcindex_filter_result_init(&new_filter_result, cp, net);
- if (err < 0)
- goto errout_alloc;
- if (r)
- cr = r->res;
-
- err = -EBUSY;
-
- /* Hash already allocated, make sure that we still meet the
- * requirements for the allocated hash.
- */
- if (cp->perfect) {
- if (!valid_perfect_hash(cp) ||
- cp->hash > cp->alloc_hash)
- goto errout_alloc;
- } else if (cp->h && cp->hash != cp->alloc_hash) {
- goto errout_alloc;
- }
-
- err = -EINVAL;
- if (tb[TCA_TCINDEX_FALL_THROUGH])
- cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
-
- if (!cp->perfect && !cp->h)
- cp->alloc_hash = cp->hash;
-
- /* Note: this could be as restrictive as if (handle & ~(mask >> shift))
- * but then, we'd fail handles that may become valid after some future
- * mask change. While this is extremely unlikely to ever matter,
- * the check below is safer (and also more backwards-compatible).
- */
- if (cp->perfect || valid_perfect_hash(cp))
- if (handle >= cp->alloc_hash)
- goto errout_alloc;
-
-
- err = -ENOMEM;
- if (!cp->perfect && !cp->h) {
- if (valid_perfect_hash(cp)) {
- if (tcindex_alloc_perfect_hash(net, cp) < 0)
- goto errout_alloc;
- balloc = 1;
- } else {
- struct tcindex_filter __rcu **hash;
-
- hash = kcalloc(cp->hash,
- sizeof(struct tcindex_filter *),
- GFP_KERNEL);
-
- if (!hash)
- goto errout_alloc;
-
- cp->h = hash;
- balloc = 2;
- }
- }
-
- if (cp->perfect) {
- r = cp->perfect + handle;
- } else {
- /* imperfect area is updated in-place using rcu */
- update_h = !!tcindex_lookup(cp, handle);
- r = &new_filter_result;
- }
-
- if (r == &new_filter_result) {
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- goto errout_alloc;
- f->key = handle;
- f->next = NULL;
- err = tcindex_filter_result_init(&f->result, cp, net);
- if (err < 0) {
- kfree(f);
- goto errout_alloc;
- }
- }
-
- if (tb[TCA_TCINDEX_CLASSID]) {
- cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
- tcf_bind_filter(tp, &cr, base);
- }
-
- oldp = p;
- r->res = cr;
- tcf_exts_change(&r->exts, &e);
-
- rcu_assign_pointer(tp->root, cp);
-
- if (update_h) {
- struct tcindex_filter __rcu **fp;
- struct tcindex_filter *cf;
-
- f->result.res = r->res;
- tcf_exts_change(&f->result.exts, &r->exts);
-
- /* imperfect area bucket */
- fp = cp->h + (handle % cp->hash);
-
- /* lookup the filter, guaranteed to exist */
- for (cf = rcu_dereference_bh_rtnl(*fp); cf;
- fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
- if (cf->key == (u16)handle)
- break;
-
- f->next = cf->next;
-
- cf = rcu_replace_pointer(*fp, f, 1);
- tcf_exts_get_net(&cf->result.exts);
- tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
- } else if (r == &new_filter_result) {
- struct tcindex_filter *nfp;
- struct tcindex_filter __rcu **fp;
-
- f->result.res = r->res;
- tcf_exts_change(&f->result.exts, &r->exts);
-
- fp = cp->h + (handle % cp->hash);
- for (nfp = rtnl_dereference(*fp);
- nfp;
- fp = &nfp->next, nfp = rtnl_dereference(*fp))
- ; /* nothing */
-
- rcu_assign_pointer(*fp, f);
- } else {
- tcf_exts_destroy(&new_filter_result.exts);
- }
-
- if (oldp)
- tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
- return 0;
-
-errout_alloc:
- if (balloc == 1)
- tcindex_free_perfect_hash(cp);
- else if (balloc == 2)
- kfree(cp->h);
- tcf_exts_destroy(&new_filter_result.exts);
-errout:
- kfree(cp);
- tcf_exts_destroy(&e);
- return err;
-}
-
-static int
-tcindex_change(struct net *net, struct sk_buff *in_skb,
- struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca, void **arg, u32 flags,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_TCINDEX_MAX + 1];
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter_result *r = *arg;
- int err;
-
- pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
- "p %p,r %p,*arg %p\n",
- tp, handle, tca, arg, opt, p, r, *arg);
-
- if (!opt)
- return 0;
-
- err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
- tcindex_policy, NULL);
- if (err < 0)
- return err;
-
- return tcindex_set_parms(net, tp, base, handle, p, r, tb,
- tca[TCA_RATE], flags, extack);
-}
-
-static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
- bool rtnl_held)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter *f, *next;
- int i;
-
- pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
- if (p->perfect) {
- for (i = 0; i < p->hash; i++) {
- if (!p->perfect[i].res.class)
- continue;
- if (!tc_cls_stats_dump(tp, walker, p->perfect + i))
- return;
- }
- }
- if (!p->h)
- return;
- for (i = 0; i < p->hash; i++) {
- for (f = rtnl_dereference(p->h[i]); f; f = next) {
- next = rtnl_dereference(f->next);
- if (!tc_cls_stats_dump(tp, walker, &f->result))
- return;
- }
- }
-}
-
-static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
- struct netlink_ext_ack *extack)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- int i;
-
- pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
-
- if (p->perfect) {
- for (i = 0; i < p->hash; i++) {
- struct tcindex_filter_result *r = p->perfect + i;
-
- /* tcf_queue_work() does not guarantee the ordering we
- * want, so we have to take this refcnt temporarily to
- * ensure 'p' is freed only after all tcindex_filter_result
- * entries here. The imperfect hash does not need this, because it
- * uses linked lists rather than an array.
- */
- tcindex_data_get(p);
-
- tcf_unbind_filter(tp, &r->res);
- if (tcf_exts_get_net(&r->exts))
- tcf_queue_work(&r->rwork,
- tcindex_destroy_rexts_work);
- else
- __tcindex_destroy_rexts(r);
- }
- }
-
- for (i = 0; p->h && i < p->hash; i++) {
- struct tcindex_filter *f, *next;
- bool last;
-
- for (f = rtnl_dereference(p->h[i]); f; f = next) {
- next = rtnl_dereference(f->next);
- tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
- }
- }
-
- tcf_queue_work(&p->rwork, tcindex_destroy_work);
-}
-
-
-static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter_result *r = fh;
- struct nlattr *nest;
-
- pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
- tp, fh, skb, t, p, r);
- pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
-
- if (!fh) {
- t->tcm_handle = ~0; /* whatever ... */
- if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
- nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
- nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
- nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
- goto nla_put_failure;
- nla_nest_end(skb, nest);
- } else {
- if (p->perfect) {
- t->tcm_handle = r - p->perfect;
- } else {
- struct tcindex_filter *f;
- struct tcindex_filter __rcu **fp;
- int i;
-
- t->tcm_handle = 0;
- for (i = 0; !t->tcm_handle && i < p->hash; i++) {
- fp = &p->h[i];
- for (f = rtnl_dereference(*fp);
- !t->tcm_handle && f;
- fp = &f->next, f = rtnl_dereference(*fp)) {
- if (&f->result == r)
- t->tcm_handle = f->key;
- }
- }
- }
- pr_debug("handle = %d\n", t->tcm_handle);
- if (r->res.class &&
- nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
- goto nla_put_failure;
-
- if (tcf_exts_dump(skb, &r->exts) < 0)
- goto nla_put_failure;
- nla_nest_end(skb, nest);
-
- if (tcf_exts_dump_stats(skb, &r->exts) < 0)
- goto nla_put_failure;
- }
-
- return skb->len;
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
- void *q, unsigned long base)
-{
- struct tcindex_filter_result *r = fh;
-
- tc_cls_bind_class(classid, cl, q, &r->res, base);
-}
-
-static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
- .kind = "tcindex",
- .classify = tcindex_classify,
- .init = tcindex_init,
- .destroy = tcindex_destroy,
- .get = tcindex_get,
- .change = tcindex_change,
- .delete = tcindex_delete,
- .walk = tcindex_walk,
- .dump = tcindex_dump,
- .bind_class = tcindex_bind_class,
- .owner = THIS_MODULE,
-};
-
-static int __init init_tcindex(void)
-{
- return register_tcf_proto_ops(&cls_tcindex_ops);
-}
-
-static void __exit exit_tcindex(void)
-{
- unregister_tcf_proto_ops(&cls_tcindex_ops);
-}
-
-module_init(init_tcindex)
-module_exit(exit_tcindex)
-MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 72d2c204d5f3..aba789c30a2e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -902,7 +902,8 @@ static void qdisc_offload_graft_root(struct net_device *dev,
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
- u32 portid, u32 seq, u16 flags, int event)
+ u32 portid, u32 seq, u16 flags, int event,
+ struct netlink_ext_ack *extack)
{
struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
struct gnet_stats_queue __percpu *cpu_qstats = NULL;
@@ -970,7 +971,12 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
if (gnet_stats_finish_copy(&d) < 0)
goto nla_put_failure;
+ if (extack && extack->_msg &&
+ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
+ goto out_nlmsg_trim;
+
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+
return skb->len;
out_nlmsg_trim:
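
From this hunk on, qdisc (and, further below, class) notifications can carry the extack message inline as TCA_EXT_WARN_MSG instead of dropping it. A hedged userspace sketch of reading the attribute back from an RTM_NEWQDISC reply; the attribute walk is hand-rolled for brevity, where a real client would use libnl/libmnl helpers:

#include <linux/rtnetlink.h>
#include <stdio.h>

/* Print any TCA_EXT_WARN_MSG attribute found in a tc netlink reply. */
static void print_tc_warning(struct nlmsghdr *nlh)
{
	struct tcmsg *tcm = NLMSG_DATA(nlh);
	struct rtattr *rta = (struct rtattr *)((char *)tcm +
					       NLMSG_ALIGN(sizeof(*tcm)));
	int len = NLMSG_PAYLOAD(nlh, sizeof(*tcm));

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len))
		if (rta->rta_type == TCA_EXT_WARN_MSG)
			fprintf(stderr, "tc warning: %s\n",
				(char *)RTA_DATA(rta));
}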
@@ -991,7 +997,8 @@ static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, u32 clid,
- struct Qdisc *old, struct Qdisc *new)
+ struct Qdisc *old, struct Qdisc *new,
+ struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -1002,12 +1009,12 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
if (old && !tc_qdisc_dump_ignore(old, false)) {
if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
- 0, RTM_DELQDISC) < 0)
+ 0, RTM_DELQDISC, extack) < 0)
goto err_out;
}
if (new && !tc_qdisc_dump_ignore(new, false)) {
if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
- old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
+ old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
goto err_out;
}
@@ -1022,10 +1029,11 @@ err_out:
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
struct nlmsghdr *n, u32 clid,
- struct Qdisc *old, struct Qdisc *new)
+ struct Qdisc *old, struct Qdisc *new,
+ struct netlink_ext_ack *extack)
{
if (new || old)
- qdisc_notify(net, skb, n, clid, old, new);
+ qdisc_notify(net, skb, n, clid, old, new, extack);
if (old)
qdisc_put(old);
@@ -1105,12 +1113,12 @@ skip:
qdisc_refcount_inc(new);
rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
- notify_and_destroy(net, skb, n, classid, old, new);
+ notify_and_destroy(net, skb, n, classid, old, new, extack);
if (new && new->ops->attach)
new->ops->attach(new);
} else {
- notify_and_destroy(net, skb, n, classid, old, new);
+ notify_and_destroy(net, skb, n, classid, old, new, extack);
}
if (dev->flags & IFF_UP)
@@ -1141,7 +1149,7 @@ skip:
err = cops->graft(parent, cl, new, &old, extack);
if (err)
return err;
- notify_and_destroy(net, skb, n, classid, old, new);
+ notify_and_destroy(net, skb, n, classid, old, new, extack);
}
return 0;
}
@@ -1274,20 +1282,21 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
if (err)
goto err_out3;
- if (ops->init) {
- err = ops->init(sch, tca[TCA_OPTIONS], extack);
- if (err != 0)
- goto err_out5;
- }
-
if (tca[TCA_STAB]) {
stab = qdisc_get_stab(tca[TCA_STAB], extack);
if (IS_ERR(stab)) {
err = PTR_ERR(stab);
- goto err_out4;
+ goto err_out3;
}
rcu_assign_pointer(sch->stab, stab);
}
+
+ if (ops->init) {
+ err = ops->init(sch, tca[TCA_OPTIONS], extack);
+ if (err != 0)
+ goto err_out4;
+ }
+
if (tca[TCA_RATE]) {
err = -EOPNOTSUPP;
if (sch->flags & TCQ_F_MQROOT) {
@@ -1312,10 +1321,13 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
return sch;
-err_out5:
- /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
+err_out4:
+ /* Even if ops->init() failed, we call ops->destroy()
+ * like qdisc_create_dflt().
+ */
if (ops->destroy)
ops->destroy(sch);
+ qdisc_put_stab(rtnl_dereference(sch->stab));
err_out3:
netdev_put(dev, &sch->dev_tracker);
qdisc_free(sch);
@@ -1324,16 +1336,6 @@ err_out2:
err_out:
*errp = err;
return NULL;
-
-err_out4:
- /*
- * Any broken qdiscs that would require a ops->reset() here?
- * The qdisc was never in action so it shouldn't be necessary.
- */
- qdisc_put_stab(rtnl_dereference(sch->stab));
- if (ops->destroy)
- ops->destroy(sch);
- goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
@@ -1509,7 +1511,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
if (err != 0)
return err;
} else {
- qdisc_notify(net, skb, n, clid, NULL, q);
+ qdisc_notify(net, skb, n, clid, NULL, q, NULL);
}
return 0;
}
@@ -1648,7 +1650,7 @@ replay:
}
err = qdisc_change(q, tca, extack);
if (err == 0)
- qdisc_notify(net, skb, n, clid, NULL, q);
+ qdisc_notify(net, skb, n, clid, NULL, q, extack);
return err;
create_n_graft:
@@ -1715,7 +1717,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWQDISC) <= 0)
+ RTM_NEWQDISC, NULL) <= 0)
goto done;
q_idx++;
}
@@ -1737,7 +1739,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWQDISC) <= 0)
+ RTM_NEWQDISC, NULL) <= 0)
goto done;
q_idx++;
}
@@ -1810,8 +1812,8 @@ done:
************************************************/
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
- unsigned long cl,
- u32 portid, u32 seq, u16 flags, int event)
+ unsigned long cl, u32 portid, u32 seq, u16 flags,
+ int event, struct netlink_ext_ack *extack)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
@@ -1846,7 +1848,12 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
if (gnet_stats_finish_copy(&d) < 0)
goto nla_put_failure;
+ if (extack && extack->_msg &&
+ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
+ goto out_nlmsg_trim;
+
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+
return skb->len;
out_nlmsg_trim:
@@ -1857,7 +1864,7 @@ nla_put_failure:
static int tclass_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct Qdisc *q,
- unsigned long cl, int event)
+ unsigned long cl, int event, struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -1866,7 +1873,7 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
if (!skb)
return -ENOBUFS;
- if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
+ if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -1893,7 +1900,7 @@ static int tclass_del_notify(struct net *net,
return -ENOBUFS;
if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
- RTM_DELTCLASS) < 0) {
+ RTM_DELTCLASS, extack) < 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -2100,7 +2107,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
tc_bind_tclass(q, portid, clid, 0);
goto out;
case RTM_GETTCLASS:
- err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
+ err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS, extack);
goto out;
default:
err = -EINVAL;
@@ -2118,7 +2125,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
if (cops->change)
err = cops->change(q, clid, portid, tca, &new_cl, extack);
if (err == 0) {
- tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
+ tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
/* We just create a new class, need to do reverse binding. */
if (cl != new_cl)
tc_bind_tclass(q, portid, clid, new_cl);
@@ -2140,7 +2147,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWTCLASS);
+ RTM_NEWTCLASS, NULL);
}
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
deleted file mode 100644
index 4a981ca90b0b..000000000000
--- a/net/sched/sch_atm.c
+++ /dev/null
@@ -1,706 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */
-
-/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <linux/atmdev.h>
-#include <linux/atmclip.h>
-#include <linux/rtnetlink.h>
-#include <linux/file.h> /* for fput */
-#include <net/netlink.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-
-/*
- * The ATM queuing discipline provides a framework for invoking classifiers
- * (aka "filters"), which in turn select classes of this queuing discipline.
- * Each class maps the flow(s) it is handling to a given VC. Multiple classes
- * may share the same VC.
- *
- * When creating a class, VCs are specified by passing the number of the open
- * socket descriptor by which the calling process references the VC. The kernel
- * keeps the VC open at least until all classes using it are removed.
- *
- * In this file, most functions are named atm_tc_* to avoid confusion with all
- * the atm_* in net/atm. This naming convention differs from what's used in the
- * rest of net/sched.
- *
- * Known bugs:
- * - sometimes messes up the IP stack
- * - any manipulations besides the few operations described in the README are
- * untested and likely to crash the system
- * - should lock the flow while there is data in the queue (?)
- */
-
-#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
-
-struct atm_flow_data {
- struct Qdisc_class_common common;
- struct Qdisc *q; /* FIFO, TBF, etc. */
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
- struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
- void (*old_pop)(struct atm_vcc *vcc,
- struct sk_buff *skb); /* chaining */
- struct atm_qdisc_data *parent; /* parent qdisc */
- struct socket *sock; /* for closing */
- int ref; /* reference count */
- struct gnet_stats_basic_sync bstats;
- struct gnet_stats_queue qstats;
- struct list_head list;
- struct atm_flow_data *excess; /* flow for excess traffic;
- NULL to set CLP instead */
- int hdr_len;
- unsigned char hdr[]; /* header data; MUST BE LAST */
-};
-
-struct atm_qdisc_data {
- struct atm_flow_data link; /* unclassified skbs go here */
- struct list_head flows; /* NB: "link" is also on this
- list */
- struct tasklet_struct task; /* dequeue tasklet */
-};
-
-/* ------------------------- Class/flow operations ------------------------- */
-
-static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- list_for_each_entry(flow, &p->flows, list) {
- if (flow->common.classid == classid)
- return flow;
- }
- return NULL;
-}
-
-static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
- sch, p, flow, new, old);
- if (list_empty(&flow->list))
- return -EINVAL;
- if (!new)
- new = &noop_qdisc;
- *old = flow->q;
- flow->q = new;
- if (*old)
- qdisc_reset(*old);
- return 0;
-}
-
-static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
-{
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
- return flow ? flow->q : NULL;
-}
-
-static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
-{
- struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
- flow = lookup_flow(sch, classid);
- pr_debug("%s: flow %p\n", __func__, flow);
- return (unsigned long)flow;
-}
-
-static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
- unsigned long parent, u32 classid)
-{
- struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
- flow = lookup_flow(sch, classid);
- if (flow)
- flow->ref++;
- pr_debug("%s: flow %p\n", __func__, flow);
- return (unsigned long)flow;
-}
-
-/*
- * atm_tc_put handles all destructions, including the ones that are explicitly
- * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
- * anything that still seems to be in use.
- */
-static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- if (--flow->ref)
- return;
- pr_debug("atm_tc_put: destroying\n");
- list_del_init(&flow->list);
- pr_debug("atm_tc_put: qdisc %p\n", flow->q);
- qdisc_put(flow->q);
- tcf_block_put(flow->block);
- if (flow->sock) {
- pr_debug("atm_tc_put: f_count %ld\n",
- file_count(flow->sock->file));
- flow->vcc->pop = flow->old_pop;
- sockfd_put(flow->sock);
- }
- if (flow->excess)
- atm_tc_put(sch, (unsigned long)flow->excess);
- if (flow != &p->link)
- kfree(flow);
- /*
- * If flow == &p->link, the qdisc no longer works at this point and
- * needs to be removed. (By the caller of atm_tc_put.)
- */
-}
-
-static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
-{
- struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
-
- pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
- VCC2FLOW(vcc)->old_pop(vcc, skb);
- tasklet_schedule(&p->task);
-}
-
-static const u8 llc_oui_ip[] = {
- 0xaa, /* DSAP: non-ISO */
- 0xaa, /* SSAP: non-ISO */
- 0x03, /* Ctrl: Unnumbered Information Command PDU */
- 0x00, /* OUI: EtherType */
- 0x00, 0x00,
- 0x08, 0x00
-}; /* Ethertype IP (0800) */
-
-static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
- [TCA_ATM_FD] = { .type = NLA_U32 },
- [TCA_ATM_EXCESS] = { .type = NLA_U32 },
-};
-
-static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct nlattr **tca, unsigned long *arg,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
- struct atm_flow_data *excess = NULL;
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_ATM_MAX + 1];
- struct socket *sock;
- int fd, error, hdr_len;
- void *hdr;
-
- pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
- "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
- /*
- * The concept of parents doesn't apply for this qdisc.
- */
- if (parent && parent != TC_H_ROOT && parent != sch->handle)
- return -EINVAL;
- /*
- * ATM classes cannot be changed. In order to change properties of the
- * ATM connection, that socket needs to be modified directly (via the
- * native ATM API). In order to send a flow to a different VC, the old
- * class needs to be removed and a new one added. (This may be changed
- * later.)
- */
- if (flow)
- return -EBUSY;
- if (opt == NULL)
- return -EINVAL;
-
- error = nla_parse_nested_deprecated(tb, TCA_ATM_MAX, opt, atm_policy,
- NULL);
- if (error < 0)
- return error;
-
- if (!tb[TCA_ATM_FD])
- return -EINVAL;
- fd = nla_get_u32(tb[TCA_ATM_FD]);
- pr_debug("atm_tc_change: fd %d\n", fd);
- if (tb[TCA_ATM_HDR]) {
- hdr_len = nla_len(tb[TCA_ATM_HDR]);
- hdr = nla_data(tb[TCA_ATM_HDR]);
- } else {
- hdr_len = RFC1483LLC_LEN;
- hdr = NULL; /* default LLC/SNAP for IP */
- }
- if (!tb[TCA_ATM_EXCESS])
- excess = NULL;
- else {
- excess = (struct atm_flow_data *)
- atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
- if (!excess)
- return -ENOENT;
- }
- pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
- opt->nla_type, nla_len(opt), hdr_len);
- sock = sockfd_lookup(fd, &error);
- if (!sock)
- return error; /* f_count++ */
- pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
- if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
- error = -EPROTOTYPE;
- goto err_out;
- }
- /* @@@ should check if the socket is really operational or we'll crash
- on vcc->send */
- if (classid) {
- if (TC_H_MAJ(classid ^ sch->handle)) {
- pr_debug("atm_tc_change: classid mismatch\n");
- error = -EINVAL;
- goto err_out;
- }
- } else {
- int i;
- unsigned long cl;
-
- for (i = 1; i < 0x8000; i++) {
- classid = TC_H_MAKE(sch->handle, 0x8000 | i);
- cl = atm_tc_find(sch, classid);
- if (!cl)
- break;
- }
- }
- pr_debug("atm_tc_change: new id %x\n", classid);
- flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
- pr_debug("atm_tc_change: flow %p\n", flow);
- if (!flow) {
- error = -ENOBUFS;
- goto err_out;
- }
-
- error = tcf_block_get(&flow->block, &flow->filter_list, sch,
- extack);
- if (error) {
- kfree(flow);
- goto err_out;
- }
-
- flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
- extack);
- if (!flow->q)
- flow->q = &noop_qdisc;
- pr_debug("atm_tc_change: qdisc %p\n", flow->q);
- flow->sock = sock;
- flow->vcc = ATM_SD(sock); /* speedup */
- flow->vcc->user_back = flow;
- pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
- flow->old_pop = flow->vcc->pop;
- flow->parent = p;
- flow->vcc->pop = sch_atm_pop;
- flow->common.classid = classid;
- flow->ref = 1;
- flow->excess = excess;
- list_add(&flow->list, &p->link.list);
- flow->hdr_len = hdr_len;
- if (hdr)
- memcpy(flow->hdr, hdr, hdr_len);
- else
- memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
- *arg = (unsigned long)flow;
- return 0;
-err_out:
- sockfd_put(sock);
- return error;
-}
-
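When the caller does not supply a classid, atm_tc_change() above searches minors 0x8001..0xffff under the qdisc's own major number via TC_H_MAKE(). A minimal user-space sketch of that 16:16 handle layout; the TC_H_* macros are copied from the uapi header linux/pkt_sched.h, while main() and its sample values are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ_MASK 0xFFFF0000U
#define TC_H_MIN_MASK 0x0000FFFFU
#define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)   ((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (TC_H_MAJ(maj) | TC_H_MIN(min))

int main(void)
{
	uint32_t handle  = 0x00010000U;                   /* qdisc "1:" */
	uint32_t classid = TC_H_MAKE(handle, 0x8000 | 1); /* first probe */

	/* prints "classid 1:8001", the first id the loop above tries */
	printf("classid %x:%x\n",
	       (unsigned int)(TC_H_MAJ(classid) >> 16),
	       (unsigned int)TC_H_MIN(classid));
	return 0;
}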
-static int atm_tc_delete(struct Qdisc *sch, unsigned long arg,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- if (list_empty(&flow->list))
- return -EINVAL;
- if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
- return -EBUSY;
- /*
- * Reference count must be 2: one for "keepalive" (set at class
- * creation), and one for the reference held when calling delete.
- */
- if (flow->ref < 2) {
- pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
- return -EINVAL;
- }
- if (flow->ref > 2)
- return -EBUSY; /* catch references via excess, etc. */
- atm_tc_put(sch, arg);
- return 0;
-}
-
-static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
- if (walker->stop)
- return;
- list_for_each_entry(flow, &p->flows, list) {
- if (!tc_qdisc_stats_dump(sch, (unsigned long)flow, walker))
- break;
- }
-}
-
-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- return flow ? flow->block : p->link.block;
-}
-
-/* --------------------------- Qdisc operations ---------------------------- */
-
-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
- struct tcf_result res;
- int result;
- int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-
- pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
- result = TC_ACT_OK; /* be nice to gcc */
- flow = NULL;
- if (TC_H_MAJ(skb->priority) != sch->handle ||
- !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
- struct tcf_proto *fl;
-
- list_for_each_entry(flow, &p->flows, list) {
- fl = rcu_dereference_bh(flow->filter_list);
- if (fl) {
- result = tcf_classify(skb, NULL, fl, &res, true);
- if (result < 0)
- continue;
- if (result == TC_ACT_SHOT)
- goto done;
-
- flow = (struct atm_flow_data *)res.class;
- if (!flow)
- flow = lookup_flow(sch, res.classid);
- goto done;
- }
- }
- flow = NULL;
-done:
- ;
- }
- if (!flow) {
- flow = &p->link;
- } else {
- if (flow->vcc)
- ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
- /*@@@ looks good ... but it's not supposed to work :-) */
-#ifdef CONFIG_NET_CLS_ACT
- switch (result) {
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- __qdisc_drop(skb, to_free);
- return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- case TC_ACT_SHOT:
- __qdisc_drop(skb, to_free);
- goto drop;
- case TC_ACT_RECLASSIFY:
- if (flow->excess)
- flow = flow->excess;
- else
- ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
- break;
- }
-#endif
- }
-
- ret = qdisc_enqueue(skb, flow->q, to_free);
- if (ret != NET_XMIT_SUCCESS) {
-drop: __maybe_unused
- if (net_xmit_drop_count(ret)) {
- qdisc_qstats_drop(sch);
- if (flow)
- flow->qstats.drops++;
- }
- return ret;
- }
- /*
- * Okay, this may seem weird. We pretend we've dropped the packet if
- * it goes via ATM. The reason for this is that the outer qdisc
- * expects to be able to q->dequeue the packet later on if we return
- * success at this place. Also, sch->q.qlen needs to reflect whether
- * there is a packet eligible for dequeuing or not. Note that the
- * statistics of the outer qdisc are necessarily wrong because of all
- * this. There's currently no correct solution for this.
- */
- if (flow == &p->link) {
- sch->q.qlen++;
- return NET_XMIT_SUCCESS;
- }
- tasklet_schedule(&p->task);
- return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-}
-
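The composite return values above deserve a gloss: the low bits carry a NET_XMIT_* verdict for the caller, while the high __NET_XMIT_* flag bits tell parent qdiscs how to account for the packet before being masked off at the device boundary. A sketch of the convention; the constants mirror include/linux/netdevice.h and include/net/sch_generic.h in this tree, and parent_counts_drop() paraphrases the net_xmit_drop_count() macro used a few lines up:

#define NET_XMIT_SUCCESS   0x00
#define NET_XMIT_DROP      0x01       /* skb dropped                 */
#define NET_XMIT_CN        0x02       /* congestion notification     */
#define __NET_XMIT_STOLEN  0x00010000 /* consumed, but not a drop    */
#define __NET_XMIT_BYPASS  0x00020000 /* internal stats-bypass flag  */

/* paraphrase of net_xmit_drop_count(): stolen packets are not drops */
static int parent_counts_drop(int ret)
{
	return (ret & __NET_XMIT_STOLEN) ? 0 : 1;
}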
-/*
- * Dequeue packets and send them over ATM. Note that we quite deliberately
- * avoid checking net_device's flow control here, simply because sch_atm
- * uses its own channels, which have nothing to do with any CLIP/LANE/or
- * non-ATM interfaces.
- */
-
-static void sch_atm_dequeue(struct tasklet_struct *t)
-{
- struct atm_qdisc_data *p = from_tasklet(p, t, task);
- struct Qdisc *sch = qdisc_from_priv(p);
- struct atm_flow_data *flow;
- struct sk_buff *skb;
-
- pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list) {
- if (flow == &p->link)
- continue;
- /*
- * If traffic is properly shaped, this won't generate nasty
- * little bursts. Otherwise, it may ... (but that's okay)
- */
- while ((skb = flow->q->ops->peek(flow->q))) {
- if (!atm_may_send(flow->vcc, skb->truesize))
- break;
-
- skb = qdisc_dequeue_peeked(flow->q);
- if (unlikely(!skb))
- break;
-
- qdisc_bstats_update(sch, skb);
- bstats_update(&flow->bstats, skb);
- pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
- /* remove any LL header somebody else has attached */
- skb_pull(skb, skb_network_offset(skb));
- if (skb_headroom(skb) < flow->hdr_len) {
- struct sk_buff *new;
-
- new = skb_realloc_headroom(skb, flow->hdr_len);
- dev_kfree_skb(skb);
- if (!new)
- continue;
- skb = new;
- }
- pr_debug("sch_atm_dequeue: ip %p, data %p\n",
- skb_network_header(skb), skb->data);
- ATM_SKB(skb)->vcc = flow->vcc;
- memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
- flow->hdr_len);
- refcount_add(skb->truesize,
- &sk_atm(flow->vcc)->sk_wmem_alloc);
- /* atm.atm_options are already set by atm_tc_enqueue */
- flow->vcc->send(flow->vcc, skb);
- }
- }
-}
-
-static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct sk_buff *skb;
-
- pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
- tasklet_schedule(&p->task);
- skb = qdisc_dequeue_peeked(p->link.q);
- if (skb)
- sch->q.qlen--;
- return skb;
-}
-
-static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
-
- return p->link.q->ops->peek(p->link.q);
-}
-
-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- int err;
-
- pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
- INIT_LIST_HEAD(&p->flows);
- INIT_LIST_HEAD(&p->link.list);
- gnet_stats_basic_sync_init(&p->link.bstats);
- list_add(&p->link.list, &p->flows);
- p->link.q = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, sch->handle, extack);
- if (!p->link.q)
- p->link.q = &noop_qdisc;
- pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
- p->link.vcc = NULL;
- p->link.sock = NULL;
- p->link.common.classid = sch->handle;
- p->link.ref = 1;
-
- err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
- extack);
- if (err)
- return err;
-
- tasklet_setup(&p->task, sch_atm_dequeue);
- return 0;
-}
-
-static void atm_tc_reset(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list)
- qdisc_reset(flow->q);
-}
-
-static void atm_tc_destroy(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow, *tmp;
-
- pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list) {
- tcf_block_put(flow->block);
- flow->block = NULL;
- }
-
- list_for_each_entry_safe(flow, tmp, &p->flows, list) {
- if (flow->ref > 1)
- pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
- atm_tc_put(sch, (unsigned long)flow);
- }
- tasklet_kill(&p->task);
-}
-
-static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
- struct nlattr *nest;
-
- pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
- sch, p, flow, skb, tcm);
- if (list_empty(&flow->list))
- return -EINVAL;
- tcm->tcm_handle = flow->common.classid;
- tcm->tcm_info = flow->q->handle;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
-
- if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
- goto nla_put_failure;
- if (flow->vcc) {
- struct sockaddr_atmpvc pvc;
- int state;
-
- memset(&pvc, 0, sizeof(pvc));
- pvc.sap_family = AF_ATMPVC;
- pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
- pvc.sap_addr.vpi = flow->vcc->vpi;
- pvc.sap_addr.vci = flow->vcc->vci;
- if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
- goto nla_put_failure;
- state = ATM_VF2VS(flow->vcc->flags);
- if (nla_put_u32(skb, TCA_ATM_STATE, state))
- goto nla_put_failure;
- }
- if (flow->excess) {
- if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
- goto nla_put_failure;
- } else {
- if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
- goto nla_put_failure;
- }
- return nla_nest_end(skb, nest);
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static int
-atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
- struct gnet_dump *d)
-{
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- if (gnet_stats_copy_basic(d, NULL, &flow->bstats, true) < 0 ||
- gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
- return -1;
-
- return 0;
-}
-
-static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
- return 0;
-}
-
-static const struct Qdisc_class_ops atm_class_ops = {
- .graft = atm_tc_graft,
- .leaf = atm_tc_leaf,
- .find = atm_tc_find,
- .change = atm_tc_change,
- .delete = atm_tc_delete,
- .walk = atm_tc_walk,
- .tcf_block = atm_tc_tcf_block,
- .bind_tcf = atm_tc_bind_filter,
- .unbind_tcf = atm_tc_put,
- .dump = atm_tc_dump_class,
- .dump_stats = atm_tc_dump_class_stats,
-};
-
-static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
- .cl_ops = &atm_class_ops,
- .id = "atm",
- .priv_size = sizeof(struct atm_qdisc_data),
- .enqueue = atm_tc_enqueue,
- .dequeue = atm_tc_dequeue,
- .peek = atm_tc_peek,
- .init = atm_tc_init,
- .reset = atm_tc_reset,
- .destroy = atm_tc_destroy,
- .dump = atm_tc_dump,
- .owner = THIS_MODULE,
-};
-
-static int __init atm_init(void)
-{
- return register_qdisc(&atm_qdisc_ops);
-}
-
-static void __exit atm_exit(void)
-{
- unregister_qdisc(&atm_qdisc_ops);
-}
-
-module_init(atm_init)
-module_exit(atm_exit)
-MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 3ed0c3342189..7970217b565a 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1209,7 +1209,7 @@ static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
iph_check->daddr != iph->daddr)
continue;
- seglen = ntohs(iph_check->tot_len) -
+ seglen = iph_totlen(skb, iph_check) -
(4 * iph_check->ihl);
} else if (iph_check->version == 6) {
ipv6h = (struct ipv6hdr *)iph;
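Context for this hunk: with IPv4 BIG TCP (merged in this same cycle), TSO frames may exceed 64 kB, so iph->tot_len can no longer hold the real length and is transmitted as zero. The new iph_totlen() helper recovers the length from the skb; roughly the following, a paraphrase of the helper in include/linux/ip.h rather than a verbatim copy:

/* kernel-context sketch, not standalone code */
static inline u32 iph_totlen_sketch(const struct sk_buff *skb,
				    const struct iphdr *iph)
{
	u32 len = ntohs(iph->tot_len);

	/* non-zero tot_len, or not a GSO TCP packet: trust the header */
	if (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb))
		return len;

	/* BIG TCP: the header says 0, reconstruct from the skb itself */
	return skb->len - skb_network_offset(skb);
}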
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
deleted file mode 100644
index 36db5f6782f2..000000000000
--- a/net/sched/sch_cbq.c
+++ /dev/null
@@ -1,1727 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * net/sched/sch_cbq.c Class-Based Queueing discipline.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <net/netlink.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-
-
-/* Class-Based Queueing (CBQ) algorithm.
- =======================================
-
- Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
- Management Models for Packet Networks",
- IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
-
- [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
-
- [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
- Parameters", 1996
-
- [4] Sally Floyd and Michael Speer, "Experimental Results
- for Class-Based Queueing", 1998, not published.
-
- -----------------------------------------------------------------------
-
- Algorithm skeleton was taken from NS simulator cbq.cc.
- If someone wants to check this code against the LBL version,
- he should take into account that ONLY the skeleton was borrowed,
- the implementation is different. Particularly:
-
- --- The WRR algorithm is different. Our version looks more
- reasonable (I hope) and works when quanta are allowed to be
- less than MTU, which is always the case when real time classes
- have small rates. Note, that the statement of [3] is
- incomplete, delay may actually be estimated even if class
- per-round allotment is less than MTU. Namely, if per-round
- allotment is W*r_i, and r_1+...+r_k = r < 1
-
- delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
-
- In the worst case we have IntServ estimate with D = W*r+k*MTU
- and C = MTU*r. The proof (if correct at all) is trivial.
-
-
- --- It seems that cbq-2.0 is not very accurate. At least, I cannot
- interpret some places, which look like wrong translations
- from NS. Anyone is advised to find these differences
- and explain to me why I am wrong 8).
-
- --- Linux has no EOI event, so we cannot estimate true class
- idle time. The workaround is to consider the next dequeue event
- as a sign that the previous packet has finished. This is wrong because of
- internal device queueing, but on a permanently loaded link it is true.
- Moreover, combined with the clock integrator, this scheme looks
- very close to an ideal solution. */
-
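Restating the delay bound from the comment above in LaTeX (reading the square bracket as a ceiling, which the comment leaves unstated):

\delta_i \;\le\; \frac{\left\lceil \frac{\mathrm{MTU}}{W r_i} \right\rceil W r \;+\; W r \;+\; k \cdot \mathrm{MTU}}{B},
\qquad r = r_1 + \cdots + r_k < 1

In the worst case this is an IntServ-style estimate with D = W r + k \cdot \mathrm{MTU} and C = \mathrm{MTU} \cdot r, as the comment notes.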
-struct cbq_sched_data;
-
-
-struct cbq_class {
- struct Qdisc_class_common common;
- struct cbq_class *next_alive; /* next class with backlog in this priority band */
-
-/* Parameters */
- unsigned char priority; /* class priority */
- unsigned char priority2; /* priority to be used after overlimit */
- unsigned char ewma_log; /* time constant for idle time calculation */
-
- u32 defmap;
-
- /* Link-sharing scheduler parameters */
- long maxidle; /* Class parameters: see below. */
- long offtime;
- long minidle;
- u32 avpkt;
- struct qdisc_rate_table *R_tab;
-
- /* General scheduler (WRR) parameters */
- long allot;
- long quantum; /* Allotment per WRR round */
- long weight; /* Relative allotment: see below */
-
- struct Qdisc *qdisc; /* Ptr to CBQ discipline */
- struct cbq_class *split; /* Ptr to split node */
- struct cbq_class *share; /* Ptr to LS parent in the class tree */
- struct cbq_class *tparent; /* Ptr to tree parent in the class tree */
- struct cbq_class *borrow; /* NULL if class is bandwidth limited;
- parent otherwise */
- struct cbq_class *sibling; /* Sibling chain */
- struct cbq_class *children; /* Pointer to children chain */
-
- struct Qdisc *q; /* Elementary queueing discipline */
-
-
-/* Variables */
- unsigned char cpriority; /* Effective priority */
- unsigned char delayed;
- unsigned char level; /* level of the class in hierarchy:
- 0 for leaf classes, and maximal
- level of children + 1 for nodes.
- */
-
- psched_time_t last; /* Last end of service */
- psched_time_t undertime;
- long avgidle;
- long deficit; /* Saved deficit for WRR */
- psched_time_t penalized;
- struct gnet_stats_basic_sync bstats;
- struct gnet_stats_queue qstats;
- struct net_rate_estimator __rcu *rate_est;
- struct tc_cbq_xstats xstats;
-
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
-
- int filters;
-
- struct cbq_class *defaults[TC_PRIO_MAX + 1];
-};
-
-struct cbq_sched_data {
- struct Qdisc_class_hash clhash; /* Hash table of all classes */
- int nclasses[TC_CBQ_MAXPRIO + 1];
- unsigned int quanta[TC_CBQ_MAXPRIO + 1];
-
- struct cbq_class link;
-
- unsigned int activemask;
- struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
- with backlog */
-
-#ifdef CONFIG_NET_CLS_ACT
- struct cbq_class *rx_class;
-#endif
- struct cbq_class *tx_class;
- struct cbq_class *tx_borrowed;
- int tx_len;
- psched_time_t now; /* Cached timestamp */
- unsigned int pmask;
-
- struct qdisc_watchdog watchdog; /* Watchdog timer,
- started when CBQ has
- backlog, but cannot
- transmit just now */
- psched_tdiff_t wd_expires;
- int toplevel;
- u32 hgenerator;
-};
-
-
-#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
-
-static inline struct cbq_class *
-cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
-{
- struct Qdisc_class_common *clc;
-
- clc = qdisc_class_find(&q->clhash, classid);
- if (clc == NULL)
- return NULL;
- return container_of(clc, struct cbq_class, common);
-}
-
-#ifdef CONFIG_NET_CLS_ACT
-
-static struct cbq_class *
-cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
-{
- struct cbq_class *cl;
-
- for (cl = this->tparent; cl; cl = cl->tparent) {
- struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
-
- if (new != NULL && new != this)
- return new;
- }
- return NULL;
-}
-
-#endif
-
-/* Classify packet. The procedure is pretty complicated, but
- * it allows us to combine link sharing and priority scheduling
- * transparently.
- *
- * Namely, you can put link sharing rules (e.g. route based) at the root
- * of CBQ, so that they resolve to split nodes. Then packets are classified
- * by logical priority, or a more specific classifier may be attached
- * to the split node.
- */
-
-static struct cbq_class *
-cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *head = &q->link;
- struct cbq_class **defmap;
- struct cbq_class *cl = NULL;
- u32 prio = skb->priority;
- struct tcf_proto *fl;
- struct tcf_result res;
-
- /*
- * Step 1. If skb->priority points to one of our classes, use it.
- */
- if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
- (cl = cbq_class_lookup(q, prio)) != NULL)
- return cl;
-
- *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- for (;;) {
- int result = 0;
- defmap = head->defaults;
-
- fl = rcu_dereference_bh(head->filter_list);
- /*
- * Step 2+n. Apply classifier.
- */
- result = tcf_classify(skb, NULL, fl, &res, true);
- if (!fl || result < 0)
- goto fallback;
- if (result == TC_ACT_SHOT)
- return NULL;
-
- cl = (void *)res.class;
- if (!cl) {
- if (TC_H_MAJ(res.classid))
- cl = cbq_class_lookup(q, res.classid);
- else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
- cl = defmap[TC_PRIO_BESTEFFORT];
-
- if (cl == NULL)
- goto fallback;
- }
- if (cl->level >= head->level)
- goto fallback;
-#ifdef CONFIG_NET_CLS_ACT
- switch (result) {
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- fallthrough;
- case TC_ACT_RECLASSIFY:
- return cbq_reclassify(skb, cl);
- }
-#endif
- if (cl->level == 0)
- return cl;
-
- /*
- * Step 3+n. If classifier selected a link sharing class,
- * apply agency specific classifier.
- * Repeat this procedure until we hit a leaf node.
- */
- head = cl;
- }
-
-fallback:
- cl = head;
-
- /*
- * Step 4. No success...
- */
- if (TC_H_MAJ(prio) == 0 &&
- !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
- !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
- return head;
-
- return cl;
-}
-
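The defaults[] fallback used in steps 2+n and 4 of cbq_classify() can be isolated as a small sketch: a split node maps the packet's logical priority to a registered default class, with the best-effort slot as the last resort. default_for_prio() is an illustrative name, not a function from this file:

static struct cbq_class *default_for_prio(struct cbq_class *head, u32 classid)
{
	struct cbq_class *cl = head->defaults[classid & TC_PRIO_MAX];

	return cl ? cl : head->defaults[TC_PRIO_BESTEFFORT];
}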
-/*
- * A packet has just been enqueued on the empty class.
- * cbq_activate_class adds it to the tail of active class list
- * of its priority band.
- */
-
-static inline void cbq_activate_class(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- int prio = cl->cpriority;
- struct cbq_class *cl_tail;
-
- cl_tail = q->active[prio];
- q->active[prio] = cl;
-
- if (cl_tail != NULL) {
- cl->next_alive = cl_tail->next_alive;
- cl_tail->next_alive = cl;
- } else {
- cl->next_alive = cl;
- q->activemask |= (1<<prio);
- }
-}
-
-/*
- * Unlink class from active chain.
- * Note that this same procedure is done directly in cbq_dequeue*
- * during round-robin procedure.
- */
-
-static void cbq_deactivate_class(struct cbq_class *this)
-{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
- int prio = this->cpriority;
- struct cbq_class *cl;
- struct cbq_class *cl_prev = q->active[prio];
-
- do {
- cl = cl_prev->next_alive;
- if (cl == this) {
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
-
- if (cl == q->active[prio]) {
- q->active[prio] = cl_prev;
- if (cl == q->active[prio]) {
- q->active[prio] = NULL;
- q->activemask &= ~(1<<prio);
- return;
- }
- }
- return;
- }
- } while ((cl_prev = cl) != q->active[prio]);
-}
-
-static void
-cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
-{
- int toplevel = q->toplevel;
-
- if (toplevel > cl->level) {
- psched_time_t now = psched_get_time();
-
- do {
- if (cl->undertime < now) {
- q->toplevel = cl->level;
- return;
- }
- } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
- }
-}
-
-static int
-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- int ret;
- struct cbq_class *cl = cbq_classify(skb, sch, &ret);
-
-#ifdef CONFIG_NET_CLS_ACT
- q->rx_class = cl;
-#endif
- if (cl == NULL) {
- if (ret & __NET_XMIT_BYPASS)
- qdisc_qstats_drop(sch);
- __qdisc_drop(skb, to_free);
- return ret;
- }
-
- ret = qdisc_enqueue(skb, cl->q, to_free);
- if (ret == NET_XMIT_SUCCESS) {
- sch->q.qlen++;
- cbq_mark_toplevel(q, cl);
- if (!cl->next_alive)
- cbq_activate_class(cl);
- return ret;
- }
-
- if (net_xmit_drop_count(ret)) {
- qdisc_qstats_drop(sch);
- cbq_mark_toplevel(q, cl);
- cl->qstats.drops++;
- }
- return ret;
-}
-
-/* Overlimit action: penalize leaf class by adding offtime */
-static void cbq_overlimit(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- psched_tdiff_t delay = cl->undertime - q->now;
-
- if (!cl->delayed) {
- delay += cl->offtime;
-
- /*
- * The class goes to sleep, so that it will have no
- * chance to work off its avgidle. Let's forgive it 8)
- *
- * BTW cbq-2.0 has a bug in this
- * place: apparently they forgot to shift it by cl->ewma_log.
- */
- if (cl->avgidle < 0)
- delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
- if (cl->avgidle < cl->minidle)
- cl->avgidle = cl->minidle;
- if (delay <= 0)
- delay = 1;
- cl->undertime = q->now + delay;
-
- cl->xstats.overactions++;
- cl->delayed = 1;
- }
- if (q->wd_expires == 0 || q->wd_expires > delay)
- q->wd_expires = delay;
-
- /* Dirty work! We must schedule wakeups based on
- * real available rate, rather than leaf rate,
- * which may be tiny (even zero).
- */
- if (q->toplevel == TC_CBQ_MAXLEVEL) {
- struct cbq_class *b;
- psched_tdiff_t base_delay = q->wd_expires;
-
- for (b = cl->borrow; b; b = b->borrow) {
- delay = b->undertime - q->now;
- if (delay < base_delay) {
- if (delay <= 0)
- delay = 1;
- base_delay = delay;
- }
- }
-
- q->wd_expires = base_delay;
- }
-}
-
-/*
- * This is a mission-critical procedure.
- *
- * We "regenerate" the toplevel cutoff if the transmitting class
- * has backlog and is not regulated. This is not part of the
- * original CBQ description, but looks more reasonable.
- * It may well be wrong; the question needs further investigation.
- */
-
-static inline void
-cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
- struct cbq_class *borrowed)
-{
- if (cl && q->toplevel >= borrowed->level) {
- if (cl->q->q.qlen > 1) {
- do {
- if (borrowed->undertime == PSCHED_PASTPERFECT) {
- q->toplevel = borrowed->level;
- return;
- }
- } while ((borrowed = borrowed->borrow) != NULL);
- }
-#if 0
- /* It is not necessary now. Uncommenting it
- will save CPU cycles, but decrease fairness.
- */
- q->toplevel = TC_CBQ_MAXLEVEL;
-#endif
- }
-}
-
-static void
-cbq_update(struct cbq_sched_data *q)
-{
- struct cbq_class *this = q->tx_class;
- struct cbq_class *cl = this;
- int len = q->tx_len;
- psched_time_t now;
-
- q->tx_class = NULL;
- /* Time integrator. We calculate EOS time
- * by adding expected packet transmission time.
- */
- now = q->now + L2T(&q->link, len);
-
- for ( ; cl; cl = cl->share) {
- long avgidle = cl->avgidle;
- long idle;
-
- _bstats_update(&cl->bstats, len, 1);
-
- /*
- * (now - last) is total time between packet right edges.
- * (last_pktlen/rate) is "virtual" busy time, so that
- *
- * idle = (now - last) - last_pktlen/rate
- */
-
- idle = now - cl->last;
- if ((unsigned long)idle > 128*1024*1024) {
- avgidle = cl->maxidle;
- } else {
- idle -= L2T(cl, len);
-
- /* true_avgidle := (1-W)*true_avgidle + W*idle,
- * where W=2^{-ewma_log}. But cl->avgidle is scaled:
- * cl->avgidle == true_avgidle/W,
- * hence:
- */
- avgidle += idle - (avgidle>>cl->ewma_log);
- }
-
- if (avgidle <= 0) {
- /* Overlimit or at-limit */
-
- if (avgidle < cl->minidle)
- avgidle = cl->minidle;
-
- cl->avgidle = avgidle;
-
- /* Calculate expected time, when this class
- * will be allowed to send.
- * It will occur, when:
- * (1-W)*true_avgidle + W*delay = 0, i.e.
- * idle = (1/W - 1)*(-true_avgidle)
- * or
- * idle = (1 - W)*(-cl->avgidle);
- */
- idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
-
- /*
- * That is not all.
- * To maintain the rate allocated to the class,
- * we add to undertime the virtual clock time
- * necessary to complete the transmitted packet.
- * (len/phys_bandwidth has already elapsed
- * by the moment of cbq_update.)
- */
-
- idle -= L2T(&q->link, len);
- idle += L2T(cl, len);
-
- cl->undertime = now + idle;
- } else {
- /* Underlimit */
-
- cl->undertime = PSCHED_PASTPERFECT;
- if (avgidle > cl->maxidle)
- cl->avgidle = cl->maxidle;
- else
- cl->avgidle = avgidle;
- }
- if ((s64)(now - cl->last) > 0)
- cl->last = now;
- }
-
- cbq_update_toplevel(q, this, q->tx_borrowed);
-}
-
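The scaled EWMA in cbq_update() above, spelled out: with W = 2^{-\mathrm{ewma\_log}} and the stored value a = \bar{a}/W (this is cl->avgidle), the true filter

\bar{a} \;\leftarrow\; (1 - W)\,\bar{a} + W \cdot \mathrm{idle}

divides through by W to give a \leftarrow a + \mathrm{idle} - W a, and W a is exactly a \gg \mathrm{ewma\_log}, so the line "avgidle += idle - (avgidle >> cl->ewma_log)" implements the filter with one shift and no division.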
-static inline struct cbq_class *
-cbq_under_limit(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- struct cbq_class *this_cl = cl;
-
- if (cl->tparent == NULL)
- return cl;
-
- if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
- cl->delayed = 0;
- return cl;
- }
-
- do {
- /* This is a very suspicious place. Currently, the
- * overlimit action is generated for non-bounded classes
- * only if the link is completely congested.
- * Though this agrees with the ancestor-only paradigm,
- * it looks very unwise. In particular,
- * it means that this chunk of code will either
- * never be called or result in strong amplification
- * of burstiness. Dangerous, silly, and, alas,
- * no other solution exists.
- */
- cl = cl->borrow;
- if (!cl) {
- this_cl->qstats.overlimits++;
- cbq_overlimit(this_cl);
- return NULL;
- }
- if (cl->level > q->toplevel)
- return NULL;
- } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
-
- cl->delayed = 0;
- return cl;
-}
-
-static inline struct sk_buff *
-cbq_dequeue_prio(struct Qdisc *sch, int prio)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl_tail, *cl_prev, *cl;
- struct sk_buff *skb;
- int deficit;
-
- cl_tail = cl_prev = q->active[prio];
- cl = cl_prev->next_alive;
-
- do {
- deficit = 0;
-
- /* Start round */
- do {
- struct cbq_class *borrow = cl;
-
- if (cl->q->q.qlen &&
- (borrow = cbq_under_limit(cl)) == NULL)
- goto skip_class;
-
- if (cl->deficit <= 0) {
- /* Class exhausted its allotment per
- * this round. Switch to the next one.
- */
- deficit = 1;
- cl->deficit += cl->quantum;
- goto next_class;
- }
-
- skb = cl->q->dequeue(cl->q);
-
- /* The class did not give us any skb :-(
- * This can occur even if cl->q->q.qlen != 0,
- * e.g. if cl->q == "tbf"
- */
- if (skb == NULL)
- goto skip_class;
-
- cl->deficit -= qdisc_pkt_len(skb);
- q->tx_class = cl;
- q->tx_borrowed = borrow;
- if (borrow != cl) {
-#ifndef CBQ_XSTATS_BORROWS_BYTES
- borrow->xstats.borrows++;
- cl->xstats.borrows++;
-#else
- borrow->xstats.borrows += qdisc_pkt_len(skb);
- cl->xstats.borrows += qdisc_pkt_len(skb);
-#endif
- }
- q->tx_len = qdisc_pkt_len(skb);
-
- if (cl->deficit <= 0) {
- q->active[prio] = cl;
- cl = cl->next_alive;
- cl->deficit += cl->quantum;
- }
- return skb;
-
-skip_class:
- if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
- /* Class is empty or penalized.
- * Unlink it from active chain.
- */
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
-
- /* Did cl_tail point to it? */
- if (cl == cl_tail) {
- /* Repair it! */
- cl_tail = cl_prev;
-
- /* Was it the last class in this band? */
- if (cl == cl_tail) {
- /* Kill the band! */
- q->active[prio] = NULL;
- q->activemask &= ~(1<<prio);
- if (cl->q->q.qlen)
- cbq_activate_class(cl);
- return NULL;
- }
-
- q->active[prio] = cl_tail;
- }
- if (cl->q->q.qlen)
- cbq_activate_class(cl);
-
- cl = cl_prev;
- }
-
-next_class:
- cl_prev = cl;
- cl = cl->next_alive;
- } while (cl_prev != cl_tail);
- } while (deficit);
-
- q->active[prio] = cl_prev;
-
- return NULL;
-}
-
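The quantum/deficit bookkeeping woven through cbq_dequeue_prio() is ordinary deficit round robin. A self-contained sketch of just that rule, with illustrative names rather than kernel ones:

struct wrr_class {
	long deficit;   /* bytes this class may still send this round */
	long quantum;   /* per-round allotment */
};

/* Returns 1 if cl may send a pkt_len-byte packet now; 0 means its
 * allotment is exhausted and the caller moves to the next class.
 */
static int wrr_try_send(struct wrr_class *cl, long pkt_len)
{
	if (cl->deficit <= 0) {
		cl->deficit += cl->quantum;   /* top up for the next round */
		return 0;
	}
	cl->deficit -= pkt_len;
	return 1;
}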
-static inline struct sk_buff *
-cbq_dequeue_1(struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct sk_buff *skb;
- unsigned int activemask;
-
- activemask = q->activemask & 0xFF;
- while (activemask) {
- int prio = ffz(~activemask);
- activemask &= ~(1<<prio);
- skb = cbq_dequeue_prio(sch, prio);
- if (skb)
- return skb;
- }
- return NULL;
-}
-
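ffz(~activemask) selects the lowest set bit, i.e. the most urgent backlogged priority band (band 0 first). Outside the kernel the same scan can be demonstrated with the GCC/Clang builtin __builtin_ctz() standing in for ffz(~x):

#include <stdio.h>

int main(void)
{
	unsigned int activemask = 0x14;   /* bands 2 and 4 have backlog */

	while (activemask) {
		int prio = __builtin_ctz(activemask);  /* lowest set bit */

		printf("serve band %d\n", prio);       /* 2, then 4 */
		activemask &= ~(1u << prio);
	}
	return 0;
}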
-static struct sk_buff *
-cbq_dequeue(struct Qdisc *sch)
-{
- struct sk_buff *skb;
- struct cbq_sched_data *q = qdisc_priv(sch);
- psched_time_t now;
-
- now = psched_get_time();
-
- if (q->tx_class)
- cbq_update(q);
-
- q->now = now;
-
- for (;;) {
- q->wd_expires = 0;
-
- skb = cbq_dequeue_1(sch);
- if (skb) {
- qdisc_bstats_update(sch, skb);
- sch->q.qlen--;
- return skb;
- }
-
- /* All the classes are overlimit.
- *
- * This is possible if:
- *
- * 1. The scheduler is empty.
- * 2. The toplevel cutoff inhibited borrowing.
- * 3. The root class is overlimit.
- *
- * Reset the 2nd and 3rd conditions and retry.
- *
- * Note that NS and cbq-2.0 are buggy here: peeking
- * an arbitrary class is appropriate for ancestor-only
- * sharing, but not for the toplevel algorithm.
- *
- * Our version is better but slower, because it requires
- * two passes; that is unavoidable with top-level sharing.
- */
-
- if (q->toplevel == TC_CBQ_MAXLEVEL &&
- q->link.undertime == PSCHED_PASTPERFECT)
- break;
-
- q->toplevel = TC_CBQ_MAXLEVEL;
- q->link.undertime = PSCHED_PASTPERFECT;
- }
-
- /* No packets in the scheduler, or nobody wants to give them to us :-(
- * Sigh... Start the watchdog timer in the latter case.
- */
-
- if (sch->q.qlen) {
- qdisc_qstats_overlimit(sch);
- if (q->wd_expires)
- qdisc_watchdog_schedule(&q->watchdog,
- now + q->wd_expires);
- }
- return NULL;
-}
-
-/* CBQ class maintenance routines */
-
-static void cbq_adjust_levels(struct cbq_class *this)
-{
- if (this == NULL)
- return;
-
- do {
- int level = 0;
- struct cbq_class *cl;
-
- cl = this->children;
- if (cl) {
- do {
- if (cl->level > level)
- level = cl->level;
- } while ((cl = cl->sibling) != this->children);
- }
- this->level = level + 1;
- } while ((this = this->tparent) != NULL);
-}
-
-static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
-{
- struct cbq_class *cl;
- unsigned int h;
-
- if (q->quanta[prio] == 0)
- return;
-
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- /* BUGGGG... Beware! This expression suffers from
- * arithmetic overflow!
- */
- if (cl->priority == prio) {
- cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
- q->quanta[prio];
- }
- if (cl->quantum <= 0 ||
- cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
- pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
- cl->common.classid, cl->quantum);
- cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
- }
- }
- }
-}
-
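The "BUGGGG" warning above is justified: weight * allot * nclasses is evaluated in long and can wrap for large weights. A sketch of how the same expression could be computed safely in 64 bits (the retired code never did this; the function name and non-negative inputs are assumptions of the sketch):

#include <stdint.h>

static long quantum_64(long weight, long allot, int nclasses,
		       unsigned int quanta_sum)
{
	/* widen before multiplying so the product cannot wrap;
	 * assumes weight and allot are non-negative
	 */
	return (long)(((uint64_t)weight * (uint64_t)allot * nclasses) /
		      quanta_sum);
}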
-static void cbq_sync_defmap(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- struct cbq_class *split = cl->split;
- unsigned int h;
- int i;
-
- if (split == NULL)
- return;
-
- for (i = 0; i <= TC_PRIO_MAX; i++) {
- if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
- split->defaults[i] = NULL;
- }
-
- for (i = 0; i <= TC_PRIO_MAX; i++) {
- int level = split->level;
-
- if (split->defaults[i])
- continue;
-
- for (h = 0; h < q->clhash.hashsize; h++) {
- struct cbq_class *c;
-
- hlist_for_each_entry(c, &q->clhash.hash[h],
- common.hnode) {
- if (c->split == split && c->level < level &&
- c->defmap & (1<<i)) {
- split->defaults[i] = c;
- level = c->level;
- }
- }
- }
- }
-}
-
-static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
-{
- struct cbq_class *split = NULL;
-
- if (splitid == 0) {
- split = cl->split;
- if (!split)
- return;
- splitid = split->common.classid;
- }
-
- if (split == NULL || split->common.classid != splitid) {
- for (split = cl->tparent; split; split = split->tparent)
- if (split->common.classid == splitid)
- break;
- }
-
- if (split == NULL)
- return;
-
- if (cl->split != split) {
- cl->defmap = 0;
- cbq_sync_defmap(cl);
- cl->split = split;
- cl->defmap = def & mask;
- } else
- cl->defmap = (cl->defmap & ~mask) | (def & mask);
-
- cbq_sync_defmap(cl);
-}
-
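The masked update at the heart of cbq_change_defmap() is worth isolating: bits selected by mask are taken from def, and all other bits keep their previous value. A standalone sketch with an illustrative name and a worked check:

#include <assert.h>

static unsigned int update_defmap(unsigned int old, unsigned int def,
				  unsigned int mask)
{
	return (old & ~mask) | (def & mask);
}

int main(void)
{
	/* old = 0b1010; mask selects bits 0 and 3; def sets only bit 0,
	 * so bit 3 is cleared and bit 1 is preserved: result 0b0011
	 */
	assert(update_defmap(0xA, 0x1, 0x9) == 0x3);
	return 0;
}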
-static void cbq_unlink_class(struct cbq_class *this)
-{
- struct cbq_class *cl, **clp;
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-
- qdisc_class_hash_remove(&q->clhash, &this->common);
-
- if (this->tparent) {
- clp = &this->sibling;
- cl = *clp;
- do {
- if (cl == this) {
- *clp = cl->sibling;
- break;
- }
- clp = &cl->sibling;
- } while ((cl = *clp) != this->sibling);
-
- if (this->tparent->children == this) {
- this->tparent->children = this->sibling;
- if (this->sibling == this)
- this->tparent->children = NULL;
- }
- } else {
- WARN_ON(this->sibling != this);
- }
-}
-
-static void cbq_link_class(struct cbq_class *this)
-{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
- struct cbq_class *parent = this->tparent;
-
- this->sibling = this;
- qdisc_class_hash_insert(&q->clhash, &this->common);
-
- if (parent == NULL)
- return;
-
- if (parent->children == NULL) {
- parent->children = this;
- } else {
- this->sibling = parent->children->sibling;
- parent->children->sibling = this;
- }
-}
-
-static void
-cbq_reset(struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl;
- int prio;
- unsigned int h;
-
- q->activemask = 0;
- q->pmask = 0;
- q->tx_class = NULL;
- q->tx_borrowed = NULL;
- qdisc_watchdog_cancel(&q->watchdog);
- q->toplevel = TC_CBQ_MAXLEVEL;
- q->now = psched_get_time();
-
- for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
- q->active[prio] = NULL;
-
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- qdisc_reset(cl->q);
-
- cl->next_alive = NULL;
- cl->undertime = PSCHED_PASTPERFECT;
- cl->avgidle = cl->maxidle;
- cl->deficit = cl->quantum;
- cl->cpriority = cl->priority;
- }
- }
-}
-
-
-static void cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
-{
- if (lss->change & TCF_CBQ_LSS_FLAGS) {
- cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
- cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
- }
- if (lss->change & TCF_CBQ_LSS_EWMA)
- cl->ewma_log = lss->ewma_log;
- if (lss->change & TCF_CBQ_LSS_AVPKT)
- cl->avpkt = lss->avpkt;
- if (lss->change & TCF_CBQ_LSS_MINIDLE)
- cl->minidle = -(long)lss->minidle;
- if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
- cl->maxidle = lss->maxidle;
- cl->avgidle = lss->maxidle;
- }
- if (lss->change & TCF_CBQ_LSS_OFFTIME)
- cl->offtime = lss->offtime;
-}
-
-static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
-{
- q->nclasses[cl->priority]--;
- q->quanta[cl->priority] -= cl->weight;
- cbq_normalize_quanta(q, cl->priority);
-}
-
-static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
-{
- q->nclasses[cl->priority]++;
- q->quanta[cl->priority] += cl->weight;
- cbq_normalize_quanta(q, cl->priority);
-}
-
-static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
- if (wrr->allot)
- cl->allot = wrr->allot;
- if (wrr->weight)
- cl->weight = wrr->weight;
- if (wrr->priority) {
- cl->priority = wrr->priority - 1;
- cl->cpriority = cl->priority;
- if (cl->priority >= cl->priority2)
- cl->priority2 = TC_CBQ_MAXPRIO - 1;
- }
-
- cbq_addprio(q, cl);
- return 0;
-}
-
-static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
-{
- cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
- return 0;
-}
-
-static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
- [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
- [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
- [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
- [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
- [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
- [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
- [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
-};
-
-static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
- struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- int err;
-
- if (!opt) {
- NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
- return -EINVAL;
- }
-
- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
- cbq_policy, extack);
- if (err < 0)
- return err;
-
- if (tb[TCA_CBQ_WRROPT]) {
- const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
-
- if (wrr->priority > TC_CBQ_MAXPRIO) {
- NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
- err = -EINVAL;
- }
- }
- return err;
-}
-
-static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct nlattr *tb[TCA_CBQ_MAX + 1];
- struct tc_ratespec *r;
- int err;
-
- qdisc_watchdog_init(&q->watchdog, sch);
-
- err = cbq_opt_parse(tb, opt, extack);
- if (err < 0)
- return err;
-
- if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
- NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
- return -EINVAL;
- }
-
- r = nla_data(tb[TCA_CBQ_RATE]);
-
- q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
- if (!q->link.R_tab)
- return -EINVAL;
-
- err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
- if (err)
- goto put_rtab;
-
- err = qdisc_class_hash_init(&q->clhash);
- if (err < 0)
- goto put_block;
-
- q->link.sibling = &q->link;
- q->link.common.classid = sch->handle;
- q->link.qdisc = sch;
- q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- sch->handle, NULL);
- if (!q->link.q)
- q->link.q = &noop_qdisc;
- else
- qdisc_hash_add(q->link.q, true);
-
- q->link.priority = TC_CBQ_MAXPRIO - 1;
- q->link.priority2 = TC_CBQ_MAXPRIO - 1;
- q->link.cpriority = TC_CBQ_MAXPRIO - 1;
- q->link.allot = psched_mtu(qdisc_dev(sch));
- q->link.quantum = q->link.allot;
- q->link.weight = q->link.R_tab->rate.rate;
-
- q->link.ewma_log = TC_CBQ_DEF_EWMA;
- q->link.avpkt = q->link.allot/2;
- q->link.minidle = -0x7FFFFFFF;
-
- q->toplevel = TC_CBQ_MAXLEVEL;
- q->now = psched_get_time();
-
- cbq_link_class(&q->link);
-
- if (tb[TCA_CBQ_LSSOPT])
- cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
-
- cbq_addprio(q, &q->link);
- return 0;
-
-put_block:
- tcf_block_put(q->link.block);
-
-put_rtab:
- qdisc_put_rtab(q->link.R_tab);
- return err;
-}
-
-static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
-
- if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cbq_lssopt opt;
-
- opt.flags = 0;
- if (cl->borrow == NULL)
- opt.flags |= TCF_CBQ_LSS_BOUNDED;
- if (cl->share == NULL)
- opt.flags |= TCF_CBQ_LSS_ISOLATED;
- opt.ewma_log = cl->ewma_log;
- opt.level = cl->level;
- opt.avpkt = cl->avpkt;
- opt.maxidle = cl->maxidle;
- opt.minidle = (u32)(-cl->minidle);
- opt.offtime = cl->offtime;
- opt.change = ~0;
- if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cbq_wrropt opt;
-
- memset(&opt, 0, sizeof(opt));
- opt.flags = 0;
- opt.allot = cl->allot;
- opt.priority = cl->priority + 1;
- opt.cpriority = cl->cpriority + 1;
- opt.weight = cl->weight;
- if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cbq_fopt opt;
-
- if (cl->split || cl->defmap) {
- opt.split = cl->split ? cl->split->common.classid : 0;
- opt.defmap = cl->defmap;
- opt.defchange = ~0;
- if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
- goto nla_put_failure;
- }
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
-{
- if (cbq_dump_lss(skb, cl) < 0 ||
- cbq_dump_rate(skb, cl) < 0 ||
- cbq_dump_wrr(skb, cl) < 0 ||
- cbq_dump_fopt(skb, cl) < 0)
- return -1;
- return 0;
-}
-
-static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct nlattr *nest;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- if (cbq_dump_attr(skb, &q->link) < 0)
- goto nla_put_failure;
- return nla_nest_end(skb, nest);
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static int
-cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
-
- q->link.xstats.avgidle = q->link.avgidle;
- return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
-}
-
-static int
-cbq_dump_class(struct Qdisc *sch, unsigned long arg,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
- struct nlattr *nest;
-
- if (cl->tparent)
- tcm->tcm_parent = cl->tparent->common.classid;
- else
- tcm->tcm_parent = TC_H_ROOT;
- tcm->tcm_handle = cl->common.classid;
- tcm->tcm_info = cl->q->handle;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- if (cbq_dump_attr(skb, cl) < 0)
- goto nla_put_failure;
- return nla_nest_end(skb, nest);
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static int
-cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
- struct gnet_dump *d)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class *)arg;
- __u32 qlen;
-
- cl->xstats.avgidle = cl->avgidle;
- cl->xstats.undertime = 0;
- qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
-
- if (cl->undertime != PSCHED_PASTPERFECT)
- cl->xstats.undertime = cl->undertime - q->now;
-
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
- return -1;
-
- return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
-}
-
-static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old, struct netlink_ext_ack *extack)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- if (new == NULL) {
- new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- cl->common.classid, extack);
- if (new == NULL)
- return -ENOBUFS;
- }
-
- *old = qdisc_replace(sch, new, &cl->q);
- return 0;
-}
-
-static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- return cl->q;
-}
-
-static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- cbq_deactivate_class(cl);
-}
-
-static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
-
- return (unsigned long)cbq_class_lookup(q, classid);
-}
-
-static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
-
- WARN_ON(cl->filters);
-
- tcf_block_put(cl->block);
- qdisc_put(cl->q);
- qdisc_put_rtab(cl->R_tab);
- gen_kill_estimator(&cl->rate_est);
- if (cl != &q->link)
- kfree(cl);
-}
-
-static void cbq_destroy(struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct hlist_node *next;
- struct cbq_class *cl;
- unsigned int h;
-
-#ifdef CONFIG_NET_CLS_ACT
- q->rx_class = NULL;
-#endif
- /*
- * Filters must be destroyed first because we don't destroy the
- * classes from root to leaves, which means that filters can still
- * be bound to classes which have been destroyed already. --TGR '04
- */
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- tcf_block_put(cl->block);
- cl->block = NULL;
- }
- }
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
- common.hnode)
- cbq_destroy_class(sch, cl);
- }
- qdisc_class_hash_destroy(&q->clhash);
-}
-
-static int
-cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
- unsigned long *arg, struct netlink_ext_ack *extack)
-{
- int err;
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class *)*arg;
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_CBQ_MAX + 1];
- struct cbq_class *parent;
- struct qdisc_rate_table *rtab = NULL;
-
- err = cbq_opt_parse(tb, opt, extack);
- if (err < 0)
- return err;
-
- if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
- NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
- return -EOPNOTSUPP;
- }
-
- if (cl) {
- /* Check parent */
- if (parentid) {
- if (cl->tparent &&
- cl->tparent->common.classid != parentid) {
- NL_SET_ERR_MSG(extack, "Invalid parent id");
- return -EINVAL;
- }
- if (!cl->tparent && parentid != TC_H_ROOT) {
- NL_SET_ERR_MSG(extack, "Parent must be root");
- return -EINVAL;
- }
- }
-
- if (tb[TCA_CBQ_RATE]) {
- rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
- tb[TCA_CBQ_RTAB], extack);
- if (rtab == NULL)
- return -EINVAL;
- }
-
- if (tca[TCA_RATE]) {
- err = gen_replace_estimator(&cl->bstats, NULL,
- &cl->rate_est,
- NULL,
- true,
- tca[TCA_RATE]);
- if (err) {
- NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
- qdisc_put_rtab(rtab);
- return err;
- }
- }
-
- /* Change class parameters */
- sch_tree_lock(sch);
-
- if (cl->next_alive != NULL)
- cbq_deactivate_class(cl);
-
- if (rtab) {
- qdisc_put_rtab(cl->R_tab);
- cl->R_tab = rtab;
- }
-
- if (tb[TCA_CBQ_LSSOPT])
- cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
-
- if (tb[TCA_CBQ_WRROPT]) {
- cbq_rmprio(q, cl);
- cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
- }
-
- if (tb[TCA_CBQ_FOPT])
- cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
-
- if (cl->q->q.qlen)
- cbq_activate_class(cl);
-
- sch_tree_unlock(sch);
-
- return 0;
- }
-
- if (parentid == TC_H_ROOT)
- return -EINVAL;
-
- if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
- NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
- return -EINVAL;
- }
-
- rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
- extack);
- if (rtab == NULL)
- return -EINVAL;
-
- if (classid) {
- err = -EINVAL;
- if (TC_H_MAJ(classid ^ sch->handle) ||
- cbq_class_lookup(q, classid)) {
- NL_SET_ERR_MSG(extack, "Specified class not found");
- goto failure;
- }
- } else {
- int i;
- classid = TC_H_MAKE(sch->handle, 0x8000);
-
- for (i = 0; i < 0x8000; i++) {
- if (++q->hgenerator >= 0x8000)
- q->hgenerator = 1;
- if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
- break;
- }
- err = -ENOSR;
- if (i >= 0x8000) {
- NL_SET_ERR_MSG(extack, "Unable to generate classid");
- goto failure;
- }
- classid = classid|q->hgenerator;
- }
-
- parent = &q->link;
- if (parentid) {
- parent = cbq_class_lookup(q, parentid);
- err = -EINVAL;
- if (!parent) {
- NL_SET_ERR_MSG(extack, "Failed to find parentid");
- goto failure;
- }
- }
-
- err = -ENOBUFS;
- cl = kzalloc(sizeof(*cl), GFP_KERNEL);
- if (cl == NULL)
- goto failure;
-
- gnet_stats_basic_sync_init(&cl->bstats);
- err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
- if (err) {
- kfree(cl);
- goto failure;
- }
-
- if (tca[TCA_RATE]) {
- err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
- NULL, true, tca[TCA_RATE]);
- if (err) {
- NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
- tcf_block_put(cl->block);
- kfree(cl);
- goto failure;
- }
- }
-
- cl->R_tab = rtab;
- rtab = NULL;
- cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
- NULL);
- if (!cl->q)
- cl->q = &noop_qdisc;
- else
- qdisc_hash_add(cl->q, true);
-
- cl->common.classid = classid;
- cl->tparent = parent;
- cl->qdisc = sch;
- cl->allot = parent->allot;
- cl->quantum = cl->allot;
- cl->weight = cl->R_tab->rate.rate;
-
- sch_tree_lock(sch);
- cbq_link_class(cl);
- cl->borrow = cl->tparent;
- if (cl->tparent != &q->link)
- cl->share = cl->tparent;
- cbq_adjust_levels(parent);
- cl->minidle = -0x7FFFFFFF;
- cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
- cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
- if (cl->ewma_log == 0)
- cl->ewma_log = q->link.ewma_log;
- if (cl->maxidle == 0)
- cl->maxidle = q->link.maxidle;
- if (cl->avpkt == 0)
- cl->avpkt = q->link.avpkt;
- if (tb[TCA_CBQ_FOPT])
- cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
- sch_tree_unlock(sch);
-
- qdisc_class_hash_grow(sch, &q->clhash);
-
- *arg = (unsigned long)cl;
- return 0;
-
-failure:
- qdisc_put_rtab(rtab);
- return err;
-}
-
-static int cbq_delete(struct Qdisc *sch, unsigned long arg,
- struct netlink_ext_ack *extack)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- if (cl->filters || cl->children || cl == &q->link)
- return -EBUSY;
-
- sch_tree_lock(sch);
-
- qdisc_purge_queue(cl->q);
-
- if (cl->next_alive)
- cbq_deactivate_class(cl);
-
- if (q->tx_borrowed == cl)
- q->tx_borrowed = q->tx_class;
- if (q->tx_class == cl) {
- q->tx_class = NULL;
- q->tx_borrowed = NULL;
- }
-#ifdef CONFIG_NET_CLS_ACT
- if (q->rx_class == cl)
- q->rx_class = NULL;
-#endif
-
- cbq_unlink_class(cl);
- cbq_adjust_levels(cl->tparent);
- cl->defmap = 0;
- cbq_sync_defmap(cl);
-
- cbq_rmprio(q, cl);
- sch_tree_unlock(sch);
-
- cbq_destroy_class(sch, cl);
- return 0;
-}
-
-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
- struct netlink_ext_ack *extack)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- if (cl == NULL)
- cl = &q->link;
-
- return cl->block;
-}
-
-static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
- u32 classid)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *p = (struct cbq_class *)parent;
- struct cbq_class *cl = cbq_class_lookup(q, classid);
-
- if (cl) {
- if (p && p->level <= cl->level)
- return 0;
- cl->filters++;
- return (unsigned long)cl;
- }
- return 0;
-}
-
-static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- cl->filters--;
-}
-
-static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl;
- unsigned int h;
-
- if (arg->stop)
- return;
-
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
- return;
- }
- }
-}
-
-static const struct Qdisc_class_ops cbq_class_ops = {
- .graft = cbq_graft,
- .leaf = cbq_leaf,
- .qlen_notify = cbq_qlen_notify,
- .find = cbq_find,
- .change = cbq_change_class,
- .delete = cbq_delete,
- .walk = cbq_walk,
- .tcf_block = cbq_tcf_block,
- .bind_tcf = cbq_bind_filter,
- .unbind_tcf = cbq_unbind_filter,
- .dump = cbq_dump_class,
- .dump_stats = cbq_dump_class_stats,
-};
-
-static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
- .next = NULL,
- .cl_ops = &cbq_class_ops,
- .id = "cbq",
- .priv_size = sizeof(struct cbq_sched_data),
- .enqueue = cbq_enqueue,
- .dequeue = cbq_dequeue,
- .peek = qdisc_peek_dequeued,
- .init = cbq_init,
- .reset = cbq_reset,
- .destroy = cbq_destroy,
- .change = NULL,
- .dump = cbq_dump,
- .dump_stats = cbq_dump_stats,
- .owner = THIS_MODULE,
-};
-
-static int __init cbq_module_init(void)
-{
- return register_qdisc(&cbq_qdisc_ops);
-}
-static void __exit cbq_module_exit(void)
-{
- unregister_qdisc(&cbq_qdisc_ops);
-}
-module_init(cbq_module_init)
-module_exit(cbq_module_exit)
-MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
deleted file mode 100644
index 401ffaf87d62..000000000000
--- a/net/sched/sch_dsmark.c
+++ /dev/null
@@ -1,518 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* net/sched/sch_dsmark.c - Differentiated Services field marker */
-
-/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
-
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/bitops.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <asm/byteorder.h>
-
-/*
- * classid       class      marking
- * -------       -----      -------
- * n/a             0        n/a
- * x:0             1        use entry [0]
- * ...            ...       ...
- * x:y, y>0       y+1       use entry [y]
- * ...            ...       ...
- * x:indices-1   indices    use entry [indices-1]
- * ...            ...       ...
- * x:y            y+1       use entry [y & (indices-1)]
- * ...            ...       ...
- * 0xffff        0x10000    use entry [indices-1]
- */
-
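The wrap-around rows in the table above follow from indices being a power of two: class c (1-based, i.e. TC_H_MIN(classid) + 1 as returned by dsmark_find() below) uses mask/value entry (c - 1) & (indices - 1). As a one-line sketch with an illustrative name:

static unsigned int entry_for_class(unsigned int c, unsigned int indices)
{
	return (c - 1) & (indices - 1);   /* indices is a power of two */
}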
-
-#define NO_DEFAULT_INDEX (1 << 16)
-
-struct mask_value {
- u8 mask;
- u8 value;
-};
-
-struct dsmark_qdisc_data {
- struct Qdisc *q;
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
- struct mask_value *mv;
- u16 indices;
- u8 set_tc_index;
- u32 default_index; /* index range is 0...0xffff */
-#define DSMARK_EMBEDDED_SZ 16
- struct mask_value embedded[DSMARK_EMBEDDED_SZ];
-};
-
-static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
-{
- return index <= p->indices && index > 0;
-}
-
-/* ------------------------- Class/flow operations ------------------------- */
-
-static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
- __func__, sch, p, new, old);
-
- if (new == NULL) {
- new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- sch->handle, NULL);
- if (new == NULL)
- new = &noop_qdisc;
- }
-
- *old = qdisc_replace(sch, new, &p->q);
- return 0;
-}
-
-static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- return p->q;
-}
-
-static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
-{
- return TC_H_MIN(classid) + 1;
-}
-
-static unsigned long dsmark_bind_filter(struct Qdisc *sch,
- unsigned long parent, u32 classid)
-{
- pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
- __func__, sch, qdisc_priv(sch), classid);
-
- return dsmark_find(sch, classid);
-}
-
-static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
-{
-}
-
-static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
- [TCA_DSMARK_INDICES] = { .type = NLA_U16 },
- [TCA_DSMARK_DEFAULT_INDEX] = { .type = NLA_U16 },
- [TCA_DSMARK_SET_TC_INDEX] = { .type = NLA_FLAG },
- [TCA_DSMARK_MASK] = { .type = NLA_U8 },
- [TCA_DSMARK_VALUE] = { .type = NLA_U8 },
-};
-
-static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct nlattr **tca, unsigned long *arg,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_DSMARK_MAX + 1];
- int err = -EINVAL;
-
- pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
- __func__, sch, p, classid, parent, *arg);
-
- if (!dsmark_valid_index(p, *arg)) {
- err = -ENOENT;
- goto errout;
- }
-
- if (!opt)
- goto errout;
-
- err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
- dsmark_policy, NULL);
- if (err < 0)
- goto errout;
-
- if (tb[TCA_DSMARK_VALUE])
- p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);
-
- if (tb[TCA_DSMARK_MASK])
- p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
-
- err = 0;
-
-errout:
- return err;
-}
-
-static int dsmark_delete(struct Qdisc *sch, unsigned long arg,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- if (!dsmark_valid_index(p, arg))
- return -EINVAL;
-
- p->mv[arg - 1].mask = 0xff;
- p->mv[arg - 1].value = 0;
-
- return 0;
-}
-
-static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- int i;
-
- pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
- __func__, sch, p, walker);
-
- if (walker->stop)
- return;
-
- for (i = 0; i < p->indices; i++) {
- if (p->mv[i].mask == 0xff && !p->mv[i].value) {
- walker->count++;
- continue;
- }
- if (!tc_qdisc_stats_dump(sch, i + 1, walker))
- break;
- }
-}
-
-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- return p->block;
-}
-
-/* --------------------------- Qdisc operations ---------------------------- */
-
-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- unsigned int len = qdisc_pkt_len(skb);
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- int err;
-
- pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
-
- if (p->set_tc_index) {
- int wlen = skb_network_offset(skb);
-
- switch (skb_protocol(skb, true)) {
- case htons(ETH_P_IP):
- wlen += sizeof(struct iphdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
- goto drop;
-
- skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
- & ~INET_ECN_MASK;
- break;
-
- case htons(ETH_P_IPV6):
- wlen += sizeof(struct ipv6hdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
- goto drop;
-
- skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
- & ~INET_ECN_MASK;
- break;
- default:
- skb->tc_index = 0;
- break;
- }
- }
-
- if (TC_H_MAJ(skb->priority) == sch->handle)
- skb->tc_index = TC_H_MIN(skb->priority);
- else {
- struct tcf_result res;
- struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
- int result = tcf_classify(skb, NULL, fl, &res, false);
-
- pr_debug("result %d class 0x%04x\n", result, res.classid);
-
- switch (result) {
-#ifdef CONFIG_NET_CLS_ACT
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- __qdisc_drop(skb, to_free);
- return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-
- case TC_ACT_SHOT:
- goto drop;
-#endif
- case TC_ACT_OK:
- skb->tc_index = TC_H_MIN(res.classid);
- break;
-
- default:
- if (p->default_index != NO_DEFAULT_INDEX)
- skb->tc_index = p->default_index;
- break;
- }
- }
-
- err = qdisc_enqueue(skb, p->q, to_free);
- if (err != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(err))
- qdisc_qstats_drop(sch);
- return err;
- }
-
- sch->qstats.backlog += len;
- sch->q.qlen++;
-
- return NET_XMIT_SUCCESS;
-
-drop:
- qdisc_drop(skb, sch, to_free);
- return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-}
-
-static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct sk_buff *skb;
- u32 index;
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
- skb = qdisc_dequeue_peeked(p->q);
- if (skb == NULL)
- return NULL;
-
- qdisc_bstats_update(sch, skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
-
- index = skb->tc_index & (p->indices - 1);
- pr_debug("index %d->%d\n", skb->tc_index, index);
-
- switch (skb_protocol(skb, true)) {
- case htons(ETH_P_IP):
- ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
- p->mv[index].value);
- break;
- case htons(ETH_P_IPV6):
- ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
- p->mv[index].value);
- break;
- default:
- /*
- * Only complain if a change was actually attempted.
- * This way, we can send non-IP traffic through dsmark
- * and don't need yet another qdisc as a bypass.
- */
- if (p->mv[index].mask != 0xff || p->mv[index].value)
- pr_warn("%s: unsupported protocol %d\n",
- __func__, ntohs(skb_protocol(skb, true)));
- break;
- }
-
- return skb;
-}
-
-static struct sk_buff *dsmark_peek(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
- return p->q->ops->peek(p->q);
-}
-
-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct nlattr *tb[TCA_DSMARK_MAX + 1];
- int err = -EINVAL;
- u32 default_index = NO_DEFAULT_INDEX;
- u16 indices;
- int i;
-
- pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
-
- if (!opt)
- goto errout;
-
- err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
- if (err)
- return err;
-
- err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
- dsmark_policy, NULL);
- if (err < 0)
- goto errout;
-
- err = -EINVAL;
- if (!tb[TCA_DSMARK_INDICES])
- goto errout;
- indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
-
- if (hweight32(indices) != 1)
- goto errout;
-
- if (tb[TCA_DSMARK_DEFAULT_INDEX])
- default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
-
- if (indices <= DSMARK_EMBEDDED_SZ)
- p->mv = p->embedded;
- else
- p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
- if (!p->mv) {
- err = -ENOMEM;
- goto errout;
- }
- for (i = 0; i < indices; i++) {
- p->mv[i].mask = 0xff;
- p->mv[i].value = 0;
- }
- p->indices = indices;
- p->default_index = default_index;
- p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
-
- p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
- NULL);
- if (p->q == NULL)
- p->q = &noop_qdisc;
- else
- qdisc_hash_add(p->q, true);
-
- pr_debug("%s: qdisc %p\n", __func__, p->q);
-
- err = 0;
-errout:
- return err;
-}
-
-static void dsmark_reset(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
- if (p->q)
- qdisc_reset(p->q);
-}
-
-static void dsmark_destroy(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
- tcf_block_put(p->block);
- qdisc_put(p->q);
- if (p->mv != p->embedded)
- kfree(p->mv);
-}
-
-static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct nlattr *opts = NULL;
-
- pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
-
- if (!dsmark_valid_index(p, cl))
- return -EINVAL;
-
- tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
- tcm->tcm_info = p->q->handle;
-
- opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (opts == NULL)
- goto nla_put_failure;
- if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
- nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
- goto nla_put_failure;
-
- return nla_nest_end(skb, opts);
-
-nla_put_failure:
- nla_nest_cancel(skb, opts);
- return -EMSGSIZE;
-}
-
-static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct nlattr *opts = NULL;
-
- opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (opts == NULL)
- goto nla_put_failure;
- if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
- goto nla_put_failure;
-
- if (p->default_index != NO_DEFAULT_INDEX &&
- nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
- goto nla_put_failure;
-
- if (p->set_tc_index &&
- nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
- goto nla_put_failure;
-
- return nla_nest_end(skb, opts);
-
-nla_put_failure:
- nla_nest_cancel(skb, opts);
- return -EMSGSIZE;
-}
-
-static const struct Qdisc_class_ops dsmark_class_ops = {
- .graft = dsmark_graft,
- .leaf = dsmark_leaf,
- .find = dsmark_find,
- .change = dsmark_change,
- .delete = dsmark_delete,
- .walk = dsmark_walk,
- .tcf_block = dsmark_tcf_block,
- .bind_tcf = dsmark_bind_filter,
- .unbind_tcf = dsmark_unbind_filter,
- .dump = dsmark_dump_class,
-};
-
-static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
- .next = NULL,
- .cl_ops = &dsmark_class_ops,
- .id = "dsmark",
- .priv_size = sizeof(struct dsmark_qdisc_data),
- .enqueue = dsmark_enqueue,
- .dequeue = dsmark_dequeue,
- .peek = dsmark_peek,
- .init = dsmark_init,
- .reset = dsmark_reset,
- .destroy = dsmark_destroy,
- .change = NULL,
- .dump = dsmark_dump,
- .owner = THIS_MODULE,
-};
-
-static int __init dsmark_module_init(void)
-{
- return register_qdisc(&dsmark_qdisc_ops);
-}
-
-static void __exit dsmark_module_exit(void)
-{
- unregister_qdisc(&dsmark_qdisc_ops);
-}
-
-module_init(dsmark_module_init)
-module_exit(dsmark_module_exit)
-
-MODULE_LICENSE("GPL");
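
The dsmark qdisc deleted above did its real work in dsmark_dequeue(): rewrite the IPv4/IPv6 DS field as (old & mask) | value using the per-class mask/value table, which is also why a "deleted" class is simply reset to mask 0xff / value 0 (a no-op rewrite). Below is a minimal, self-contained userspace sketch of that arithmetic; the constants are illustrative stand-ins, and change_dsfield() mimics only the byte math of ipv4_change_dsfield()/ipv6_change_dsfield(), not the IPv4 checksum fixup.

/* Build & run: cc -o dsfield dsfield.c && ./dsfield */
#include <stdio.h>
#include <stdint.h>

#define INET_ECN_MASK	0x03	/* low two bits of the TOS byte carry ECN */

/* Same rewrite rule the removed qdisc applied at dequeue time. */
static uint8_t change_dsfield(uint8_t tos, uint8_t mask, uint8_t value)
{
	return (tos & mask) | value;
}

int main(void)
{
	uint8_t tos = 0xb8 | 0x01;	/* EF DSCP with ECT(1) set */

	/* Remark DSCP to CS1 (0x20) while preserving the ECN bits, the way
	 * a dsmark class configured with mask = INET_ECN_MASK, value = 0x20
	 * would. Prints 0x21. */
	printf("remarked:  0x%02x\n",
	       change_dsfield(tos, INET_ECN_MASK, 0x20));

	/* The "unconfigured" class: mask 0xff, value 0 leaves the field
	 * untouched. Prints 0xb9. */
	printf("untouched: 0x%02x\n", change_dsfield(tos, 0xff, 0x00));
	return 0;
}
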
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 4c68abaa289b..48ed87b91086 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -17,6 +17,8 @@
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
+#include "sch_mqprio_lib.h"
+
struct mqprio_sched {
struct Qdisc **qdiscs;
u16 mode;
@@ -27,6 +29,62 @@ struct mqprio_sched {
u64 max_rate[TC_QOPT_MAX_QUEUE];
};
+static int mqprio_enable_offload(struct Qdisc *sch,
+ const struct tc_mqprio_qopt *qopt,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ int err, i;
+
+ switch (priv->mode) {
+ case TC_MQPRIO_MODE_DCB:
+ if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
+ return -EINVAL;
+ break;
+ case TC_MQPRIO_MODE_CHANNEL:
+ mqprio.flags = priv->flags;
+ if (priv->flags & TC_MQPRIO_F_MODE)
+ mqprio.mode = priv->mode;
+ if (priv->flags & TC_MQPRIO_F_SHAPER)
+ mqprio.shaper = priv->shaper;
+ if (priv->flags & TC_MQPRIO_F_MIN_RATE)
+ for (i = 0; i < mqprio.qopt.num_tc; i++)
+ mqprio.min_rate[i] = priv->min_rate[i];
+ if (priv->flags & TC_MQPRIO_F_MAX_RATE)
+ for (i = 0; i < mqprio.qopt.num_tc; i++)
+ mqprio.max_rate[i] = priv->max_rate[i];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
+ &mqprio);
+ if (err)
+ return err;
+
+ priv->hw_offload = mqprio.qopt.hw;
+
+ return 0;
+}
+
+static void mqprio_disable_offload(struct Qdisc *sch)
+{
+ struct tc_mqprio_qopt_offload mqprio = { { 0 } };
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+
+ switch (priv->mode) {
+ case TC_MQPRIO_MODE_DCB:
+ case TC_MQPRIO_MODE_CHANNEL:
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
+ &mqprio);
+ break;
+ }
+}
+
static void mqprio_destroy(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
@@ -41,37 +99,17 @@ static void mqprio_destroy(struct Qdisc *sch)
kfree(priv->qdiscs);
}
- if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
- struct tc_mqprio_qopt_offload mqprio = { { 0 } };
-
- switch (priv->mode) {
- case TC_MQPRIO_MODE_DCB:
- case TC_MQPRIO_MODE_CHANNEL:
- dev->netdev_ops->ndo_setup_tc(dev,
- TC_SETUP_QDISC_MQPRIO,
- &mqprio);
- break;
- default:
- return;
- }
- } else {
+ if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
+ mqprio_disable_offload(sch);
+ else
netdev_set_num_tc(dev, 0);
- }
}
-static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
+static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
+ const struct tc_mqprio_caps *caps,
+ struct netlink_ext_ack *extack)
{
- int i, j;
-
- /* Verify num_tc is not out of max range */
- if (qopt->num_tc > TC_MAX_QUEUE)
- return -EINVAL;
-
- /* Verify priority mapping uses valid tcs */
- for (i = 0; i < TC_BITMASK + 1; i++) {
- if (qopt->prio_tc_map[i] >= qopt->num_tc)
- return -EINVAL;
- }
+ int err;
/* Limit qopt->hw to maximum supported offload value. Drivers have
 * the option of overriding this later if they don't support a
@@ -80,31 +118,23 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
- /* If hardware offload is requested we will leave it to the device
- * to either populate the queue counts itself or to validate the
- * provided queue counts. If ndo_setup_tc is not present then
- * hardware doesn't support offload and we should return an error.
+ /* If hardware offload is requested, we will leave 3 options to the
+ * device driver:
+ * - populate the queue counts itself (and ignore what was requested)
+ * - validate the provided queue counts by itself (and apply them)
+ * - request queue count validation here (and apply them)
*/
- if (qopt->hw)
- return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;
-
- for (i = 0; i < qopt->num_tc; i++) {
- unsigned int last = qopt->offset[i] + qopt->count[i];
-
- /* Verify the queue count is in tx range being equal to the
- * real_num_tx_queues indicates the last queue is in use.
- */
- if (qopt->offset[i] >= dev->real_num_tx_queues ||
- !qopt->count[i] ||
- last > dev->real_num_tx_queues)
- return -EINVAL;
-
- /* Verify that the offset and counts do not overlap */
- for (j = i + 1; j < qopt->num_tc; j++) {
- if (last > qopt->offset[j])
- return -EINVAL;
- }
- }
+ err = mqprio_validate_qopt(dev, qopt,
+ !qopt->hw || caps->validate_queue_counts,
+ false, extack);
+ if (err)
+ return err;
+
+ /* If ndo_setup_tc is not present then hardware doesn't support offload
+ * and we should return an error.
+ */
+ if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
+ return -EINVAL;
return 0;
}
@@ -130,6 +160,67 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
return 0;
}
+static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
+ struct nlattr *opt)
+{
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct nlattr *tb[TCA_MQPRIO_MAX + 1];
+ struct nlattr *attr;
+ int i, rem, err;
+
+ err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
+ sizeof(*qopt));
+ if (err < 0)
+ return err;
+
+ if (!qopt->hw)
+ return -EINVAL;
+
+ if (tb[TCA_MQPRIO_MODE]) {
+ priv->flags |= TC_MQPRIO_F_MODE;
+ priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
+ }
+
+ if (tb[TCA_MQPRIO_SHAPER]) {
+ priv->flags |= TC_MQPRIO_F_SHAPER;
+ priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
+ }
+
+ if (tb[TCA_MQPRIO_MIN_RATE64]) {
+ if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+ return -EINVAL;
+ i = 0;
+ nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
+ rem) {
+ if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
+ return -EINVAL;
+ if (i >= qopt->num_tc)
+ break;
+ priv->min_rate[i] = *(u64 *)nla_data(attr);
+ i++;
+ }
+ priv->flags |= TC_MQPRIO_F_MIN_RATE;
+ }
+
+ if (tb[TCA_MQPRIO_MAX_RATE64]) {
+ if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+ return -EINVAL;
+ i = 0;
+ nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
+ rem) {
+ if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
+ return -EINVAL;
+ if (i >= qopt->num_tc)
+ break;
+ priv->max_rate[i] = *(u64 *)nla_data(attr);
+ i++;
+ }
+ priv->flags |= TC_MQPRIO_F_MAX_RATE;
+ }
+
+ return 0;
+}
+
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -139,9 +230,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
struct Qdisc *qdisc;
int i, err = -EOPNOTSUPP;
struct tc_mqprio_qopt *qopt = NULL;
- struct nlattr *tb[TCA_MQPRIO_MAX + 1];
- struct nlattr *attr;
- int rem;
+ struct tc_mqprio_caps caps;
int len;
BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
@@ -160,61 +249,18 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
if (!opt || nla_len(opt) < sizeof(*qopt))
return -EINVAL;
+ qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
+ &caps, sizeof(caps));
+
qopt = nla_data(opt);
- if (mqprio_parse_opt(dev, qopt))
+ if (mqprio_parse_opt(dev, qopt, &caps, extack))
return -EINVAL;
len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
if (len > 0) {
- err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
- sizeof(*qopt));
- if (err < 0)
+ err = mqprio_parse_nlattr(sch, qopt, opt);
+ if (err)
return err;
-
- if (!qopt->hw)
- return -EINVAL;
-
- if (tb[TCA_MQPRIO_MODE]) {
- priv->flags |= TC_MQPRIO_F_MODE;
- priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
- }
-
- if (tb[TCA_MQPRIO_SHAPER]) {
- priv->flags |= TC_MQPRIO_F_SHAPER;
- priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
- }
-
- if (tb[TCA_MQPRIO_MIN_RATE64]) {
- if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
- return -EINVAL;
- i = 0;
- nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
- rem) {
- if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
- return -EINVAL;
- if (i >= qopt->num_tc)
- break;
- priv->min_rate[i] = *(u64 *)nla_data(attr);
- i++;
- }
- priv->flags |= TC_MQPRIO_F_MIN_RATE;
- }
-
- if (tb[TCA_MQPRIO_MAX_RATE64]) {
- if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
- return -EINVAL;
- i = 0;
- nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
- rem) {
- if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
- return -EINVAL;
- if (i >= qopt->num_tc)
- break;
- priv->max_rate[i] = *(u64 *)nla_data(attr);
- i++;
- }
- priv->flags |= TC_MQPRIO_F_MAX_RATE;
- }
}
/* pre-allocate qdisc, attachment can't fail */
@@ -241,36 +287,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
* supplied and verified mapping
*/
if (qopt->hw) {
- struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};
-
- switch (priv->mode) {
- case TC_MQPRIO_MODE_DCB:
- if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
- return -EINVAL;
- break;
- case TC_MQPRIO_MODE_CHANNEL:
- mqprio.flags = priv->flags;
- if (priv->flags & TC_MQPRIO_F_MODE)
- mqprio.mode = priv->mode;
- if (priv->flags & TC_MQPRIO_F_SHAPER)
- mqprio.shaper = priv->shaper;
- if (priv->flags & TC_MQPRIO_F_MIN_RATE)
- for (i = 0; i < mqprio.qopt.num_tc; i++)
- mqprio.min_rate[i] = priv->min_rate[i];
- if (priv->flags & TC_MQPRIO_F_MAX_RATE)
- for (i = 0; i < mqprio.qopt.num_tc; i++)
- mqprio.max_rate[i] = priv->max_rate[i];
- break;
- default:
- return -EINVAL;
- }
- err = dev->netdev_ops->ndo_setup_tc(dev,
- TC_SETUP_QDISC_MQPRIO,
- &mqprio);
+ err = mqprio_enable_offload(sch, qopt, extack);
if (err)
return err;
-
- priv->hw_offload = mqprio.qopt.hw;
} else {
netdev_set_num_tc(dev, qopt->num_tc);
for (i = 0; i < qopt->num_tc; i++)
@@ -387,7 +406,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
struct tc_mqprio_qopt opt = { 0 };
struct Qdisc *qdisc;
- unsigned int ntx, tc;
+ unsigned int ntx;
sch->q.qlen = 0;
gnet_stats_basic_sync_init(&sch->bstats);
@@ -411,15 +430,9 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
spin_unlock_bh(qdisc_lock(qdisc));
}
- opt.num_tc = netdev_get_num_tc(dev);
- memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+ mqprio_qopt_reconstruct(dev, &opt);
opt.hw = priv->hw_offload;
- for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
- opt.count[tc] = dev->tc_to_txq[tc].count;
- opt.offset[tc] = dev->tc_to_txq[tc].offset;
- }
-
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
goto nla_put_failure;
diff --git a/net/sched/sch_mqprio_lib.c b/net/sched/sch_mqprio_lib.c
new file mode 100644
index 000000000000..c58a533b8ec5
--- /dev/null
+++ b/net/sched/sch_mqprio_lib.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/types.h>
+#include <net/pkt_sched.h>
+
+#include "sch_mqprio_lib.h"
+
+/* Returns true if the intervals [a, b) and [c, d) overlap. */
+static bool intervals_overlap(int a, int b, int c, int d)
+{
+ int left = max(a, c), right = min(b, d);
+
+ return left < right;
+}
+
+static int mqprio_validate_queue_counts(struct net_device *dev,
+ const struct tc_mqprio_qopt *qopt,
+ bool allow_overlapping_txqs,
+ struct netlink_ext_ack *extack)
+{
+ int i, j;
+
+ for (i = 0; i < qopt->num_tc; i++) {
+ unsigned int last = qopt->offset[i] + qopt->count[i];
+
+ if (!qopt->count[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No queues for TC %d",
+ i);
+ return -EINVAL;
+ }
+
+ /* Verify the queue count is within the TX range; a range ending
+ * exactly at real_num_tx_queues means the last queue is in use.
+ */
+ if (qopt->offset[i] >= dev->real_num_tx_queues ||
+ last > dev->real_num_tx_queues) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Queues %d:%d for TC %d exceed the %d TX queues available",
+ qopt->count[i], qopt->offset[i],
+ i, dev->real_num_tx_queues);
+ return -EINVAL;
+ }
+
+ if (allow_overlapping_txqs)
+ continue;
+
+ /* Verify that the offset and counts do not overlap */
+ for (j = i + 1; j < qopt->num_tc; j++) {
+ if (intervals_overlap(qopt->offset[i], last,
+ qopt->offset[j],
+ qopt->offset[j] +
+ qopt->count[j])) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC %d queues %d@%d overlap with TC %d queues %d@%d",
+ i, qopt->count[i], qopt->offset[i],
+ j, qopt->count[j], qopt->offset[j]);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int mqprio_validate_qopt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
+ bool validate_queue_counts,
+ bool allow_overlapping_txqs,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ /* Verify num_tc is not out of max range */
+ if (qopt->num_tc > TC_MAX_QUEUE) {
+ NL_SET_ERR_MSG(extack,
+ "Number of traffic classes is outside valid range");
+ return -EINVAL;
+ }
+
+ /* Verify priority mapping uses valid tcs */
+ for (i = 0; i <= TC_BITMASK; i++) {
+ if (qopt->prio_tc_map[i] >= qopt->num_tc) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid traffic class in priority to traffic class mapping");
+ return -EINVAL;
+ }
+ }
+
+ if (validate_queue_counts) {
+ err = mqprio_validate_queue_counts(dev, qopt,
+ allow_overlapping_txqs,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mqprio_validate_qopt);
+
+void mqprio_qopt_reconstruct(struct net_device *dev, struct tc_mqprio_qopt *qopt)
+{
+ int tc, num_tc = netdev_get_num_tc(dev);
+
+ qopt->num_tc = num_tc;
+ memcpy(qopt->prio_tc_map, dev->prio_tc_map, sizeof(qopt->prio_tc_map));
+
+ for (tc = 0; tc < num_tc; tc++) {
+ qopt->count[tc] = dev->tc_to_txq[tc].count;
+ qopt->offset[tc] = dev->tc_to_txq[tc].offset;
+ }
+}
+EXPORT_SYMBOL_GPL(mqprio_qopt_reconstruct);
+
+MODULE_LICENSE("GPL");
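
sch_mqprio_lib.c now carries the qopt validation that mqprio previously open-coded and that taprio duplicated with slight differences. Per traffic class the rules reduce to: the TXQ range [offset, offset + count) must be non-empty, must fit under real_num_tx_queues, and, unless the caller allows it (taprio's txtime-assist mode does), must not overlap any other TC's range, using the same half-open test as intervals_overlap(). A hedged userspace restatement, with stand-in types and illustrative values rather than struct tc_mqprio_qopt:

/* Build & run: cc -o qopt_check qopt_check.c && ./qopt_check */
#include <stdio.h>

struct tc_range { int offset, count; };

/* Same predicate as intervals_overlap(): half-open [a, b) vs [c, d). */
static int overlaps(int a, int b, int c, int d)
{
	int left = a > c ? a : c;
	int right = b < d ? b : d;

	return left < right;
}

static int validate(const struct tc_range *tc, int num_tc,
		    int real_num_tx_queues, int allow_overlap)
{
	for (int i = 0; i < num_tc; i++) {
		int last = tc[i].offset + tc[i].count;

		if (!tc[i].count)
			return -1;	/* "No queues for TC %d" */
		if (tc[i].offset >= real_num_tx_queues ||
		    last > real_num_tx_queues)
			return -1;	/* range exceeds available TXQs */
		if (allow_overlap)
			continue;
		for (int j = i + 1; j < num_tc; j++)
			if (overlaps(tc[i].offset, last, tc[j].offset,
				     tc[j].offset + tc[j].count))
				return -1;	/* "queues %d@%d overlap" */
	}
	return 0;
}

int main(void)
{
	struct tc_range ok[]  = { { 0, 2 }, { 2, 2 } };	/* 2@0, 2@2 */
	struct tc_range bad[] = { { 0, 3 }, { 2, 2 } };	/* 3@0 overlaps 2@2 */

	printf("disjoint:    %d\n", validate(ok, 2, 4, 0));	/* 0  */
	printf("overlapping: %d\n", validate(bad, 2, 4, 0));	/* -1 */
	printf("tolerated:   %d\n", validate(bad, 2, 4, 1));	/* 0  */
	return 0;
}
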
diff --git a/net/sched/sch_mqprio_lib.h b/net/sched/sch_mqprio_lib.h
new file mode 100644
index 000000000000..63f725ab8761
--- /dev/null
+++ b/net/sched/sch_mqprio_lib.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SCH_MQPRIO_LIB_H
+#define __SCH_MQPRIO_LIB_H
+
+#include <linux/types.h>
+
+struct net_device;
+struct netlink_ext_ack;
+struct tc_mqprio_qopt;
+
+int mqprio_validate_qopt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
+ bool validate_queue_counts,
+ bool allow_overlapping_txqs,
+ struct netlink_ext_ack *extack);
+void mqprio_qopt_reconstruct(struct net_device *dev,
+ struct tc_mqprio_qopt *qopt);
+
+#endif
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index c322a61eaeea..1f469861eae3 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -26,7 +26,11 @@
#include <net/sock.h>
#include <net/tcp.h>
+#include "sch_mqprio_lib.h"
+
static LIST_HEAD(taprio_list);
+static struct static_key_false taprio_have_broken_mqprio;
+static struct static_key_false taprio_have_working_mqprio;
#define TAPRIO_ALL_GATES_OPEN -1
@@ -35,15 +39,19 @@ static LIST_HEAD(taprio_list);
#define TAPRIO_FLAGS_INVALID U32_MAX
struct sched_entry {
- struct list_head list;
-
- /* The instant that this entry "closes" and the next one
- * should open, the qdisc will make some effort so that no
- * packet leaves after this time.
+ /* Durations between this GCL entry and the GCL entry where the
+ * respective traffic class gate closes
*/
- ktime_t close_time;
+ u64 gate_duration[TC_MAX_QUEUE];
+ atomic_t budget[TC_MAX_QUEUE];
+ /* The qdisc makes some effort so that no packet leaves
+ * after this time
+ */
+ ktime_t gate_close_time[TC_MAX_QUEUE];
+ struct list_head list;
+ /* Used to calculate when to advance the schedule */
+ ktime_t end_time;
ktime_t next_txtime;
- atomic_t budget;
int index;
u32 gate_mask;
u32 interval;
@@ -51,10 +59,16 @@ struct sched_entry {
};
struct sched_gate_list {
+ /* Longest non-zero contiguous gate durations per traffic class,
+ * or 0 if a traffic class gate never opens during the schedule.
+ */
+ u64 max_open_gate_duration[TC_MAX_QUEUE];
+ u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
+ u32 max_sdu[TC_MAX_QUEUE]; /* for dump */
struct rcu_head rcu;
struct list_head entries;
size_t num_entries;
- ktime_t cycle_close_time;
+ ktime_t cycle_end_time;
s64 cycle_time;
s64 cycle_time_extension;
s64 base_time;
@@ -67,6 +81,8 @@ struct taprio_sched {
enum tk_offsets tk_offset;
int clockid;
bool offloaded;
+ bool detected_mqprio;
+ bool broken_mqprio;
atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
* speeds it's sub-nanoseconds per byte
*/
@@ -78,8 +94,8 @@ struct taprio_sched {
struct sched_gate_list __rcu *admin_sched;
struct hrtimer advance_timer;
struct list_head taprio_list;
- u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
- u32 max_sdu[TC_MAX_QUEUE]; /* for dump and offloading */
+ int cur_txq[TC_MAX_QUEUE];
+ u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
u32 txtime_delay;
};
@@ -88,6 +104,57 @@ struct __tc_taprio_qopt_offload {
struct tc_taprio_qopt_offload offload;
};
+static void taprio_calculate_gate_durations(struct taprio_sched *q,
+ struct sched_gate_list *sched)
+{
+ struct net_device *dev = qdisc_dev(q->root);
+ int num_tc = netdev_get_num_tc(dev);
+ struct sched_entry *entry, *cur;
+ int tc;
+
+ list_for_each_entry(entry, &sched->entries, list) {
+ u32 gates_still_open = entry->gate_mask;
+
+ /* For each traffic class, calculate each open gate duration,
+ * starting at this schedule entry and ending at the schedule
+ * entry containing a gate close event for that TC.
+ */
+ cur = entry;
+
+ do {
+ if (!gates_still_open)
+ break;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ if (!(gates_still_open & BIT(tc)))
+ continue;
+
+ if (cur->gate_mask & BIT(tc))
+ entry->gate_duration[tc] += cur->interval;
+ else
+ gates_still_open &= ~BIT(tc);
+ }
+
+ cur = list_next_entry_circular(cur, &sched->entries, list);
+ } while (cur != entry);
+
+ /* Keep track of the maximum gate duration for each traffic
+ * class, taking care to not confuse a traffic class which is
+ * temporarily closed with one that is always closed.
+ */
+ for (tc = 0; tc < num_tc; tc++)
+ if (entry->gate_duration[tc] &&
+ sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
+ sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
+ }
+}
+
+static bool taprio_entry_allows_tx(ktime_t skb_end_time,
+ struct sched_entry *entry, int tc)
+{
+ return ktime_before(skb_end_time, entry->gate_close_time[tc]);
+}
+
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
if (!sched)
@@ -180,6 +247,63 @@ static int length_to_duration(struct taprio_sched *q, int len)
return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}
+static int duration_to_length(struct taprio_sched *q, u64 duration)
+{
+ return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
+}
+
+/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
+ * q->max_sdu[] requested by the user and the max_sdu dynamically determined by
+ * the maximum open gate durations at the given link speed.
+ */
+static void taprio_update_queue_max_sdu(struct taprio_sched *q,
+ struct sched_gate_list *sched,
+ struct qdisc_size_table *stab)
+{
+ struct net_device *dev = qdisc_dev(q->root);
+ int num_tc = netdev_get_num_tc(dev);
+ u32 max_sdu_from_user;
+ u32 max_sdu_dynamic;
+ u32 max_sdu;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;
+
+ /* TC gate never closes => keep the queueMaxSDU
+ * selected by the user
+ */
+ if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
+ max_sdu_dynamic = U32_MAX;
+ } else {
+ u32 max_frm_len;
+
+ max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
+ /* Compensate for L1 overhead from size table,
+ * but don't let the frame size go negative
+ */
+ if (stab) {
+ max_frm_len -= stab->szopts.overhead;
+ max_frm_len = max_t(int, max_frm_len,
+ dev->hard_header_len + 1);
+ }
+ max_sdu_dynamic = max_frm_len - dev->hard_header_len;
+ if (max_sdu_dynamic > dev->max_mtu)
+ max_sdu_dynamic = U32_MAX;
+ }
+
+ max_sdu = min(max_sdu_dynamic, max_sdu_from_user);
+
+ if (max_sdu != U32_MAX) {
+ sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
+ sched->max_sdu[tc] = max_sdu;
+ } else {
+ sched->max_frm_len[tc] = U32_MAX; /* never oversized */
+ sched->max_sdu[tc] = 0;
+ }
+ }
+}
+
 /* Returns the entry corresponding to the next available interval. If
* validate_interval is set, it only validates whether the timestamp occurs
* when the gate corresponding to the skb's traffic class is open.
@@ -413,14 +537,33 @@ done:
return txtime;
}
-static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
- struct Qdisc *child, struct sk_buff **to_free)
+/* Devices with full offload are expected to honor this in hardware */
+static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
+ struct sk_buff *skb)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
+ struct sched_gate_list *sched;
int prio = skb->priority;
+ bool exceeds = false;
u8 tc;
+ tc = netdev_get_prio_tc_map(dev, prio);
+
+ rcu_read_lock();
+ sched = rcu_dereference(q->oper_sched);
+ if (sched && skb->len > sched->max_frm_len[tc])
+ exceeds = true;
+ rcu_read_unlock();
+
+ return exceeds;
+}
+
+static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
+ struct Qdisc *child, struct sk_buff **to_free)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+
/* sk_flags are only safe to use on full sockets. */
if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
if (!is_valid_interval(skb, sch))
@@ -431,17 +574,53 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
return qdisc_drop(skb, sch, to_free);
}
- /* Devices with full offload are expected to honor this in hardware */
- tc = netdev_get_prio_tc_map(dev, prio);
- if (skb->len > q->max_frm_len[tc])
- return qdisc_drop(skb, sch, to_free);
-
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return qdisc_enqueue(skb, child, to_free);
}
+static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
+ struct Qdisc *child,
+ struct sk_buff **to_free)
+{
+ unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
+ netdev_features_t features = netif_skb_features(skb);
+ struct sk_buff *segs, *nskb;
+ int ret;
+
+ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ if (IS_ERR_OR_NULL(segs))
+ return qdisc_drop(skb, sch, to_free);
+
+ skb_list_walk_safe(segs, segs, nskb) {
+ skb_mark_not_on_list(segs);
+ qdisc_skb_cb(segs)->pkt_len = segs->len;
+ slen += segs->len;
+
+ /* FIXME: we should be segmenting to a smaller size
+ * rather than dropping these
+ */
+ if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
+ ret = qdisc_drop(segs, sch, to_free);
+ else
+ ret = taprio_enqueue_one(segs, sch, child, to_free);
+
+ if (ret != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(ret))
+ qdisc_qstats_drop(sch);
+ } else {
+ numsegs++;
+ }
+ }
+
+ if (numsegs > 1)
+ qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
+ consume_skb(skb);
+
+ return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+}
+
/* Will not be called in the full offload case, since the TX queues are
* attached to the Qdisc created using qdisc_create_dflt()
*/
@@ -458,97 +637,190 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (unlikely(!child))
return qdisc_drop(skb, sch, to_free);
- /* Large packets might not be transmitted when the transmission duration
- * exceeds any configured interval. Therefore, segment the skb into
- * smaller chunks. Drivers with full offload are expected to handle
- * this in hardware.
- */
- if (skb_is_gso(skb)) {
- unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
- netdev_features_t features = netif_skb_features(skb);
- struct sk_buff *segs, *nskb;
- int ret;
-
- segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
- if (IS_ERR_OR_NULL(segs))
- return qdisc_drop(skb, sch, to_free);
+ if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
+ /* Large packets might not be transmitted when the transmission
+ * duration exceeds any configured interval. Therefore, segment
+ * the skb into smaller chunks. Drivers with full offload are
+ * expected to handle this in hardware.
+ */
+ if (skb_is_gso(skb))
+ return taprio_enqueue_segmented(skb, sch, child,
+ to_free);
- skb_list_walk_safe(segs, segs, nskb) {
- skb_mark_not_on_list(segs);
- qdisc_skb_cb(segs)->pkt_len = segs->len;
- slen += segs->len;
+ return qdisc_drop(skb, sch, to_free);
+ }
- ret = taprio_enqueue_one(segs, sch, child, to_free);
- if (ret != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(ret))
- qdisc_qstats_drop(sch);
- } else {
- numsegs++;
- }
- }
+ return taprio_enqueue_one(skb, sch, child, to_free);
+}
- if (numsegs > 1)
- qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
- consume_skb(skb);
+static struct sk_buff *taprio_peek(struct Qdisc *sch)
+{
+ WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
+ return NULL;
+}
+
+static void taprio_set_budgets(struct taprio_sched *q,
+ struct sched_gate_list *sched,
+ struct sched_entry *entry)
+{
+ struct net_device *dev = qdisc_dev(q->root);
+ int num_tc = netdev_get_num_tc(dev);
+ int tc, budget;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ /* Traffic classes which never close have infinite budget */
+ if (entry->gate_duration[tc] == sched->cycle_time)
+ budget = INT_MAX;
+ else
+ budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
+ atomic64_read(&q->picos_per_byte));
+
+ atomic_set(&entry->budget[tc], budget);
+ }
+}
- return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+/* When an skb is sent, it consumes from the budget of all traffic classes */
+static int taprio_update_budgets(struct sched_entry *entry, size_t len,
+ int tc_consumed, int num_tc)
+{
+ int tc, budget, new_budget = 0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ budget = atomic_read(&entry->budget[tc]);
+ /* Don't consume from infinite budget */
+ if (budget == INT_MAX) {
+ if (tc == tc_consumed)
+ new_budget = budget;
+ continue;
+ }
+
+ if (tc == tc_consumed)
+ new_budget = atomic_sub_return(len, &entry->budget[tc]);
+ else
+ atomic_sub(len, &entry->budget[tc]);
}
- return taprio_enqueue_one(skb, sch, child, to_free);
+ return new_budget;
}
-/* Will not be called in the full offload case, since the TX queues are
- * attached to the Qdisc created using qdisc_create_dflt()
- */
-static struct sk_buff *taprio_peek(struct Qdisc *sch)
+static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
+ struct sched_entry *entry,
+ u32 gate_mask)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
- struct sched_entry *entry;
+ struct Qdisc *child = q->qdiscs[txq];
+ int num_tc = netdev_get_num_tc(dev);
struct sk_buff *skb;
- u32 gate_mask;
- int i;
+ ktime_t guard;
+ int prio;
+ int len;
+ u8 tc;
- rcu_read_lock();
- entry = rcu_dereference(q->current_entry);
- gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
- rcu_read_unlock();
+ if (unlikely(!child))
+ return NULL;
- if (!gate_mask)
+ if (TXTIME_ASSIST_IS_ENABLED(q->flags))
+ goto skip_peek_checks;
+
+ skb = child->ops->peek(child);
+ if (!skb)
return NULL;
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct Qdisc *child = q->qdiscs[i];
- int prio;
- u8 tc;
+ prio = skb->priority;
+ tc = netdev_get_prio_tc_map(dev, prio);
- if (unlikely(!child))
- continue;
+ if (!(gate_mask & BIT(tc)))
+ return NULL;
- skb = child->ops->peek(child);
- if (!skb)
- continue;
+ len = qdisc_pkt_len(skb);
+ guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));
- if (TXTIME_ASSIST_IS_ENABLED(q->flags))
- return skb;
+ /* In the case that there's no gate entry, there's no
+ * guard band ...
+ */
+ if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+ !taprio_entry_allows_tx(guard, entry, tc))
+ return NULL;
- prio = skb->priority;
- tc = netdev_get_prio_tc_map(dev, prio);
+ /* ... and no budget. */
+ if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+ taprio_update_budgets(entry, len, tc, num_tc) < 0)
+ return NULL;
+
+skip_peek_checks:
+ skb = child->ops->dequeue(child);
+ if (unlikely(!skb))
+ return NULL;
+
+ qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
+ sch->q.qlen--;
+
+ return skb;
+}
+
+static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
+{
+ int offset = dev->tc_to_txq[tc].offset;
+ int count = dev->tc_to_txq[tc].count;
+
+ (*txq)++;
+ if (*txq == offset + count)
+ *txq = offset;
+}
+
+/* Prioritize higher traffic classes, and select among TXQs belonging to the
+ * same TC using round robin
+ */
+static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
+ struct sched_entry *entry,
+ u32 gate_mask)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ int num_tc = netdev_get_num_tc(dev);
+ struct sk_buff *skb;
+ int tc;
+
+ for (tc = num_tc - 1; tc >= 0; tc--) {
+ int first_txq = q->cur_txq[tc];
if (!(gate_mask & BIT(tc)))
continue;
- return skb;
+ do {
+ skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
+ entry, gate_mask);
+
+ taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);
+
+ if (skb)
+ return skb;
+ } while (q->cur_txq[tc] != first_txq);
}
return NULL;
}
-static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
+/* Broken way of prioritizing smaller TXQ indices, ignoring the traffic
+ * class except to determine whether the gate is open or not
+ */
+static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
+ struct sched_entry *entry,
+ u32 gate_mask)
{
- atomic_set(&entry->budget,
- div64_u64((u64)entry->interval * PSEC_PER_NSEC,
- atomic64_read(&q->picos_per_byte)));
+ struct net_device *dev = qdisc_dev(sch);
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
+ if (skb)
+ return skb;
+ }
+
+ return NULL;
}
/* Will not be called in the full offload case, since the TX queues are
@@ -557,11 +829,9 @@ static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
- struct net_device *dev = qdisc_dev(sch);
struct sk_buff *skb = NULL;
struct sched_entry *entry;
u32 gate_mask;
- int i;
rcu_read_lock();
entry = rcu_dereference(q->current_entry);
@@ -571,69 +841,23 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
* "AdminGateStates"
*/
gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
-
if (!gate_mask)
goto done;
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct Qdisc *child = q->qdiscs[i];
- ktime_t guard;
- int prio;
- int len;
- u8 tc;
-
- if (unlikely(!child))
- continue;
-
- if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
- skb = child->ops->dequeue(child);
- if (!skb)
- continue;
- goto skb_found;
- }
-
- skb = child->ops->peek(child);
- if (!skb)
- continue;
-
- prio = skb->priority;
- tc = netdev_get_prio_tc_map(dev, prio);
-
- if (!(gate_mask & BIT(tc))) {
- skb = NULL;
- continue;
- }
-
- len = qdisc_pkt_len(skb);
- guard = ktime_add_ns(taprio_get_time(q),
- length_to_duration(q, len));
-
- /* In the case that there's no gate entry, there's no
- * guard band ...
- */
- if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
- ktime_after(guard, entry->close_time)) {
- skb = NULL;
- continue;
- }
-
- /* ... and no budget. */
- if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
- atomic_sub_return(len, &entry->budget) < 0) {
- skb = NULL;
- continue;
- }
-
- skb = child->ops->dequeue(child);
- if (unlikely(!skb))
- goto done;
-
-skb_found:
- qdisc_bstats_update(sch, skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
-
- goto done;
+ if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
+ !static_branch_likely(&taprio_have_working_mqprio)) {
+ /* Single NIC kind which is broken */
+ skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
+ } else if (static_branch_likely(&taprio_have_working_mqprio) &&
+ !static_branch_unlikely(&taprio_have_broken_mqprio)) {
+ /* Single NIC kind which prioritizes properly */
+ skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
+ } else {
+ /* Mixed NIC kinds present in system, need dynamic testing */
+ if (q->broken_mqprio)
+ skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
+ else
+ skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
}
done:
@@ -648,7 +872,7 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
if (list_is_last(&entry->list, &oper->entries))
return true;
- if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
+ if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
return true;
return false;
@@ -656,7 +880,7 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
static bool should_change_schedules(const struct sched_gate_list *admin,
const struct sched_gate_list *oper,
- ktime_t close_time)
+ ktime_t end_time)
{
ktime_t next_base_time, extension_time;
@@ -665,18 +889,18 @@ static bool should_change_schedules(const struct sched_gate_list *admin,
next_base_time = sched_base_time(admin);
- /* This is the simple case, the close_time would fall after
+ /* This is the simple case, the end_time would fall after
* the next schedule base_time.
*/
- if (ktime_compare(next_base_time, close_time) <= 0)
+ if (ktime_compare(next_base_time, end_time) <= 0)
return true;
- /* This is the cycle_time_extension case, if the close_time
+ /* This is the cycle_time_extension case, if the end_time
* plus the amount that can be extended would fall after the
* next schedule base_time, we can extend the current schedule
* for that amount.
*/
- extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);
+ extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);
/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
* how precisely the extension should be made. So after
@@ -692,10 +916,13 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
struct taprio_sched *q = container_of(timer, struct taprio_sched,
advance_timer);
+ struct net_device *dev = qdisc_dev(q->root);
struct sched_gate_list *oper, *admin;
+ int num_tc = netdev_get_num_tc(dev);
struct sched_entry *entry, *next;
struct Qdisc *sch = q->root;
- ktime_t close_time;
+ ktime_t end_time;
+ int tc;
spin_lock(&q->current_entry_lock);
entry = rcu_dereference_protected(q->current_entry,
@@ -714,41 +941,49 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
* entry of all schedules are pre-calculated during the
* schedule initialization.
*/
- if (unlikely(!entry || entry->close_time == oper->base_time)) {
+ if (unlikely(!entry || entry->end_time == oper->base_time)) {
next = list_first_entry(&oper->entries, struct sched_entry,
list);
- close_time = next->close_time;
+ end_time = next->end_time;
goto first_run;
}
if (should_restart_cycle(oper, entry)) {
next = list_first_entry(&oper->entries, struct sched_entry,
list);
- oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
- oper->cycle_time);
+ oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
+ oper->cycle_time);
} else {
next = list_next_entry(entry, list);
}
- close_time = ktime_add_ns(entry->close_time, next->interval);
- close_time = min_t(ktime_t, close_time, oper->cycle_close_time);
+ end_time = ktime_add_ns(entry->end_time, next->interval);
+ end_time = min_t(ktime_t, end_time, oper->cycle_end_time);
+
+ for (tc = 0; tc < num_tc; tc++) {
+ if (next->gate_duration[tc] == oper->cycle_time)
+ next->gate_close_time[tc] = KTIME_MAX;
+ else
+ next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
+ next->gate_duration[tc]);
+ }
- if (should_change_schedules(admin, oper, close_time)) {
+ if (should_change_schedules(admin, oper, end_time)) {
/* Set things so the next time this runs, the new
* schedule runs.
*/
- close_time = sched_base_time(admin);
+ end_time = sched_base_time(admin);
switch_schedules(q, &admin, &oper);
}
- next->close_time = close_time;
- taprio_set_budget(q, next);
+ next->end_time = end_time;
+ taprio_set_budgets(q, oper, next);
first_run:
rcu_assign_pointer(q->current_entry, next);
spin_unlock(&q->current_entry_lock);
- hrtimer_set_expires(&q->advance_timer, close_time);
+ hrtimer_set_expires(&q->advance_timer, end_time);
rcu_read_lock();
__netif_schedule(sch);
@@ -916,6 +1151,8 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
new->cycle_time = cycle;
}
+ taprio_calculate_gate_durations(q, new);
+
return 0;
}
@@ -924,7 +1161,7 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
struct netlink_ext_ack *extack,
u32 taprio_flags)
{
- int i, j;
+ bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
if (!qopt && !dev->num_tc) {
NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
@@ -937,52 +1174,17 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
if (dev->num_tc)
return 0;
- /* Verify num_tc is not out of max range */
- if (qopt->num_tc > TC_MAX_QUEUE) {
- NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
- return -EINVAL;
- }
-
/* taprio imposes that traffic classes map 1:n to tx queues */
if (qopt->num_tc > dev->num_tx_queues) {
NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
return -EINVAL;
}
- /* Verify priority mapping uses valid tcs */
- for (i = 0; i <= TC_BITMASK; i++) {
- if (qopt->prio_tc_map[i] >= qopt->num_tc) {
- NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
- return -EINVAL;
- }
- }
-
- for (i = 0; i < qopt->num_tc; i++) {
- unsigned int last = qopt->offset[i] + qopt->count[i];
-
- /* Verify the queue count is in tx range being equal to the
- * real_num_tx_queues indicates the last queue is in use.
- */
- if (qopt->offset[i] >= dev->num_tx_queues ||
- !qopt->count[i] ||
- last > dev->real_num_tx_queues) {
- NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
- return -EINVAL;
- }
-
- if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
- continue;
-
- /* Verify that the offset and counts do not overlap */
- for (j = i + 1; j < qopt->num_tc; j++) {
- if (last > qopt->offset[j]) {
- NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
- return -EINVAL;
- }
- }
- }
-
- return 0;
+ /* For some reason, in txtime-assist mode, we allow TXQ ranges for
+ * different TCs to overlap, and just validate the TXQ ranges.
+ */
+ return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
+ extack);
}
static int taprio_get_start_time(struct Qdisc *sch,
@@ -1019,11 +1221,14 @@ static int taprio_get_start_time(struct Qdisc *sch,
return 0;
}
-static void setup_first_close_time(struct taprio_sched *q,
- struct sched_gate_list *sched, ktime_t base)
+static void setup_first_end_time(struct taprio_sched *q,
+ struct sched_gate_list *sched, ktime_t base)
{
+ struct net_device *dev = qdisc_dev(q->root);
+ int num_tc = netdev_get_num_tc(dev);
struct sched_entry *first;
ktime_t cycle;
+ int tc;
first = list_first_entry(&sched->entries,
struct sched_entry, list);
@@ -1031,10 +1236,18 @@ static void setup_first_close_time(struct taprio_sched *q,
cycle = sched->cycle_time;
/* FIXME: find a better place to do this */
- sched->cycle_close_time = ktime_add_ns(base, cycle);
+ sched->cycle_end_time = ktime_add_ns(base, cycle);
+
+ first->end_time = ktime_add_ns(base, first->interval);
+ taprio_set_budgets(q, sched, first);
+
+ for (tc = 0; tc < num_tc; tc++) {
+ if (first->gate_duration[tc] == sched->cycle_time)
+ first->gate_close_time[tc] = KTIME_MAX;
+ else
+ first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
+ }
- first->close_time = ktime_add_ns(base, first->interval);
- taprio_set_budget(q, first);
rcu_assign_pointer(q->current_entry, NULL);
}
@@ -1088,6 +1301,8 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct sched_gate_list *oper, *admin;
+ struct qdisc_size_table *stab;
struct taprio_sched *q;
ASSERT_RTNL();
@@ -1100,6 +1315,17 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
continue;
taprio_set_picos_per_byte(dev, q);
+
+ stab = rtnl_dereference(q->root->stab);
+
+ oper = rtnl_dereference(q->oper_sched);
+ if (oper)
+ taprio_update_queue_max_sdu(q, oper, stab);
+
+ admin = rtnl_dereference(q->admin_sched);
+ if (admin)
+ taprio_update_queue_max_sdu(q, admin, stab);
+
break;
}
@@ -1203,7 +1429,8 @@ static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
static void taprio_sched_to_offload(struct net_device *dev,
struct sched_gate_list *sched,
- struct tc_taprio_qopt_offload *offload)
+ struct tc_taprio_qopt_offload *offload,
+ const struct tc_taprio_caps *caps)
{
struct sched_entry *entry;
int i = 0;
@@ -1217,7 +1444,11 @@ static void taprio_sched_to_offload(struct net_device *dev,
e->command = entry->command;
e->interval = entry->interval;
- e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
+ if (caps->gate_mask_per_txq)
+ e->gate_mask = tc_map_to_queue_mask(dev,
+ entry->gate_mask);
+ else
+ e->gate_mask = entry->gate_mask;
i++;
}
@@ -1225,6 +1456,34 @@ static void taprio_sched_to_offload(struct net_device *dev,
offload->num_entries = i;
}
+static void taprio_detect_broken_mqprio(struct taprio_sched *q)
+{
+ struct net_device *dev = qdisc_dev(q->root);
+ struct tc_taprio_caps caps;
+
+ qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
+ &caps, sizeof(caps));
+
+ q->broken_mqprio = caps.broken_mqprio;
+ if (q->broken_mqprio)
+ static_branch_inc(&taprio_have_broken_mqprio);
+ else
+ static_branch_inc(&taprio_have_working_mqprio);
+
+ q->detected_mqprio = true;
+}
+
+static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
+{
+ if (!q->detected_mqprio)
+ return;
+
+ if (q->broken_mqprio)
+ static_branch_dec(&taprio_have_broken_mqprio);
+ else
+ static_branch_dec(&taprio_have_working_mqprio);
+}
+
static int taprio_enable_offload(struct net_device *dev,
struct taprio_sched *q,
struct sched_gate_list *sched,
@@ -1261,7 +1520,8 @@ static int taprio_enable_offload(struct net_device *dev,
return -ENOMEM;
}
offload->enable = 1;
- taprio_sched_to_offload(dev, sched, offload);
+ mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
+ taprio_sched_to_offload(dev, sched, offload, &caps);
for (tc = 0; tc < TC_MAX_QUEUE; tc++)
offload->max_sdu[tc] = q->max_sdu[tc];
@@ -1452,7 +1712,6 @@ static int taprio_parse_tc_entries(struct Qdisc *sch,
struct netlink_ext_ack *extack)
{
struct taprio_sched *q = qdisc_priv(sch);
- struct net_device *dev = qdisc_dev(sch);
u32 max_sdu[TC_QOPT_MAX_QUEUE];
unsigned long seen_tcs = 0;
struct nlattr *n;
@@ -1466,18 +1725,14 @@ static int taprio_parse_tc_entries(struct Qdisc *sch,
if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
continue;
- err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, extack);
+ err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs,
+ extack);
if (err)
goto out;
}
- for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+ for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
q->max_sdu[tc] = max_sdu[tc];
- if (max_sdu[tc])
- q->max_frm_len[tc] = max_sdu[tc] + dev->hard_header_len;
- else
- q->max_frm_len[tc] = U32_MAX; /* never oversized */
- }
out:
return err;
@@ -1533,6 +1788,7 @@ static int taprio_new_flags(const struct nlattr *attr, u32 old,
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ struct qdisc_size_table *stab = rtnl_dereference(sch->stab);
struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
struct sched_gate_list *oper, *admin, *new_admin;
struct taprio_sched *q = qdisc_priv(sch);
@@ -1585,6 +1841,23 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
goto free_sched;
}
+ if (mqprio) {
+ err = netdev_set_num_tc(dev, mqprio->num_tc);
+ if (err)
+ goto free_sched;
+ for (i = 0; i < mqprio->num_tc; i++) {
+ netdev_set_tc_queue(dev, i,
+ mqprio->count[i],
+ mqprio->offset[i]);
+ q->cur_txq[i] = mqprio->offset[i];
+ }
+
+ /* Always use supplied priority mappings */
+ for (i = 0; i <= TC_BITMASK; i++)
+ netdev_set_prio_tc_map(dev, i,
+ mqprio->prio_tc_map[i]);
+ }
+
err = parse_taprio_schedule(q, tb, new_admin, extack);
if (err < 0)
goto free_sched;
@@ -1600,21 +1873,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
goto free_sched;
taprio_set_picos_per_byte(dev, q);
-
- if (mqprio) {
- err = netdev_set_num_tc(dev, mqprio->num_tc);
- if (err)
- goto free_sched;
- for (i = 0; i < mqprio->num_tc; i++)
- netdev_set_tc_queue(dev, i,
- mqprio->count[i],
- mqprio->offset[i]);
-
- /* Always use supplied priority mappings */
- for (i = 0; i <= TC_BITMASK; i++)
- netdev_set_prio_tc_map(dev, i,
- mqprio->prio_tc_map[i]);
- }
+ taprio_update_queue_max_sdu(q, new_admin, stab);
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
err = taprio_enable_offload(dev, q, new_admin, extack);
@@ -1663,7 +1922,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (admin)
call_rcu(&admin->rcu, taprio_free_sched_cb);
} else {
- setup_first_close_time(q, new_admin, start);
+ setup_first_end_time(q, new_admin, start);
/* Protects against advance_sched() */
spin_lock_irqsave(&q->current_entry_lock, flags);
@@ -1683,6 +1942,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
new_admin = NULL;
err = 0;
+ if (!stab)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Size table not specified, frame length estimations may be inaccurate");
+
unlock:
spin_unlock_bh(qdisc_lock(sch));
@@ -1743,6 +2006,8 @@ static void taprio_destroy(struct Qdisc *sch)
if (admin)
call_rcu(&admin->rcu, taprio_free_sched_cb);
+
+ taprio_cleanup_broken_mqprio(q);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
@@ -1807,6 +2072,8 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
q->qdiscs[i] = qdisc;
}
+ taprio_detect_broken_mqprio(q);
+
return taprio_change(sch, opt, extack);
}
@@ -1947,7 +2214,8 @@ error_nest:
return -1;
}
-static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
+static int taprio_dump_tc_entries(struct sk_buff *skb,
+ struct sched_gate_list *sched)
{
struct nlattr *n;
int tc;
@@ -1961,7 +2229,7 @@ static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
goto nla_put_failure;
if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
- q->max_sdu[tc]))
+ sched->max_sdu[tc]))
goto nla_put_failure;
nla_nest_end(skb, n);
@@ -1981,18 +2249,11 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct sched_gate_list *oper, *admin;
struct tc_mqprio_qopt opt = { 0 };
struct nlattr *nest, *sched_nest;
- unsigned int i;
oper = rtnl_dereference(q->oper_sched);
admin = rtnl_dereference(q->admin_sched);
- opt.num_tc = netdev_get_num_tc(dev);
- memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
-
- for (i = 0; i < netdev_get_num_tc(dev); i++) {
- opt.count[i] = dev->tc_to_txq[i].count;
- opt.offset[i] = dev->tc_to_txq[i].offset;
- }
+ mqprio_qopt_reconstruct(dev, &opt);
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (!nest)
@@ -2012,7 +2273,7 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
goto options_error;
- if (taprio_dump_tc_entries(q, skb))
+ if (oper && taprio_dump_tc_entries(skb, oper))
goto options_error;
if (oper && dump_schedule(skb, oper))
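
The taprio rework above replaces the single per-entry budget with per-TC state derived from the gate control list: taprio_calculate_gate_durations() walks the GCL circularly from each entry, accumulating each TC's open time until its gate closes, and taprio_set_budgets() turns a duration into a byte budget via duration * PSEC_PER_NSEC / picos_per_byte (INT_MAX when the gate never closes). A self-contained sketch of both computations over an illustrative three-entry schedule; the entry values and link speed are assumptions, not kernel data:

/* Build & run: cc -o gates gates.c && ./gates */
#include <stdio.h>
#include <stdint.h>

#define PSEC_PER_NSEC	1000ULL
#define NUM_TC		2
#define NUM_ENTRIES	3

struct entry {
	uint32_t gate_mask;	/* bit per TC, 1 = gate open */
	uint64_t interval;	/* ns */
	uint64_t gate_duration[NUM_TC];
};

int main(void)
{
	struct entry gcl[NUM_ENTRIES] = {
		{ .gate_mask = 0x3, .interval = 300000 },	/* both open */
		{ .gate_mask = 0x1, .interval = 200000 },	/* TC0 only  */
		{ .gate_mask = 0x2, .interval = 500000 },	/* TC1 only  */
	};
	uint64_t picos_per_byte = 8000;	/* ~1 Gbps: 8 ns per byte */

	for (int e = 0; e < NUM_ENTRIES; e++) {
		uint32_t still_open = gcl[e].gate_mask;

		/* Walk the GCL circularly from entry e, adding intervals for
		 * each TC that starts open until its gate close event. */
		for (int step = 0, cur = e;
		     step < NUM_ENTRIES && still_open;
		     step++, cur = (cur + 1) % NUM_ENTRIES) {
			for (int tc = 0; tc < NUM_TC; tc++) {
				if (!(still_open & (1u << tc)))
					continue;
				if (gcl[cur].gate_mask & (1u << tc))
					gcl[e].gate_duration[tc] += gcl[cur].interval;
				else
					still_open &= ~(1u << tc);
			}
		}

		for (int tc = 0; tc < NUM_TC; tc++) {
			uint64_t d = gcl[e].gate_duration[tc];
			/* The kernel special-cases d == cycle_time as an
			 * infinite (INT_MAX) budget; omitted here. */
			uint64_t budget = d * PSEC_PER_NSEC / picos_per_byte;

			printf("entry %d TC%d: open %llu ns -> budget %llu bytes\n",
			       e, tc, (unsigned long long)d,
			       (unsigned long long)budget);
		}
	}
	return 0;
}
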
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 097bd60ce964..62b436a2c8fe 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -807,8 +807,6 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
- sk_refcnt_debug_inc(newsk);
-
if (newsk->sk_prot->init(newsk)) {
sk_common_release(newsk);
newsk = NULL;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 909a89a1cff4..c365df24ad33 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -601,8 +601,6 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
- sk_refcnt_debug_inc(newsk);
-
if (newsk->sk_prot->init(newsk)) {
sk_common_release(newsk);
newsk = NULL;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 84021a6c4f9d..b91616f819de 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -59,6 +59,7 @@
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>
+#include <trace/events/sock.h>
#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
@@ -8321,7 +8322,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
int low, high, remaining, index;
unsigned int rover;
- inet_get_local_port_range(net, &low, &high);
+ inet_sk_get_local_port_range(sk, &low, &high);
remaining = (high - low) + 1;
rover = get_random_u32_below(remaining) + low;
@@ -9244,6 +9245,8 @@ void sctp_data_ready(struct sock *sk)
{
struct socket_wq *wq;
+ trace_sk_data_ready(sk);
+
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
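With sctp_get_port_local() now calling inet_sk_get_local_port_range(), SCTP honors the per-socket IP_LOCAL_PORT_RANGE option introduced this cycle: the socket's own range, when set, narrows the netns-wide ip_local_port_range before an ephemeral port is picked. A minimal userspace sketch, assuming uAPI headers that define IP_LOCAL_PORT_RANGE; the option value packs the bounds as high << 16 | low, with 0 meaning "keep the netns default" for that bound:

#include <stdint.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Constrain this socket's ephemeral ports to [lo, hi]. */
static int restrict_local_ports(int fd, uint16_t lo, uint16_t hi)
{
	uint32_t range = ((uint32_t)hi << 16) | lo;

	return setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE,
			  &range, sizeof(range));
}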
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index e12d4fa5aece..a4cccdfdc00a 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -27,6 +27,7 @@
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>
+#include <linux/splice.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -359,8 +360,6 @@ static void smc_destruct(struct sock *sk)
return;
if (!sock_flag(sk, SOCK_DEAD))
return;
-
- sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
@@ -389,7 +388,6 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
spin_lock_init(&smc->accept_q_lock);
spin_lock_init(&smc->conn.send_lock);
sk->sk_prot->hash(sk);
- sk_refcnt_debug_inc(sk);
mutex_init(&smc->clcsock_release_lock);
smc_init_saved_callbacks(smc);
@@ -501,7 +499,7 @@ static int smcr_lgr_reg_sndbufs(struct smc_link *link,
return -EINVAL;
/* protect against parallel smcr_link_reg_buf() */
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
if (!smc_link_active(&lgr->lnk[i]))
continue;
@@ -509,7 +507,7 @@ static int smcr_lgr_reg_sndbufs(struct smc_link *link,
if (rc)
break;
}
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_write(&lgr->llc_conf_mutex);
return rc;
}
@@ -518,15 +516,30 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
struct smc_buf_desc *rmb_desc)
{
struct smc_link_group *lgr = link->lgr;
+ bool do_slow = false;
int i, rc = 0;
rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
if (rc)
return rc;
+
+ down_read(&lgr->llc_conf_mutex);
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+ if (!smc_link_active(&lgr->lnk[i]))
+ continue;
+ if (!rmb_desc->is_reg_mr[link->link_idx]) {
+ up_read(&lgr->llc_conf_mutex);
+ goto slow_path;
+ }
+ }
+	/* MR already registered on all active links */
+ goto fast_path;
+slow_path:
+ do_slow = true;
/* protect against parallel smc_llc_cli_rkey_exchange() and
* parallel smcr_link_reg_buf()
*/
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
if (!smc_link_active(&lgr->lnk[i]))
continue;
@@ -534,7 +547,7 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
if (rc)
goto out;
}
-
+fast_path:
/* exchange confirm_rkey msg with peer */
rc = smc_llc_do_confirm_rkey(link, rmb_desc);
if (rc) {
@@ -543,7 +556,7 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
}
rmb_desc->is_conf_rkey = true;
out:
- mutex_unlock(&lgr->llc_conf_mutex);
+ do_slow ? up_write(&lgr->llc_conf_mutex) : up_read(&lgr->llc_conf_mutex);
smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
return rc;
}
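llc_conf_mutex is an rw_semaphore from this point on, so the common case above — every active link already has the buffer's MR registered — runs entirely under a shared read lock; the write lock is taken only when registration work remains, and a single exit point releases whichever mode was held. The locking shape, reduced to a sketch (ready()/update() stand in for the SMC-specific steps):

static int probe_then_update(struct rw_semaphore *sem,
			     bool (*ready)(void), int (*update)(void))
{
	bool write_locked = false;
	int rc = 0;

	down_read(sem);
	if (!ready()) {
		up_read(sem);		/* rwsems cannot upgrade in place */
		down_write(sem);
		write_locked = true;
		rc = update();		/* state may have changed; redo work */
		if (rc)
			goto out;
	}
	/* ... work that is valid under either lock mode ... */
out:
	write_locked ? up_write(sem) : up_read(sem);
	return rc;
}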
@@ -1826,8 +1839,10 @@ static int smcr_serv_conf_first_link(struct smc_sock *smc)
smc_llc_link_active(link);
smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
+ down_write(&link->lgr->llc_conf_mutex);
/* initial contact - try to establish second link */
smc_llc_srv_add_link(link, NULL);
+ up_write(&link->lgr->llc_conf_mutex);
return 0;
}
@@ -3382,12 +3397,14 @@ static int __init smc_init(void)
if (rc)
goto out_pernet_subsys;
- smc_ism_init();
+ rc = smc_ism_init();
+ if (rc)
+ goto out_pernet_subsys_stat;
smc_clc_init();
rc = smc_nl_init();
if (rc)
- goto out_pernet_subsys_stat;
+ goto out_ism;
rc = smc_pnet_init();
if (rc)
@@ -3480,6 +3497,8 @@ out_pnet:
smc_pnet_exit();
out_nl:
smc_nl_exit();
+out_ism:
+ smc_ism_exit();
out_pernet_subsys_stat:
unregister_pernet_subsys(&smc_net_stat_ops);
out_pernet_subsys:
@@ -3495,6 +3514,7 @@ static void __exit smc_exit(void)
sock_unregister(PF_SMC);
smc_core_exit();
smc_ib_unregister_client();
+ smc_ism_exit();
destroy_workqueue(smc_close_wq);
destroy_workqueue(smc_tcp_ls_wq);
destroy_workqueue(smc_hs_wq);
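The new out_ism label threads smc_ism_exit() into the existing unwind ladder so teardown stays the exact mirror of setup, and smc_exit() gains the matching call. The idiom, as a generic sketch:

static int __init subsys_init(void)
{
	int rc;

	rc = step_a();		/* e.g. pernet registration */
	if (rc)
		return rc;
	rc = step_b();		/* e.g. ISM client registration */
	if (rc)
		goto out_a;
	rc = step_c();		/* e.g. netlink registration */
	if (rc)
		goto out_b;
	return 0;

out_b:				/* unwind strictly in reverse order */
	undo_b();
out_a:
	undo_a();
	return rc;
}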
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index dfb9797f7bc6..b9b8b07aa702 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -813,6 +813,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
struct smc_clc_v2_extension *v2_ext;
struct smc_clc_msg_smcd *pclc_smcd;
struct smc_clc_msg_trail *trl;
+ struct smcd_dev *smcd;
int len, i, plen, rc;
int reason_code = 0;
struct kvec vec[8];
@@ -868,7 +869,9 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
if (smcd_indicated(ini->smc_type_v1)) {
/* add SMC-D specifics */
if (ini->ism_dev[0]) {
- pclc_smcd->ism.gid = htonll(ini->ism_dev[0]->local_gid);
+ smcd = ini->ism_dev[0];
+ pclc_smcd->ism.gid =
+ htonll(smcd->ops->get_local_gid(smcd));
pclc_smcd->ism.chid =
htons(smc_ism_get_chid(ini->ism_dev[0]));
}
@@ -914,8 +917,9 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
plen += sizeof(*smcd_v2_ext);
if (ini->ism_offered_cnt) {
for (i = 1; i <= ini->ism_offered_cnt; i++) {
+ smcd = ini->ism_dev[i];
gidchids[i - 1].gid =
- htonll(ini->ism_dev[i]->local_gid);
+ htonll(smcd->ops->get_local_gid(smcd));
gidchids[i - 1].chid =
htons(smc_ism_get_chid(ini->ism_dev[i]));
}
@@ -1000,7 +1004,8 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
sizeof(SMCD_EYECATCHER));
clc->hdr.typev1 = SMC_TYPE_D;
- clc->d0.gid = conn->lgr->smcd->local_gid;
+ clc->d0.gid =
+ conn->lgr->smcd->ops->get_local_gid(conn->lgr->smcd);
clc->d0.token = conn->rmb_desc->token;
clc->d0.dmbe_size = conn->rmbe_size_short;
clc->d0.dmbe_idx = 0;
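Every direct smcd->local_gid read in the CLC code now goes through ops->get_local_gid(), so SMC stops assuming one concrete ISM device layout; the same hunks route get_dev() the same way. A sketch of the provider side under that contract (the field names inside ism_dev here are illustrative, not the actual layout):

/* The device driver implements the accessors against its private
 * state; SMC only ever calls through the smcd_ops vtable.
 */
static u64 example_get_local_gid(struct smcd_dev *smcd)
{
	struct ism_dev *ism = smcd->priv;	/* set at registration */

	return ism->local_gid;			/* illustrative field */
}

static const struct smcd_ops example_ops = {
	.get_local_gid	= example_get_local_gid,
	/* .get_dev, .register_dmb, .signal_event, ... */
};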
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index c305d8dd23f8..d52060b2680c 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -500,6 +500,7 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
struct netlink_callback *cb)
{
char smc_pnet[SMC_MAX_PNETID_LEN + 1];
+ struct smcd_dev *smcd = lgr->smcd;
struct nlattr *attrs;
void *nlh;
@@ -515,8 +516,9 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
goto errattr;
- if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID, lgr->smcd->local_gid,
- SMC_NLA_LGR_D_PAD))
+ if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID,
+ smcd->ops->get_local_gid(smcd),
+ SMC_NLA_LGR_D_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid,
SMC_NLA_LGR_D_PAD))
@@ -820,6 +822,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
struct smc_link_group *lgr;
struct list_head *lgr_list;
+ struct smcd_dev *smcd;
struct smc_link *lnk;
spinlock_t *lgr_lock;
u8 link_idx;
@@ -851,8 +854,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
lgr->freeing = 0;
lgr->vlan_id = ini->vlan_id;
refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */
- mutex_init(&lgr->sndbufs_lock);
- mutex_init(&lgr->rmbs_lock);
+ init_rwsem(&lgr->sndbufs_lock);
+ init_rwsem(&lgr->rmbs_lock);
rwlock_init(&lgr->conns_lock);
for (i = 0; i < SMC_RMBE_SIZES; i++) {
INIT_LIST_HEAD(&lgr->sndbufs[i]);
@@ -866,7 +869,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
lgr->conns_all = RB_ROOT;
if (ini->is_smcd) {
/* SMC-D specific settings */
- get_device(&ini->ism_dev[ini->ism_selected]->dev);
+ smcd = ini->ism_dev[ini->ism_selected];
+ get_device(smcd->ops->get_dev(smcd));
lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
lgr->smcd = ini->ism_dev[ini->ism_selected];
lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
@@ -1094,7 +1098,7 @@ err_out:
static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
struct smc_link_group *lgr)
{
- struct mutex *lock; /* lock buffer list */
+ struct rw_semaphore *lock; /* lock buffer list */
int rc;
if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
@@ -1102,10 +1106,10 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
if (!rc) {
/* protect against smc_llc_cli_rkey_exchange() */
- mutex_lock(&lgr->llc_conf_mutex);
+ down_read(&lgr->llc_conf_mutex);
smc_llc_do_delete_rkey(lgr, buf_desc);
buf_desc->is_conf_rkey = false;
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_read(&lgr->llc_conf_mutex);
smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}
}
@@ -1114,14 +1118,15 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
/* buf registration failed, reuse not possible */
lock = is_rmb ? &lgr->rmbs_lock :
&lgr->sndbufs_lock;
- mutex_lock(lock);
+ down_write(lock);
list_del(&buf_desc->list);
- mutex_unlock(lock);
+ up_write(lock);
smc_buf_free(lgr, is_rmb, buf_desc);
} else {
- buf_desc->used = 0;
- memset(buf_desc->cpu_addr, 0, buf_desc->len);
+			/* scrub the buffer before publishing it as free;
+			 * memzero_explicit() cannot be optimized away
+			 */
+ memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
+ WRITE_ONCE(buf_desc->used, 0);
}
}
@@ -1132,19 +1137,17 @@ static void smc_buf_unuse(struct smc_connection *conn,
if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) {
smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
} else {
- conn->sndbuf_desc->used = 0;
- memset(conn->sndbuf_desc->cpu_addr, 0,
- conn->sndbuf_desc->len);
+ memzero_explicit(conn->sndbuf_desc->cpu_addr, conn->sndbuf_desc->len);
+ WRITE_ONCE(conn->sndbuf_desc->used, 0);
}
}
if (conn->rmb_desc) {
if (!lgr->is_smcd) {
smcr_buf_unuse(conn->rmb_desc, true, lgr);
} else {
- conn->rmb_desc->used = 0;
- memset(conn->rmb_desc->cpu_addr, 0,
- conn->rmb_desc->len +
- sizeof(struct smcd_cdc_msg));
+ memzero_explicit(conn->rmb_desc->cpu_addr,
+ conn->rmb_desc->len + sizeof(struct smcd_cdc_msg));
+ WRITE_ONCE(conn->rmb_desc->used, 0);
}
}
}
@@ -1220,15 +1223,16 @@ static void smcr_buf_unmap_lgr(struct smc_link *lnk)
int i;
for (i = 0; i < SMC_RMBE_SIZES; i++) {
- mutex_lock(&lgr->rmbs_lock);
+ down_write(&lgr->rmbs_lock);
list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
smcr_buf_unmap_link(buf_desc, true, lnk);
- mutex_unlock(&lgr->rmbs_lock);
- mutex_lock(&lgr->sndbufs_lock);
+ up_write(&lgr->rmbs_lock);
+
+ down_write(&lgr->sndbufs_lock);
list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
list)
smcr_buf_unmap_link(buf_desc, false, lnk);
- mutex_unlock(&lgr->sndbufs_lock);
+ up_write(&lgr->sndbufs_lock);
}
}
@@ -1373,19 +1377,19 @@ static void smc_lgr_free(struct smc_link_group *lgr)
int i;
if (!lgr->is_smcd) {
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
if (lgr->lnk[i].state != SMC_LNK_UNUSED)
smcr_link_clear(&lgr->lnk[i], false);
}
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_write(&lgr->llc_conf_mutex);
smc_llc_lgr_clear(lgr);
}
destroy_workqueue(lgr->tx_wq);
if (lgr->is_smcd) {
smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
- put_device(&lgr->smcd->dev);
+ put_device(lgr->smcd->ops->get_dev(lgr->smcd));
}
smc_lgr_put(lgr); /* theoretically last lgr_put */
}
@@ -1692,12 +1696,12 @@ static void smcr_link_down(struct smc_link *lnk)
} else {
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* another llc task is ongoing */
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_write(&lgr->llc_conf_mutex);
wait_event_timeout(lgr->llc_flow_waiter,
(list_empty(&lgr->list) ||
lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
SMC_LLC_WAIT_TIME);
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
}
if (!list_empty(&lgr->list)) {
smc_llc_send_delete_link(to_lnk, del_link_id,
@@ -1757,9 +1761,9 @@ static void smc_link_down_work(struct work_struct *work)
if (list_empty(&lgr->list))
return;
wake_up_all(&lgr->llc_msg_waiter);
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
smcr_link_down(link);
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_write(&lgr->llc_conf_mutex);
}
static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
@@ -1986,19 +1990,19 @@ int smc_uncompress_bufsize(u8 compressed)
* buffer size; if not available, return NULL
*/
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
- struct mutex *lock,
+ struct rw_semaphore *lock,
struct list_head *buf_list)
{
struct smc_buf_desc *buf_slot;
- mutex_lock(lock);
+ down_read(lock);
list_for_each_entry(buf_slot, buf_list, list) {
if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
- mutex_unlock(lock);
+ up_read(lock);
return buf_slot;
}
}
- mutex_unlock(lock);
+ up_read(lock);
return NULL;
}
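smc_buf_get_slot() can hold the buffer-list lock in read mode because claiming a slot never mutates the list: ownership is decided by a cmpxchg() on buf_desc->used, and the write lock is reserved for list add/remove. The claim/release pairing, sketched from this hunk and the smcr_buf_unuse() hunk above:

/* Claim: scan under the shared lock; only one claimant wins the
 * 0 -> 1 transition, everyone else keeps scanning.
 */
down_read(lock);
list_for_each_entry(buf_slot, buf_list, list) {
	if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
		up_read(lock);
		return buf_slot;
	}
}
up_read(lock);

/* Release: scrub first, publish last, so a concurrent claimant can
 * only ever win a fully cleared buffer.
 */
memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
WRITE_ONCE(buf_desc->used, 0);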
@@ -2107,13 +2111,13 @@ int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
return 0;
}
-static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
+static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
struct list_head *lst, bool is_rmb)
{
struct smc_buf_desc *buf_desc, *bf;
int rc = 0;
- mutex_lock(lock);
+ down_write(lock);
list_for_each_entry_safe(buf_desc, bf, lst, list) {
if (!buf_desc->used)
continue;
@@ -2122,7 +2126,7 @@ static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
goto out;
}
out:
- mutex_unlock(lock);
+ up_write(lock);
return rc;
}
@@ -2155,37 +2159,37 @@ int smcr_buf_reg_lgr(struct smc_link *lnk)
int i, rc = 0;
/* reg all RMBs for a new link */
- mutex_lock(&lgr->rmbs_lock);
+ down_write(&lgr->rmbs_lock);
for (i = 0; i < SMC_RMBE_SIZES; i++) {
list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
if (!buf_desc->used)
continue;
rc = smcr_link_reg_buf(lnk, buf_desc);
if (rc) {
- mutex_unlock(&lgr->rmbs_lock);
+ up_write(&lgr->rmbs_lock);
return rc;
}
}
}
- mutex_unlock(&lgr->rmbs_lock);
+ up_write(&lgr->rmbs_lock);
if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
return rc;
/* reg all vzalloced sndbufs for a new link */
- mutex_lock(&lgr->sndbufs_lock);
+ down_write(&lgr->sndbufs_lock);
for (i = 0; i < SMC_RMBE_SIZES; i++) {
list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
if (!buf_desc->used || !buf_desc->is_vm)
continue;
rc = smcr_link_reg_buf(lnk, buf_desc);
if (rc) {
- mutex_unlock(&lgr->sndbufs_lock);
+ up_write(&lgr->sndbufs_lock);
return rc;
}
}
}
- mutex_unlock(&lgr->sndbufs_lock);
+ up_write(&lgr->sndbufs_lock);
return rc;
}
@@ -2243,7 +2247,7 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
int i, rc = 0, cnt = 0;
/* protect against parallel link reconfiguration */
- mutex_lock(&lgr->llc_conf_mutex);
+ down_read(&lgr->llc_conf_mutex);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
struct smc_link *lnk = &lgr->lnk[i];
@@ -2256,7 +2260,7 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
cnt++;
}
out:
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_read(&lgr->llc_conf_mutex);
if (!rc && !cnt)
rc = -EINVAL;
return rc;
@@ -2305,8 +2309,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
struct smc_link_group *lgr = conn->lgr;
struct list_head *buf_list;
int bufsize, bufsize_short;
+ struct rw_semaphore *lock; /* lock buffer list */
bool is_dgraded = false;
- struct mutex *lock; /* lock buffer list */
int sk_buf_size;
if (is_rmb)
@@ -2354,9 +2358,9 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
buf_desc->used = 1;
- mutex_lock(lock);
+ down_write(lock);
list_add(&buf_desc->list, buf_list);
- mutex_unlock(lock);
+ up_write(lock);
break; /* found */
}
@@ -2430,9 +2434,9 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
/* create rmb */
rc = __smc_buf_create(smc, is_smcd, true);
if (rc) {
- mutex_lock(&smc->conn.lgr->sndbufs_lock);
+ down_write(&smc->conn.lgr->sndbufs_lock);
list_del(&smc->conn.sndbuf_desc->list);
- mutex_unlock(&smc->conn.lgr->sndbufs_lock);
+ up_write(&smc->conn.lgr->sndbufs_lock);
smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
smc->conn.sndbuf_desc = NULL;
}
@@ -2595,6 +2599,7 @@ static int smc_core_reboot_event(struct notifier_block *this,
{
smc_lgrs_shutdown();
smc_ib_unregister_client();
+ smc_ism_exit();
return 0;
}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 285f9bd8e232..08b457c2d294 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -252,9 +252,9 @@ struct smc_link_group {
unsigned short vlan_id; /* vlan id of link group */
struct list_head sndbufs[SMC_RMBE_SIZES];/* tx buffers */
- struct mutex sndbufs_lock; /* protects tx buffers */
+ struct rw_semaphore sndbufs_lock; /* protects tx buffers */
struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */
- struct mutex rmbs_lock; /* protects rx buffers */
+ struct rw_semaphore rmbs_lock; /* protects rx buffers */
u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */
struct delayed_work free_work; /* delayed freeing of an lgr */
@@ -298,7 +298,7 @@ struct smc_link_group {
/* queue for llc events */
spinlock_t llc_event_q_lock;
/* protects llc_event_q */
- struct mutex llc_conf_mutex;
+ struct rw_semaphore llc_conf_mutex;
/* protects lgr reconfig. */
struct work_struct llc_add_link_work;
struct work_struct llc_del_link_work;
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 80ea7d954ece..7ff2152971a5 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -167,12 +167,13 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
!list_empty(&smc->conn.lgr->list)) {
struct smc_connection *conn = &smc->conn;
struct smcd_diag_dmbinfo dinfo;
+ struct smcd_dev *smcd = conn->lgr->smcd;
memset(&dinfo, 0, sizeof(dinfo));
dinfo.linkid = *((u32 *)conn->lgr->id);
dinfo.peer_gid = conn->lgr->peer_gid;
- dinfo.my_gid = conn->lgr->smcd->local_gid;
+ dinfo.my_gid = smcd->ops->get_local_gid(smcd);
dinfo.token = conn->rmb_desc->token;
dinfo.peer_token = conn->peer_token;
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 911fe08bc54b..3b0b7710c6b0 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -17,6 +17,7 @@
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
+#include "linux/ism.h"
struct smcd_dev_list smcd_dev_list = {
.list = LIST_HEAD_INIT(smcd_dev_list.list),
@@ -26,6 +27,22 @@ struct smcd_dev_list smcd_dev_list = {
static bool smc_ism_v2_capable;
static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN];
+#if IS_ENABLED(CONFIG_ISM)
+static void smcd_register_dev(struct ism_dev *ism);
+static void smcd_unregister_dev(struct ism_dev *ism);
+static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event);
+static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno,
+ u16 dmbemask);
+
+static struct ism_client smc_ism_client = {
+ .name = "SMC-D",
+ .add = smcd_register_dev,
+ .remove = smcd_unregister_dev,
+ .handle_event = smcd_handle_event,
+ .handle_irq = smcd_handle_irq,
+};
+#endif
+
/* Test if an ISM communication is possible - same CPC */
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
{
@@ -183,6 +200,7 @@ int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
struct smc_buf_desc *dmb_desc)
{
+#if IS_ENABLED(CONFIG_ISM)
struct smcd_dmb dmb;
int rc;
@@ -191,7 +209,7 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
dmb.sba_idx = dmb_desc->sba_idx;
dmb.vlan_id = lgr->vlan_id;
dmb.rgid = lgr->peer_gid;
- rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
+ rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb, &smc_ism_client);
if (!rc) {
dmb_desc->sba_idx = dmb.sba_idx;
dmb_desc->token = dmb.dmb_tok;
@@ -200,6 +218,9 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
dmb_desc->len = dmb.dmb_len;
}
return rc;
+#else
+ return 0;
+#endif
}
static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
@@ -210,9 +231,11 @@ static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
struct smc_pci_dev smc_pci_dev;
struct nlattr *port_attrs;
struct nlattr *attrs;
+ struct ism_dev *ism;
int use_cnt = 0;
void *nlh;
+ ism = smcd->priv;
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&smc_gen_nl_family, NLM_F_MULTI,
SMC_NETLINK_GET_DEV_SMCD);
@@ -227,7 +250,7 @@ static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, use_cnt > 0))
goto errattr;
memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
- smc_set_pci_values(to_pci_dev(smcd->dev.parent), &smc_pci_dev);
+ smc_set_pci_values(to_pci_dev(ism->dev.parent), &smc_pci_dev);
if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid))
goto errattr;
if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid))
@@ -293,10 +316,11 @@ int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+#if IS_ENABLED(CONFIG_ISM)
struct smc_ism_event_work {
struct work_struct work;
struct smcd_dev *smcd;
- struct smcd_event event;
+ struct ism_event event;
};
#define ISM_EVENT_REQUEST 0x0001
@@ -336,24 +360,6 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
}
}
-int smc_ism_signal_shutdown(struct smc_link_group *lgr)
-{
- int rc;
- union smcd_sw_event_info ev_info;
-
- if (lgr->peer_shutdown)
- return 0;
-
- memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
- ev_info.vlan_id = lgr->vlan_id;
- ev_info.code = ISM_EVENT_REQUEST;
- rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
- ISM_EVENT_REQUEST_IR,
- ISM_EVENT_CODE_SHUTDOWN,
- ev_info.info);
- return rc;
-}
-
/* worker for SMC-D events */
static void smc_ism_event_work(struct work_struct *work)
{
@@ -373,44 +379,25 @@ static void smc_ism_event_work(struct work_struct *work)
kfree(wrk);
}
-static void smcd_release(struct device *dev)
-{
- struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev);
-
- kfree(smcd->conn);
- kfree(smcd);
-}
-
-struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
- const struct smcd_ops *ops, int max_dmbs)
+static struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+ const struct smcd_ops *ops, int max_dmbs)
{
struct smcd_dev *smcd;
- smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
+ smcd = devm_kzalloc(parent, sizeof(*smcd), GFP_KERNEL);
if (!smcd)
return NULL;
- smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
- GFP_KERNEL);
- if (!smcd->conn) {
- kfree(smcd);
+ smcd->conn = devm_kcalloc(parent, max_dmbs,
+ sizeof(struct smc_connection *), GFP_KERNEL);
+ if (!smcd->conn)
return NULL;
- }
smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
WQ_MEM_RECLAIM, name);
- if (!smcd->event_wq) {
- kfree(smcd->conn);
- kfree(smcd);
+ if (!smcd->event_wq)
return NULL;
- }
- smcd->dev.parent = parent;
- smcd->dev.release = smcd_release;
- device_initialize(&smcd->dev);
- dev_set_name(&smcd->dev, name);
smcd->ops = ops;
- if (smc_pnetid_by_dev_port(parent, 0, smcd->pnetid))
- smc_pnetid_by_table_smcd(smcd);
spin_lock_init(&smcd->lock);
spin_lock_init(&smcd->lgr_lock);
@@ -419,11 +406,23 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
init_waitqueue_head(&smcd->lgrs_deleted);
return smcd;
}
-EXPORT_SYMBOL_GPL(smcd_alloc_dev);
-int smcd_register_dev(struct smcd_dev *smcd)
+static void smcd_register_dev(struct ism_dev *ism)
{
- int rc;
+ const struct smcd_ops *ops = ism_get_smcd_ops();
+ struct smcd_dev *smcd;
+
+ if (!ops)
+ return;
+
+ smcd = smcd_alloc_dev(&ism->pdev->dev, dev_name(&ism->pdev->dev), ops,
+ ISM_NR_DMBS);
+ if (!smcd)
+ return;
+ smcd->priv = ism;
+ ism_set_priv(ism, &smc_ism_client, smcd);
+ if (smc_pnetid_by_dev_port(&ism->pdev->dev, 0, smcd->pnetid))
+ smc_pnetid_by_table_smcd(smcd);
mutex_lock(&smcd_dev_list.mutex);
if (list_empty(&smcd_dev_list.list)) {
@@ -444,43 +443,28 @@ int smcd_register_dev(struct smcd_dev *smcd)
mutex_unlock(&smcd_dev_list.mutex);
pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
- dev_name(&smcd->dev), smcd->pnetid,
+ dev_name(&ism->dev), smcd->pnetid,
smcd->pnetid_by_user ? " (user defined)" : "");
- rc = device_add(&smcd->dev);
- if (rc) {
- mutex_lock(&smcd_dev_list.mutex);
- list_del(&smcd->list);
- mutex_unlock(&smcd_dev_list.mutex);
- }
-
- return rc;
+ return;
}
-EXPORT_SYMBOL_GPL(smcd_register_dev);
-void smcd_unregister_dev(struct smcd_dev *smcd)
+static void smcd_unregister_dev(struct ism_dev *ism)
{
+ struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client);
+
pr_warn_ratelimited("smc: removing smcd device %s\n",
- dev_name(&smcd->dev));
+ dev_name(&ism->dev));
+ smcd->going_away = 1;
+ smc_smcd_terminate_all(smcd);
mutex_lock(&smcd_dev_list.mutex);
list_del_init(&smcd->list);
mutex_unlock(&smcd_dev_list.mutex);
- smcd->going_away = 1;
- smc_smcd_terminate_all(smcd);
destroy_workqueue(smcd->event_wq);
-
- device_del(&smcd->dev);
-}
-EXPORT_SYMBOL_GPL(smcd_unregister_dev);
-
-void smcd_free_dev(struct smcd_dev *smcd)
-{
- put_device(&smcd->dev);
}
-EXPORT_SYMBOL_GPL(smcd_free_dev);
/* SMCD Device event handler. Called from ISM device interrupt handler.
- * Parameters are smcd device pointer,
+ * Parameters are the ism device pointer,
* - event->type (0 --> DMB, 1 --> GID),
* - event->code (event code),
* - event->tok (either DMB token when event type 0, or GID when event type 1)
@@ -490,8 +474,9 @@ EXPORT_SYMBOL_GPL(smcd_free_dev);
* Context:
* - Function called in IRQ context from ISM device driver event handler.
*/
-void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
+static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event)
{
+ struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client);
struct smc_ism_event_work *wrk;
if (smcd->going_away)
@@ -505,17 +490,18 @@ void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
wrk->event = *event;
queue_work(smcd->event_wq, &wrk->work);
}
-EXPORT_SYMBOL_GPL(smcd_handle_event);
/* SMCD Device interrupt handler. Called from ISM device interrupt handler.
- * Parameters are smcd device pointer, DMB number, and the DMBE bitmask.
+ * Parameters are the ism device pointer, DMB number, and the DMBE bitmask.
* Find the connection and schedule the tasklet for this connection.
*
* Context:
* - Function called in IRQ context from ISM device driver IRQ handler.
*/
-void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno, u16 dmbemask)
+static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno,
+ u16 dmbemask)
{
+ struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client);
struct smc_connection *conn = NULL;
unsigned long flags;
@@ -525,10 +511,44 @@ void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno, u16 dmbemask)
tasklet_schedule(&conn->rx_tsklet);
spin_unlock_irqrestore(&smcd->lock, flags);
}
-EXPORT_SYMBOL_GPL(smcd_handle_irq);
+#endif
+
+int smc_ism_signal_shutdown(struct smc_link_group *lgr)
+{
+ int rc = 0;
+#if IS_ENABLED(CONFIG_ISM)
+ union smcd_sw_event_info ev_info;
+
+ if (lgr->peer_shutdown)
+ return 0;
+
+ memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
+ ev_info.vlan_id = lgr->vlan_id;
+ ev_info.code = ISM_EVENT_REQUEST;
+ rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
+ ISM_EVENT_REQUEST_IR,
+ ISM_EVENT_CODE_SHUTDOWN,
+ ev_info.info);
+#endif
+ return rc;
+}
-void __init smc_ism_init(void)
+int smc_ism_init(void)
{
+ int rc = 0;
+
+#if IS_ENABLED(CONFIG_ISM)
smc_ism_v2_capable = false;
memset(smc_ism_v2_system_eid, 0, SMC_MAX_EID_LEN);
+
+ rc = ism_register_client(&smc_ism_client);
+#endif
+ return rc;
+}
+
+void smc_ism_exit(void)
+{
+#if IS_ENABLED(CONFIG_ISM)
+ ism_unregister_client(&smc_ism_client);
+#endif
}
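smc_ism no longer creates and registers its own device objects; it attaches to the ISM bus driver as a client, and everything device-shaped (add/remove, events, IRQs) arrives through the struct ism_client callbacks registered at module init. A skeleton of that client contract, sketched from the hunks above with the callback bodies elided:

#if IS_ENABLED(CONFIG_ISM)
static void example_add(struct ism_dev *ism)
{
	/* allocate per-device state, stash it via ism_set_priv() */
}

static void example_remove(struct ism_dev *ism)
{
	/* fetch state via ism_get_priv(), quiesce and free it */
}

static void example_event(struct ism_dev *ism, struct ism_event *event)
{
	/* IRQ context: defer real work to a workqueue */
}

static void example_irq(struct ism_dev *ism, unsigned int dmbno,
			u16 dmbemask)
{
	/* IRQ context: look up the connection, kick its tasklet */
}

static struct ism_client example_client = {
	.name		= "example",
	.add		= example_add,
	.remove		= example_remove,
	.handle_event	= example_event,
	.handle_irq	= example_irq,
};
#endif

static int __init example_init(void)
{
#if IS_ENABLED(CONFIG_ISM)
	return ism_register_client(&example_client);
#else
	return 0;
#endif
}

static void __exit example_exit(void)
{
#if IS_ENABLED(CONFIG_ISM)
	ism_unregister_client(&example_client);
#endif
}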
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index d6b2db604fe8..832b2f42d79f 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -42,7 +42,8 @@ int smc_ism_signal_shutdown(struct smc_link_group *lgr);
void smc_ism_get_system_eid(u8 **eid);
u16 smc_ism_get_chid(struct smcd_dev *dev);
bool smc_ism_is_v2_capable(void);
-void smc_ism_init(void);
+int smc_ism_init(void);
+void smc_ism_exit(void);
int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
static inline int smc_ism_write(struct smcd_dev *smcd, u64 dmb_tok,
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 524649d0ab65..a0840b8c935b 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -608,7 +608,7 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
prim_lnk_idx = link->link_idx;
lnk_idx = link_new->link_idx;
- mutex_lock(&lgr->rmbs_lock);
+ down_write(&lgr->rmbs_lock);
ext->num_rkeys = lgr->conns_num;
if (!ext->num_rkeys)
goto out;
@@ -628,7 +628,7 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
}
len += i * sizeof(ext->rt[0]);
out:
- mutex_unlock(&lgr->rmbs_lock);
+ up_write(&lgr->rmbs_lock);
return len;
}
@@ -889,7 +889,7 @@ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
int rc = 0;
int i;
- mutex_lock(&lgr->rmbs_lock);
+ down_write(&lgr->rmbs_lock);
num_rkeys_send = lgr->conns_num;
buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
do {
@@ -916,7 +916,7 @@ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
break;
} while (num_rkeys_send || num_rkeys_recv);
- mutex_unlock(&lgr->rmbs_lock);
+ up_write(&lgr->rmbs_lock);
return rc;
}
@@ -999,14 +999,14 @@ static void smc_llc_save_add_link_rkeys(struct smc_link *link,
ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
SMC_WR_TX_SIZE);
max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
- mutex_lock(&lgr->rmbs_lock);
+ down_write(&lgr->rmbs_lock);
for (i = 0; i < max; i++) {
smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
ext->rt[i].rmb_key,
ext->rt[i].rmb_vaddr_new,
ext->rt[i].rmb_key_new);
}
- mutex_unlock(&lgr->rmbs_lock);
+ up_write(&lgr->rmbs_lock);
}
static void smc_llc_save_add_link_info(struct smc_link *link,
@@ -1202,12 +1202,12 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
if (smc_llc_is_local_add_link(&qentry->msg))
smc_llc_cli_add_link_invite(qentry->link, qentry);
else
smc_llc_cli_add_link(qentry->link, qentry);
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_write(&lgr->llc_conf_mutex);
}
static int smc_llc_active_link_count(struct smc_link_group *lgr)
@@ -1313,7 +1313,7 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link,
int rc = 0;
int i;
- mutex_lock(&lgr->rmbs_lock);
+ down_write(&lgr->rmbs_lock);
num_rkeys_send = lgr->conns_num;
buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
do {
@@ -1338,7 +1338,7 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link,
smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
} while (num_rkeys_send || num_rkeys_recv);
out:
- mutex_unlock(&lgr->rmbs_lock);
+ up_write(&lgr->rmbs_lock);
return rc;
}
@@ -1509,13 +1509,13 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
rc = smc_llc_srv_add_link(link, qentry);
if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
/* delete any asymmetric link */
smc_llc_delete_asym_link(lgr);
}
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_write(&lgr->llc_conf_mutex);
kfree(qentry);
}
@@ -1582,7 +1582,7 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
smc_lgr_terminate_sched(lgr);
goto out;
}
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
/* delete single link */
for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
@@ -1616,7 +1616,7 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
smc_lgr_terminate_sched(lgr);
}
out_unlock:
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_write(&lgr->llc_conf_mutex);
out:
kfree(qentry);
}
@@ -1652,7 +1652,7 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
int active_links;
int i;
- mutex_lock(&lgr->llc_conf_mutex);
+ down_write(&lgr->llc_conf_mutex);
qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
lnk = qentry->link;
del_llc = &qentry->msg.delete_link;
@@ -1708,7 +1708,7 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
smc_llc_add_link_local(lnk);
}
out:
- mutex_unlock(&lgr->llc_conf_mutex);
+ up_write(&lgr->llc_conf_mutex);
kfree(qentry);
}
@@ -2126,7 +2126,7 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
spin_lock_init(&lgr->llc_flow_lock);
init_waitqueue_head(&lgr->llc_flow_waiter);
init_waitqueue_head(&lgr->llc_msg_waiter);
- mutex_init(&lgr->llc_conf_mutex);
+ init_rwsem(&lgr->llc_conf_mutex);
lgr->llc_testlink_time = READ_ONCE(net->smc.sysctl_smcr_testlink_time);
}
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 25fb2fd186e2..11775401df68 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -103,7 +103,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
struct smc_pnetentry *pnetelem, *tmp_pe;
struct smc_pnettable *pnettable;
struct smc_ib_device *ibdev;
- struct smcd_dev *smcd_dev;
+ struct smcd_dev *smcd;
struct smc_net *sn;
int rc = -ENOENT;
int ibport;
@@ -162,16 +162,17 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
mutex_unlock(&smc_ib_devices.mutex);
/* remove smcd devices */
mutex_lock(&smcd_dev_list.mutex);
- list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
- if (smcd_dev->pnetid_by_user &&
+ list_for_each_entry(smcd, &smcd_dev_list.list, list) {
+ if (smcd->pnetid_by_user &&
(!pnet_name ||
- smc_pnet_match(pnet_name, smcd_dev->pnetid))) {
+ smc_pnet_match(pnet_name, smcd->pnetid))) {
pr_warn_ratelimited("smc: smcd device %s "
"erased user defined pnetid "
- "%.16s\n", dev_name(&smcd_dev->dev),
- smcd_dev->pnetid);
- memset(smcd_dev->pnetid, 0, SMC_MAX_PNETID_LEN);
- smcd_dev->pnetid_by_user = false;
+ "%.16s\n",
+ dev_name(smcd->ops->get_dev(smcd)),
+ smcd->pnetid);
+ memset(smcd->pnetid, 0, SMC_MAX_PNETID_LEN);
+ smcd->pnetid_by_user = false;
rc = 0;
}
}
@@ -331,8 +332,8 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
- if (!strncmp(dev_name(&smcd_dev->dev), smcd_name,
- IB_DEVICE_NAME_MAX - 1))
+ if (!strncmp(dev_name(smcd_dev->ops->get_dev(smcd_dev)),
+ smcd_name, IB_DEVICE_NAME_MAX - 1))
goto out;
}
smcd_dev = NULL;
@@ -411,7 +412,8 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
struct smc_ib_device *ib_dev;
bool smcddev_applied = true;
bool ibdev_applied = true;
- struct smcd_dev *smcd_dev;
+ struct smcd_dev *smcd;
+ struct device *dev;
bool new_ibdev;
/* try to apply the pnetid to active devices */
@@ -425,14 +427,16 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
ib_port,
ib_dev->pnetid[ib_port - 1]);
}
- smcd_dev = smc_pnet_find_smcd(ib_name);
- if (smcd_dev) {
- smcddev_applied = smc_pnet_apply_smcd(smcd_dev, pnet_name);
- if (smcddev_applied)
+ smcd = smc_pnet_find_smcd(ib_name);
+ if (smcd) {
+ smcddev_applied = smc_pnet_apply_smcd(smcd, pnet_name);
+ if (smcddev_applied) {
+ dev = smcd->ops->get_dev(smcd);
pr_warn_ratelimited("smc: smcd device %s "
"applied user defined pnetid "
- "%.16s\n", dev_name(&smcd_dev->dev),
- smcd_dev->pnetid);
+ "%.16s\n", dev_name(dev),
+ smcd->pnetid);
+ }
}
/* Apply fails when a device has a hardware-defined pnetid set, do not
* add a pnet table entry in that case.
@@ -1181,7 +1185,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
*/
int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
{
- const char *ib_name = dev_name(&smcddev->dev);
+ const char *ib_name = dev_name(smcddev->ops->get_dev(smcddev));
struct smc_pnettable *pnettable;
struct smc_pnetentry *tmp_pe;
struct smc_net *sn;
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 17c5aee7ee4f..4380d32f5a5f 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -13,8 +13,10 @@
#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
+#include <linux/splice.h>
#include <net/sock.h>
+#include <trace/events/sock.h>
#include "smc.h"
#include "smc_core.h"
@@ -31,6 +33,8 @@ static void smc_rx_wake_up(struct sock *sk)
{
struct socket_wq *wq;
+ trace_sk_data_ready(sk);
+
/* derived from sock_def_readable() */
/* called already in smc_listen_work() */
rcu_read_lock();
diff --git a/net/socket.c b/net/socket.c
index c6c44e26e954..6bae8ce7059e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -106,6 +106,7 @@
#include <net/busy_poll.h>
#include <linux/errqueue.h>
#include <linux/ptp_clock_kernel.h>
+#include <trace/events/sock.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
@@ -709,12 +710,22 @@ INDIRECT_CALLABLE_DECLARE(int inet_sendmsg(struct socket *, struct msghdr *,
size_t));
INDIRECT_CALLABLE_DECLARE(int inet6_sendmsg(struct socket *, struct msghdr *,
size_t));
+
+static noinline void call_trace_sock_send_length(struct sock *sk, int ret,
+ int flags)
+{
+ trace_sock_send_length(sk, ret, 0);
+}
+
static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
int ret = INDIRECT_CALL_INET(sock->ops->sendmsg, inet6_sendmsg,
inet_sendmsg, sock, msg,
msg_data_left(msg));
BUG_ON(ret == -EIOCBQUEUED);
+
+ if (trace_sock_send_length_enabled())
+ call_trace_sock_send_length(sock->sk, ret, 0);
return ret;
}
@@ -992,12 +1003,21 @@ INDIRECT_CALLABLE_DECLARE(int inet_recvmsg(struct socket *, struct msghdr *,
size_t, int));
INDIRECT_CALLABLE_DECLARE(int inet6_recvmsg(struct socket *, struct msghdr *,
size_t, int));
+
+static noinline void call_trace_sock_recv_length(struct sock *sk, int ret, int flags)
+{
+ trace_sock_recv_length(sk, ret, flags);
+}
+
static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
int flags)
{
- return INDIRECT_CALL_INET(sock->ops->recvmsg, inet6_recvmsg,
- inet_recvmsg, sock, msg, msg_data_left(msg),
- flags);
+ int ret = INDIRECT_CALL_INET(sock->ops->recvmsg, inet6_recvmsg,
+ inet_recvmsg, sock, msg,
+ msg_data_left(msg), flags);
+ if (trace_sock_recv_length_enabled())
+ call_trace_sock_recv_length(sock->sk, ret, flags);
+ return ret;
}
/**
@@ -1047,6 +1067,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
{
struct socket *sock;
int flags;
+ int ret;
sock = file->private_data;
@@ -1054,7 +1075,11 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
/* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
flags |= more;
- return kernel_sendpage(sock, page, offset, size, flags);
+ ret = kernel_sendpage(sock, page, offset, size, flags);
+
+ if (trace_sock_send_length_enabled())
+ call_trace_sock_send_length(sock->sk, ret, 0);
+ return ret;
}
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
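The sendmsg/recvmsg/sendpage instrumentation above follows one pattern: a trace_*_enabled() static-key check in the inline fast path, jumping to a noinline call_trace_*() helper only when the tracepoint is armed, so argument setup and the call itself never burden the common case. The same shape against a hypothetical tracepoint (all names here are illustrative):

/* Out of line: reached only while the tracepoint is enabled. */
static noinline void call_trace_example(struct sock *sk, int ret)
{
	trace_example(sk, ret);		/* hypothetical tracepoint */
}

static inline int example_fast_path(struct sock *sk)
{
	int ret = do_example_work(sk);	/* hypothetical hot-path work */

	/* compiles to a patched no-op branch when disabled */
	if (trace_example_enabled())
		call_trace_example(sk, ret);
	return ret;
}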
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 91252adcae46..8bcc8c3ffbfe 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -55,6 +55,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/xprt.h>
+#include <trace/events/sock.h>
#include <trace/events/sunrpc.h>
#include "socklib.h"
@@ -307,6 +308,8 @@ static void svc_data_ready(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+ trace_sk_data_ready(sk);
+
if (svsk) {
/* Refer to svc_setup_socket() for details. */
rmb();
@@ -684,6 +687,8 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+ trace_sk_data_ready(sk);
+
if (svsk) {
/* Refer to svc_setup_socket() for details. */
rmb();
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index aaa5b2741b79..adcbedc244d6 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -52,6 +52,7 @@
#include <linux/uio.h>
#include <linux/sched/mm.h>
+#include <trace/events/sock.h>
#include <trace/events/sunrpc.h>
#include "socklib.h"
@@ -1378,6 +1379,8 @@ static void xs_data_ready(struct sock *sk)
{
struct rpc_xprt *xprt;
+ trace_sk_data_ready(sk);
+
xprt = xprt_from_sock(sk);
if (xprt != NULL) {
struct sock_xprt *transport = container_of(xprt,
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index dfea27a906f2..9b47c8409231 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -39,6 +39,7 @@
#include "node.h"
#include "net.h"
#include <net/genetlink.h>
+#include <linux/string_helpers.h>
#include <linux/tipc_config.h>
/* The legacy API had an artificial message length limit called
@@ -173,11 +174,6 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
return buf;
}
-static inline bool string_is_valid(char *s, int len)
-{
- return memchr(s, '\0', len) ? true : false;
-}
-
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
@@ -445,7 +441,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
return -EINVAL;
len = min_t(int, len, TIPC_MAX_BEARER_NAME);
- if (!string_is_valid(b->name, len))
+ if (!string_is_terminated(b->name, len))
return -EINVAL;
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
@@ -486,7 +482,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
return -EINVAL;
len = min_t(int, len, TIPC_MAX_BEARER_NAME);
- if (!string_is_valid(name, len))
+ if (!string_is_terminated(name, len))
return -EINVAL;
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
@@ -584,7 +580,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
return -EINVAL;
len = min_t(int, len, TIPC_MAX_LINK_NAME);
- if (!string_is_valid(name, len))
+ if (!string_is_terminated(name, len))
return -EINVAL;
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
@@ -819,7 +815,7 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
return -EINVAL;
len = min_t(int, len, TIPC_MAX_LINK_NAME);
- if (!string_is_valid(lc->name, len))
+ if (!string_is_terminated(lc->name, len))
return -EINVAL;
media = tipc_media_find(lc->name);
@@ -856,7 +852,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
return -EINVAL;
len = min_t(int, len, TIPC_MAX_LINK_NAME);
- if (!string_is_valid(name, len))
+ if (!string_is_terminated(name, len))
return -EINVAL;
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
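string_is_valid() was a local re-spelling of a generic check, so these call sites switch to string_is_terminated() from <linux/string_helpers.h>; the helper removed above shows the semantics exactly (is there a NUL within the first len bytes?). For reference, the shared helper is the same one-liner, sketched:

static inline bool string_is_terminated(const char *s, int len)
{
	return memchr(s, '\0', len) ? true : false;
}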
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index a38733f2197a..37edfe10f8c6 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -37,6 +37,7 @@
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>
+#include <trace/events/sock.h>
#include "core.h"
#include "name_table.h"
@@ -2130,6 +2131,8 @@ static void tipc_data_ready(struct sock *sk)
{
struct socket_wq *wq;
+ trace_sk_data_ready(sk);
+
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 69c88cc03887..8ee0c07d00e9 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -43,6 +43,7 @@
#include "bearer.h"
#include <net/sock.h>
#include <linux/module.h>
+#include <trace/events/sock.h>
/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25
@@ -439,6 +440,8 @@ static void tipc_conn_data_ready(struct sock *sk)
{
struct tipc_conn *con;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
con = sk->sk_user_data;
if (connected(con)) {
@@ -496,6 +499,8 @@ static void tipc_topsrv_listener_data_ready(struct sock *sk)
{
struct tipc_topsrv *srv;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
srv = sk->sk_user_data;
if (srv)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 7c5de4afbe99..782d3701b86f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -44,6 +44,7 @@
#include <net/strparser.h>
#include <net/tls.h>
+#include <trace/events/sock.h>
#include "tls.h"
@@ -2300,6 +2301,8 @@ static void tls_data_ready(struct sock *sk)
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct sk_psock *psock;
+ trace_sk_data_ready(sk);
+
tls_strp_data_ready(&ctx->strp);
psock = sk_psock_get(sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 81ff98298996..347122c3575e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -112,6 +112,7 @@
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
+#include <linux/splice.h>
#include <linux/freezer.h>
#include <linux/file.h>
#include <linux/btf_ids.h>
@@ -807,23 +808,23 @@ static int unix_count_nr_fds(struct sock *sk)
static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
struct sock *sk = sock->sk;
+ unsigned char s_state;
struct unix_sock *u;
- int nr_fds;
+ int nr_fds = 0;
if (sk) {
+ s_state = READ_ONCE(sk->sk_state);
u = unix_sk(sk);
- if (sock->type == SOCK_DGRAM) {
- nr_fds = atomic_read(&u->scm_stat.nr_fds);
- goto out_print;
- }
- unix_state_lock(sk);
- if (sk->sk_state != TCP_LISTEN)
+		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
+		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN,
+		 * and SOCK_DGRAM only reads an atomic counter, so no lock is
+		 * needed in either case.
+		 */
+ if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
nr_fds = atomic_read(&u->scm_stat.nr_fds);
- else
+ else if (s_state == TCP_LISTEN)
nr_fds = unix_count_nr_fds(sk);
- unix_state_unlock(sk);
-out_print:
+
seq_printf(m, "scm_fds: %u\n", nr_fds);
}
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index d593d5b6d4b1..19aea7cba26e 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1861,8 +1861,9 @@ static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
written = transport->stream_enqueue(vsk,
msg, len - total_written);
}
+
if (written < 0) {
- err = -ENOMEM;
+ err = written;
goto out_err;
}
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index ad64f403536a..28b5a8e8e094 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -42,8 +42,7 @@ struct virtio_vsock {
bool tx_run;
struct work_struct send_pkt_work;
- spinlock_t send_pkt_list_lock;
- struct list_head send_pkt_list;
+ struct sk_buff_head send_pkt_queue;
atomic_t queued_replies;
@@ -101,41 +100,31 @@ virtio_transport_send_pkt_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
- struct virtio_vsock_pkt *pkt;
struct scatterlist hdr, buf, *sgs[2];
int ret, in_sg = 0, out_sg = 0;
+ struct sk_buff *skb;
bool reply;
- spin_lock_bh(&vsock->send_pkt_list_lock);
- if (list_empty(&vsock->send_pkt_list)) {
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
+ if (!skb)
break;
- }
-
- pkt = list_first_entry(&vsock->send_pkt_list,
- struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
- virtio_transport_deliver_tap_pkt(pkt);
+ virtio_transport_deliver_tap_pkt(skb);
+ reply = virtio_vsock_skb_reply(skb);
- reply = pkt->reply;
-
- sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
+ sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
sgs[out_sg++] = &hdr;
- if (pkt->buf) {
- sg_init_one(&buf, pkt->buf, pkt->len);
+ if (skb->len > 0) {
+ sg_init_one(&buf, skb->data, skb->len);
sgs[out_sg++] = &buf;
}
- ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
+ ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
/* Usually this means that there is no more space available in
* the vq
*/
if (ret < 0) {
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
break;
}
@@ -164,32 +153,32 @@ out:
}
static int
-virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+virtio_transport_send_pkt(struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr;
struct virtio_vsock *vsock;
- int len = pkt->len;
+ int len = skb->len;
+
+ hdr = virtio_vsock_hdr(skb);
rcu_read_lock();
vsock = rcu_dereference(the_virtio_vsock);
if (!vsock) {
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
len = -ENODEV;
goto out_rcu;
}
- if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
- virtio_transport_free_pkt(pkt);
+ if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
+ kfree_skb(skb);
len = -ENODEV;
goto out_rcu;
}
- if (pkt->reply)
+ if (virtio_vsock_skb_reply(skb))
atomic_inc(&vsock->queued_replies);
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add_tail(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
+ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
out_rcu:
@@ -201,9 +190,7 @@ static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
struct virtio_vsock *vsock;
- struct virtio_vsock_pkt *pkt, *n;
int cnt = 0, ret;
- LIST_HEAD(freeme);
rcu_read_lock();
vsock = rcu_dereference(the_virtio_vsock);
@@ -212,20 +199,7 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
goto out_rcu;
}
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
- if (pkt->vsk != vsk)
- continue;
- list_move(&pkt->list, &freeme);
- }
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
- list_for_each_entry_safe(pkt, n, &freeme, list) {
- if (pkt->reply)
- cnt++;
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
+ cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
if (cnt) {
struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
@@ -246,38 +220,28 @@ out_rcu:
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
- int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
- struct virtio_vsock_pkt *pkt;
- struct scatterlist hdr, buf, *sgs[2];
+ int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
+ struct scatterlist pkt, *p;
struct virtqueue *vq;
+ struct sk_buff *skb;
int ret;
vq = vsock->vqs[VSOCK_VQ_RX];
do {
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
+ skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
+ if (!skb)
break;
- pkt->buf = kmalloc(buf_len, GFP_KERNEL);
- if (!pkt->buf) {
- virtio_transport_free_pkt(pkt);
+ memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
+ sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
+ p = &pkt;
+ ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
+ if (ret < 0) {
+ kfree_skb(skb);
break;
}
- pkt->buf_len = buf_len;
- pkt->len = buf_len;
-
- sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
- sgs[0] = &hdr;
-
- sg_init_one(&buf, pkt->buf, buf_len);
- sgs[1] = &buf;
- ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
- if (ret) {
- virtio_transport_free_pkt(pkt);
- break;
- }
vsock->rx_buf_nr++;
} while (vq->num_free);
if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
@@ -299,12 +263,12 @@ static void virtio_transport_tx_work(struct work_struct *work)
goto out;
do {
- struct virtio_vsock_pkt *pkt;
+ struct sk_buff *skb;
unsigned int len;
virtqueue_disable_cb(vq);
- while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
- virtio_transport_free_pkt(pkt);
+ while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
+ consume_skb(skb);
added = true;
}
} while (!virtqueue_enable_cb(vq));
@@ -529,7 +493,7 @@ static void virtio_transport_rx_work(struct work_struct *work)
do {
virtqueue_disable_cb(vq);
for (;;) {
- struct virtio_vsock_pkt *pkt;
+ struct sk_buff *skb;
unsigned int len;
if (!virtio_transport_more_replies(vsock)) {
@@ -540,23 +504,22 @@ static void virtio_transport_rx_work(struct work_struct *work)
goto out;
}
- pkt = virtqueue_get_buf(vq, &len);
- if (!pkt) {
+ skb = virtqueue_get_buf(vq, &len);
+ if (!skb)
break;
- }
vsock->rx_buf_nr--;
/* Drop short/long packets */
- if (unlikely(len < sizeof(pkt->hdr) ||
- len > sizeof(pkt->hdr) + pkt->len)) {
- virtio_transport_free_pkt(pkt);
+ if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
+ len > virtio_vsock_skb_len(skb))) {
+ kfree_skb(skb);
continue;
}
- pkt->len = len - sizeof(pkt->hdr);
- virtio_transport_deliver_tap_pkt(pkt);
- virtio_transport_recv_pkt(&virtio_transport, pkt);
+ virtio_vsock_skb_rx_put(skb);
+ virtio_transport_deliver_tap_pkt(skb);
+ virtio_transport_recv_pkt(&virtio_transport, skb);
}
} while (!virtqueue_enable_cb(vq));
@@ -610,7 +573,7 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
struct virtio_device *vdev = vsock->vdev;
- struct virtio_vsock_pkt *pkt;
+ struct sk_buff *skb;
/* Reset all connected sockets when the VQs disappear */
vsock_for_each_connected_socket(&virtio_transport.transport,
@@ -637,23 +600,16 @@ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
virtio_reset_device(vdev);
mutex_lock(&vsock->rx_lock);
- while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
- virtio_transport_free_pkt(pkt);
+ while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
+ kfree_skb(skb);
mutex_unlock(&vsock->rx_lock);
mutex_lock(&vsock->tx_lock);
- while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
- virtio_transport_free_pkt(pkt);
+ while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
+ kfree_skb(skb);
mutex_unlock(&vsock->tx_lock);
- spin_lock_bh(&vsock->send_pkt_list_lock);
- while (!list_empty(&vsock->send_pkt_list)) {
- pkt = list_first_entry(&vsock->send_pkt_list,
- struct virtio_vsock_pkt, list);
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
/* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev);
@@ -690,8 +646,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
mutex_init(&vsock->tx_lock);
mutex_init(&vsock->rx_lock);
mutex_init(&vsock->event_lock);
- spin_lock_init(&vsock->send_pkt_list_lock);
- INIT_LIST_HEAD(&vsock->send_pkt_list);
+ skb_queue_head_init(&vsock->send_pkt_queue);
INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
INIT_WORK(&vsock->event_work, virtio_transport_event_work);
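The whole virtio transport now carries packets as sk_buffs: the virtio_vsock header rides in the skb headroom and the payload in the data area, which is what lets the RX path hand buffers straight to the core and the TX completion path simply consume_skb(). A sketch of the helper shapes this relies on (modeled on the virtio_vsock header; treat the exact definitions there as authoritative):

#define VIRTIO_VSOCK_SKB_HEADROOM	(sizeof(struct virtio_vsock_hdr))

static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
{
	/* header was written at the start of the allocated buffer */
	return (struct virtio_vsock_hdr *)skb->head;
}

static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size,
						     gfp_t mask)
{
	struct sk_buff *skb = alloc_skb(size, mask);

	if (!skb)
		return NULL;

	/* reserve room for the header; payload starts at skb->data */
	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
	return skb;
}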
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index a9980e9b9304..a1581c77cf84 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -37,53 +37,56 @@ virtio_transport_get_ops(struct vsock_sock *vsk)
return container_of(t, struct virtio_transport, transport);
}
-static struct virtio_vsock_pkt *
-virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
+/* Returns a new packet on success, otherwise returns NULL.
+ *
+ * If NULL is returned, errp is set to a negative errno.
+ */
+static struct sk_buff *
+virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
size_t len,
u32 src_cid,
u32 src_port,
u32 dst_cid,
u32 dst_port)
{
- struct virtio_vsock_pkt *pkt;
+ const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
+ struct virtio_vsock_hdr *hdr;
+ struct sk_buff *skb;
+ void *payload;
int err;
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
+ skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
+ if (!skb)
return NULL;
- pkt->hdr.type = cpu_to_le16(info->type);
- pkt->hdr.op = cpu_to_le16(info->op);
- pkt->hdr.src_cid = cpu_to_le64(src_cid);
- pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
- pkt->hdr.src_port = cpu_to_le32(src_port);
- pkt->hdr.dst_port = cpu_to_le32(dst_port);
- pkt->hdr.flags = cpu_to_le32(info->flags);
- pkt->len = len;
- pkt->hdr.len = cpu_to_le32(len);
- pkt->reply = info->reply;
- pkt->vsk = info->vsk;
+ hdr = virtio_vsock_hdr(skb);
+ hdr->type = cpu_to_le16(info->type);
+ hdr->op = cpu_to_le16(info->op);
+ hdr->src_cid = cpu_to_le64(src_cid);
+ hdr->dst_cid = cpu_to_le64(dst_cid);
+ hdr->src_port = cpu_to_le32(src_port);
+ hdr->dst_port = cpu_to_le32(dst_port);
+ hdr->flags = cpu_to_le32(info->flags);
+ hdr->len = cpu_to_le32(len);
if (info->msg && len > 0) {
- pkt->buf = kmalloc(len, GFP_KERNEL);
- if (!pkt->buf)
- goto out_pkt;
-
- pkt->buf_len = len;
-
- err = memcpy_from_msg(pkt->buf, info->msg, len);
+ payload = skb_put(skb, len);
+ err = memcpy_from_msg(payload, info->msg, len);
if (err)
goto out;
if (msg_data_left(info->msg) == 0 &&
info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
- pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
if (info->msg->msg_flags & MSG_EOR)
- pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
}
}
+ if (info->reply)
+ virtio_vsock_skb_set_reply(skb);
+
trace_virtio_transport_alloc_pkt(src_cid, src_port,
dst_cid, dst_port,
len,
@@ -91,19 +94,18 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
info->op,
info->flags);
- return pkt;
+ return skb;
out:
- kfree(pkt->buf);
-out_pkt:
- kfree(pkt);
+ kfree_skb(skb);
return NULL;
}
/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
- struct virtio_vsock_pkt *pkt = opaque;
+ struct virtio_vsock_hdr *pkt_hdr;
+ struct sk_buff *pkt = opaque;
struct af_vsockmon_hdr *hdr;
struct sk_buff *skb;
size_t payload_len;
@@ -113,10 +115,11 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
* the payload length from the header and the buffer pointer taking
* care of the offset in the original packet.
*/
- payload_len = le32_to_cpu(pkt->hdr.len);
- payload_buf = pkt->buf + pkt->off;
+ pkt_hdr = virtio_vsock_hdr(pkt);
+ payload_len = pkt->len;
+ payload_buf = pkt->data;
- skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
GFP_ATOMIC);
if (!skb)
return NULL;
@@ -124,16 +127,16 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
hdr = skb_put(skb, sizeof(*hdr));
/* pkt->hdr is little-endian so no need to byteswap here */
- hdr->src_cid = pkt->hdr.src_cid;
- hdr->src_port = pkt->hdr.src_port;
- hdr->dst_cid = pkt->hdr.dst_cid;
- hdr->dst_port = pkt->hdr.dst_port;
+ hdr->src_cid = pkt_hdr->src_cid;
+ hdr->src_port = pkt_hdr->src_port;
+ hdr->dst_cid = pkt_hdr->dst_cid;
+ hdr->dst_port = pkt_hdr->dst_port;
hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
- hdr->len = cpu_to_le16(sizeof(pkt->hdr));
+ hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
memset(hdr->reserved, 0, sizeof(hdr->reserved));
- switch (le16_to_cpu(pkt->hdr.op)) {
+ switch (le16_to_cpu(pkt_hdr->op)) {
case VIRTIO_VSOCK_OP_REQUEST:
case VIRTIO_VSOCK_OP_RESPONSE:
hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
@@ -154,7 +157,7 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
break;
}
- skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
+ skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));
if (payload_len) {
skb_put_data(skb, payload_buf, payload_len);
@@ -163,13 +166,13 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
return skb;
}
-void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
+void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
{
- if (pkt->tap_delivered)
+ if (virtio_vsock_skb_tap_delivered(skb))
return;
- vsock_deliver_tap(virtio_transport_build_skb, pkt);
- pkt->tap_delivered = true;
+ vsock_deliver_tap(virtio_transport_build_skb, skb);
+ virtio_vsock_skb_set_tap_delivered(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
@@ -192,8 +195,8 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
u32 src_cid, src_port, dst_cid, dst_port;
const struct virtio_transport *t_ops;
struct virtio_vsock_sock *vvs;
- struct virtio_vsock_pkt *pkt;
u32 pkt_len = info->pkt_len;
+ struct sk_buff *skb;
info->type = virtio_transport_get_type(sk_vsock(vsk));
@@ -224,42 +227,47 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
return pkt_len;
- pkt = virtio_transport_alloc_pkt(info, pkt_len,
+ skb = virtio_transport_alloc_skb(info, pkt_len,
src_cid, src_port,
dst_cid, dst_port);
- if (!pkt) {
+ if (!skb) {
virtio_transport_put_credit(vvs, pkt_len);
return -ENOMEM;
}
- virtio_transport_inc_tx_pkt(vvs, pkt);
+ virtio_transport_inc_tx_pkt(vvs, skb);
- return t_ops->send_pkt(pkt);
+ return t_ops->send_pkt(skb);
}
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
- if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
+ if (vvs->rx_bytes + skb->len > vvs->buf_alloc)
return false;
- vvs->rx_bytes += pkt->len;
+ vvs->rx_bytes += skb->len;
return true;
}
static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
- vvs->rx_bytes -= pkt->len;
- vvs->fwd_cnt += pkt->len;
+ int len;
+
+ len = skb_headroom(skb) - sizeof(struct virtio_vsock_hdr) - skb->len;
+ vvs->rx_bytes -= len;
+ vvs->fwd_cnt += len;
}
-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+
spin_lock_bh(&vvs->rx_lock);
vvs->last_fwd_cnt = vvs->fwd_cnt;
- pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
- pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
+ hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
+ hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
@@ -303,29 +311,29 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
size_t len)
{
struct virtio_vsock_sock *vvs = vsk->trans;
- struct virtio_vsock_pkt *pkt;
size_t bytes, total = 0, off;
+ struct sk_buff *skb, *tmp;
int err = -EFAULT;
spin_lock_bh(&vvs->rx_lock);
- list_for_each_entry(pkt, &vvs->rx_queue, list) {
- off = pkt->off;
+ skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
+ off = 0;
if (total == len)
break;
- while (total < len && off < pkt->len) {
+ while (total < len && off < skb->len) {
bytes = len - total;
- if (bytes > pkt->len - off)
- bytes = pkt->len - off;
+ if (bytes > skb->len - off)
+ bytes = skb->len - off;
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, pkt->buf + off, bytes);
+ err = memcpy_to_msg(msg, skb->data + off, bytes);
if (err)
goto out;
@@ -352,37 +360,38 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
size_t len)
{
struct virtio_vsock_sock *vvs = vsk->trans;
- struct virtio_vsock_pkt *pkt;
size_t bytes, total = 0;
- u32 free_space;
+ struct sk_buff *skb;
int err = -EFAULT;
+ u32 free_space;
spin_lock_bh(&vvs->rx_lock);
- while (total < len && !list_empty(&vvs->rx_queue)) {
- pkt = list_first_entry(&vvs->rx_queue,
- struct virtio_vsock_pkt, list);
+ while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
+ skb = __skb_dequeue(&vvs->rx_queue);
bytes = len - total;
- if (bytes > pkt->len - pkt->off)
- bytes = pkt->len - pkt->off;
+ if (bytes > skb->len)
+ bytes = skb->len;
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
+ err = memcpy_to_msg(msg, skb->data, bytes);
if (err)
goto out;
spin_lock_bh(&vvs->rx_lock);
total += bytes;
- pkt->off += bytes;
- if (pkt->off == pkt->len) {
- virtio_transport_dec_rx_pkt(vvs, pkt);
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
+ skb_pull(skb, bytes);
+
+ if (skb->len == 0) {
+ virtio_transport_dec_rx_pkt(vvs, skb);
+ consume_skb(skb);
+ } else {
+ __skb_queue_head(&vvs->rx_queue, skb);
}
}
@@ -414,10 +423,10 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
int flags)
{
struct virtio_vsock_sock *vvs = vsk->trans;
- struct virtio_vsock_pkt *pkt;
int dequeued_len = 0;
size_t user_buf_len = msg_data_left(msg);
bool msg_ready = false;
+ struct sk_buff *skb;
spin_lock_bh(&vvs->rx_lock);
@@ -427,13 +436,18 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
}
while (!msg_ready) {
- pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
+ struct virtio_vsock_hdr *hdr;
+
+ skb = __skb_dequeue(&vvs->rx_queue);
+ if (!skb)
+ break;
+ hdr = virtio_vsock_hdr(skb);
if (dequeued_len >= 0) {
size_t pkt_len;
size_t bytes_to_copy;
- pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
+ pkt_len = (size_t)le32_to_cpu(hdr->len);
bytes_to_copy = min(user_buf_len, pkt_len);
if (bytes_to_copy) {
@@ -444,7 +458,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
+ err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
if (err) {
/* Copy of message failed. Rest of
* fragments will be freed without copy.
@@ -452,6 +466,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
dequeued_len = err;
} else {
user_buf_len -= bytes_to_copy;
+ skb_pull(skb, bytes_to_copy);
}
spin_lock_bh(&vvs->rx_lock);
@@ -461,17 +476,16 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
dequeued_len += pkt_len;
}
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
msg_ready = true;
vvs->msg_count--;
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
msg->msg_flags |= MSG_EOR;
}
- virtio_transport_dec_rx_pkt(vvs, pkt);
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
+ virtio_transport_dec_rx_pkt(vvs, skb);
+ kfree_skb(skb);
}
spin_unlock_bh(&vvs->rx_lock);
@@ -609,7 +623,7 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
spin_lock_init(&vvs->rx_lock);
spin_lock_init(&vvs->tx_lock);
- INIT_LIST_HEAD(&vvs->rx_queue);
+ skb_queue_head_init(&vvs->rx_queue);
return 0;
}
@@ -806,16 +820,16 @@ void virtio_transport_destruct(struct vsock_sock *vsk)
EXPORT_SYMBOL_GPL(virtio_transport_destruct);
static int virtio_transport_reset(struct vsock_sock *vsk,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
- .reply = !!pkt,
+ .reply = !!skb,
.vsk = vsk,
};
/* Send RST only if the original pkt is not a RST pkt */
- if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
return 0;
return virtio_transport_send_pkt_info(vsk, &info);
@@ -825,29 +839,30 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
* attempt was made to connect to a socket that does not exist.
*/
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
- struct virtio_vsock_pkt *reply;
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
- .type = le16_to_cpu(pkt->hdr.type),
+ .type = le16_to_cpu(hdr->type),
.reply = true,
};
+ struct sk_buff *reply;
/* Send RST only if the original pkt is not a RST pkt */
- if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
return 0;
- reply = virtio_transport_alloc_pkt(&info, 0,
- le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port),
- le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
+ reply = virtio_transport_alloc_skb(&info, 0,
+ le64_to_cpu(hdr->dst_cid),
+ le32_to_cpu(hdr->dst_port),
+ le64_to_cpu(hdr->src_cid),
+ le32_to_cpu(hdr->src_port));
if (!reply)
return -ENOMEM;
if (!t) {
- virtio_transport_free_pkt(reply);
+ kfree_skb(reply);
return -ENOTCONN;
}
@@ -858,16 +873,11 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
- struct virtio_vsock_pkt *pkt, *tmp;
/* We don't need to take rx_lock, as the socket is closing and we are
* removing it.
*/
- list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
-
+ __skb_queue_purge(&vvs->rx_queue);
vsock_remove_sock(vsk);
}
@@ -981,13 +991,14 @@ EXPORT_SYMBOL_GPL(virtio_transport_release);
static int
virtio_transport_recv_connecting(struct sock *sk,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
- int err;
int skerr;
+ int err;
- switch (le16_to_cpu(pkt->hdr.op)) {
+ switch (le16_to_cpu(hdr->op)) {
case VIRTIO_VSOCK_OP_RESPONSE:
sk->sk_state = TCP_ESTABLISHED;
sk->sk_socket->state = SS_CONNECTED;
@@ -1008,7 +1019,7 @@ virtio_transport_recv_connecting(struct sock *sk,
return 0;
destroy:
- virtio_transport_reset(vsk, pkt);
+ virtio_transport_reset(vsk, skb);
sk->sk_state = TCP_CLOSE;
sk->sk_err = skerr;
sk_error_report(sk);
@@ -1017,34 +1028,37 @@ destroy:
static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
struct virtio_vsock_sock *vvs = vsk->trans;
bool can_enqueue, free_pkt = false;
+ struct virtio_vsock_hdr *hdr;
+ u32 len;
- pkt->len = le32_to_cpu(pkt->hdr.len);
- pkt->off = 0;
+ hdr = virtio_vsock_hdr(skb);
+ len = le32_to_cpu(hdr->len);
spin_lock_bh(&vvs->rx_lock);
- can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
+ can_enqueue = virtio_transport_inc_rx_pkt(vvs, skb);
if (!can_enqueue) {
free_pkt = true;
goto out;
}
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
vvs->msg_count++;
/* Try to copy small packets into the buffer of last packet queued,
* to avoid wasting memory queueing the entire buffer with a small
* payload.
*/
- if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
- struct virtio_vsock_pkt *last_pkt;
+ if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
+ struct virtio_vsock_hdr *last_hdr;
+ struct sk_buff *last_skb;
- last_pkt = list_last_entry(&vvs->rx_queue,
- struct virtio_vsock_pkt, list);
+ last_skb = skb_peek_tail(&vvs->rx_queue);
+ last_hdr = virtio_vsock_hdr(last_skb);
/* If there is space in the last packet queued, we copy the
* new packet in its buffer. We avoid this if the last packet
@@ -1052,35 +1066,35 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
* delimiter of SEQPACKET message, so 'pkt' is the first packet
* of a new message.
*/
- if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
- !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)) {
- memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
- pkt->len);
- last_pkt->len += pkt->len;
+ if (skb->len < skb_tailroom(last_skb) &&
+ !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
+ memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
free_pkt = true;
- last_pkt->hdr.flags |= pkt->hdr.flags;
+ last_hdr->flags |= hdr->flags;
+ last_hdr->len = cpu_to_le32(last_skb->len);
goto out;
}
}
- list_add_tail(&pkt->list, &vvs->rx_queue);
+ __skb_queue_tail(&vvs->rx_queue, skb);
out:
spin_unlock_bh(&vvs->rx_lock);
if (free_pkt)
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
}
static int
virtio_transport_recv_connected(struct sock *sk,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
int err = 0;
- switch (le16_to_cpu(pkt->hdr.op)) {
+ switch (le16_to_cpu(hdr->op)) {
case VIRTIO_VSOCK_OP_RW:
- virtio_transport_recv_enqueue(vsk, pkt);
+ virtio_transport_recv_enqueue(vsk, skb);
vsock_data_ready(sk);
return err;
case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
@@ -1090,18 +1104,17 @@ virtio_transport_recv_connected(struct sock *sk,
sk->sk_write_space(sk);
break;
case VIRTIO_VSOCK_OP_SHUTDOWN:
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
vsk->peer_shutdown |= RCV_SHUTDOWN;
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
vsk->peer_shutdown |= SEND_SHUTDOWN;
if (vsk->peer_shutdown == SHUTDOWN_MASK &&
vsock_stream_has_data(vsk) <= 0 &&
!sock_flag(sk, SOCK_DONE)) {
(void)virtio_transport_reset(vsk, NULL);
-
virtio_transport_do_close(vsk, true);
}
- if (le32_to_cpu(pkt->hdr.flags))
+ if (le32_to_cpu(hdr->flags))
sk->sk_state_change(sk);
break;
case VIRTIO_VSOCK_OP_RST:
@@ -1112,28 +1125,30 @@ virtio_transport_recv_connected(struct sock *sk,
break;
}
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
return err;
}
static void
virtio_transport_recv_disconnecting(struct sock *sk,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
- if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
virtio_transport_do_close(vsk, true);
}
static int
virtio_transport_send_response(struct vsock_sock *vsk,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RESPONSE,
- .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
- .remote_port = le32_to_cpu(pkt->hdr.src_port),
+ .remote_cid = le64_to_cpu(hdr->src_cid),
+ .remote_port = le32_to_cpu(hdr->src_port),
.reply = true,
.vsk = vsk,
};
@@ -1142,8 +1157,9 @@ virtio_transport_send_response(struct vsock_sock *vsk,
}
static bool virtio_transport_space_update(struct sock *sk,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
struct virtio_vsock_sock *vvs = vsk->trans;
bool space_available;
@@ -1158,8 +1174,8 @@ static bool virtio_transport_space_update(struct sock *sk,
/* buf_alloc and fwd_cnt is always included in the hdr */
spin_lock_bh(&vvs->tx_lock);
- vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
- vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
+ vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
+ vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
space_available = virtio_transport_has_space(vsk);
spin_unlock_bh(&vvs->tx_lock);
return space_available;
@@ -1167,27 +1183,28 @@ static bool virtio_transport_space_update(struct sock *sk,
/* Handle server socket */
static int
-virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
+virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
struct virtio_transport *t)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
struct vsock_sock *vchild;
struct sock *child;
int ret;
- if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
- virtio_transport_reset_no_sock(t, pkt);
+ if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
+ virtio_transport_reset_no_sock(t, skb);
return -EINVAL;
}
if (sk_acceptq_is_full(sk)) {
- virtio_transport_reset_no_sock(t, pkt);
+ virtio_transport_reset_no_sock(t, skb);
return -ENOMEM;
}
child = vsock_create_connected(sk);
if (!child) {
- virtio_transport_reset_no_sock(t, pkt);
+ virtio_transport_reset_no_sock(t, skb);
return -ENOMEM;
}
@@ -1198,10 +1215,10 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
child->sk_state = TCP_ESTABLISHED;
vchild = vsock_sk(child);
- vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port));
- vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
+ vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
+ le32_to_cpu(hdr->dst_port));
+ vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
+ le32_to_cpu(hdr->src_port));
ret = vsock_assign_transport(vchild, vsk);
/* Transport assigned (looking at remote_addr) must be the same
@@ -1209,17 +1226,17 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
*/
if (ret || vchild->transport != &t->transport) {
release_sock(child);
- virtio_transport_reset_no_sock(t, pkt);
+ virtio_transport_reset_no_sock(t, skb);
sock_put(child);
return ret;
}
- if (virtio_transport_space_update(child, pkt))
+ if (virtio_transport_space_update(child, skb))
child->sk_write_space(child);
vsock_insert_connected(vchild);
vsock_enqueue_accept(sk, child);
- virtio_transport_send_response(vchild, pkt);
+ virtio_transport_send_response(vchild, skb);
release_sock(child);
@@ -1237,29 +1254,30 @@ static bool virtio_transport_valid_type(u16 type)
* lock.
*/
void virtio_transport_recv_pkt(struct virtio_transport *t,
- struct virtio_vsock_pkt *pkt)
+ struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct sockaddr_vm src, dst;
struct vsock_sock *vsk;
struct sock *sk;
bool space_available;
- vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
- vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port));
+ vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
+ le32_to_cpu(hdr->src_port));
+ vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
+ le32_to_cpu(hdr->dst_port));
trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
dst.svm_cid, dst.svm_port,
- le32_to_cpu(pkt->hdr.len),
- le16_to_cpu(pkt->hdr.type),
- le16_to_cpu(pkt->hdr.op),
- le32_to_cpu(pkt->hdr.flags),
- le32_to_cpu(pkt->hdr.buf_alloc),
- le32_to_cpu(pkt->hdr.fwd_cnt));
-
- if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
- (void)virtio_transport_reset_no_sock(t, pkt);
+ le32_to_cpu(hdr->len),
+ le16_to_cpu(hdr->type),
+ le16_to_cpu(hdr->op),
+ le32_to_cpu(hdr->flags),
+ le32_to_cpu(hdr->buf_alloc),
+ le32_to_cpu(hdr->fwd_cnt));
+
+ if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
+ (void)virtio_transport_reset_no_sock(t, skb);
goto free_pkt;
}
@@ -1270,13 +1288,13 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
if (!sk) {
sk = vsock_find_bound_socket(&dst);
if (!sk) {
- (void)virtio_transport_reset_no_sock(t, pkt);
+ (void)virtio_transport_reset_no_sock(t, skb);
goto free_pkt;
}
}
- if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
- (void)virtio_transport_reset_no_sock(t, pkt);
+ if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
+ (void)virtio_transport_reset_no_sock(t, skb);
sock_put(sk);
goto free_pkt;
}
@@ -1287,13 +1305,13 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
/* Check if sk has been closed before lock_sock */
if (sock_flag(sk, SOCK_DONE)) {
- (void)virtio_transport_reset_no_sock(t, pkt);
+ (void)virtio_transport_reset_no_sock(t, skb);
release_sock(sk);
sock_put(sk);
goto free_pkt;
}
- space_available = virtio_transport_space_update(sk, pkt);
+ space_available = virtio_transport_space_update(sk, skb);
/* Update CID in case it has changed after a transport reset event */
if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
@@ -1304,23 +1322,23 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
switch (sk->sk_state) {
case TCP_LISTEN:
- virtio_transport_recv_listen(sk, pkt, t);
- virtio_transport_free_pkt(pkt);
+ virtio_transport_recv_listen(sk, skb, t);
+ kfree_skb(skb);
break;
case TCP_SYN_SENT:
- virtio_transport_recv_connecting(sk, pkt);
- virtio_transport_free_pkt(pkt);
+ virtio_transport_recv_connecting(sk, skb);
+ kfree_skb(skb);
break;
case TCP_ESTABLISHED:
- virtio_transport_recv_connected(sk, pkt);
+ virtio_transport_recv_connected(sk, skb);
break;
case TCP_CLOSING:
- virtio_transport_recv_disconnecting(sk, pkt);
- virtio_transport_free_pkt(pkt);
+ virtio_transport_recv_disconnecting(sk, skb);
+ kfree_skb(skb);
break;
default:
- (void)virtio_transport_reset_no_sock(t, pkt);
- virtio_transport_free_pkt(pkt);
+ (void)virtio_transport_reset_no_sock(t, skb);
+ kfree_skb(skb);
break;
}
@@ -1333,16 +1351,42 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
return;
free_pkt:
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
-void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
+/* Remove and free the skbs in a queue whose owning socket matches @vsk.
+ *
+ * Returns the number of freed skbs that were reply packets.
+ */
+int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
{
- kvfree(pkt->buf);
- kfree(pkt);
+ struct sk_buff_head freeme;
+ struct sk_buff *skb, *tmp;
+ int cnt = 0;
+
+ skb_queue_head_init(&freeme);
+
+ spin_lock_bh(&queue->lock);
+ skb_queue_walk_safe(queue, skb, tmp) {
+ if (vsock_sk(skb->sk) != vsk)
+ continue;
+
+ __skb_unlink(skb, queue);
+ __skb_queue_tail(&freeme, skb);
+
+ if (virtio_vsock_skb_reply(skb))
+ cnt++;
+ }
+ spin_unlock_bh(&queue->lock);
+
+ __skb_queue_purge(&freeme);
+
+ return cnt;
}
-EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
+EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
index 169a8cf65b39..671e03240fc5 100644
--- a/net/vmw_vsock/vsock_loopback.c
+++ b/net/vmw_vsock/vsock_loopback.c
@@ -16,7 +16,7 @@ struct vsock_loopback {
struct workqueue_struct *workqueue;
spinlock_t pkt_list_lock; /* protects pkt_list */
- struct list_head pkt_list;
+ struct sk_buff_head pkt_queue;
struct work_struct pkt_work;
};
@@ -27,13 +27,13 @@ static u32 vsock_loopback_get_local_cid(void)
return VMADDR_CID_LOCAL;
}
-static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
+static int vsock_loopback_send_pkt(struct sk_buff *skb)
{
struct vsock_loopback *vsock = &the_vsock_loopback;
- int len = pkt->len;
+ int len = skb->len;
spin_lock_bh(&vsock->pkt_list_lock);
- list_add_tail(&pkt->list, &vsock->pkt_list);
+ skb_queue_tail(&vsock->pkt_queue, skb);
spin_unlock_bh(&vsock->pkt_list_lock);
queue_work(vsock->workqueue, &vsock->pkt_work);
@@ -44,21 +44,8 @@ static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
static int vsock_loopback_cancel_pkt(struct vsock_sock *vsk)
{
struct vsock_loopback *vsock = &the_vsock_loopback;
- struct virtio_vsock_pkt *pkt, *n;
- LIST_HEAD(freeme);
- spin_lock_bh(&vsock->pkt_list_lock);
- list_for_each_entry_safe(pkt, n, &vsock->pkt_list, list) {
- if (pkt->vsk != vsk)
- continue;
- list_move(&pkt->list, &freeme);
- }
- spin_unlock_bh(&vsock->pkt_list_lock);
-
- list_for_each_entry_safe(pkt, n, &freeme, list) {
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
+ virtio_transport_purge_skbs(vsk, &vsock->pkt_queue);
return 0;
}
@@ -121,20 +108,18 @@ static void vsock_loopback_work(struct work_struct *work)
{
struct vsock_loopback *vsock =
container_of(work, struct vsock_loopback, pkt_work);
- LIST_HEAD(pkts);
+ struct sk_buff_head pkts;
+ struct sk_buff *skb;
+
+ skb_queue_head_init(&pkts);
spin_lock_bh(&vsock->pkt_list_lock);
- list_splice_init(&vsock->pkt_list, &pkts);
+ skb_queue_splice_init(&vsock->pkt_queue, &pkts);
spin_unlock_bh(&vsock->pkt_list_lock);
- while (!list_empty(&pkts)) {
- struct virtio_vsock_pkt *pkt;
-
- pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
-
- virtio_transport_deliver_tap_pkt(pkt);
- virtio_transport_recv_pkt(&loopback_transport, pkt);
+ while ((skb = __skb_dequeue(&pkts))) {
+ virtio_transport_deliver_tap_pkt(skb);
+ virtio_transport_recv_pkt(&loopback_transport, skb);
}
}
@@ -148,7 +133,7 @@ static int __init vsock_loopback_init(void)
return -ENOMEM;
spin_lock_init(&vsock->pkt_list_lock);
- INIT_LIST_HEAD(&vsock->pkt_list);
+ skb_queue_head_init(&vsock->pkt_queue);
INIT_WORK(&vsock->pkt_work, vsock_loopback_work);
ret = vsock_core_register(&loopback_transport.transport,
@@ -166,19 +151,13 @@ out_wq:
static void __exit vsock_loopback_exit(void)
{
struct vsock_loopback *vsock = &the_vsock_loopback;
- struct virtio_vsock_pkt *pkt;
vsock_core_unregister(&loopback_transport.transport);
flush_work(&vsock->pkt_work);
spin_lock_bh(&vsock->pkt_list_lock);
- while (!list_empty(&vsock->pkt_list)) {
- pkt = list_first_entry(&vsock->pkt_list,
- struct virtio_vsock_pkt, list);
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
+ virtio_vsock_skb_queue_purge(&vsock->pkt_queue);
spin_unlock_bh(&vsock->pkt_list_lock);
destroy_workqueue(vsock->workqueue);
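
vsock_loopback_work() above uses the standard drain idiom: splice the shared queue into a local one while holding the lock, then process entries with no lock held. A bare sketch of just that idiom, assuming <linux/skbuff.h>; deliver() is a stand-in for the tap and recv calls:

#include <linux/skbuff.h>

static void toy_drain(struct sk_buff_head *shared,
		      void (*deliver)(struct sk_buff *skb))
{
	struct sk_buff_head pkts;
	struct sk_buff *skb;

	skb_queue_head_init(&pkts);

	spin_lock_bh(&shared->lock);
	skb_queue_splice_init(shared, &pkts);	/* O(1); leaves 'shared' empty */
	spin_unlock_bh(&shared->lock);

	while ((skb = __skb_dequeue(&pkts)))	/* no lock: queue is private */
		deliver(skb);
}
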
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index e68923200018..0962770303b2 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -39,7 +39,7 @@ static int ___cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
wdev->u.ap.ssid_len = 0;
rdev_set_qos_map(rdev, dev, NULL);
if (notify)
- nl80211_send_ap_stopped(wdev);
+ nl80211_send_ap_stopped(wdev, link_id);
/* Should we apply the grace period during beaconing interface
* shutdown also?
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 0e5835cd8c61..0b7e81db383d 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -1460,3 +1460,72 @@ struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
}
}
EXPORT_SYMBOL(wdev_chandef);
+
+struct cfg80211_per_bw_puncturing_values {
+ u8 len;
+ const u16 *valid_values;
+};
+
+static const u16 puncturing_values_80mhz[] = {
+ 0x8, 0x4, 0x2, 0x1
+};
+
+static const u16 puncturing_values_160mhz[] = {
+ 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
+};
+
+static const u16 puncturing_values_320mhz[] = {
+ 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
+ 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
+ 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
+};
+
+#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
+ { \
+ .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
+ .valid_values = puncturing_values_ ## _bw ## mhz \
+ }
+
+static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
+};
+
+bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
+ const struct cfg80211_chan_def *chandef)
+{
+ u32 idx, i, start_freq;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_80:
+ idx = 0;
+ start_freq = chandef->center_freq1 - 40;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ idx = 1;
+ start_freq = chandef->center_freq1 - 80;
+ break;
+ case NL80211_CHAN_WIDTH_320:
+ idx = 2;
+ start_freq = chandef->center_freq1 - 160;
+ break;
+ default:
+ *bitmap = 0;
+ break;
+ }
+
+ if (!*bitmap)
+ return true;
+
+ /* check if primary channel is punctured */
+ if (*bitmap & (u16)BIT((chandef->chan->center_freq - start_freq) / 20))
+ return false;
+
+ for (i = 0; i < per_bw_puncturing[idx].len; i++)
+ if (per_bw_puncturing[idx].valid_values[i] == *bitmap)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(cfg80211_valid_disable_subchannel_bitmap);
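
In cfg80211_valid_disable_subchannel_bitmap() above, each set bit punctures one 20 MHz subchannel, indexed from the low edge of the wide channel, and the primary channel must never be punctured. A standalone userspace model of just the primary-channel check, hardcoded to 80 MHz (frequencies in MHz, values illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool primary_is_punctured(uint16_t bitmap, int center_freq1,
				 int primary_freq)
{
	int start_freq = center_freq1 - 40;	/* low edge of an 80 MHz channel */
	int bit = (primary_freq - start_freq) / 20;

	return bitmap & (1u << bit);
}

int main(void)
{
	/* 80 MHz centered at 5210 MHz: low edge 5170, primary 5180 -> bit 0.
	 * 0x1 punctures the primary (invalid); 0x8 punctures subchannel 3.
	 */
	printf("0x1 -> %s\n", primary_is_punctured(0x1, 5210, 5180) ? "invalid" : "ok");
	printf("0x8 -> %s\n", primary_is_punctured(0x8, 5210, 5180) ? "invalid" : "ok");
	return 0;
}

For an 80 MHz channel centered at 5210 MHz the low edge is 5170 MHz, so a primary at 5180 MHz lands in subchannel 0; 0x1 is rejected, while 0x8, which punctures the topmost subchannel, passes this check and also appears in puncturing_values_80mhz above.
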
diff --git a/net/wireless/core.h b/net/wireless/core.h
index af85d8909935..7c61752f6d83 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -278,8 +278,8 @@ struct cfg80211_event {
};
struct cfg80211_cached_keys {
- struct key_params params[CFG80211_MAX_WEP_KEYS];
- u8 data[CFG80211_MAX_WEP_KEYS][WLAN_KEY_LEN_WEP104];
+ struct key_params params[4];
+ u8 data[4][WLAN_KEY_LEN_WEP104];
int def;
};
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index edd062f104f4..e6fdb0b8187d 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -45,8 +45,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
cfg80211_hold_bss(bss_from_pub(bss));
wdev->u.ibss.current_bss = bss_from_pub(bss);
- if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
- cfg80211_upload_connect_keys(wdev);
+ cfg80211_upload_connect_keys(wdev);
nl80211_send_ibss_bssid(wiphy_to_rdev(wdev->wiphy), dev, bssid,
GFP_KERNEL);
@@ -294,7 +293,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
- for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++)
+ for (i = 0; i < 4; i++)
ck->params[i].key = ck->data[i];
}
err = __cfg80211_join_ibss(rdev, wdev->netdev,
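
The loop above must re-point ck->params[i].key at the copy's own data[i] after kmemdup(), because a byte-for-byte copy of the struct leaves those pointers aimed at the original's storage. A userspace model of that fixup with simplified types (key lengths, flags, and the rest of key_params omitted):

#include <stdlib.h>
#include <string.h>

#define WEP104_LEN 13

struct cached_keys {
	struct { unsigned char *key; } params[4];
	unsigned char data[4][WEP104_LEN];
};

static struct cached_keys *dup_cached_keys(const struct cached_keys *src)
{
	struct cached_keys *ck = malloc(sizeof(*ck));
	int i;

	if (!ck)
		return NULL;
	memcpy(ck, src, sizeof(*ck));			/* kmemdup() equivalent */
	for (i = 0; i < 4; i++)
		ck->params[i].key = ck->data[i];	/* re-point into the copy */
	return ck;
}
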
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 58e1fb18f85a..81d3f40d6235 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -742,7 +742,10 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP_VLAN:
- if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
+ if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)) &&
+ (params->link_id < 0 ||
+ !ether_addr_equal(mgmt->bssid,
+ wdev->links[params->link_id].addr)))
err = -EINVAL;
break;
case NL80211_IFTYPE_MESH_POINT:
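
The mlme.c hunk above widens the AP-side sanity check on outgoing management frames: under MLO a frame may legitimately carry a per-link BSSID rather than the wdev address. A userspace sketch of the accept condition, with flat parameters standing in for the wdev and link structures:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* Accept if the BSSID is the interface address, or the address of the
 * link that was explicitly selected (link_id >= 0 means "selected").
 */
static bool mgmt_bssid_ok(const unsigned char bssid[ETH_ALEN],
			  const unsigned char wdev_addr[ETH_ALEN],
			  const unsigned char link_addr[ETH_ALEN],
			  int link_id)
{
	if (memcmp(bssid, wdev_addr, ETH_ALEN) == 0)
		return true;
	return link_id >= 0 && memcmp(bssid, link_addr, ETH_ALEN) == 0;
}
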
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 33a82ecab9d5..112b4bb009c8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -805,6 +805,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
[NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
+ [NL80211_ATTR_PUNCT_BITMAP] = NLA_POLICY_RANGE(NLA_U8, 0, 0xffff),
};
/* policy for the key attributes */
@@ -883,7 +884,7 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
},
[NL80211_REKEY_DATA_KCK] = {
.type = NLA_BINARY,
- .len = NL80211_KCK_EXT_LEN
+ .len = NL80211_KCK_EXT_LEN_32
},
[NL80211_REKEY_DATA_REPLAY_CTR] = NLA_POLICY_EXACT_LEN(NL80211_REPLAY_CTR_LEN),
[NL80211_REKEY_DATA_AKM] = { .type = NLA_U32 },
@@ -1548,10 +1549,14 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
if (wdev->connected)
return 0;
return -ENOLINK;
+ case NL80211_IFTYPE_NAN:
+ if (wiphy_ext_feature_isset(wdev->wiphy,
+ NL80211_EXT_FEATURE_SECURE_NAN))
+ return 0;
+ return -EINVAL;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_NAN:
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_WDS:
case NUM_NL80211_IFTYPES:
@@ -3173,6 +3178,21 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
+static int nl80211_parse_punct_bitmap(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ const struct cfg80211_chan_def *chandef,
+ u16 *punct_bitmap)
+{
+ if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_PUNCT))
+ return -EINVAL;
+
+ *punct_bitmap = nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
+ if (!cfg80211_valid_disable_subchannel_bitmap(punct_bitmap, chandef))
+ return -EINVAL;
+
+ return 0;
+}
+
int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
struct genl_info *info,
struct cfg80211_chan_def *chandef)
@@ -3181,8 +3201,11 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
struct nlattr **attrs = info->attrs;
u32 control_freq;
- if (!attrs[NL80211_ATTR_WIPHY_FREQ])
+ if (!attrs[NL80211_ATTR_WIPHY_FREQ]) {
+ NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ],
+ "Frequency is missing");
return -EINVAL;
+ }
control_freq = MHZ_TO_KHZ(
nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
@@ -5770,6 +5793,42 @@ static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
}
}
+static void nl80211_send_ap_started(struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_START_AP);
+ if (!hdr)
+ goto out;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+ NL80211_ATTR_PAD) ||
+ (wdev->u.ap.ssid_len &&
+ nla_put(msg, NL80211_ATTR_SSID, wdev->u.ap.ssid_len,
+ wdev->u.ap.ssid)) ||
+ (wdev->valid_links &&
+ nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)))
+ goto out;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy), msg, 0,
+ NL80211_MCGRP_MLME, GFP_KERNEL);
+ return;
+out:
+ nlmsg_free(msg);
+}
+
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -5918,6 +5977,14 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
+ err = nl80211_parse_punct_bitmap(rdev, info,
+ &params->chandef,
+ &params->punct_bitmap);
+ if (err)
+ goto out;
+ }
+
if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef,
wdev->iftype)) {
err = -EINVAL;
@@ -6050,6 +6117,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_SOCKET_OWNER])
wdev->conn_owner_nlportid = info->snd_portid;
+
+ nl80211_send_ap_started(wdev, link_id);
}
out_unlock:
wdev_unlock(wdev);
@@ -6527,6 +6596,22 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
sinfo->assoc_req_ies))
goto nla_put_failure;
+ if (sinfo->assoc_resp_ies_len &&
+ nla_put(msg, NL80211_ATTR_RESP_IE, sinfo->assoc_resp_ies_len,
+ sinfo->assoc_resp_ies))
+ goto nla_put_failure;
+
+ if (sinfo->mlo_params_valid) {
+ if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID,
+ sinfo->assoc_link_id))
+ goto nla_put_failure;
+
+ if (!is_zero_ether_addr(sinfo->mld_addr) &&
+ nla_put(msg, NL80211_ATTR_MLD_ADDR, ETH_ALEN,
+ sinfo->mld_addr))
+ goto nla_put_failure;
+ }
+
cfg80211_sinfo_release_content(sinfo);
genlmsg_end(msg, hdr);
return 0;
@@ -10057,6 +10142,14 @@ skip_beacons:
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
+ if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
+ err = nl80211_parse_punct_bitmap(rdev, info,
+ &params.chandef,
+ &params.punct_bitmap);
+ if (err)
+ goto free;
+ }
+
wdev_lock(wdev);
err = rdev_channel_switch(rdev, dev, &params);
wdev_unlock(wdev);
@@ -12253,6 +12346,10 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_NAN:
+ if (!wiphy_ext_feature_isset(wdev->wiphy,
+ NL80211_EXT_FEATURE_SECURE_NAN))
+ return -EOPNOTSUPP;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -12310,6 +12407,10 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_P2P_GO:
break;
case NL80211_IFTYPE_NAN:
+ if (!wiphy_ext_feature_isset(wdev->wiphy,
+ NL80211_EXT_FEATURE_SECURE_NAN))
+ return -EOPNOTSUPP;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -12447,6 +12548,10 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_NAN:
+ if (!wiphy_ext_feature_isset(wdev->wiphy,
+ NL80211_EXT_FEATURE_SECURE_NAN))
+ return -EOPNOTSUPP;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -13809,7 +13914,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
return -ERANGE;
if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN &&
!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK &&
- nla_len(tb[NL80211_REKEY_DATA_KEK]) == NL80211_KCK_EXT_LEN))
+ nla_len(tb[NL80211_REKEY_DATA_KCK]) == NL80211_KCK_EXT_LEN) &&
+ !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KCK_32 &&
+ nla_len(tb[NL80211_REKEY_DATA_KCK]) == NL80211_KCK_EXT_LEN_32))
return -ERANGE;
rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);
@@ -18954,7 +19061,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef,
gfp_t gfp,
enum nl80211_commands notif,
- u8 count, bool quiet)
+ u8 count, bool quiet, u16 punct_bitmap)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct sk_buff *msg;
@@ -18988,6 +19095,9 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
}
+ if (nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, punct_bitmap))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -19000,7 +19110,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id)
+ unsigned int link_id, u16 punct_bitmap)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19009,7 +19119,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
ASSERT_WDEV_LOCK(wdev);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_notify(dev, chandef, link_id);
+ trace_cfg80211_ch_switch_notify(dev, chandef, link_id, punct_bitmap);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -19037,14 +19147,15 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
cfg80211_sched_dfs_chan_update(rdev);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
- NL80211_CMD_CH_SWITCH_NOTIFY, 0, false);
+ NL80211_CMD_CH_SWITCH_NOTIFY, 0, false,
+ punct_bitmap);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet)
+ bool quiet, u16 punct_bitmap)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19053,15 +19164,17 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
ASSERT_WDEV_LOCK(wdev);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id);
+ trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id,
+ punct_bitmap);
+
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
- count, quiet);
+ count, quiet, punct_bitmap);
}
EXPORT_SYMBOL(cfg80211_ch_switch_started_notify);
-int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
+int cfg80211_bss_color_notify(struct net_device *dev,
enum nl80211_commands cmd, u8 count,
u64 color_bitmap)
{
@@ -19075,7 +19188,7 @@ int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
trace_cfg80211_bss_color_notify(dev, cmd, count, color_bitmap);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -19098,7 +19211,7 @@ int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
genlmsg_end(msg, hdr);
return genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
- msg, 0, NL80211_MCGRP_MLME, gfp);
+ msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL);
nla_put_failure:
nlmsg_free(msg);
@@ -19661,7 +19774,7 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
}
EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
-void nl80211_send_ap_stopped(struct wireless_dev *wdev)
+void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id)
{
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -19679,7 +19792,9 @@ void nl80211_send_ap_stopped(struct wireless_dev *wdev)
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
- NL80211_ATTR_PAD))
+ NL80211_ATTR_PAD) ||
+ (wdev->valid_links &&
+ nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)))
goto out;
genlmsg_end(msg, hdr);
@@ -19718,7 +19833,9 @@ int cfg80211_external_auth_request(struct net_device *dev,
params->action) ||
nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) ||
nla_put(msg, NL80211_ATTR_SSID, params->ssid.ssid_len,
- params->ssid.ssid))
+ params->ssid.ssid) ||
+ (!is_zero_ether_addr(params->mld_addr) &&
+ nla_put(msg, NL80211_ATTR_MLD_ADDR, ETH_ALEN, params->mld_addr)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -19760,6 +19877,17 @@ void cfg80211_update_owe_info_event(struct net_device *netdev,
nla_put(msg, NL80211_ATTR_IE, owe_info->ie_len, owe_info->ie))
goto nla_put_failure;
+ if (owe_info->assoc_link_id != -1) {
+ if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID,
+ owe_info->assoc_link_id))
+ goto nla_put_failure;
+
+ if (!is_zero_ether_addr(owe_info->peer_mld_addr) &&
+ nla_put(msg, NL80211_ATTR_MLD_ADDR, ETH_ALEN,
+ owe_info->peer_mld_addr))
+ goto nla_put_failure;
+ }
+
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
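
nl80211_send_ap_started() and the other notifiers touched above all follow the same genetlink shape: allocate a message, put the header, put attributes (bailing out to a single free label on any failure), finalize, multicast. A stripped-down kernel-side sketch of that shape; the family and the numeric command/attribute values are placeholders, not real nl80211 definitions:

#include <net/genetlink.h>

extern struct genl_family toy_fam;	/* placeholder family */

static void toy_notify_started(u32 ifindex)
{
	struct sk_buff *msg;
	void *hdr;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return;

	hdr = genlmsg_put(msg, 0, 0, &toy_fam, 0, 1 /* placeholder cmd */);
	if (!hdr)
		goto out;

	if (nla_put_u32(msg, 1 /* placeholder attr */, ifindex))
		goto out;		/* failed put: free the whole message */

	genlmsg_end(msg, hdr);
	genlmsg_multicast(&toy_fam, msg, 0, 0, GFP_KERNEL);	/* consumes msg */
	return;
out:
	nlmsg_free(msg);
}
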
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index ba9457e94c43..0278d817bb02 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -114,7 +114,7 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
enum nl80211_radar_event event,
struct net_device *netdev, gfp_t gfp);
-void nl80211_send_ap_stopped(struct wireless_dev *wdev);
+void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id);
void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4f3f31244e8b..0d40d6af7e10 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -737,51 +737,9 @@ static bool valid_country(const u8 *data, unsigned int size,
}
#ifdef CONFIG_CFG80211_REQUIRE_SIGNED_REGDB
-static struct key *builtin_regdb_keys;
-
-static void __init load_keys_from_buffer(const u8 *p, unsigned int buflen)
-{
- const u8 *end = p + buflen;
- size_t plen;
- key_ref_t key;
+#include <keys/asymmetric-type.h>
- while (p < end) {
- /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
- * than 256 bytes in size.
- */
- if (end - p < 4)
- goto dodgy_cert;
- if (p[0] != 0x30 &&
- p[1] != 0x82)
- goto dodgy_cert;
- plen = (p[2] << 8) | p[3];
- plen += 4;
- if (plen > end - p)
- goto dodgy_cert;
-
- key = key_create_or_update(make_key_ref(builtin_regdb_keys, 1),
- "asymmetric", NULL, p, plen,
- ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW | KEY_USR_READ),
- KEY_ALLOC_NOT_IN_QUOTA |
- KEY_ALLOC_BUILT_IN |
- KEY_ALLOC_BYPASS_RESTRICTION);
- if (IS_ERR(key)) {
- pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
- PTR_ERR(key));
- } else {
- pr_notice("Loaded X.509 cert '%s'\n",
- key_ref_to_ptr(key)->description);
- key_ref_put(key);
- }
- p += plen;
- }
-
- return;
-
-dodgy_cert:
- pr_err("Problem parsing in-kernel X.509 certificate list\n");
-}
+static struct key *builtin_regdb_keys;
static int __init load_builtin_regdb_keys(void)
{
@@ -797,11 +755,15 @@ static int __init load_builtin_regdb_keys(void)
pr_notice("Loading compiled-in X.509 certificates for regulatory database\n");
#ifdef CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS
- load_keys_from_buffer(shipped_regdb_certs, shipped_regdb_certs_len);
+ x509_load_certificate_list(shipped_regdb_certs,
+ shipped_regdb_certs_len,
+ builtin_regdb_keys);
#endif
#ifdef CONFIG_CFG80211_EXTRA_REGDB_KEYDIR
if (CONFIG_CFG80211_EXTRA_REGDB_KEYDIR[0] != '\0')
- load_keys_from_buffer(extra_regdb_certs, extra_regdb_certs_len);
+ x509_load_certificate_list(extra_regdb_certs,
+ extra_regdb_certs_len,
+ builtin_regdb_keys);
#endif
return 0;
@@ -3198,6 +3160,9 @@ static void reg_process_self_managed_hint(struct wiphy *wiphy)
request.alpha2[1] = regd->alpha2[1];
request.initiator = NL80211_REGDOM_SET_BY_DRIVER;
+ if (wiphy->flags & WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER)
+ reg_call_notifier(wiphy, &request);
+
nl80211_send_wiphy_reg_change_event(&request);
}
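
The deleted load_keys_from_buffer() above walked a blob of concatenated DER certificates by decoding each SEQUENCE header's two-byte length field, a job now done by the shared x509_load_certificate_list(). A loose userspace model of that length decoding (real DER permits other length encodings, which neither the old code nor this sketch handles):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t der_cert_len(const uint8_t *p, size_t avail)
{
	if (avail < 4 || p[0] != 0x30 || p[1] != 0x82)
		return 0;			/* not a two-byte-length SEQUENCE */
	return (((size_t)p[2] << 8) | p[3]) + 4;	/* payload plus 4 header bytes */
}

int main(void)
{
	const uint8_t hdr[] = { 0x30, 0x82, 0x04, 0xa6 };

	printf("this certificate spans %zu bytes\n", der_cert_len(hdr, sizeof(hdr)));
	return 0;
}
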
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 4b5b6ee0fe01..28ce13840a88 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -285,6 +285,15 @@ void cfg80211_conn_work(struct work_struct *work)
wiphy_unlock(&rdev->wiphy);
}
+static void cfg80211_step_auth_next(struct cfg80211_conn *conn,
+ struct cfg80211_bss *bss)
+{
+ memcpy(conn->bssid, bss->bssid, ETH_ALEN);
+ conn->params.bssid = conn->bssid;
+ conn->params.channel = bss->channel;
+ conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+}
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
{
@@ -302,10 +311,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
if (!bss)
return NULL;
- memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN);
- wdev->conn->params.bssid = wdev->conn->bssid;
- wdev->conn->params.channel = bss->channel;
- wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+ cfg80211_step_auth_next(wdev->conn, bss);
schedule_work(&rdev->conn_work);
return bss;
@@ -597,7 +603,12 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
wdev->conn->params.ssid_len = wdev->u.client.ssid_len;
/* see if we have the bss already */
- bss = cfg80211_get_conn_bss(wdev);
+ bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
+ wdev->conn->params.bssid,
+ wdev->conn->params.ssid,
+ wdev->conn->params.ssid_len,
+ wdev->conn_bss_type,
+ IEEE80211_PRIVACY(wdev->conn->params.privacy));
if (prev_bssid) {
memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
@@ -608,6 +619,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
if (bss) {
enum nl80211_timeout_reason treason;
+ cfg80211_step_auth_next(wdev->conn, bss);
err = cfg80211_conn_do_work(wdev, &treason);
cfg80211_put_bss(wdev->wiphy, bss);
} else {
@@ -724,6 +736,7 @@ void __cfg80211_connect_result(struct net_device *dev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const struct element *country_elem = NULL;
+ const struct element *ssid;
const u8 *country_data;
u8 country_datalen;
#ifdef CONFIG_CFG80211_WEXT
@@ -855,8 +868,7 @@ void __cfg80211_connect_result(struct net_device *dev,
ETH_ALEN);
}
- if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
- cfg80211_upload_connect_keys(wdev);
+ cfg80211_upload_connect_keys(wdev);
rcu_read_lock();
for_each_valid_link(cr, link) {
@@ -883,6 +895,22 @@ void __cfg80211_connect_result(struct net_device *dev,
country_data, country_datalen);
kfree(country_data);
+ if (!wdev->u.client.ssid_len) {
+ rcu_read_lock();
+ for_each_valid_link(cr, link) {
+ ssid = ieee80211_bss_get_elem(cr->links[link].bss,
+ WLAN_EID_SSID);
+
+ if (!ssid || !ssid->datalen)
+ continue;
+
+ memcpy(wdev->u.client.ssid, ssid->data, ssid->datalen);
+ wdev->u.client.ssid_len = ssid->datalen;
+ break;
+ }
+ rcu_read_unlock();
+ }
+
return;
out:
for_each_valid_link(cr, link)
@@ -1462,12 +1490,18 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
connect->crypto.ciphers_pairwise[0] = cipher;
}
}
-
- connect->crypto.wep_keys = connkeys->params;
- connect->crypto.wep_tx_key = connkeys->def;
} else {
if (WARN_ON(connkeys))
return -EINVAL;
+
+ /* connect can point to wdev->wext.connect which
+ * can hold key data from a previous connection
+ */
+ connect->key = NULL;
+ connect->key_len = 0;
+ connect->key_idx = 0;
+ connect->crypto.cipher_group = 0;
+ connect->crypto.n_ciphers_pairwise = 0;
}
wdev->connect_keys = connkeys;
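
The trace.h hunks below can drop the local MAC_PR_FMT/MAC_PR_ARG pair because the kernel's vsprintf already formats a six-byte MAC address via the %pM extension. A kernel-only sketch (this printf extension has no userspace equivalent):

#include <linux/printk.h>
#include <linux/types.h>

static void toy_show_peer(const u8 *mac)
{
	pr_info("peer: %pM\n", mac);	/* prints aa:bb:cc:dd:ee:ff */
}
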
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index a405c3edbc47..ca7474eec723 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -19,8 +19,6 @@
else \
eth_zero_addr(__entry->entry_mac); \
} while (0)
-#define MAC_PR_FMT "%pM"
-#define MAC_PR_ARG(entry_mac) (__entry->entry_mac)
#define MAXNAME 32
#define WIPHY_ENTRY __array(char, wiphy_name, 32)
@@ -454,10 +452,10 @@ DECLARE_EVENT_CLASS(key_handle,
__entry->pairwise = pairwise;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, "
- "key_index: %u, pairwise: %s, mac addr: " MAC_PR_FMT,
+ "key_index: %u, pairwise: %s, mac addr: %pM",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id,
__entry->key_index, BOOL_TO_STR(__entry->pairwise),
- MAC_PR_ARG(mac_addr))
+ __entry->mac_addr)
);
DEFINE_EVENT(key_handle, rdev_get_key,
@@ -496,10 +494,10 @@ TRACE_EVENT(rdev_add_key,
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, "
"key_index: %u, mode: %u, pairwise: %s, "
- "mac addr: " MAC_PR_FMT,
+ "mac addr: %pM",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id,
__entry->key_index, __entry->mode,
- BOOL_TO_STR(__entry->pairwise), MAC_PR_ARG(mac_addr))
+ BOOL_TO_STR(__entry->pairwise), __entry->mac_addr)
);
TRACE_EVENT(rdev_set_default_key,
@@ -813,11 +811,11 @@ DECLARE_EVENT_CLASS(station_add_change,
__entry->opmode_notif_used =
params->link_sta_params.opmode_notif_used;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
", station flags mask: %u, station flags set: %u, "
"station modify mask: %u, listen interval: %d, aid: %u, "
"plink action: %u, plink state: %u, uapsd queues: %u, vlan:%s",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
__entry->sta_flags_mask, __entry->sta_flags_set,
__entry->sta_modify_mask, __entry->listen_interval,
__entry->aid, __entry->plink_action, __entry->plink_state,
@@ -849,8 +847,8 @@ DECLARE_EVENT_CLASS(wiphy_netdev_mac_evt,
NETDEV_ASSIGN;
MAC_ASSIGN(sta_mac, mac);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac)
);
DECLARE_EVENT_CLASS(station_del,
@@ -871,9 +869,9 @@ DECLARE_EVENT_CLASS(station_del,
__entry->subtype = params->subtype;
__entry->reason_code = params->reason_code;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
", subtype: %u, reason_code: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
__entry->subtype, __entry->reason_code)
);
@@ -909,8 +907,8 @@ TRACE_EVENT(rdev_dump_station,
MAC_ASSIGN(sta_mac, mac);
__entry->idx = _idx;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT ", idx: %d",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM, idx: %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
__entry->idx)
);
@@ -947,9 +945,9 @@ DECLARE_EVENT_CLASS(mpath_evt,
MAC_ASSIGN(dst, dst);
MAC_ASSIGN(next_hop, next_hop);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT ", next hop: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dst),
- MAC_PR_ARG(next_hop))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: %pM, next hop: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->dst,
+ __entry->next_hop)
);
DEFINE_EVENT(mpath_evt, rdev_add_mpath,
@@ -988,10 +986,9 @@ TRACE_EVENT(rdev_dump_mpath,
MAC_ASSIGN(next_hop, next_hop);
__entry->idx = _idx;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: "
- MAC_PR_FMT ", next hop: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst),
- MAC_PR_ARG(next_hop))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: %pM, next hop: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, __entry->dst,
+ __entry->next_hop)
);
TRACE_EVENT(rdev_get_mpp,
@@ -1010,9 +1007,9 @@ TRACE_EVENT(rdev_get_mpp,
MAC_ASSIGN(dst, dst);
MAC_ASSIGN(mpp, mpp);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT
- ", mpp: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG,
- MAC_PR_ARG(dst), MAC_PR_ARG(mpp))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: %pM"
+ ", mpp: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG,
+ __entry->dst, __entry->mpp)
);
TRACE_EVENT(rdev_dump_mpp,
@@ -1033,10 +1030,9 @@ TRACE_EVENT(rdev_dump_mpp,
MAC_ASSIGN(mpp, mpp);
__entry->idx = _idx;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: "
- MAC_PR_FMT ", mpp: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst),
- MAC_PR_ARG(mpp))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: %pM, mpp: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, __entry->dst,
+ __entry->mpp)
);
TRACE_EVENT(rdev_return_int_mpath_info,
@@ -1243,9 +1239,9 @@ TRACE_EVENT(rdev_auth,
eth_zero_addr(__entry->bssid);
__entry->auth_type = req->auth_type;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT,
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: %pM",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->auth_type,
- MAC_PR_ARG(bssid))
+ __entry->bssid)
);
TRACE_EVENT(rdev_assoc,
@@ -1294,10 +1290,10 @@ TRACE_EVENT(rdev_assoc,
memcpy(__get_dynamic_array(fils_nonces),
req->fils_nonces, 2 * FILS_NONCE_LEN);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
- ", previous bssid: " MAC_PR_FMT ", use mfp: %s, flags: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
- MAC_PR_ARG(prev_bssid), BOOL_TO_STR(__entry->use_mfp),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
+ ", previous bssid: %pM, use mfp: %s, flags: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid,
+ __entry->prev_bssid, BOOL_TO_STR(__entry->use_mfp),
__entry->flags)
);
@@ -1317,8 +1313,8 @@ TRACE_EVENT(rdev_deauth,
MAC_ASSIGN(bssid, req->bssid);
__entry->reason_code = req->reason_code;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", reason: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM, reason: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid,
__entry->reason_code)
);
@@ -1340,9 +1336,9 @@ TRACE_EVENT(rdev_disassoc,
__entry->reason_code = req->reason_code;
__entry->local_state_change = req->local_state_change;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
", reason: %u, local state change: %s",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid,
__entry->reason_code,
BOOL_TO_STR(__entry->local_state_change))
);
@@ -1413,12 +1409,12 @@ TRACE_EVENT(rdev_connect,
__entry->flags = sme->flags;
MAC_ASSIGN(prev_bssid, sme->prev_bssid);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
- "flags: %u, previous bssid: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid,
+ "flags: %u, previous bssid: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid,
__entry->auth_type, BOOL_TO_STR(__entry->privacy),
- __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid))
+ __entry->wpa_versions, __entry->flags, __entry->prev_bssid)
);
TRACE_EVENT(rdev_update_connect_params,
@@ -1542,8 +1538,8 @@ TRACE_EVENT(rdev_join_ibss,
memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
memcpy(__entry->ssid, params->ssid, params->ssid_len);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", ssid: %s",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM, ssid: %s",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid)
);
TRACE_EVENT(rdev_join_ocb,
@@ -1664,9 +1660,9 @@ TRACE_EVENT(rdev_set_bitrate_mask,
__entry->link_id = link_id;
MAC_ASSIGN(peer, peer);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, peer: " MAC_PR_FMT,
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, peer: %pM",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id,
- MAC_PR_ARG(peer))
+ __entry->peer)
);
TRACE_EVENT(rdev_update_mgmt_frame_registrations,
@@ -1810,10 +1806,10 @@ TRACE_EVENT(rdev_tdls_mgmt,
__entry->initiator = initiator;
memcpy(__get_dynamic_array(buf), buf, len);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", action_code: %u, "
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, action_code: %u, "
"dialog_token: %u, status_code: %u, peer_capability: %u "
"initiator: %s buf: %#.2x ",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer,
__entry->action_code, __entry->dialog_token,
__entry->status_code, __entry->peer_capability,
BOOL_TO_STR(__entry->initiator),
@@ -1893,8 +1889,8 @@ TRACE_EVENT(rdev_tdls_oper,
MAC_ASSIGN(peer, peer);
__entry->oper = oper;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", oper: %d",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, oper: %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->oper)
);
DECLARE_EVENT_CLASS(rdev_pmksa,
@@ -1911,8 +1907,8 @@ DECLARE_EVENT_CLASS(rdev_pmksa,
NETDEV_ASSIGN;
MAC_ASSIGN(bssid, pmksa->bssid);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid)
);
TRACE_EVENT(rdev_probe_client,
@@ -1929,8 +1925,8 @@ TRACE_EVENT(rdev_probe_client,
NETDEV_ASSIGN;
MAC_ASSIGN(peer, peer);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer)
);
DEFINE_EVENT(rdev_pmksa, rdev_set_pmksa,
@@ -2051,9 +2047,9 @@ TRACE_EVENT(rdev_tx_control_port,
__entry->unencrypted = unencrypted;
__entry->link_id = link_id;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ","
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM,"
" proto: 0x%x, unencrypted: %s, link: %d",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dest),
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->dest,
be16_to_cpu(__entry->proto),
BOOL_TO_STR(__entry->unencrypted),
__entry->link_id)
@@ -2392,8 +2388,8 @@ TRACE_EVENT(rdev_add_tx_ts,
__entry->user_prio = user_prio;
__entry->admitted_time = admitted_time;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", TSID %d, UP %d, time %d",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, TSID %d, UP %d, time %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer,
__entry->tsid, __entry->user_prio, __entry->admitted_time)
);
@@ -2413,8 +2409,8 @@ TRACE_EVENT(rdev_del_tx_ts,
MAC_ASSIGN(peer, peer);
__entry->tsid = tsid;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", TSID %d",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tsid)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, TSID %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->tsid)
);
TRACE_EVENT(rdev_tdls_channel_switch,
@@ -2435,9 +2431,9 @@ TRACE_EVENT(rdev_tdls_channel_switch,
MAC_ASSIGN(addr, addr);
CHAN_DEF_ASSIGN(chandef);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM"
" oper class %d, " CHAN_DEF_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr),
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->addr,
__entry->oper_class, CHAN_DEF_PR_ARG)
);
@@ -2455,8 +2451,8 @@ TRACE_EVENT(rdev_tdls_cancel_channel_switch,
NETDEV_ASSIGN;
MAC_ASSIGN(addr, addr);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->addr)
);
TRACE_EVENT(rdev_set_pmk,
@@ -2488,9 +2484,9 @@ TRACE_EVENT(rdev_set_pmk,
pmk_conf->pmk_r0_name ? WLAN_PMK_NAME_LEN : 0);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM"
"pmk_len=%u, pmk: %s pmk_r0_name: %s", WIPHY_PR_ARG,
- NETDEV_PR_ARG, MAC_PR_ARG(aa), __entry->pmk_len,
+ NETDEV_PR_ARG, __entry->aa, __entry->pmk_len,
__print_array(__get_dynamic_array(pmk),
__get_dynamic_array_len(pmk), 1),
__entry->pmk_r0_name_len ?
@@ -2515,8 +2511,8 @@ TRACE_EVENT(rdev_del_pmk,
MAC_ASSIGN(aa, aa);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(aa))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->aa)
);
TRACE_EVENT(rdev_external_auth,
@@ -2528,6 +2524,7 @@ TRACE_EVENT(rdev_external_auth,
MAC_ENTRY(bssid)
__array(u8, ssid, IEEE80211_MAX_SSID_LEN + 1)
__field(u16, status)
+ MAC_ENTRY(mld_addr)
),
TP_fast_assign(WIPHY_ASSIGN;
NETDEV_ASSIGN;
@@ -2536,10 +2533,12 @@ TRACE_EVENT(rdev_external_auth,
memcpy(__entry->ssid, params->ssid.ssid,
params->ssid.ssid_len);
__entry->status = params->status;
+ MAC_ASSIGN(mld_addr, params->mld_addr);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
- ", ssid: %s, status: %u", WIPHY_PR_ARG, NETDEV_PR_ARG,
- __entry->bssid, __entry->ssid, __entry->status)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
+ ", ssid: %s, status: %u, mld_addr: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid,
+ __entry->ssid, __entry->status, __entry->mld_addr)
);
TRACE_EVENT(rdev_start_radar_detection,
@@ -2720,8 +2719,8 @@ TRACE_EVENT(rdev_update_owe_info,
__entry->status = owe_info->status;
memcpy(__get_dynamic_array(ie),
owe_info->ie, owe_info->ie_len);),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT
- " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM"
+ " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer,
__entry->status)
);
@@ -2739,8 +2738,8 @@ TRACE_EVENT(rdev_probe_mesh_link,
NETDEV_ASSIGN;
MAC_ASSIGN(dest, dest);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dest))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->dest)
);
TRACE_EVENT(rdev_set_tid_config,
@@ -2757,8 +2756,8 @@ TRACE_EVENT(rdev_set_tid_config,
NETDEV_ASSIGN;
MAC_ASSIGN(peer, tid_conf->peer);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer)
);
TRACE_EVENT(rdev_reset_tid_config,
@@ -2777,8 +2776,8 @@ TRACE_EVENT(rdev_reset_tid_config,
MAC_ASSIGN(peer, peer);
__entry->tids = tids;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", tids: 0x%x",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tids)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM, tids: 0x%x",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->tids)
);
TRACE_EVENT(rdev_set_sar_specs,
@@ -2881,8 +2880,8 @@ DECLARE_EVENT_CLASS(cfg80211_netdev_mac_evt,
NETDEV_ASSIGN;
MAC_ASSIGN(macaddr, macaddr);
),
- TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
- NETDEV_PR_ARG, MAC_PR_ARG(macaddr))
+ TP_printk(NETDEV_PR_FMT ", mac: %pM",
+ NETDEV_PR_ARG, __entry->macaddr)
);
DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_notify_new_peer_candidate,
@@ -2920,8 +2919,8 @@ TRACE_EVENT(cfg80211_send_rx_assoc,
MAC_ASSIGN(ap_addr,
data->ap_mld_addr ?: data->links[0].bss->bssid);
),
- TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT,
- NETDEV_PR_ARG, MAC_PR_ARG(ap_addr))
+ TP_printk(NETDEV_PR_FMT ", %pM",
+ NETDEV_PR_ARG, __entry->ap_addr)
);
DECLARE_EVENT_CLASS(netdev_frame_event,
@@ -2981,8 +2980,8 @@ DECLARE_EVENT_CLASS(netdev_mac_evt,
NETDEV_ASSIGN;
MAC_ASSIGN(mac, mac)
),
- TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
- NETDEV_PR_ARG, MAC_PR_ARG(mac))
+ TP_printk(NETDEV_PR_FMT ", mac: %pM",
+ NETDEV_PR_ARG, __entry->mac)
);
DEFINE_EVENT(netdev_mac_evt, cfg80211_send_auth_timeout,
@@ -3004,8 +3003,8 @@ TRACE_EVENT(cfg80211_send_assoc_failure,
MAC_ASSIGN(ap_addr, data->ap_mld_addr ?: data->bss[0]->bssid);
__entry->timeout = data->timeout;
),
- TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT ", timeout: %d",
- NETDEV_PR_ARG, MAC_PR_ARG(ap_addr), __entry->timeout)
+ TP_printk(NETDEV_PR_FMT ", mac: %pM, timeout: %d",
+ NETDEV_PR_ARG, __entry->ap_addr, __entry->timeout)
);
TRACE_EVENT(cfg80211_michael_mic_failure,
@@ -3027,8 +3026,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
if (tsc)
memcpy(__entry->tsc, tsc, 6);
),
- TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
- NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
+ TP_printk(NETDEV_PR_FMT ", %pM, key type: %d, key id: %d, tsc: %pm",
+ NETDEV_PR_ARG, __entry->addr, __entry->key_type,
__entry->key_id, __entry->tsc)
);
@@ -3104,8 +3103,8 @@ TRACE_EVENT(cfg80211_new_sta,
MAC_ASSIGN(mac_addr, mac_addr);
SINFO_ASSIGN;
),
- TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT,
- NETDEV_PR_ARG, MAC_PR_ARG(mac_addr))
+ TP_printk(NETDEV_PR_FMT ", %pM",
+ NETDEV_PR_ARG, __entry->mac_addr)
);
DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta,
@@ -3182,8 +3181,8 @@ TRACE_EVENT(cfg80211_rx_control_port,
__entry->proto = be16_to_cpu(skb->protocol);
__entry->unencrypted = unencrypted;
),
- TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s",
- NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from),
+ TP_printk(NETDEV_PR_FMT ", len=%d, %pM, proto: 0x%x, unencrypted: %s",
+ NETDEV_PR_ARG, __entry->len, __entry->from,
__entry->proto, BOOL_TO_STR(__entry->unencrypted))
);
@@ -3245,39 +3244,47 @@ TRACE_EVENT(cfg80211_chandef_dfs_required,
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id),
- TP_ARGS(netdev, chandef, link_id),
+ unsigned int link_id,
+ u16 punct_bitmap),
+ TP_ARGS(netdev, chandef, link_id, punct_bitmap),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
+ __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
+ __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
+ __entry->punct_bitmap)
);
TRACE_EVENT(cfg80211_ch_switch_started_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id),
- TP_ARGS(netdev, chandef, link_id),
+ unsigned int link_id,
+ u16 punct_bitmap),
+ TP_ARGS(netdev, chandef, link_id, punct_bitmap),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
+ __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
+ __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
+ __entry->punct_bitmap)
);
TRACE_EVENT(cfg80211_radar_event,
@@ -3324,7 +3331,7 @@ DECLARE_EVENT_CLASS(cfg80211_rx_evt,
NETDEV_ASSIGN;
MAC_ASSIGN(addr, addr);
),
- TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr))
+ TP_printk(NETDEV_PR_FMT ", %pM", NETDEV_PR_ARG, __entry->addr)
);
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
@@ -3351,8 +3358,8 @@ TRACE_EVENT(cfg80211_ibss_joined,
MAC_ASSIGN(bssid, bssid);
CHAN_ASSIGN(channel);
),
- TP_printk(NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", " CHAN_PR_FMT,
- NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
+ TP_printk(NETDEV_PR_FMT ", bssid: %pM, " CHAN_PR_FMT,
+ NETDEV_PR_ARG, __entry->bssid, CHAN_PR_ARG)
);
TRACE_EVENT(cfg80211_probe_status,
@@ -3371,8 +3378,8 @@ TRACE_EVENT(cfg80211_probe_status,
__entry->cookie = cookie;
__entry->acked = acked;
),
- TP_printk(NETDEV_PR_FMT " addr:" MAC_PR_FMT ", cookie: %llu, acked: %s",
- NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->cookie,
+ TP_printk(NETDEV_PR_FMT " addr:%pM, cookie: %llu, acked: %s",
+ NETDEV_PR_ARG, __entry->addr, __entry->cookie,
BOOL_TO_STR(__entry->acked))
);
@@ -3389,8 +3396,8 @@ TRACE_EVENT(cfg80211_cqm_pktloss_notify,
MAC_ASSIGN(peer, peer);
__entry->num_packets = num_packets;
),
- TP_printk(NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", num of lost packets: %u",
- NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->num_packets)
+ TP_printk(NETDEV_PR_FMT ", peer: %pM, num of lost packets: %u",
+ NETDEV_PR_ARG, __entry->peer, __entry->num_packets)
);
DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_gtk_rekey_notify,
@@ -3414,8 +3421,8 @@ TRACE_EVENT(cfg80211_pmksa_candidate_notify,
MAC_ASSIGN(bssid, bssid);
__entry->preauth = preauth;
),
- TP_printk(NETDEV_PR_FMT ", index:%d, bssid: " MAC_PR_FMT ", pre auth: %s",
- NETDEV_PR_ARG, __entry->index, MAC_PR_ARG(bssid),
+ TP_printk(NETDEV_PR_FMT ", index:%d, bssid: %pM, pre auth: %s",
+ NETDEV_PR_ARG, __entry->index, __entry->bssid,
BOOL_TO_STR(__entry->preauth))
);
@@ -3455,8 +3462,8 @@ TRACE_EVENT(cfg80211_tdls_oper_request,
__entry->oper = oper;
__entry->reason_code = reason_code;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", oper: %d, reason_code %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper,
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM, oper: %d, reason_code %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->oper,
__entry->reason_code)
);
@@ -3494,10 +3501,10 @@ TRACE_EVENT(cfg80211_scan_done,
MAC_ASSIGN(tsf_bssid, info->tsf_bssid);
}
),
- TP_printk("aborted: %s, scan start (TSF): %llu, tsf_bssid: " MAC_PR_FMT,
+ TP_printk("aborted: %s, scan start (TSF): %llu, tsf_bssid: %pM",
BOOL_TO_STR(__entry->aborted),
(unsigned long long)__entry->scan_start_tsf,
- MAC_PR_ARG(tsf_bssid))
+ __entry->tsf_bssid)
);
DECLARE_EVENT_CLASS(wiphy_id_evt,
@@ -3546,9 +3553,9 @@ TRACE_EVENT(cfg80211_get_bss,
__entry->bss_type = bss_type;
__entry->privacy = privacy;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT
+ TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", %pM"
", buf: %#.2x, bss_type: %d, privacy: %d",
- WIPHY_PR_ARG, CHAN_PR_ARG, MAC_PR_ARG(bssid),
+ WIPHY_PR_ARG, CHAN_PR_ARG, __entry->bssid,
((u8 *)__get_dynamic_array(ssid))[0], __entry->bss_type,
__entry->privacy)
);
@@ -3579,11 +3586,11 @@ TRACE_EVENT(cfg80211_inform_bss_frame,
MAC_ASSIGN(parent_bssid, data->parent_bssid);
),
TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT
- "(scan_width: %d) signal: %d, tsb:%llu, detect_tsf:%llu, tsf_bssid: "
- MAC_PR_FMT, WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
+ "(scan_width: %d) signal: %d, tsb:%llu, detect_tsf:%llu, tsf_bssid: %pM",
+ WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
__entry->signal, (unsigned long long)__entry->ts_boottime,
(unsigned long long)__entry->parent_tsf,
- MAC_PR_ARG(parent_bssid))
+ __entry->parent_bssid)
);
DECLARE_EVENT_CLASS(cfg80211_bss_evt,
@@ -3597,7 +3604,7 @@ DECLARE_EVENT_CLASS(cfg80211_bss_evt,
MAC_ASSIGN(bssid, pub->bssid);
CHAN_ASSIGN(pub->channel);
),
- TP_printk(MAC_PR_FMT ", " CHAN_PR_FMT, MAC_PR_ARG(bssid), CHAN_PR_ARG)
+ TP_printk("%pM, " CHAN_PR_FMT, __entry->bssid, CHAN_PR_ARG)
);
DEFINE_EVENT(cfg80211_bss_evt, cfg80211_return_bss,
@@ -3689,8 +3696,8 @@ TRACE_EVENT(cfg80211_ft_event,
memcpy(__get_dynamic_array(ric_ies), ft_event->ric_ies,
ft_event->ric_ies_len);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", target_ap: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", target_ap: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->target_ap)
);
TRACE_EVENT(cfg80211_stop_iface,
@@ -3724,10 +3731,10 @@ TRACE_EVENT(cfg80211_pmsr_report,
__entry->cookie = cookie;
MAC_ASSIGN(addr, addr);
),
- TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie:%lld, " MAC_PR_FMT,
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie:%lld, %pM",
WIPHY_PR_ARG, WDEV_PR_ARG,
(unsigned long long)__entry->cookie,
- MAC_PR_ARG(addr))
+ __entry->addr)
);
TRACE_EVENT(cfg80211_pmsr_complete,
@@ -3749,20 +3756,30 @@ TRACE_EVENT(cfg80211_pmsr_complete,
);
TRACE_EVENT(cfg80211_update_owe_info_event,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_update_owe_info *owe_info),
- TP_ARGS(wiphy, netdev, owe_info),
- TP_STRUCT__entry(WIPHY_ENTRY
- NETDEV_ENTRY
- MAC_ENTRY(peer)
- __dynamic_array(u8, ie, owe_info->ie_len)),
- TP_fast_assign(WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- MAC_ASSIGN(peer, owe_info->peer);
- memcpy(__get_dynamic_array(ie), owe_info->ie,
- owe_info->ie_len);),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_update_owe_info *owe_info),
+ TP_ARGS(wiphy, netdev, owe_info),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ __dynamic_array(u8, ie, owe_info->ie_len)
+ __field(int, assoc_link_id)
+ MAC_ENTRY(peer_mld_addr)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, owe_info->peer);
+ memcpy(__get_dynamic_array(ie), owe_info->ie,
+ owe_info->ie_len);
+ __entry->assoc_link_id = owe_info->assoc_link_id;
+ MAC_ASSIGN(peer_mld_addr, owe_info->peer_mld_addr);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM,"
+ " assoc_link_id: %d, peer_mld_addr: %pM",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer,
+ __entry->assoc_link_id, __entry->peer_mld_addr)
);
TRACE_EVENT(cfg80211_bss_color_notify,
@@ -3800,8 +3817,8 @@ TRACE_EVENT(cfg80211_assoc_comeback,
MAC_ASSIGN(ap_addr, ap_addr);
__entry->timeout = timeout;
),
- TP_printk(WDEV_PR_FMT ", " MAC_PR_FMT ", timeout: %u TUs",
- WDEV_PR_ARG, MAC_PR_ARG(ap_addr), __entry->timeout)
+ TP_printk(WDEV_PR_FMT ", %pM, timeout: %u TUs",
+ WDEV_PR_ARG, __entry->ap_addr, __entry->timeout)
);
DECLARE_EVENT_CLASS(link_station_add_mod,
@@ -3859,10 +3876,10 @@ DECLARE_EVENT_CLASS(link_station_add_mod,
memcpy(__get_dynamic_array(eht_capa), params->eht_capa,
params->eht_capa_len);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
- ", link mac: " MAC_PR_FMT ", link id: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(mld_mac),
- MAC_PR_ARG(link_mac), __entry->link_id)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
+ ", link mac: %pM, link id: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac,
+ __entry->link_mac, __entry->link_id)
);
DEFINE_EVENT(link_station_add_mod, rdev_add_link_station,
@@ -3895,9 +3912,9 @@ TRACE_EVENT(rdev_del_link_station,
memcpy(__entry->mld_mac, params->mld_mac, 6);
__entry->link_id = params->link_id;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
", link id: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(mld_mac),
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac,
__entry->link_id)
);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8f403f9fe816..d1a89e82ead0 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -542,6 +542,64 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
}
EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
+bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto)
+{
+ const __be16 *hdr_proto = hdr + ETH_ALEN;
+
+ if (!(ether_addr_equal(hdr, rfc1042_header) &&
+ *hdr_proto != htons(ETH_P_AARP) &&
+ *hdr_proto != htons(ETH_P_IPX)) &&
+ !ether_addr_equal(hdr, bridge_tunnel_header))
+ return false;
+
+ *proto = *hdr_proto;
+
+ return true;
+}
+EXPORT_SYMBOL(ieee80211_get_8023_tunnel_proto);
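
ieee80211_get_8023_tunnel_proto() centralizes an LLC/SNAP test that was previously open-coded at each call site: a payload counts as encapsulated if it begins with the 802.1H bridge-tunnel header, or with the RFC 1042 header and an EtherType other than AARP/IPX (those two protocols use the bridge-tunnel form instead). A hedged, open-coded equivalent of the check, with the 6-byte prefixes as cfg80211 defines them:

    #include <linux/etherdevice.h>
    #include <linux/if_ether.h>

    static const u8 rfc1042_header[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
    static const u8 bridge_tunnel_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

    /* hdr points at 6 SNAP bytes; proto is the 2-byte EtherType after them */
    static bool is_8023_tunnel(const u8 *hdr, __be16 proto)
    {
            if (ether_addr_equal(hdr, bridge_tunnel_header))
                    return true;
            return ether_addr_equal(hdr, rfc1042_header) &&
                   proto != htons(ETH_P_AARP) && proto != htons(ETH_P_IPX);
    }
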
+
+int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb)
+{
+ const void *mesh_addr;
+ struct {
+ struct ethhdr eth;
+ u8 flags;
+ } payload;
+ int hdrlen;
+ int ret;
+
+ ret = skb_copy_bits(skb, 0, &payload, sizeof(payload));
+ if (ret)
+ return ret;
+
+ hdrlen = sizeof(payload.eth) + __ieee80211_get_mesh_hdrlen(payload.flags);
+
+ if (likely(pskb_may_pull(skb, hdrlen + 8) &&
+ ieee80211_get_8023_tunnel_proto(skb->data + hdrlen,
+ &payload.eth.h_proto)))
+ hdrlen += ETH_ALEN + 2;
+ else if (!pskb_may_pull(skb, hdrlen))
+ return -EINVAL;
+
+ mesh_addr = skb->data + sizeof(payload.eth) + ETH_ALEN;
+ switch (payload.flags & MESH_FLAGS_AE) {
+ case MESH_FLAGS_AE_A4:
+ memcpy(&payload.eth.h_source, mesh_addr, ETH_ALEN);
+ break;
+ case MESH_FLAGS_AE_A5_A6:
+ memcpy(&payload.eth, mesh_addr, 2 * ETH_ALEN);
+ break;
+ default:
+ break;
+ }
+
+ pskb_pull(skb, hdrlen - sizeof(payload.eth));
+ memcpy(skb->data, &payload.eth, sizeof(payload.eth));
+
+ return 0;
+}
+EXPORT_SYMBOL(ieee80211_strip_8023_mesh_hdr);
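
The Address Extension (AE) bits of the mesh flags decide how ieee80211_strip_8023_mesh_hdr() rewrites the inner Ethernet header: AE=A4 carries one extended address that becomes the source, while AE=A5/A6 carries two that become destination and source (struct ethhdr stores them back to back, so one 12-byte copy covers both). A rough sketch of the same dispatch on a plain buffer, using the flag values from ieee80211.h:

    #include <linux/if_ether.h>
    #include <linux/string.h>

    #define MESH_FLAGS_AE_A4        0x1
    #define MESH_FLAGS_AE_A5_A6     0x2
    #define MESH_FLAGS_AE           0x3

    /* ext points at the extended-address area of the mesh header */
    static void apply_mesh_ae(struct ethhdr *eth, const u8 *ext, u8 flags)
    {
            switch (flags & MESH_FLAGS_AE) {
            case MESH_FLAGS_AE_A4:          /* one address: proxied source */
                    memcpy(eth->h_source, ext, ETH_ALEN);
                    break;
            case MESH_FLAGS_AE_A5_A6:       /* two addresses: DA then SA */
                    memcpy(eth->h_dest, ext, 2 * ETH_ALEN);
                    break;
            default:                        /* no extension: leave as-is */
                    break;
            }
    }
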
+
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
u8 data_offset, bool is_amsdu)
@@ -553,7 +611,6 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
} payload;
struct ethhdr tmp;
u16 hdrlen;
- u8 mesh_flags = 0;
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
return -1;
@@ -574,12 +631,6 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN);
- if (iftype == NL80211_IFTYPE_MESH_POINT &&
- skb_copy_bits(skb, hdrlen, &mesh_flags, 1) < 0)
- return -1;
-
- mesh_flags &= MESH_FLAGS_AE;
-
switch (hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -593,17 +644,6 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
iftype != NL80211_IFTYPE_AP_VLAN &&
iftype != NL80211_IFTYPE_STATION))
return -1;
- if (iftype == NL80211_IFTYPE_MESH_POINT) {
- if (mesh_flags == MESH_FLAGS_AE_A4)
- return -1;
- if (mesh_flags == MESH_FLAGS_AE_A5_A6 &&
- skb_copy_bits(skb, hdrlen +
- offsetof(struct ieee80211s_hdr, eaddr1),
- tmp.h_dest, 2 * ETH_ALEN) < 0)
- return -1;
-
- hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
- }
break;
case cpu_to_le16(IEEE80211_FCTL_FROMDS):
if ((iftype != NL80211_IFTYPE_STATION &&
@@ -612,16 +652,6 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
(is_multicast_ether_addr(tmp.h_dest) &&
ether_addr_equal(tmp.h_source, addr)))
return -1;
- if (iftype == NL80211_IFTYPE_MESH_POINT) {
- if (mesh_flags == MESH_FLAGS_AE_A5_A6)
- return -1;
- if (mesh_flags == MESH_FLAGS_AE_A4 &&
- skb_copy_bits(skb, hdrlen +
- offsetof(struct ieee80211s_hdr, eaddr1),
- tmp.h_source, ETH_ALEN) < 0)
- return -1;
- hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
- }
break;
case cpu_to_le16(0):
if (iftype != NL80211_IFTYPE_ADHOC &&
@@ -631,15 +661,11 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
break;
}
- if (likely(skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)) == 0 &&
- ((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
- payload.proto != htons(ETH_P_AARP) &&
- payload.proto != htons(ETH_P_IPX)) ||
- ether_addr_equal(payload.hdr, bridge_tunnel_header)))) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
+ if (likely(!is_amsdu && iftype != NL80211_IFTYPE_MESH_POINT &&
+ skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)) == 0 &&
+ ieee80211_get_8023_tunnel_proto(&payload, &tmp.h_proto))) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation */
hdrlen += ETH_ALEN + 2;
- tmp.h_proto = payload.proto;
skb_postpull_rcsum(skb, &payload, ETH_ALEN + 2);
} else {
tmp.h_proto = htons(skb->len - hdrlen);
@@ -711,7 +737,8 @@ __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame,
static struct sk_buff *
__ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
- int offset, int len, bool reuse_frag)
+ int offset, int len, bool reuse_frag,
+ int min_len)
{
struct sk_buff *frame;
int cur_len = len;
@@ -725,7 +752,7 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
* in the stack later.
*/
if (reuse_frag)
- cur_len = min_t(int, len, 32);
+ cur_len = min_t(int, len, min_len);
/*
* Allocate and reserve two bytes more for payload
@@ -735,6 +762,7 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
if (!frame)
return NULL;
+ frame->priority = skb->priority;
skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
@@ -748,28 +776,72 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
return frame;
}
+bool ieee80211_is_valid_amsdu(struct sk_buff *skb, bool mesh_hdr)
+{
+ int offset = 0, remaining, subframe_len, padding;
+
+ for (offset = 0; offset < skb->len; offset += subframe_len + padding) {
+ struct {
+ __be16 len;
+ u8 mesh_flags;
+ } hdr;
+ u16 len;
+
+ if (skb_copy_bits(skb, offset + 2 * ETH_ALEN, &hdr, sizeof(hdr)) < 0)
+ return false;
+
+ if (mesh_hdr)
+ len = le16_to_cpu(*(__le16 *)&hdr.len) +
+ __ieee80211_get_mesh_hdrlen(hdr.mesh_flags);
+ else
+ len = ntohs(hdr.len);
+
+ subframe_len = sizeof(struct ethhdr) + len;
+ padding = (4 - subframe_len) & 0x3;
+ remaining = skb->len - offset;
+
+ if (subframe_len > remaining)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(ieee80211_is_valid_amsdu);
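
Each A-MSDU subframe is a 14-byte Ethernet-style header plus a len-byte payload, padded so the next subframe starts on a 4-byte boundary; `(4 - subframe_len) & 0x3` yields that pad without a branch or a modulo. A worked sketch under the same layout assumptions:

    #include <linux/if_ether.h>

    /* e.g. len = 40: subframe_len = 14 + 40 = 54,
     * padding = (4 - 54) & 3 = 2, so the next subframe starts 56 bytes on.
     */
    static int next_amsdu_offset(int offset, u16 len)
    {
            int subframe_len = sizeof(struct ethhdr) + len;
            int padding = (4 - subframe_len) & 0x3;     /* 0..3 pad bytes */

            return offset + subframe_len + padding;
    }
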
+
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- const u8 *check_da, const u8 *check_sa)
+ const u8 *check_da, const u8 *check_sa,
+ bool mesh_control)
{
unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
- u16 ethertype;
- u8 *payload;
int offset = 0, remaining;
- struct ethhdr eth;
+ struct {
+ struct ethhdr eth;
+ uint8_t flags;
+ } hdr;
bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
bool reuse_skb = false;
bool last = false;
+ int copy_len = sizeof(hdr.eth);
+
+ if (iftype == NL80211_IFTYPE_MESH_POINT)
+ copy_len = sizeof(hdr);
while (!last) {
unsigned int subframe_len;
- int len;
+ int len, mesh_len = 0;
u8 padding;
- skb_copy_bits(skb, offset, &eth, sizeof(eth));
- len = ntohs(eth.h_proto);
+ skb_copy_bits(skb, offset, &hdr, copy_len);
+ if (iftype == NL80211_IFTYPE_MESH_POINT)
+ mesh_len = __ieee80211_get_mesh_hdrlen(hdr.flags);
+ if (mesh_control)
+ len = le16_to_cpu(*(__le16 *)&hdr.eth.h_proto) + mesh_len;
+ else
+ len = ntohs(hdr.eth.h_proto);
+
subframe_len = sizeof(struct ethhdr) + len;
padding = (4 - subframe_len) & 0x3;
@@ -778,16 +850,16 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
if (subframe_len > remaining)
goto purge;
/* mitigate A-MSDU aggregation injection attacks */
- if (ether_addr_equal(eth.h_dest, rfc1042_header))
+ if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header))
goto purge;
offset += sizeof(struct ethhdr);
last = remaining <= subframe_len + padding;
/* FIXME: should we really accept multicast DA? */
- if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
- !ether_addr_equal(check_da, eth.h_dest)) ||
- (check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
+ if ((check_da && !is_multicast_ether_addr(hdr.eth.h_dest) &&
+ !ether_addr_equal(check_da, hdr.eth.h_dest)) ||
+ (check_sa && !ether_addr_equal(check_sa, hdr.eth.h_source))) {
offset += len + padding;
continue;
}
@@ -799,7 +871,7 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
reuse_skb = true;
} else {
frame = __ieee80211_amsdu_copy(skb, hlen, offset, len,
- reuse_frag);
+ reuse_frag, 32 + mesh_len);
if (!frame)
goto purge;
@@ -810,16 +882,11 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
frame->dev = skb->dev;
frame->priority = skb->priority;
- payload = frame->data;
- ethertype = (payload[6] << 8) | payload[7];
- if (likely((ether_addr_equal(payload, rfc1042_header) &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- ether_addr_equal(payload, bridge_tunnel_header))) {
- eth.h_proto = htons(ethertype);
+ if (likely(iftype != NL80211_IFTYPE_MESH_POINT &&
+ ieee80211_get_8023_tunnel_proto(frame->data, &hdr.eth.h_proto)))
skb_pull(frame, ETH_ALEN + 2);
- }
- memcpy(skb_push(frame, sizeof(eth)), &eth, sizeof(eth));
+ memcpy(skb_push(frame, sizeof(hdr.eth)), &hdr.eth, sizeof(hdr.eth));
__skb_queue_tail(list, frame);
}
@@ -934,7 +1001,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
if (!wdev->connect_keys)
return;
- for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++) {
+ for (i = 0; i < 4; i++) {
if (!wdev->connect_keys->params[i].cipher)
continue;
if (rdev_add_key(rdev, dev, -1, i, false, NULL,
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 8a24dfca75af..e3acfac7430a 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -439,7 +439,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
GFP_KERNEL);
if (!wdev->wext.keys)
return -ENOMEM;
- for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++)
+ for (i = 0; i < 4; i++)
wdev->wext.keys->params[i].key =
wdev->wext.keys->data[i];
}
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index fe8765c4075d..13a72b17248e 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -636,7 +636,15 @@ void wireless_send_event(struct net_device * dev,
}
EXPORT_SYMBOL(wireless_send_event);
+#ifdef CONFIG_CFG80211_WEXT
+static void wireless_warn_cfg80211_wext(void)
+{
+ char name[sizeof(current->comm)];
+ pr_warn_ratelimited("warning: `%s' uses wireless extensions that are deprecated for modern drivers; use nl80211\n",
+ get_task_comm(name, current));
+}
+#endif
/* IW handlers */
@@ -652,8 +660,12 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
if (dev->ieee80211_ptr &&
dev->ieee80211_ptr->wiphy &&
dev->ieee80211_ptr->wiphy->wext &&
- dev->ieee80211_ptr->wiphy->wext->get_wireless_stats)
+ dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) {
+ wireless_warn_cfg80211_wext();
+ if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
+ return NULL;
return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev);
+ }
#endif
/* not found */
@@ -690,8 +702,12 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
const struct iw_handler_def *handlers = NULL;
#ifdef CONFIG_CFG80211_WEXT
- if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy)
+ if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) {
+ wireless_warn_cfg80211_wext();
+ if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
+ return NULL;
handlers = dev->ieee80211_ptr->wiphy->wext;
+ }
#endif
#ifdef CONFIG_WIRELESS_EXT
if (dev->wireless_handlers)
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 191c6d98c700..f231207ca210 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -47,7 +47,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
- for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++)
+ for (i = 0; i < 4; i++)
ck->params[i].key = ck->data[i];
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 9f0561b67c12..2ac58b282b5e 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -22,6 +22,7 @@
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
+#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>
@@ -511,7 +512,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
return skb;
}
-static int xsk_generic_xmit(struct sock *sk)
+static int __xsk_generic_xmit(struct sock *sk)
{
struct xdp_sock *xs = xdp_sk(sk);
u32 max_batch = TX_BATCH_SIZE;
@@ -594,22 +595,13 @@ out:
return err;
}
-static int xsk_xmit(struct sock *sk)
+static int xsk_generic_xmit(struct sock *sk)
{
- struct xdp_sock *xs = xdp_sk(sk);
int ret;
- if (unlikely(!(xs->dev->flags & IFF_UP)))
- return -ENETDOWN;
- if (unlikely(!xs->tx))
- return -ENOBUFS;
-
- if (xs->zc)
- return xsk_wakeup(xs, XDP_WAKEUP_TX);
-
/* Drop the RCU lock since the SKB path might sleep. */
rcu_read_unlock();
- ret = xsk_generic_xmit(sk);
+ ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
rcu_read_lock();
@@ -627,17 +619,31 @@ static bool xsk_no_wakeup(struct sock *sk)
#endif
}
+static int xsk_check_common(struct xdp_sock *xs)
+{
+ if (unlikely(!xsk_is_bound(xs)))
+ return -ENXIO;
+ if (unlikely(!(xs->dev->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ return 0;
+}
+
static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
struct xsk_buff_pool *pool;
+ int err;
- if (unlikely(!xsk_is_bound(xs)))
- return -ENXIO;
+ err = xsk_check_common(xs);
+ if (err)
+ return err;
if (unlikely(need_wait))
return -EOPNOTSUPP;
+ if (unlikely(!xs->tx))
+ return -ENOBUFS;
if (sk_can_busy_loop(sk)) {
if (xs->zc)
@@ -649,8 +655,11 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
return 0;
pool = xs->pool;
- if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
- return xsk_xmit(sk);
+ if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
+ if (xs->zc)
+ return xsk_wakeup(xs, XDP_WAKEUP_TX);
+ return xsk_generic_xmit(sk);
+ }
return 0;
}
@@ -670,11 +679,11 @@ static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int
bool need_wait = !(flags & MSG_DONTWAIT);
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
+ int err;
- if (unlikely(!xsk_is_bound(xs)))
- return -ENXIO;
- if (unlikely(!(xs->dev->flags & IFF_UP)))
- return -ENETDOWN;
+ err = xsk_check_common(xs);
+ if (err)
+ return err;
if (unlikely(!xs->rx))
return -ENOBUFS;
if (unlikely(need_wait))
@@ -713,21 +722,20 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
sock_poll_wait(file, sock, wait);
rcu_read_lock();
- if (unlikely(!xsk_is_bound(xs))) {
- rcu_read_unlock();
- return mask;
- }
+ if (xsk_check_common(xs))
+ goto skip_tx;
pool = xs->pool;
if (pool->cached_need_wakeup) {
if (xs->zc)
xsk_wakeup(xs, pool->cached_need_wakeup);
- else
+ else if (xs->tx)
/* Poll needs to drive Tx also in copy mode */
- xsk_xmit(sk);
+ xsk_generic_xmit(sk);
}
+skip_tx:
if (xs->rx && !xskq_prod_is_empty(xs->rx))
mask |= EPOLLIN | EPOLLRDNORM;
if (xs->tx && xsk_tx_writeable(xs))
@@ -845,7 +853,6 @@ static int xsk_release(struct socket *sock)
sock_orphan(sk);
sock->sk = NULL;
- sk_refcnt_debug_release(sk);
sock_put(sk);
return 0;
@@ -1295,8 +1302,6 @@ static int xsk_mmap(struct file *file, struct socket *sock,
unsigned long size = vma->vm_end - vma->vm_start;
struct xdp_sock *xs = xdp_sk(sock->sk);
struct xsk_queue *q = NULL;
- unsigned long pfn;
- struct page *qpg;
if (READ_ONCE(xs->state) != XSK_READY)
return -EBUSY;
@@ -1319,13 +1324,10 @@ static int xsk_mmap(struct file *file, struct socket *sock,
/* Matches the smp_wmb() in xsk_init_queue */
smp_rmb();
- qpg = virt_to_head_page(q->ring);
- if (size > page_size(qpg))
+ if (size > q->ring_vmalloc_size)
return -EINVAL;
- pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
- return remap_pfn_range(vma, vma->vm_start, pfn,
- size, vma->vm_page_prot);
+ return remap_vmalloc_range(vma, q->ring, 0);
}
static int xsk_notifier(struct notifier_block *this,
@@ -1396,8 +1398,6 @@ static void xsk_destruct(struct sock *sk)
if (!xp_put_pool(xs->pool))
xdp_put_umem(xs->umem, !xs->pool);
-
- sk_refcnt_debug_dec(sk);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
@@ -1427,7 +1427,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk->sk_family = PF_XDP;
sk->sk_destruct = xsk_destruct;
- sk_refcnt_debug_inc(sk);
sock_set_flag(sk, SOCK_RCU_FREE);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index ed6c71826d31..b2df1e0f8153 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -140,6 +140,10 @@ static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
}
}
+#define NETDEV_XDP_ACT_ZC (NETDEV_XDP_ACT_BASIC | \
+ NETDEV_XDP_ACT_REDIRECT | \
+ NETDEV_XDP_ACT_XSK_ZEROCOPY)
+
int xp_assign_dev(struct xsk_buff_pool *pool,
struct net_device *netdev, u16 queue_id, u16 flags)
{
@@ -178,8 +182,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
/* For copy-mode, we are done. */
return 0;
- if (!netdev->netdev_ops->ndo_bpf ||
- !netdev->netdev_ops->ndo_xsk_wakeup) {
+ if ((netdev->xdp_features & NETDEV_XDP_ACT_ZC) != NETDEV_XDP_ACT_ZC) {
err = -EOPNOTSUPP;
goto err_unreg_pool;
}
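
Zero-copy readiness is now read from the driver-advertised xdp_features bitmap instead of being inferred from the presence of ndo_bpf/ndo_xsk_wakeup; NETDEV_XDP_ACT_ZC bundles three bits and all of them must be present, hence the full-containment test rather than a simple non-zero AND. A generic sketch of that test:

    /* true only if every bit of @required is set in @features */
    static bool xdp_features_contain(u32 features, u32 required)
    {
            return (features & required) == required;
    }

    /* e.g. xdp_features_contain(netdev->xdp_features, NETDEV_XDP_ACT_ZC) */
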
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index 6cf9586e5027..f8905400ee07 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -6,6 +6,7 @@
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>
+#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include "xsk_queue.h"
@@ -23,7 +24,6 @@ static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
struct xsk_queue *q;
- gfp_t gfp_flags;
size_t size;
q = kzalloc(sizeof(*q), GFP_KERNEL);
@@ -33,17 +33,16 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
q->nentries = nentries;
q->ring_mask = nentries - 1;
- gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
- __GFP_COMP | __GFP_NORETRY;
size = xskq_get_ring_size(q, umem_queue);
+ size = PAGE_ALIGN(size);
- q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
- get_order(size));
+ q->ring = vmalloc_user(size);
if (!q->ring) {
kfree(q);
return NULL;
}
+ q->ring_vmalloc_size = size;
return q;
}
@@ -52,6 +51,6 @@ void xskq_destroy(struct xsk_queue *q)
if (!q)
return;
- page_frag_free(q->ring);
+ vfree(q->ring);
kfree(q);
}
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index c6fb6b763658..bfb2a7e50c26 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -45,6 +45,7 @@ struct xsk_queue {
struct xdp_ring *ring;
u64 invalid_descs;
u64 queue_empty_descs;
+ size_t ring_vmalloc_size;
};
/* The structure of the shared state of the rings are a simple
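
Switching the rings from __get_free_pages() to vmalloc_user() drops the need for high-order physically contiguous memory, and remap_vmalloc_range() validates the request against the vmalloc area itself, which is why xsk_mmap() now only compares the mmap size against the recorded ring_vmalloc_size. A minimal sketch of the lifecycle, assuming a page-aligned size as above:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *ring_alloc(size_t bytes, size_t *vm_size)
    {
            *vm_size = PAGE_ALIGN(bytes);
            return vmalloc_user(*vm_size);  /* zeroed, user-mappable */
    }

    static int ring_mmap(struct vm_area_struct *vma, void *ring, size_t vm_size)
    {
            if (vma->vm_end - vma->vm_start > vm_size)
                    return -EINVAL;         /* mirrors the xsk_mmap() check */
            return remap_vmalloc_range(vma, ring, 0);
    }

    /* teardown: vfree(ring); */
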
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index 74a54295c164..872b80188e83 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -6,6 +6,7 @@
#include <net/espintcp.h>
#include <linux/skmsg.h>
#include <net/inet_common.h>
+#include <trace/events/sock.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
@@ -397,6 +398,8 @@ static void espintcp_data_ready(struct sock *sk)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
+ trace_sk_data_ready(sk);
+
strp_data_ready(&ctx->strp);
}
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 4aff76c6f12e..95f1436bf6a2 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -309,7 +309,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
else
xso->type = XFRM_DEV_OFFLOAD_CRYPTO;
- err = dev->xfrmdev_ops->xdo_dev_state_add(x);
+ err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack);
if (err) {
xso->dev = NULL;
xso->dir = 0;
@@ -326,7 +326,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
*/
WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
if (err != -EOPNOTSUPP || is_packet_offload) {
- NL_SET_ERR_MSG(extack, "Device failed to offload this state");
+ NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
return err;
}
}
@@ -383,14 +383,14 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
return -EINVAL;
}
- err = dev->xfrmdev_ops->xdo_dev_policy_add(xp);
+ err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack);
if (err) {
xdo->dev = NULL;
xdo->real_dev = NULL;
xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
xdo->dir = 0;
netdev_put(dev, &xdo->dev_tracker);
- NL_SET_ERR_MSG(extack, "Device failed to offload this policy");
+ NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this policy");
return err;
}
diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c
index 1ef2162cebcf..d74f3fd20f2b 100644
--- a/net/xfrm/xfrm_interface_bpf.c
+++ b/net/xfrm/xfrm_interface_bpf.c
@@ -39,8 +39,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* @to - Pointer to memory to which the metadata will be copied
* Cannot be NULL
*/
-__used noinline
-int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
+__bpf_kfunc int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct xfrm_md_info *info;
@@ -62,9 +61,7 @@ int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
* @from - Pointer to memory from which the metadata will be copied
* Cannot be NULL
*/
-__used noinline
-int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx,
- const struct bpf_xfrm_info *from)
+__bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bpf_xfrm_info *from)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct metadata_dst *md_dst;
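
__bpf_kfunc replaces the open-coded `__used noinline` annotations, bundling the attributes that keep kfunc definitions visible to BTF and safe from compiler inlining or pruning. How the functions are registered is unchanged; a condensed sketch of the registration pattern this file uses (representative, not a complete listing):

    #include <linux/btf.h>
    #include <linux/btf_ids.h>

    BTF_SET8_START(xfrm_ifc_kfunc_set)
    BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
    BTF_ID_FLAGS(func, bpf_skb_set_xfrm_info)
    BTF_SET8_END(xfrm_ifc_kfunc_set)

    static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = {
            .owner = THIS_MODULE,
            .set   = &xfrm_ifc_kfunc_set,
    };

    /* at init:
     *      register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
     *                                &xfrm_interface_kfunc_set);
     */
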
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 00afe831c71c..2ab3e09e2227 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1274,7 +1274,7 @@ found:
xso->real_dev = xdo->real_dev;
netdev_tracker_alloc(xso->dev, &xso->dev_tracker,
GFP_ATOMIC);
- error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x);
+ error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
if (error) {
xso->dir = 0;
netdev_put(xso->dev, &xso->dev_tracker);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 727da3c5879b..615f24ebc49c 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -125,34 +125,34 @@ always-y += sockex1_kern.o
always-y += sockex2_kern.o
always-y += sockex3_kern.o
always-y += tracex1_kern.o
-always-y += tracex2_kern.o
+always-y += tracex2.bpf.o
always-y += tracex3_kern.o
always-y += tracex4_kern.o
always-y += tracex5_kern.o
always-y += tracex6_kern.o
always-y += tracex7_kern.o
-always-y += sock_flags_kern.o
-always-y += test_probe_write_user_kern.o
-always-y += trace_output_kern.o
+always-y += sock_flags.bpf.o
+always-y += test_probe_write_user.bpf.o
+always-y += trace_output.bpf.o
always-y += tcbpf1_kern.o
always-y += tc_l2_redirect_kern.o
always-y += lathist_kern.o
always-y += offwaketime_kern.o
always-y += spintest_kern.o
-always-y += map_perf_test_kern.o
-always-y += test_overhead_tp_kern.o
-always-y += test_overhead_raw_tp_kern.o
-always-y += test_overhead_kprobe_kern.o
+always-y += map_perf_test.bpf.o
+always-y += test_overhead_tp.bpf.o
+always-y += test_overhead_raw_tp.bpf.o
+always-y += test_overhead_kprobe.bpf.o
always-y += parse_varlen.o parse_simple.o parse_ldabs.o
-always-y += test_cgrp2_tc_kern.o
+always-y += test_cgrp2_tc.bpf.o
always-y += xdp1_kern.o
always-y += xdp2_kern.o
-always-y += test_current_task_under_cgroup_kern.o
+always-y += test_current_task_under_cgroup.bpf.o
always-y += trace_event_kern.o
always-y += sampleip_kern.o
-always-y += lwt_len_hist_kern.o
+always-y += lwt_len_hist.bpf.o
always-y += xdp_tx_iptunnel_kern.o
-always-y += test_map_in_map_kern.o
+always-y += test_map_in_map.bpf.o
always-y += tcp_synrto_kern.o
always-y += tcp_rwnd_kern.o
always-y += tcp_bufs_kern.o
diff --git a/samples/bpf/gnu/stubs.h b/samples/bpf/gnu/stubs.h
new file mode 100644
index 000000000000..719225b16626
--- /dev/null
+++ b/samples/bpf/gnu/stubs.h
@@ -0,0 +1 @@
+/* dummy .h to trick /usr/include/features.h to work with 'clang -target bpf' */
diff --git a/samples/bpf/lwt_len_hist_kern.c b/samples/bpf/lwt_len_hist.bpf.c
index 1fa14c54963a..dbab80e813fe 100644
--- a/samples/bpf/lwt_len_hist_kern.c
+++ b/samples/bpf/lwt_len_hist.bpf.c
@@ -10,29 +10,16 @@
* General Public License for more details.
*/
-#include <uapi/linux/bpf.h>
-#include <uapi/linux/if_ether.h>
-#include <uapi/linux/ip.h>
-#include <uapi/linux/in.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
-struct bpf_elf_map {
- __u32 type;
- __u32 size_key;
- __u32 size_value;
- __u32 max_elem;
- __u32 flags;
- __u32 id;
- __u32 pinning;
-};
-
-struct bpf_elf_map SEC("maps") lwt_len_hist_map = {
- .type = BPF_MAP_TYPE_PERCPU_HASH,
- .size_key = sizeof(__u64),
- .size_value = sizeof(__u64),
- .pinning = 2,
- .max_elem = 1024,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __type(key, u64);
+ __type(value, u64);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+ __uint(max_entries, 1024);
+} lwt_len_hist_map SEC(".maps");
static unsigned int log2(unsigned int v)
{
diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh
index 0eda9754f50b..7078bfcc4f4d 100755
--- a/samples/bpf/lwt_len_hist.sh
+++ b/samples/bpf/lwt_len_hist.sh
@@ -4,7 +4,7 @@
NS1=lwt_ns1
VETH0=tst_lwt1a
VETH1=tst_lwt1b
-
+BPF_PROG=lwt_len_hist.bpf.o
TRACE_ROOT=/sys/kernel/debug/tracing
function cleanup {
@@ -30,7 +30,7 @@ ip netns exec $NS1 netserver
echo 1 > ${TRACE_ROOT}/tracing_on
cp /dev/null ${TRACE_ROOT}/trace
-ip route add 192.168.253.2/32 encap bpf out obj lwt_len_hist_kern.o section len_hist dev $VETH0
+ip route add 192.168.253.2/32 encap bpf out obj $BPF_PROG section len_hist dev $VETH0
netperf -H 192.168.253.2 -t TCP_STREAM
cat ${TRACE_ROOT}/trace | grep -v '^#'
./lwt_len_hist
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test.bpf.c
index 7342c5b2f278..3cdeba2afe12 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test.bpf.c
@@ -4,14 +4,12 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
+#include "vmlinux.h"
+#include <errno.h>
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
-#include "trace_common.h"
#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024
@@ -102,8 +100,8 @@ struct {
__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");
-SEC("kprobe/" SYSCALL(sys_getuid))
-int stress_hmap(struct pt_regs *ctx)
+SEC("ksyscall/getuid")
+int BPF_KSYSCALL(stress_hmap)
{
u32 key = bpf_get_current_pid_tgid();
long init_val = 1;
@@ -120,8 +118,8 @@ int stress_hmap(struct pt_regs *ctx)
return 0;
}
-SEC("kprobe/" SYSCALL(sys_geteuid))
-int stress_percpu_hmap(struct pt_regs *ctx)
+SEC("ksyscall/geteuid")
+int BPF_KSYSCALL(stress_percpu_hmap)
{
u32 key = bpf_get_current_pid_tgid();
long init_val = 1;
@@ -137,8 +135,8 @@ int stress_percpu_hmap(struct pt_regs *ctx)
return 0;
}
-SEC("kprobe/" SYSCALL(sys_getgid))
-int stress_hmap_alloc(struct pt_regs *ctx)
+SEC("ksyscall/getgid")
+int BPF_KSYSCALL(stress_hmap_alloc)
{
u32 key = bpf_get_current_pid_tgid();
long init_val = 1;
@@ -154,8 +152,8 @@ int stress_hmap_alloc(struct pt_regs *ctx)
return 0;
}
-SEC("kprobe/" SYSCALL(sys_getegid))
-int stress_percpu_hmap_alloc(struct pt_regs *ctx)
+SEC("ksyscall/getegid")
+int BPF_KSYSCALL(stress_percpu_hmap_alloc)
{
u32 key = bpf_get_current_pid_tgid();
long init_val = 1;
@@ -170,11 +168,10 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
}
return 0;
}
-
-SEC("kprobe/" SYSCALL(sys_connect))
-int stress_lru_hmap_alloc(struct pt_regs *ctx)
+SEC("ksyscall/connect")
+int BPF_KSYSCALL(stress_lru_hmap_alloc, int fd, struct sockaddr_in *uservaddr,
+ int addrlen)
{
- struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%dn";
union {
u16 dst6[8];
@@ -187,14 +184,11 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx)
u32 key;
};
} test_params;
- struct sockaddr_in6 *in6;
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)uservaddr;
u16 test_case;
- int addrlen, ret;
long val = 1;
u32 key = 0;
-
- in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
- addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
+ int ret;
if (addrlen != sizeof(*in6))
return 0;
@@ -251,8 +245,8 @@ done:
return 0;
}
-SEC("kprobe/" SYSCALL(sys_gettid))
-int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
+SEC("ksyscall/gettid")
+int BPF_KSYSCALL(stress_lpm_trie_map_alloc)
{
union {
u32 b32[2];
@@ -273,8 +267,8 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
return 0;
}
-SEC("kprobe/" SYSCALL(sys_getpgid))
-int stress_hash_map_lookup(struct pt_regs *ctx)
+SEC("ksyscall/getpgid")
+int BPF_KSYSCALL(stress_hash_map_lookup)
{
u32 key = 1, i;
long *value;
@@ -286,8 +280,8 @@ int stress_hash_map_lookup(struct pt_regs *ctx)
return 0;
}
-SEC("kprobe/" SYSCALL(sys_getppid))
-int stress_array_map_lookup(struct pt_regs *ctx)
+SEC("ksyscall/getppid")
+int BPF_KSYSCALL(stress_array_map_lookup)
{
u32 key = 1, i;
long *value;
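
The SEC("ksyscall/<name>") section plus BPF_KSYSCALL() has libbpf resolve the arch-specific syscall wrapper and unwrap the inner pt_regs, so the program body receives typed syscall arguments instead of chaining PT_REGS_PARM*_CORE() through the wrapper's register set, as the removed lines did. A minimal standalone sketch, assuming vmlinux.h and a CO-RE-capable libbpf:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("ksyscall/connect")
    int BPF_KSYSCALL(trace_connect, int fd, struct sockaddr *uservaddr,
                     int addrlen)
    {
            bpf_printk("connect: fd=%d addrlen=%d", fd, addrlen);
            return 0;
    }

    char _license[] SEC("license") = "GPL";
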
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 1bb53f4b29e1..d2fbcf963cdf 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -443,7 +443,7 @@ int main(int argc, char **argv)
if (argc > 4)
max_cnt = atoi(argv[4]);
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/net_shared.h b/samples/bpf/net_shared.h
new file mode 100644
index 000000000000..e9429af9aa44
--- /dev/null
+++ b/samples/bpf/net_shared.h
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _NET_SHARED_H
+#define _NET_SHARED_H
+
+#define AF_INET 2
+#define AF_INET6 10
+
+#define ETH_ALEN 6
+#define ETH_P_802_3_MIN 0x0600
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+#define ETH_P_ARP 0x0806
+#define IPPROTO_ICMPV6 58
+
+#define TC_ACT_OK 0
+#define TC_ACT_SHOT 2
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define bpf_ntohs(x) __builtin_bswap16(x)
+#define bpf_htons(x) __builtin_bswap16(x)
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define bpf_ntohs(x) (x)
+#define bpf_htons(x) (x)
+#else
+# error "Endianness detection needs to be set up for your compiler?!"
+#endif
+
+#endif
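
net_shared.h lets BPF objects built against vmlinux.h avoid the UAPI headers (which would clash with vmlinux.h's type definitions) by re-declaring the handful of constants the samples need; the byte-order helpers resolve at compile time from the target's endianness macros, so bpf_htons() is a bswap16 on little-endian and the identity on big-endian. A usage sketch, assuming this header:

    #include "vmlinux.h"
    #include "net_shared.h"
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int drop_ipv6(struct __sk_buff *skb)
    {
            /* skb->protocol is big-endian; compare in network byte order */
            if (skb->protocol == bpf_htons(ETH_P_IPV6))
                    return TC_ACT_SHOT;
            return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
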
diff --git a/samples/bpf/sock_flags_kern.c b/samples/bpf/sock_flags.bpf.c
index 6d0ac7569d6f..0da749f6a9e1 100644
--- a/samples/bpf/sock_flags_kern.c
+++ b/samples/bpf/sock_flags.bpf.c
@@ -1,11 +1,9 @@
-#include <uapi/linux/bpf.h>
-#include <linux/socket.h>
-#include <linux/net.h>
-#include <uapi/linux/in.h>
-#include <uapi/linux/in6.h>
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include "net_shared.h"
#include <bpf/bpf_helpers.h>
-SEC("cgroup/sock1")
+SEC("cgroup/sock")
int bpf_prog1(struct bpf_sock *sk)
{
char fmt[] = "socket: family %d type %d protocol %d\n";
@@ -17,29 +15,29 @@ int bpf_prog1(struct bpf_sock *sk)
bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol);
bpf_trace_printk(fmt2, sizeof(fmt2), uid, gid);
- /* block PF_INET6, SOCK_RAW, IPPROTO_ICMPV6 sockets
+ /* block AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6 sockets
	 * i.e., make ping6 fail
*/
- if (sk->family == PF_INET6 &&
- sk->type == SOCK_RAW &&
+ if (sk->family == AF_INET6 &&
+ sk->type == SOCK_DGRAM &&
sk->protocol == IPPROTO_ICMPV6)
return 0;
return 1;
}
-SEC("cgroup/sock2")
+SEC("cgroup/sock")
int bpf_prog2(struct bpf_sock *sk)
{
char fmt[] = "socket: family %d type %d protocol %d\n";
bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol);
- /* block PF_INET, SOCK_RAW, IPPROTO_ICMP sockets
+ /* block AF_INET, SOCK_DGRAM, IPPROTO_ICMP sockets
	 * i.e., make ping fail
*/
- if (sk->family == PF_INET &&
- sk->type == SOCK_RAW &&
+ if (sk->family == AF_INET &&
+ sk->type == SOCK_DGRAM &&
sk->protocol == IPPROTO_ICMP)
return 0;
diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c
index 50231c2eff9c..e7121dd1ee37 100644
--- a/samples/bpf/syscall_tp_kern.c
+++ b/samples/bpf/syscall_tp_kern.c
@@ -58,6 +58,13 @@ int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
return 0;
}
+SEC("tracepoint/syscalls/sys_enter_openat2")
+int trace_enter_open_at2(struct syscalls_enter_open_args *ctx)
+{
+ count(&enter_open_map);
+ return 0;
+}
+
SEC("tracepoint/syscalls/sys_exit_open")
int trace_enter_exit(struct syscalls_exit_open_args *ctx)
{
@@ -71,3 +78,10 @@ int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
count(&exit_open_map);
return 0;
}
+
+SEC("tracepoint/syscalls/sys_exit_openat2")
+int trace_enter_exit_at2(struct syscalls_exit_open_args *ctx)
+{
+ count(&exit_open_map);
+ return 0;
+}
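
openat2(2) has its own tracepoint pair, so the sample now counts it alongside open and openat. The count() helper the new bodies call sits outside this hunk; a plausible sketch of it, assuming a single-slot map with the counter at key 0:

    /* not the verbatim helper -- a sketch of what the hunk relies on */
    static __always_inline void count(void *map)
    {
            u32 key = 0;
            u64 init_val = 1, *value;

            value = bpf_map_lookup_elem(map, &key);
            if (value)
                    __sync_fetch_and_add(value, 1);
            else
                    bpf_map_update_elem(map, &key, &init_val, BPF_NOEXIST);
    }
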
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh
index 37d95ef3c20f..a28a8fc99dbe 100755
--- a/samples/bpf/tc_l2_redirect.sh
+++ b/samples/bpf/tc_l2_redirect.sh
@@ -8,6 +8,7 @@ REDIRECT_USER='./tc_l2_redirect'
REDIRECT_BPF='./tc_l2_redirect_kern.o'
RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter)
+IPV6_DISABLED=$(< /proc/sys/net/ipv6/conf/all/disable_ipv6)
IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding)
function config_common {
@@ -64,6 +65,7 @@ function config_common {
sysctl -q -w net.ipv4.conf.all.rp_filter=0
sysctl -q -w net.ipv6.conf.all.forwarding=1
+ sysctl -q -w net.ipv6.conf.all.disable_ipv6=0
}
function cleanup {
@@ -77,6 +79,7 @@ function cleanup {
$IP link del ip6t >& /dev/null
sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER
sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING
+ sysctl -q -w net.ipv6.conf.all.disable_ipv6=$IPV6_DISABLED
rm -f /sys/fs/bpf/tc/globals/tun_iface
[[ -z $DEBUG ]] || set -x
set -e
diff --git a/samples/bpf/test_cgrp2_sock.sh b/samples/bpf/test_cgrp2_sock.sh
index 9f6174236856..36bd7cb46f06 100755
--- a/samples/bpf/test_cgrp2_sock.sh
+++ b/samples/bpf/test_cgrp2_sock.sh
@@ -3,6 +3,8 @@
# Test various socket options that can be set by attaching programs to cgroups.
+MY_DIR=$(dirname $0)
+TEST=$MY_DIR/test_cgrp2_sock
CGRP_MNT="/tmp/cgroupv2-test_cgrp2_sock"
################################################################################
@@ -19,7 +21,7 @@ print_result()
check_sock()
{
- out=$(test_cgrp2_sock)
+ out=$($TEST)
echo $out | grep -q "$1"
if [ $? -ne 0 ]; then
print_result 1 "IPv4: $2"
@@ -33,7 +35,7 @@ check_sock()
check_sock6()
{
- out=$(test_cgrp2_sock -6)
+ out=$($TEST -6)
echo $out | grep -q "$1"
if [ $? -ne 0 ]; then
print_result 1 "IPv6: $2"
@@ -61,7 +63,7 @@ cleanup_and_exit()
[ -n "$msg" ] && echo "ERROR: $msg"
- test_cgrp2_sock -d ${CGRP_MNT}/sockopts
+ $TEST -d ${CGRP_MNT}/sockopts
ip li del cgrp2_sock
umount ${CGRP_MNT}
@@ -98,7 +100,7 @@ check_sock6 "dev , mark 0, priority 0" "No programs attached"
# verify device is set
#
-test_cgrp2_sock -b cgrp2_sock ${CGRP_MNT}/sockopts
+$TEST -b cgrp2_sock ${CGRP_MNT}/sockopts
if [ $? -ne 0 ]; then
cleanup_and_exit 1 "Failed to install program to set device"
fi
@@ -107,7 +109,7 @@ check_sock6 "dev cgrp2_sock, mark 0, priority 0" "Device set"
# verify mark is set
#
-test_cgrp2_sock -m 666 ${CGRP_MNT}/sockopts
+$TEST -m 666 ${CGRP_MNT}/sockopts
if [ $? -ne 0 ]; then
cleanup_and_exit 1 "Failed to install program to set mark"
fi
@@ -116,7 +118,7 @@ check_sock6 "dev , mark 666, priority 0" "Mark set"
# verify priority is set
#
-test_cgrp2_sock -p 123 ${CGRP_MNT}/sockopts
+$TEST -p 123 ${CGRP_MNT}/sockopts
if [ $? -ne 0 ]; then
cleanup_and_exit 1 "Failed to install program to set priority"
fi
@@ -125,7 +127,7 @@ check_sock6 "dev , mark 0, priority 123" "Priority set"
# all 3 at once
#
-test_cgrp2_sock -b cgrp2_sock -m 666 -p 123 ${CGRP_MNT}/sockopts
+$TEST -b cgrp2_sock -m 666 -p 123 ${CGRP_MNT}/sockopts
if [ $? -ne 0 ]; then
cleanup_and_exit 1 "Failed to install program to set device, mark and priority"
fi
diff --git a/samples/bpf/test_cgrp2_sock2.sh b/samples/bpf/test_cgrp2_sock2.sh
index 6a3dbe642b2b..82acff93d739 100755
--- a/samples/bpf/test_cgrp2_sock2.sh
+++ b/samples/bpf/test_cgrp2_sock2.sh
@@ -2,18 +2,23 @@
# SPDX-License-Identifier: GPL-2.0
BPFFS=/sys/fs/bpf
+MY_DIR=$(dirname $0)
+TEST=$MY_DIR/test_cgrp2_sock2
LINK_PIN=$BPFFS/test_cgrp2_sock2
+BPF_PROG=$MY_DIR/sock_flags.bpf.o
function config_device {
ip netns add at_ns0
ip link add veth0 type veth peer name veth0b
- ip link set veth0b up
ip link set veth0 netns at_ns0
+ ip netns exec at_ns0 sysctl -q net.ipv6.conf.veth0.disable_ipv6=0
ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
ip netns exec at_ns0 ip addr add 2401:db00::1/64 dev veth0 nodad
ip netns exec at_ns0 ip link set dev veth0 up
+ sysctl -q net.ipv6.conf.veth0b.disable_ipv6=0
ip addr add 172.16.1.101/24 dev veth0b
ip addr add 2401:db00::2/64 dev veth0b nodad
+ ip link set veth0b up
}
function config_cgroup {
@@ -34,7 +39,7 @@ function config_bpffs {
}
function attach_bpf {
- ./test_cgrp2_sock2 /tmp/cgroupv2/foo sock_flags_kern.o $1
+ $TEST /tmp/cgroupv2/foo $BPF_PROG $1
[ $? -ne 0 ] && exit 1
}
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc.bpf.c
index 4dd532a312b9..c7d2291d676f 100644
--- a/samples/bpf/test_cgrp2_tc_kern.c
+++ b/samples/bpf/test_cgrp2_tc.bpf.c
@@ -5,11 +5,8 @@
* License as published by the Free Software Foundation.
*/
#define KBUILD_MODNAME "foo"
-#include <uapi/linux/if_ether.h>
-#include <uapi/linux/in6.h>
-#include <uapi/linux/ipv6.h>
-#include <uapi/linux/pkt_cls.h>
-#include <uapi/linux/bpf.h>
+#include "vmlinux.h"
+#include "net_shared.h"
#include <bpf/bpf_helpers.h>
/* copy of 'struct ethhdr' without __packed */
@@ -19,24 +16,13 @@ struct eth_hdr {
unsigned short h_proto;
};
-#define PIN_GLOBAL_NS 2
-struct bpf_elf_map {
- __u32 type;
- __u32 size_key;
- __u32 size_value;
- __u32 max_elem;
- __u32 flags;
- __u32 id;
- __u32 pinning;
-};
-
-struct bpf_elf_map SEC("maps") test_cgrp2_array_pin = {
- .type = BPF_MAP_TYPE_CGROUP_ARRAY,
- .size_key = sizeof(uint32_t),
- .size_value = sizeof(uint32_t),
- .pinning = PIN_GLOBAL_NS,
- .max_elem = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+ __uint(max_entries, 1);
+} test_cgrp2_array_pin SEC(".maps");
SEC("filter")
int handle_egress(struct __sk_buff *skb)
@@ -53,7 +39,7 @@ int handle_egress(struct __sk_buff *skb)
if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
return TC_ACT_OK;
- if (eth->h_proto != htons(ETH_P_IPV6) ||
+ if (eth->h_proto != bpf_htons(ETH_P_IPV6) ||
ip6h->nexthdr != IPPROTO_ICMPV6) {
bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg),
eth->h_proto, ip6h->nexthdr);
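
The map conversion above replaces iproute2's loader-specific struct bpf_elf_map (and its PIN_GLOBAL_NS pinning) with a BTF-defined map pinned by name. User space can then reopen the pinned object by path; a minimal sketch, assuming tc's default pin root for by-name pinning (the exact path depends on the loader):

	#include <bpf/bpf.h>

	int open_pinned_cgroup_array(void)
	{
		/* Hypothetical path: LIBBPF_PIN_BY_NAME pins under the
		 * loader's pin root, /sys/fs/bpf/tc/globals for tc.
		 */
		return bpf_obj_get("/sys/fs/bpf/tc/globals/test_cgrp2_array_pin");
	}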
diff --git a/samples/bpf/test_cgrp2_tc.sh b/samples/bpf/test_cgrp2_tc.sh
index 395573be6ae8..38e8dbc9d16e 100755
--- a/samples/bpf/test_cgrp2_tc.sh
+++ b/samples/bpf/test_cgrp2_tc.sh
@@ -4,7 +4,7 @@
MY_DIR=$(dirname $0)
# Details on the bpf prog
BPF_CGRP2_ARRAY_NAME='test_cgrp2_array_pin'
-BPF_PROG="$MY_DIR/test_cgrp2_tc_kern.o"
+BPF_PROG="$MY_DIR/test_cgrp2_tc.bpf.o"
BPF_SECTION='filter'
[ -z "$TC" ] && TC='tc'
@@ -73,11 +73,13 @@ setup_net() {
start)
$IP link add $HOST_IFC type veth peer name $NS_IFC || return $?
$IP link set dev $HOST_IFC up || return $?
+ sysctl -q net.ipv6.conf.$HOST_IFC.disable_ipv6=0
sysctl -q net.ipv6.conf.$HOST_IFC.accept_dad=0
- $IP netns add ns || return $?
- $IP link set dev $NS_IFC netns ns || return $?
+ $IP netns add $NS || return $?
+ $IP link set dev $NS_IFC netns $NS || return $?
$IP -n $NS link set dev $NS_IFC up || return $?
+ $IP netns exec $NS sysctl -q net.ipv6.conf.$NS_IFC.disable_ipv6=0
$IP netns exec $NS sysctl -q net.ipv6.conf.$NS_IFC.accept_dad=0
$TC qdisc add dev $HOST_IFC clsact || return $?
$TC filter add dev $HOST_IFC egress bpf da obj $BPF_PROG sec $BPF_SECTION || return $?
diff --git a/samples/bpf/test_current_task_under_cgroup_kern.c b/samples/bpf/test_current_task_under_cgroup.bpf.c
index fbd43e2bb4d3..58b9cf7ed659 100644
--- a/samples/bpf/test_current_task_under_cgroup_kern.c
+++ b/samples/bpf/test_current_task_under_cgroup.bpf.c
@@ -5,12 +5,11 @@
* License as published by the Free Software Foundation.
*/
-#include <linux/ptrace.h>
-#include <uapi/linux/bpf.h>
+#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
-#include <uapi/linux/utsname.h>
-#include "trace_common.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
@@ -27,8 +26,8 @@ struct {
} perf_map SEC(".maps");
/* Writes the last PID that called sync to a map at index 0 */
-SEC("kprobe/" SYSCALL(sys_sync))
-int bpf_prog1(struct pt_regs *ctx)
+SEC("ksyscall/sync")
+int BPF_KSYSCALL(bpf_prog1)
{
u64 pid = bpf_get_current_pid_tgid();
int idx = 0;
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
index ac251a417f45..9726ed2a8a8b 100644
--- a/samples/bpf/test_current_task_under_cgroup_user.c
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -14,14 +14,14 @@
int main(int argc, char **argv)
{
pid_t remote_pid, local_pid = getpid();
+ int cg2 = -1, idx = 0, rc = 1;
struct bpf_link *link = NULL;
struct bpf_program *prog;
- int cg2, idx = 0, rc = 1;
struct bpf_object *obj;
char filename[256];
int map_fd[2];
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
@@ -103,7 +103,9 @@ int main(int argc, char **argv)
rc = 0;
err:
- close(cg2);
+ if (cg2 != -1)
+ close(cg2);
+
cleanup_cgroup_environment();
cleanup:
diff --git a/samples/bpf/test_lru_dist.c b/samples/bpf/test_lru_dist.c
index 5efb91763d65..1c161276d57b 100644
--- a/samples/bpf/test_lru_dist.c
+++ b/samples/bpf/test_lru_dist.c
@@ -42,11 +42,6 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
list->prev = list;
}
-static inline int list_empty(const struct list_head *head)
-{
- return head->next == head;
-}
-
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
diff --git a/samples/bpf/test_lwt_bpf.c b/samples/bpf/test_lwt_bpf.c
index 1b568575ad11..9a13dbb81847 100644
--- a/samples/bpf/test_lwt_bpf.c
+++ b/samples/bpf/test_lwt_bpf.c
@@ -10,16 +10,8 @@
* General Public License for more details.
*/
-#include <stdint.h>
-#include <stddef.h>
-#include <linux/bpf.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/icmpv6.h>
-#include <linux/if_ether.h>
+#include "vmlinux.h"
+#include "net_shared.h"
#include <bpf/bpf_helpers.h>
#include <string.h>
@@ -44,9 +36,9 @@ SEC("test_ctx")
int do_test_ctx(struct __sk_buff *skb)
{
skb->cb[0] = CB_MAGIC;
- printk("len %d hash %d protocol %d\n", skb->len, skb->hash,
+ printk("len %d hash %d protocol %d", skb->len, skb->hash,
skb->protocol);
- printk("cb %d ingress_ifindex %d ifindex %d\n", skb->cb[0],
+ printk("cb %d ingress_ifindex %d ifindex %d", skb->cb[0],
skb->ingress_ifindex, skb->ifindex);
return BPF_OK;
@@ -56,9 +48,9 @@ int do_test_ctx(struct __sk_buff *skb)
SEC("test_cb")
int do_test_cb(struct __sk_buff *skb)
{
- printk("cb0: %x cb1: %x cb2: %x\n", skb->cb[0], skb->cb[1],
+ printk("cb0: %x cb1: %x cb2: %x", skb->cb[0], skb->cb[1],
skb->cb[2]);
- printk("cb3: %x cb4: %x\n", skb->cb[3], skb->cb[4]);
+ printk("cb3: %x cb4: %x", skb->cb[3], skb->cb[4]);
return BPF_OK;
}
@@ -72,11 +64,11 @@ int do_test_data(struct __sk_buff *skb)
struct iphdr *iph = data;
if (data + sizeof(*iph) > data_end) {
- printk("packet truncated\n");
+ printk("packet truncated");
return BPF_DROP;
}
- printk("src: %x dst: %x\n", iph->saddr, iph->daddr);
+ printk("src: %x dst: %x", iph->saddr, iph->daddr);
return BPF_OK;
}
@@ -97,7 +89,7 @@ static inline int rewrite(struct __sk_buff *skb, uint32_t old_ip,
ret = bpf_skb_load_bytes(skb, IP_PROTO_OFF, &proto, 1);
if (ret < 0) {
- printk("bpf_l4_csum_replace failed: %d\n", ret);
+ printk("bpf_l4_csum_replace failed: %d", ret);
return BPF_DROP;
}
@@ -120,14 +112,14 @@ static inline int rewrite(struct __sk_buff *skb, uint32_t old_ip,
ret = bpf_l4_csum_replace(skb, off, old_ip, new_ip,
flags | sizeof(new_ip));
if (ret < 0) {
- printk("bpf_l4_csum_replace failed: %d\n");
+ printk("bpf_l4_csum_replace failed: %d");
return BPF_DROP;
}
}
ret = bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
if (ret < 0) {
- printk("bpf_l3_csum_replace failed: %d\n", ret);
+ printk("bpf_l3_csum_replace failed: %d", ret);
return BPF_DROP;
}
@@ -137,7 +129,7 @@ static inline int rewrite(struct __sk_buff *skb, uint32_t old_ip,
ret = bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
if (ret < 0) {
- printk("bpf_skb_store_bytes() failed: %d\n", ret);
+ printk("bpf_skb_store_bytes() failed: %d", ret);
return BPF_DROP;
}
@@ -153,12 +145,12 @@ int do_test_rewrite(struct __sk_buff *skb)
ret = bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4);
if (ret < 0) {
- printk("bpf_skb_load_bytes failed: %d\n", ret);
+ printk("bpf_skb_load_bytes failed: %d", ret);
return BPF_DROP;
}
if (old_ip == 0x2fea8c0) {
- printk("out: rewriting from %x to %x\n", old_ip, new_ip);
+ printk("out: rewriting from %x to %x", old_ip, new_ip);
return rewrite(skb, old_ip, new_ip, 1);
}
@@ -173,16 +165,16 @@ static inline int __do_push_ll_and_redirect(struct __sk_buff *skb)
ret = bpf_skb_change_head(skb, 14, 0);
if (ret < 0) {
- printk("skb_change_head() failed: %d\n", ret);
+ printk("skb_change_head() failed: %d", ret);
}
- ehdr.h_proto = __constant_htons(ETH_P_IP);
+ ehdr.h_proto = bpf_htons(ETH_P_IP);
memcpy(&ehdr.h_source, &smac, 6);
memcpy(&ehdr.h_dest, &dmac, 6);
ret = bpf_skb_store_bytes(skb, 0, &ehdr, sizeof(ehdr), 0);
if (ret < 0) {
- printk("skb_store_bytes() failed: %d\n", ret);
+ printk("skb_store_bytes() failed: %d", ret);
return BPF_DROP;
}
@@ -202,7 +194,7 @@ int do_push_ll_and_redirect(struct __sk_buff *skb)
ret = __do_push_ll_and_redirect(skb);
if (ret >= 0)
- printk("redirected to %d\n", ifindex);
+ printk("redirected to %d", ifindex);
return ret;
}
@@ -229,7 +221,7 @@ SEC("fill_garbage")
int do_fill_garbage(struct __sk_buff *skb)
{
__fill_garbage(skb);
- printk("Set initial 96 bytes of header to FF\n");
+ printk("Set initial 96 bytes of header to FF");
return BPF_OK;
}
@@ -238,7 +230,7 @@ int do_fill_garbage_and_redirect(struct __sk_buff *skb)
{
int ifindex = DST_IFINDEX;
__fill_garbage(skb);
- printk("redirected to %d\n", ifindex);
+ printk("redirected to %d", ifindex);
return bpf_redirect(ifindex, 0);
}
@@ -246,7 +238,7 @@ int do_fill_garbage_and_redirect(struct __sk_buff *skb)
SEC("drop_all")
int do_drop_all(struct __sk_buff *skb)
{
- printk("dropping with: %d\n", BPF_DROP);
+ printk("dropping with: %d", BPF_DROP);
return BPF_DROP;
}
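
The trailing "\n" removals throughout this file go hand in hand with the trace-filtering change in the companion script below: each bpf_trace_printk record already terminates its line in the trace buffer, so explicit newlines produced blank lines in the parsed output. For reference, printk() in these samples is conventionally a thin macro over bpf_trace_printk, roughly (the sample's own definition may differ slightly):

	#define printk(fmt, ...)					\
	({								\
		char ____fmt[] = fmt;					\
		bpf_trace_printk(____fmt, sizeof(____fmt),		\
				 ##__VA_ARGS__);			\
	})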
diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh
index 65a976058dd3..2e9f5126963b 100755
--- a/samples/bpf/test_lwt_bpf.sh
+++ b/samples/bpf/test_lwt_bpf.sh
@@ -19,7 +19,10 @@ IPVETH3="192.168.111.2"
IP_LOCAL="192.168.99.1"
+PROG_SRC="test_lwt_bpf.c"
+BPF_PROG="test_lwt_bpf.o"
TRACE_ROOT=/sys/kernel/debug/tracing
+CONTEXT_INFO=$(cat ${TRACE_ROOT}/trace_options | grep context)
function lookup_mac()
{
@@ -36,7 +39,7 @@ function lookup_mac()
function cleanup {
set +ex
- rm test_lwt_bpf.o 2> /dev/null
+ rm $BPF_PROG 2> /dev/null
ip link del $VETH0 2> /dev/null
ip link del $VETH1 2> /dev/null
ip link del $VETH2 2> /dev/null
@@ -76,7 +79,7 @@ function install_test {
cleanup_routes
cp /dev/null ${TRACE_ROOT}/trace
- OPTS="encap bpf headroom 14 $1 obj test_lwt_bpf.o section $2 $VERBOSE"
+ OPTS="encap bpf headroom 14 $1 obj $BPF_PROG section $2 $VERBOSE"
if [ "$1" == "in" ]; then
ip route add table local local ${IP_LOCAL}/32 $OPTS dev lo
@@ -96,7 +99,7 @@ function remove_prog {
function filter_trace {
# Add newline to allow starting EXPECT= variables on newline
NL=$'\n'
- echo "${NL}$*" | sed -e 's/^.*: : //g'
+ echo "${NL}$*" | sed -e 's/bpf_trace_printk: //g'
}
function expect_fail {
@@ -160,11 +163,11 @@ function test_ctx_out {
failure "test_ctx out: packets are dropped"
}
match_trace "$(get_trace)" "
-len 84 hash 0 protocol 0
+len 84 hash 0 protocol 8
cb 1234 ingress_ifindex 0 ifindex 0
-len 84 hash 0 protocol 0
+len 84 hash 0 protocol 8
cb 1234 ingress_ifindex 0 ifindex 0
-len 84 hash 0 protocol 0
+len 84 hash 0 protocol 8
cb 1234 ingress_ifindex 0 ifindex 0" || exit 1
remove_prog out
}
@@ -367,6 +370,7 @@ setup_one_veth $NS1 $VETH0 $VETH1 $IPVETH0 $IPVETH1 $IPVETH1b
setup_one_veth $NS2 $VETH2 $VETH3 $IPVETH2 $IPVETH3
ip netns exec $NS1 netserver
echo 1 > ${TRACE_ROOT}/tracing_on
+echo nocontext-info > ${TRACE_ROOT}/trace_options
DST_MAC=$(lookup_mac $VETH1 $NS1)
SRC_MAC=$(lookup_mac $VETH0)
@@ -374,7 +378,7 @@ DST_IFINDEX=$(cat /sys/class/net/$VETH0/ifindex)
CLANG_OPTS="-O2 -target bpf -I ../include/"
CLANG_OPTS+=" -DSRC_MAC=$SRC_MAC -DDST_MAC=$DST_MAC -DDST_IFINDEX=$DST_IFINDEX"
-clang $CLANG_OPTS -c test_lwt_bpf.c -o test_lwt_bpf.o
+clang $CLANG_OPTS -c $PROG_SRC -o $BPF_PROG
test_ctx_xmit
test_ctx_out
@@ -397,4 +401,5 @@ test_netperf_redirect
cleanup
echo 0 > ${TRACE_ROOT}/tracing_on
+echo $CONTEXT_INFO > ${TRACE_ROOT}/trace_options
exit 0
diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map.bpf.c
index b0200c8eac09..1883559e5977 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map.bpf.c
@@ -6,17 +6,17 @@
* License as published by the Free Software Foundation.
*/
#define KBUILD_MODNAME "foo"
-#include <linux/ptrace.h>
+#include "vmlinux.h"
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
-#include <uapi/linux/in6.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
-#include "trace_common.h"
#define MAX_NR_PORTS 65536
+#define EINVAL 22
+#define ENOENT 2
+
/* map #0 */
struct inner_a {
__uint(type, BPF_MAP_TYPE_ARRAY);
diff --git a/samples/bpf/test_map_in_map_user.c b/samples/bpf/test_map_in_map_user.c
index 652ec720533d..55dca43f3723 100644
--- a/samples/bpf/test_map_in_map_user.c
+++ b/samples/bpf/test_map_in_map_user.c
@@ -38,7 +38,7 @@ static void check_map_id(int inner_map_fd, int map_in_map_fd, uint32_t key)
uint32_t info_len = sizeof(info);
int ret, id;
- ret = bpf_obj_get_info_by_fd(inner_map_fd, &info, &info_len);
+ ret = bpf_map_get_info_by_fd(inner_map_fd, &info, &info_len);
assert(!ret);
ret = bpf_map_lookup_elem(map_in_map_fd, &key, &id);
@@ -120,7 +120,7 @@ int main(int argc, char **argv)
struct bpf_object *obj;
char filename[256];
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/test_overhead_kprobe_kern.c b/samples/bpf/test_overhead_kprobe.bpf.c
index 8fdd2c9c56b2..c3528731e0e1 100644
--- a/samples/bpf/test_overhead_kprobe_kern.c
+++ b/samples/bpf/test_overhead_kprobe.bpf.c
@@ -4,10 +4,8 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#include "vmlinux.h"
#include <linux/version.h>
-#include <linux/ptrace.h>
-#include <linux/sched.h>
-#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -39,7 +37,7 @@ int prog(struct pt_regs *ctx)
return 0;
}
-SEC("kprobe/urandom_read")
+SEC("kprobe/fib_table_lookup")
int prog2(struct pt_regs *ctx)
{
return 0;
diff --git a/samples/bpf/test_overhead_raw_tp_kern.c b/samples/bpf/test_overhead_raw_tp.bpf.c
index 8763181a32f3..6af39fe3f8dd 100644
--- a/samples/bpf/test_overhead_raw_tp_kern.c
+++ b/samples/bpf/test_overhead_raw_tp.bpf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
-#include <uapi/linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
SEC("raw_tracepoint/task_rename")
@@ -9,7 +9,7 @@ int prog(struct bpf_raw_tracepoint_args *ctx)
return 0;
}
-SEC("raw_tracepoint/urandom_read")
+SEC("raw_tracepoint/fib_table_lookup")
int prog2(struct bpf_raw_tracepoint_args *ctx)
{
return 0;
diff --git a/samples/bpf/test_overhead_tp_kern.c b/samples/bpf/test_overhead_tp.bpf.c
index 80edadacb692..67cab3881969 100644
--- a/samples/bpf/test_overhead_tp_kern.c
+++ b/samples/bpf/test_overhead_tp.bpf.c
@@ -4,8 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/sched.h>
-#include <uapi/linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
/* from /sys/kernel/debug/tracing/events/task/task_rename/format */
@@ -22,15 +21,27 @@ int prog(struct task_rename *ctx)
return 0;
}
-/* from /sys/kernel/debug/tracing/events/random/urandom_read/format */
-struct urandom_read {
+/* from /sys/kernel/debug/tracing/events/fib/fib_table_lookup/format */
+struct fib_table_lookup {
__u64 pad;
- int got_bits;
- int pool_left;
- int input_left;
+ __u32 tb_id;
+ int err;
+ int oif;
+ int iif;
+ __u8 proto;
+ __u8 tos;
+ __u8 scope;
+ __u8 flags;
+ __u8 src[4];
+ __u8 dst[4];
+ __u8 gw4[4];
+ __u8 gw6[16];
+ __u16 sport;
+ __u16 dport;
+ char name[16];
};
-SEC("tracepoint/random/urandom_read")
-int prog2(struct urandom_read *ctx)
+SEC("tracepoint/fib/fib_table_lookup")
+int prog2(struct fib_table_lookup *ctx)
{
return 0;
}
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
index 88717f8ec6ac..dbd86f7b1473 100644
--- a/samples/bpf/test_overhead_user.c
+++ b/samples/bpf/test_overhead_user.c
@@ -11,6 +11,8 @@
#include <unistd.h>
#include <assert.h>
#include <sys/wait.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
#include <stdlib.h>
#include <signal.h>
#include <linux/bpf.h>
@@ -20,6 +22,8 @@
#include <bpf/libbpf.h>
#define MAX_CNT 1000000
+#define DUMMY_IP "127.0.0.1"
+#define DUMMY_PORT 80
static struct bpf_link *links[2];
static struct bpf_object *obj;
@@ -35,8 +39,8 @@ static __u64 time_get_ns(void)
static void test_task_rename(int cpu)
{
- __u64 start_time;
char buf[] = "test\n";
+ __u64 start_time;
int i, fd;
fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
@@ -57,26 +61,32 @@ static void test_task_rename(int cpu)
close(fd);
}
-static void test_urandom_read(int cpu)
+static void test_fib_table_lookup(int cpu)
{
+ struct sockaddr_in addr;
+ char buf[] = "test\n";
__u64 start_time;
- char buf[4];
int i, fd;
- fd = open("/dev/urandom", O_RDONLY);
+ fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (fd < 0) {
- printf("couldn't open /dev/urandom\n");
+ printf("couldn't open socket\n");
exit(1);
}
+ memset((char *)&addr, 0, sizeof(addr));
+ addr.sin_addr.s_addr = inet_addr(DUMMY_IP);
+ addr.sin_port = htons(DUMMY_PORT);
+ addr.sin_family = AF_INET;
start_time = time_get_ns();
for (i = 0; i < MAX_CNT; i++) {
- if (read(fd, buf, sizeof(buf)) < 0) {
- printf("failed to read from /dev/urandom: %s\n", strerror(errno));
+ if (sendto(fd, buf, strlen(buf), 0,
+ (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+ printf("failed to start ping: %s\n", strerror(errno));
close(fd);
return;
}
}
- printf("urandom_read:%d: %lld events per sec\n",
+ printf("fib_table_lookup:%d: %lld events per sec\n",
cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
close(fd);
}
@@ -92,7 +102,7 @@ static void loop(int cpu, int flags)
if (flags & 1)
test_task_rename(cpu);
if (flags & 2)
- test_urandom_read(cpu);
+ test_fib_table_lookup(cpu);
}
static void run_perf_test(int tasks, int flags)
@@ -179,7 +189,7 @@ int main(int argc, char **argv)
if (test_flags & 0xC) {
snprintf(filename, sizeof(filename),
- "%s_kprobe_kern.o", argv[0]);
+ "%s_kprobe.bpf.o", argv[0]);
printf("w/KPROBE\n");
err = load_progs(filename);
@@ -191,7 +201,7 @@ int main(int argc, char **argv)
if (test_flags & 0x30) {
snprintf(filename, sizeof(filename),
- "%s_tp_kern.o", argv[0]);
+ "%s_tp.bpf.o", argv[0]);
printf("w/TRACEPOINT\n");
err = load_progs(filename);
if (!err)
@@ -202,7 +212,7 @@ int main(int argc, char **argv)
if (test_flags & 0xC0) {
snprintf(filename, sizeof(filename),
- "%s_raw_tp_kern.o", argv[0]);
+ "%s_raw_tp.bpf.o", argv[0]);
printf("w/RAW_TRACEPOINT\n");
err = load_progs(filename);
if (!err)
diff --git a/samples/bpf/test_probe_write_user_kern.c b/samples/bpf/test_probe_write_user.bpf.c
index 220a96438d75..a4f3798b7fb0 100644
--- a/samples/bpf/test_probe_write_user_kern.c
+++ b/samples/bpf/test_probe_write_user.bpf.c
@@ -4,14 +4,12 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <uapi/linux/bpf.h>
+#include "vmlinux.h"
+#include <string.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
-#include "trace_common.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
@@ -28,25 +26,23 @@ struct {
 * This example sits on a syscall; while the syscall ABI is relatively
 * stable across platforms, it may still change over time.
*/
-SEC("kprobe/" SYSCALL(sys_connect))
-int bpf_prog1(struct pt_regs *ctx)
+SEC("ksyscall/connect")
+int BPF_KSYSCALL(bpf_prog1, int fd, struct sockaddr_in *uservaddr,
+ int addrlen)
{
- struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
- void *sockaddr_arg = (void *)PT_REGS_PARM2_CORE(real_regs);
- int sockaddr_len = (int)PT_REGS_PARM3_CORE(real_regs);
struct sockaddr_in new_addr, orig_addr = {};
struct sockaddr_in *mapped_addr;
- if (sockaddr_len > sizeof(orig_addr))
+ if (addrlen > sizeof(orig_addr))
return 0;
- if (bpf_probe_read_user(&orig_addr, sizeof(orig_addr), sockaddr_arg) != 0)
+ if (bpf_probe_read_user(&orig_addr, sizeof(orig_addr), uservaddr) != 0)
return 0;
mapped_addr = bpf_map_lookup_elem(&dnat_map, &orig_addr);
if (mapped_addr != NULL) {
memcpy(&new_addr, mapped_addr, sizeof(new_addr));
- bpf_probe_write_user(sockaddr_arg, &new_addr,
+ bpf_probe_write_user(uservaddr, &new_addr,
sizeof(new_addr));
}
return 0;
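
BPF_KSYSCALL above is a libbpf convenience macro: it hides the architecture-specific syscall-wrapper indirection that the removed PT_REGS_PARM*_CORE code performed by hand, and lets the program name its syscall arguments directly. A matching user-space attach, sketched with libbpf's ksyscall API (error handling abbreviated; the program name is taken from this sample):

	#include <bpf/libbpf.h>

	static int attach_connect_probe(struct bpf_object *obj)
	{
		struct bpf_program *prog;
		struct bpf_link *link;

		prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
		if (!prog)
			return -1;

		/* libbpf resolves the per-arch __<arch>_sys_connect symbol. */
		link = bpf_program__attach_ksyscall(prog, "connect", NULL);
		return libbpf_get_error(link) ? -1 : 0;
	}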
diff --git a/samples/bpf/test_probe_write_user_user.c b/samples/bpf/test_probe_write_user_user.c
index 00ccfb834e45..2a539aec4116 100644
--- a/samples/bpf/test_probe_write_user_user.c
+++ b/samples/bpf/test_probe_write_user_user.c
@@ -24,7 +24,7 @@ int main(int ac, char **argv)
mapped_addr_in = (struct sockaddr_in *)&mapped_addr;
tmp_addr_in = (struct sockaddr_in *)&tmp_addr;
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/trace_common.h b/samples/bpf/trace_common.h
deleted file mode 100644
index 8cb5400aed1f..000000000000
--- a/samples/bpf/trace_common.h
+++ /dev/null
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef __TRACE_COMMON_H
-#define __TRACE_COMMON_H
-
-#ifdef __x86_64__
-#define SYSCALL(SYS) "__x64_" __stringify(SYS)
-#elif defined(__s390x__)
-#define SYSCALL(SYS) "__s390x_" __stringify(SYS)
-#else
-#define SYSCALL(SYS) __stringify(SYS)
-#endif
-
-#endif
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output.bpf.c
index b64815af0943..565a73b51b04 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output.bpf.c
@@ -1,8 +1,6 @@
-#include <linux/ptrace.h>
+#include "vmlinux.h"
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "trace_common.h"
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -11,7 +9,7 @@ struct {
__uint(max_entries, 2);
} my_map SEC(".maps");
-SEC("kprobe/" SYSCALL(sys_write))
+SEC("ksyscall/write")
int bpf_prog1(struct pt_regs *ctx)
{
struct S {
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
index 371732f9cf8e..d316fd2c8e24 100644
--- a/samples/bpf/trace_output_user.c
+++ b/samples/bpf/trace_output_user.c
@@ -51,7 +51,7 @@ int main(int argc, char **argv)
char filename[256];
FILE *f;
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2.bpf.c
index 93e0b7680b4f..0a5c75b367be 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2.bpf.c
@@ -4,13 +4,11 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
+#include "vmlinux.h"
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "trace_common.h"
+#include <bpf/bpf_core_read.h>
struct {
__uint(type, BPF_MAP_TYPE_HASH);
@@ -78,15 +76,14 @@ struct {
__uint(max_entries, 1024);
} my_hist_map SEC(".maps");
-SEC("kprobe/" SYSCALL(sys_write))
-int bpf_prog3(struct pt_regs *ctx)
+SEC("ksyscall/write")
+int BPF_KSYSCALL(bpf_prog3, unsigned int fd, const char *buf, size_t count)
{
- long write_size = PT_REGS_PARM3(ctx);
long init_val = 1;
long *value;
struct hist_key key;
- key.index = log2l(write_size);
+ key.index = log2l(count);
key.pid_tgid = bpf_get_current_pid_tgid();
key.uid_gid = bpf_get_current_uid_gid();
bpf_get_current_comm(&key.comm, sizeof(key.comm));
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
index 089e408abd7a..2131f1648cf1 100644
--- a/samples/bpf/tracex2_user.c
+++ b/samples/bpf/tracex2_user.c
@@ -123,7 +123,7 @@ int main(int ac, char **argv)
int i, j = 0;
FILE *f;
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/tracex4_user.c b/samples/bpf/tracex4_user.c
index 227b05a0bc88..dee8f0a091ba 100644
--- a/samples/bpf/tracex4_user.c
+++ b/samples/bpf/tracex4_user.c
@@ -51,7 +51,7 @@ int main(int ac, char **argv)
struct bpf_program *prog;
struct bpf_object *obj;
char filename[256];
- int map_fd, i, j = 0;
+ int map_fd, j = 0;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
@@ -82,7 +82,7 @@ int main(int ac, char **argv)
j++;
}
- for (i = 0; ; i++) {
+ while (1) {
print_old_objects(map_fd);
sleep(1);
}
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index 281dc964de8d..f05e797013e9 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -153,7 +153,7 @@ int main(int argc, char **argv)
return 1;
}
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err) {
printf("can't get prog info - %s\n", strerror(errno));
return err;
diff --git a/samples/bpf/xdp_adjust_tail_user.c b/samples/bpf/xdp_adjust_tail_user.c
index 167646077c8f..e9426bd65420 100644
--- a/samples/bpf/xdp_adjust_tail_user.c
+++ b/samples/bpf/xdp_adjust_tail_user.c
@@ -184,7 +184,7 @@ int main(int argc, char **argv)
return 1;
}
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err) {
printf("can't get prog info - %s\n", strerror(errno));
return 1;
diff --git a/samples/bpf/xdp_fwd_user.c b/samples/bpf/xdp_fwd_user.c
index 84f57f1209ce..193b3b79b31f 100644
--- a/samples/bpf/xdp_fwd_user.c
+++ b/samples/bpf/xdp_fwd_user.c
@@ -76,9 +76,9 @@ static int do_detach(int ifindex, const char *ifname, const char *app_name)
return prog_fd;
}
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
if (err) {
- printf("ERROR: bpf_obj_get_info_by_fd failed (%s)\n",
+ printf("ERROR: bpf_prog_get_info_by_fd failed (%s)\n",
strerror(errno));
goto close_out;
}
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index a12381c37d2b..e1458405e2ba 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -494,9 +494,9 @@ int main(int argc, char **argv)
goto end_cpu;
}
- ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.cpu_map), &info, &infosz);
+ ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.cpu_map), &info, &infosz);
if (ret < 0) {
- fprintf(stderr, "Failed bpf_obj_get_info_by_fd for cpumap: %s\n",
+ fprintf(stderr, "Failed bpf_map_get_info_by_fd for cpumap: %s\n",
strerror(errno));
goto end_cpu;
}
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
index 08f5331d2b00..b95e0ef61f06 100644
--- a/samples/bpf/xdp_rxq_info_user.c
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -602,7 +602,7 @@ int main(int argc, char **argv)
return EXIT_FAIL_XDP;
}
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err) {
printf("can't get prog info - %s\n", strerror(errno));
return err;
diff --git a/samples/bpf/xdp_sample.bpf.h b/samples/bpf/xdp_sample.bpf.h
index 25b1dbe9b37b..fecc41c5df04 100644
--- a/samples/bpf/xdp_sample.bpf.h
+++ b/samples/bpf/xdp_sample.bpf.h
@@ -7,17 +7,9 @@
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
+#include "net_shared.h"
#include "xdp_sample_shared.h"
-#define ETH_ALEN 6
-#define ETH_P_802_3_MIN 0x0600
-#define ETH_P_8021Q 0x8100
-#define ETH_P_8021AD 0x88A8
-#define ETH_P_IP 0x0800
-#define ETH_P_IPV6 0x86DD
-#define ETH_P_ARP 0x0806
-#define IPPROTO_ICMPV6 58
-
#define EINVAL 22
#define ENETDOWN 100
#define EMSGSIZE 90
@@ -55,18 +47,6 @@ static __always_inline void swap_src_dst_mac(void *data)
p[5] = dst[2];
}
-#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
- __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define bpf_ntohs(x) __builtin_bswap16(x)
-#define bpf_htons(x) __builtin_bswap16(x)
-#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
- __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define bpf_ntohs(x) (x)
-#define bpf_htons(x) (x)
-#else
-# error "Endianness detection needs to be set up for your compiler?!"
-#endif
-
/*
* Note: including linux/compiler.h or linux/kernel.h for the macros below
* conflicts with vmlinux.h include in BPF files, so we define them here.
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
index 7df7163239ac..e39d7f654f30 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -35,7 +35,7 @@ static int do_attach(int idx, int fd, const char *name)
return err;
}
- err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
if (err) {
printf("can't get prog info - %s\n", strerror(errno));
return err;
diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c
index 307baef6861a..7e4b2f7108a6 100644
--- a/samples/bpf/xdp_tx_iptunnel_user.c
+++ b/samples/bpf/xdp_tx_iptunnel_user.c
@@ -295,7 +295,7 @@ int main(int argc, char **argv)
return 1;
}
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err) {
printf("can't get prog info - %s\n", strerror(errno));
return err;
diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
index e8d90829f23e..38d51e05c7a2 100755
--- a/scripts/bpf_doc.py
+++ b/scripts/bpf_doc.py
@@ -271,7 +271,7 @@ class HeaderParser(object):
if capture:
fn_defines_str += self.line
helper_name = capture.expand(r'bpf_\1')
- self.helper_enum_vals[helper_name] = int(capture[2])
+ self.helper_enum_vals[helper_name] = int(capture.group(2))
self.helper_enum_pos[helper_name] = i
i += 1
else:
diff --git a/scripts/pahole-flags.sh b/scripts/pahole-flags.sh
index 0d99ef17e4a5..1f1f1d397c39 100755
--- a/scripts/pahole-flags.sh
+++ b/scripts/pahole-flags.sh
@@ -19,5 +19,9 @@ fi
if [ "${pahole_ver}" -ge "122" ]; then
extra_paholeopt="${extra_paholeopt} -j"
fi
+if [ "${pahole_ver}" -ge "124" ]; then
+ # see PAHOLE_HAS_LANG_EXCLUDE
+ extra_paholeopt="${extra_paholeopt} --lang_exclude=rust"
+fi
echo ${extra_paholeopt}
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index f610e184ce02..681fbcc5ed50 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -53,7 +53,7 @@ $(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_
$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR:/=) prefix= \
- ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) AR=$(HOSTAR) $@ install_headers
+ ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" $@ install_headers
$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
$(call QUIET_INSTALL, $@)
@@ -215,7 +215,8 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
-I$(or $(OUTPUT),.) \
-I$(srctree)/tools/include/uapi/ \
-I$(LIBBPF_BOOTSTRAP_INCLUDE) \
- -g -O2 -Wall -target bpf -c $< -o $@
+ -g -O2 -Wall -fno-stack-protector \
+ -target bpf -c $< -o $@
$(Q)$(LLVM_STRIP) -g $@
$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
@@ -293,3 +294,6 @@ FORCE:
.PHONY: all FORCE bootstrap clean install-bin install uninstall
.PHONY: doc doc-clean doc-install doc-uninstall
.DEFAULT_GOAL := all
+
+# Delete partially updated (corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 352290ba7b29..91fcb75babe3 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -537,7 +537,7 @@ static bool btf_is_kernel_module(__u32 btf_id)
len = sizeof(btf_info);
btf_info.name = ptr_to_u64(btf_name);
btf_info.name_len = sizeof(btf_name);
- err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
close(btf_fd);
if (err) {
p_err("can't get BTF (ID %u) object info: %s", btf_id, strerror(errno));
@@ -606,7 +606,7 @@ static int do_dump(int argc, char **argv)
if (fd < 0)
return -1;
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
goto done;
@@ -789,7 +789,10 @@ build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
}
memset(info, 0, *len);
- err = bpf_obj_get_info_by_fd(fd, info, len);
+ if (type == BPF_OBJ_PROG)
+ err = bpf_prog_get_info_by_fd(fd, info, len);
+ else
+ err = bpf_map_get_info_by_fd(fd, info, len);
close(fd);
if (err) {
p_err("can't get %s info: %s", names[type],
@@ -931,7 +934,7 @@ show_btf(int fd, struct hashmap *btf_prog_table,
int err;
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get BTF object info: %s", strerror(errno));
return -1;
@@ -943,7 +946,7 @@ show_btf(int fd, struct hashmap *btf_prog_table,
info.name = ptr_to_u64(name);
len = sizeof(info);
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get BTF object info: %s", strerror(errno));
return -1;
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index eda71fdfe95a..e7f6ec3a8f35 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -57,7 +57,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
if (prog_fd < 0)
goto print;
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err)
goto print;
@@ -70,7 +70,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
info.func_info_rec_size = finfo_rec_size;
info.func_info = ptr_to_u64(&finfo);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err)
goto print;
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index b46a998d8f8d..ac846b0805b4 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -82,7 +82,7 @@ static void guess_vmlinux_btf_id(__u32 attach_btf_obj_id)
if (fd < 0)
return;
- err = bpf_obj_get_info_by_fd(fd, &btf_info, &btf_len);
+ err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_len);
if (err)
goto out;
@@ -108,7 +108,7 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
if (prog_fd < 0)
return -1;
- if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+ if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
close(prog_fd);
return -1;
}
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 620032042576..5a73ccf14332 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -353,7 +353,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
info.func_info_rec_size = sizeof(finfo);
info.func_info = ptr_to_u64(&finfo);
- if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
+ if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
goto copy_name;
prog_btf = btf__load_from_kernel_by_id(info.btf_id);
@@ -488,7 +488,7 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
goto out_close;
memset(&pinned_info, 0, sizeof(pinned_info));
- if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len))
+ if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
goto out_close;
path = strdup(fpath);
@@ -756,7 +756,7 @@ static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
goto err_close_fds;
}
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info (%u): %s",
id, strerror(errno));
@@ -916,7 +916,7 @@ static int map_fd_by_name(char *name, int **fds)
goto err_close_fds;
}
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get map info (%u): %s",
id, strerror(errno));
@@ -1026,7 +1026,8 @@ exit_free:
return fd;
}
-int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
+int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
+ __u32 *info_len)
{
int err;
int fd;
@@ -1035,7 +1036,7 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
if (fd < 0)
return -1;
- err = bpf_obj_get_info_by_fd(fd, info, info_len);
+ err = bpf_map_get_info_by_fd(fd, info, info_len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 36cf0f1517c9..da16e6a27ccc 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -486,16 +486,16 @@ static void probe_kernel_image_config(const char *define_prefix)
}
}
-end_parse:
- if (file)
- gzclose(file);
-
for (i = 0; i < ARRAY_SIZE(options); i++) {
if (define_prefix && !options[i].macro_dump)
continue;
print_kernel_option(options[i].name, values[i], define_prefix);
free(values[i]);
}
+
+end_parse:
+ if (file)
+ gzclose(file);
}
static bool probe_bpf_syscall(const char *define_prefix)
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 6f4cfe01cad4..f985b79cca27 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -145,7 +145,7 @@ static int get_prog_info(int prog_id, struct bpf_prog_info *info)
return prog_fd;
memset(info, 0, sizeof(*info));
- err = bpf_obj_get_info_by_fd(prog_fd, info, &len);
+ err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
if (err)
p_err("can't get prog info: %s", strerror(errno));
close(prog_fd);
@@ -327,7 +327,7 @@ static int do_show_link(int fd)
memset(&info, 0, sizeof(info));
again:
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get link info: %s",
strerror(errno));
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index a84224b6a604..0ef373cef4c7 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -168,7 +168,8 @@ int prog_parse_fd(int *argc, char ***argv);
int prog_parse_fds(int *argc, char ***argv, int **fds);
int map_parse_fd(int *argc, char ***argv);
int map_parse_fds(int *argc, char ***argv, int **fds);
-int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
+int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
+ __u32 *info_len);
struct bpf_prog_linfo;
#if defined(HAVE_LLVM_SUPPORT) || defined(HAVE_LIBBFD_SUPPORT)
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 88911d3aa2d9..aaeb8939e137 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -638,7 +638,7 @@ static int do_show_subset(int argc, char **argv)
if (json_output && nb_fds > 1)
jsonw_start_array(json_wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
- err = bpf_obj_get_info_by_fd(fds[i], &info, &len);
+ err = bpf_map_get_info_by_fd(fds[i], &info, &len);
if (err) {
p_err("can't get map info: %s",
strerror(errno));
@@ -708,7 +708,7 @@ static int do_show(int argc, char **argv)
break;
}
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
@@ -764,7 +764,7 @@ static int maps_have_btf(int *fds, int nb_fds)
int err, i;
for (i = 0; i < nb_fds; i++) {
- err = bpf_obj_get_info_by_fd(fds[i], &info, &len);
+ err = bpf_map_get_info_by_fd(fds[i], &info, &len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
return -1;
@@ -925,7 +925,7 @@ static int do_dump(int argc, char **argv)
if (wtr && nb_fds > 1)
jsonw_start_array(wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
- if (bpf_obj_get_info_by_fd(fds[i], &info, &len)) {
+ if (bpf_map_get_info_by_fd(fds[i], &info, &len)) {
p_err("can't get map info: %s", strerror(errno));
break;
}
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index cfc9fdc1e863..afbe3ec342c8 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -198,7 +198,7 @@ static void show_prog_maps(int fd, __u32 num_maps)
info.nr_map_ids = num_maps;
info.map_ids = ptr_to_u64(map_ids);
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err || !info.nr_map_ids)
return;
@@ -231,7 +231,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
memset(&prog_info, 0, sizeof(prog_info));
prog_info_len = sizeof(prog_info);
- ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret)
return NULL;
@@ -248,7 +248,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
prog_info.map_ids = ptr_to_u64(map_ids);
prog_info_len = sizeof(prog_info);
- ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret)
goto free_map_ids;
@@ -259,7 +259,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
memset(map_info, 0, sizeof(*map_info));
map_info_len = sizeof(*map_info);
- ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
+ ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len);
if (ret < 0) {
close(map_fd);
goto free_map_ids;
@@ -580,7 +580,7 @@ static int show_prog(int fd)
__u32 len = sizeof(info);
int err;
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
return -1;
@@ -949,7 +949,7 @@ static int do_dump(int argc, char **argv)
for (i = 0; i < nb_fds; i++) {
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
+ err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
break;
@@ -961,7 +961,7 @@ static int do_dump(int argc, char **argv)
break;
}
- err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
+ err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
break;
@@ -2170,9 +2170,9 @@ static char *profile_target_name(int tgt_fd)
char *name = NULL;
int err;
- err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
if (err) {
- p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd);
+ p_err("failed to get info for prog FD %d", tgt_fd);
goto out;
}
@@ -2183,7 +2183,7 @@ static char *profile_target_name(int tgt_fd)
func_info_rec_size = info.func_info_rec_size;
if (info.nr_func_info == 0) {
- p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd);
+ p_err("found 0 func_info for prog FD %d", tgt_fd);
goto out;
}
@@ -2192,7 +2192,7 @@ static char *profile_target_name(int tgt_fd)
info.func_info_rec_size = func_info_rec_size;
info.func_info = ptr_to_u64(&func_info);
- err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
if (err) {
p_err("failed to get func_info for prog FD %d", tgt_fd);
goto out;
@@ -2233,10 +2233,38 @@ static void profile_close_perf_events(struct profiler_bpf *obj)
profile_perf_event_cnt = 0;
}
+static int profile_open_perf_event(int mid, int cpu, int map_fd)
+{
+ int pmu_fd;
+
+ pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
+ -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
+ if (pmu_fd < 0) {
+ if (errno == ENODEV) {
+ p_info("cpu %d may be offline, skip %s profiling.",
+ cpu, metrics[mid].name);
+ profile_perf_event_cnt++;
+ return 0;
+ }
+ return -1;
+ }
+
+ if (bpf_map_update_elem(map_fd,
+ &profile_perf_event_cnt,
+ &pmu_fd, BPF_ANY) ||
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ close(pmu_fd);
+ return -1;
+ }
+
+ profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ return 0;
+}
+
static int profile_open_perf_events(struct profiler_bpf *obj)
{
unsigned int cpu, m;
- int map_fd, pmu_fd;
+ int map_fd;
profile_perf_events = calloc(
sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
@@ -2255,17 +2283,11 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
if (!metrics[m].selected)
continue;
for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
- pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
- -1/*pid*/, cpu, -1/*group_fd*/, 0);
- if (pmu_fd < 0 ||
- bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
- &pmu_fd, BPF_ANY) ||
- ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ if (profile_open_perf_event(m, cpu, map_fd)) {
p_err("failed to create event %s on cpu %d",
metrics[m].name, cpu);
return -1;
}
- profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
}
}
return 0;
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index 903b80ff4e9a..b389f4830e11 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -151,7 +151,7 @@ static int get_next_struct_ops_map(const char *name, int *res_fd,
return -1;
}
- err = bpf_obj_get_info_by_fd(fd, info, &info_len);
+ err = bpf_map_get_info_by_fd(fd, info, &info_len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
@@ -262,7 +262,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data,
goto done;
}
- if (bpf_obj_get_info_by_fd(fd, info, &info_len)) {
+ if (bpf_map_get_info_by_fd(fd, info, &info_len)) {
p_err("can't get map info: %s", strerror(errno));
res.nr_errs++;
goto done;
@@ -522,7 +522,7 @@ static int do_register(int argc, char **argv)
bpf_link__disconnect(link);
bpf_link__destroy(link);
- if (!bpf_obj_get_info_by_fd(bpf_map__fd(map), &info,
+ if (!bpf_map_get_info_by_fd(bpf_map__fd(map), &info,
&info_len))
p_info("Registered %s %s id %u",
get_kern_struct_ops_name(&info),
diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build
index ae82da03f9bf..077de3829c72 100644
--- a/tools/bpf/resolve_btfids/Build
+++ b/tools/bpf/resolve_btfids/Build
@@ -1,3 +1,5 @@
+hostprogs := resolve_btfids
+
resolve_btfids-y += main.o
resolve_btfids-y += rbtree.o
resolve_btfids-y += zalloc.o
@@ -7,4 +9,4 @@ resolve_btfids-y += str_error_r.o
$(OUTPUT)%.o: ../../lib/%.c FORCE
$(call rule_mkdir)
- $(call if_changed_dep,cc_o_c)
+ $(call if_changed_dep,host_cc_o_c)
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index 19a3112e271a..ac548a7baa73 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -17,15 +17,15 @@ else
MAKEFLAGS=--no-print-directory
endif
-# always use the host compiler
-AR = $(HOSTAR)
-CC = $(HOSTCC)
-LD = $(HOSTLD)
-ARCH = $(HOSTARCH)
+# Overrides for the prepare step libraries.
+HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \
+ CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
+
RM ?= rm
+HOSTCC ?= gcc
+HOSTLD ?= ld
+HOSTAR ?= ar
CROSS_COMPILE =
-CFLAGS := $(KBUILD_HOSTCFLAGS)
-LDFLAGS := $(KBUILD_HOSTLDFLAGS)
OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
@@ -35,51 +35,64 @@ SUBCMD_SRC := $(srctree)/tools/lib/subcmd/
BPFOBJ := $(OUTPUT)/libbpf/libbpf.a
LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/
SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a
+SUBCMD_OUT := $(abspath $(dir $(SUBCMDOBJ)))/
LIBBPF_DESTDIR := $(LIBBPF_OUT)
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include
+SUBCMD_DESTDIR := $(SUBCMD_OUT)
+SUBCMD_INCLUDE := $(SUBCMD_DESTDIR)include
+
BINARY := $(OUTPUT)/resolve_btfids
BINARY_IN := $(BINARY)-in.o
all: $(BINARY)
+prepare: $(BPFOBJ) $(SUBCMDOBJ)
+
$(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT):
$(call msg,MKDIR,,$@)
$(Q)mkdir -p $(@)
$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
- $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
+ $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(SUBCMD_OUT) \
+ DESTDIR=$(SUBCMD_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \
+ $(abspath $@) install_headers
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT)
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) \
- DESTDIR=$(LIBBPF_DESTDIR) prefix= EXTRA_CFLAGS="$(CFLAGS)" \
+ DESTDIR=$(LIBBPF_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \
$(abspath $@) install_headers
-CFLAGS += -g \
+LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
+LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
+
+HOSTCFLAGS += -g \
-I$(srctree)/tools/include \
-I$(srctree)/tools/include/uapi \
-I$(LIBBPF_INCLUDE) \
- -I$(SUBCMD_SRC)
+ -I$(SUBCMD_INCLUDE) \
+ $(LIBELF_FLAGS)
-LIBS = -lelf -lz
+LIBS = $(LIBELF_LIBS) -lz
-export srctree OUTPUT CFLAGS Q
+export srctree OUTPUT HOSTCFLAGS Q HOSTCC HOSTLD HOSTAR
include $(srctree)/tools/build/Makefile.include
-$(BINARY_IN): $(BPFOBJ) fixdep FORCE | $(OUTPUT)
+$(BINARY_IN): fixdep FORCE prepare | $(OUTPUT)
$(Q)$(MAKE) $(build)=resolve_btfids
$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
$(call msg,LINK,$@)
- $(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
+ $(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
clean_objects := $(wildcard $(OUTPUT)/*.o \
$(OUTPUT)/.*.o.cmd \
$(OUTPUT)/.*.o.d \
$(LIBBPF_OUT) \
$(LIBBPF_DESTDIR) \
- $(OUTPUT)/libsubcmd \
+ $(SUBCMD_OUT) \
+ $(SUBCMD_DESTDIR) \
$(OUTPUT)/resolve_btfids)
ifneq ($(clean_objects),)
@@ -96,4 +109,4 @@ tags:
FORCE:
-.PHONY: all FORCE clean tags
+.PHONY: all FORCE clean tags prepare
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 80cd7843c677..77058174082d 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -75,7 +75,7 @@
#include <linux/err.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
-#include <parse-options.h>
+#include <subcmd/parse-options.h>
#define BTF_IDS_SECTION ".BTF_ids"
#define BTF_ID "__BTF_ID__"
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index 8b3d87b82b7a..47acf6936516 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -13,6 +13,8 @@ BPF_DESTDIR := $(BPFOBJ_OUTPUT)
BPF_INCLUDE := $(BPF_DESTDIR)/include
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)
+CFLAGS += $(EXTRA_CFLAGS)
+LDFLAGS += $(EXTRA_LDFLAGS)
# Try to detect best kernel BTF source
KERNEL_REL := $(shell uname -r)
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
index d7dfeab0d71a..ff52668abf8c 100644
--- a/tools/include/uapi/asm/bpf_perf_event.h
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -6,6 +6,8 @@
#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
#elif defined(__riscv)
#include "../../arch/riscv/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__loongarch__)
+#include "../../arch/loongarch/include/uapi/asm/bpf_perf_event.h"
#else
#include <uapi/asm-generic/bpf_perf_event.h>
#endif
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 464ca3f01fe7..62ce1f5d1b1d 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1156,6 +1156,11 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
+ * program becomes device-bound but can access XDP metadata.
+ */
+#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
@@ -2001,6 +2006,9 @@ union bpf_attr {
* sending the packet. This flag was added for GRE
* encapsulation, but might be used with other protocols
* as well in the future.
+ * **BPF_F_NO_TUNNEL_KEY**
+ * Indicate that no tunnel key should be set in the
+ * resulting tunnel header.
*
* Here is a typical usage on the transmit path:
*
@@ -2644,6 +2652,11 @@ union bpf_attr {
* Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
* L2 type as Ethernet.
*
+ * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
+ * Indicate the new IP header version after decapsulating the outer
+ * IP header. Used when the inner and outer IP versions are different.
+ *
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
@@ -2788,7 +2801,7 @@ union bpf_attr {
*
* long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
- * For en eBPF program attached to a perf event, retrieve the
+ * For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
* the structure pointed by *buf* and of size *buf_size*. Enabled
* and running times are also stored in the structure (see
@@ -3121,6 +3134,11 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
+ * **BPF_FIB_LOOKUP_SKIP_NEIGH**
+ * Skip the neighbour table lookup. *params*->dmac
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@@ -5764,6 +5782,7 @@ enum {
BPF_F_ZERO_CSUM_TX = (1ULL << 1),
BPF_F_DONT_FRAGMENT = (1ULL << 2),
BPF_F_SEQ_NUMBER = (1ULL << 3),
+ BPF_F_NO_TUNNEL_KEY = (1ULL << 4),
};
/* BPF_FUNC_skb_get_tunnel_key flags. */
@@ -5803,6 +5822,8 @@ enum {
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8),
};
enum {
@@ -6734,6 +6755,7 @@ struct bpf_raw_tracepoint_args {
enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
};
enum {
@@ -6901,6 +6923,17 @@ struct bpf_list_node {
__u64 :64;
} __attribute__((aligned(8)));
+struct bpf_rb_root {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_rb_node {
+ __u64 :64;
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
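
The BPF_FIB_LOOKUP_SKIP_NEIGH documentation above suggests pairing the flag with bpf_redirect_neigh(). A minimal tc-side sketch of that pattern, with packet parsing and params setup elided (assumes the usual vmlinux.h/bpf_helpers.h includes):

	SEC("tc")
	int redirect_via_fib(struct __sk_buff *skb)
	{
		struct bpf_fib_lookup params = {};
		int rc;

		/* ... fill params from the packet headers (elided) ... */

		/* dmac/smac are left unset; bpf_redirect_neigh() does
		 * the neighbour resolution instead.
		 */
		rc = bpf_fib_lookup(skb, &params, sizeof(params),
				    BPF_FIB_LOOKUP_SKIP_NEIGH);
		if (rc != BPF_FIB_LKUP_RET_SUCCESS)
			return TC_ACT_OK;

		return bpf_redirect_neigh(params.ifindex, NULL, 0, 0);
	}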
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
new file mode 100644
index 000000000000..9ee459872600
--- /dev/null
+++ b/tools/include/uapi/linux/netdev.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NETDEV_H
+#define _UAPI_LINUX_NETDEV_H
+
+#define NETDEV_FAMILY_NAME "netdev"
+#define NETDEV_FAMILY_VERSION 1
+
+/**
+ * enum netdev_xdp_act
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
+ * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
+ * ndo_xdp_xmit callback.
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
+ * in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
+ * offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
+ * XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
+ * non-linear XDP buffer support in ndo_xdp_xmit callback.
+ */
+enum netdev_xdp_act {
+ NETDEV_XDP_ACT_BASIC = 1,
+ NETDEV_XDP_ACT_REDIRECT = 2,
+ NETDEV_XDP_ACT_NDO_XMIT = 4,
+ NETDEV_XDP_ACT_XSK_ZEROCOPY = 8,
+ NETDEV_XDP_ACT_HW_OFFLOAD = 16,
+ NETDEV_XDP_ACT_RX_SG = 32,
+ NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+};
+
+enum {
+ NETDEV_A_DEV_IFINDEX = 1,
+ NETDEV_A_DEV_PAD,
+ NETDEV_A_DEV_XDP_FEATURES,
+
+ __NETDEV_A_DEV_MAX,
+ NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
+};
+
+enum {
+ NETDEV_CMD_DEV_GET = 1,
+ NETDEV_CMD_DEV_ADD_NTF,
+ NETDEV_CMD_DEV_DEL_NTF,
+ NETDEV_CMD_DEV_CHANGE_NTF,
+
+ __NETDEV_CMD_MAX,
+ NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
+};
+
+#define NETDEV_MCGRP_MGMT "mgmt"
+
+#endif /* _UAPI_LINUX_NETDEV_H */
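
A small user-space sketch of consuming the feature mask once it has been fetched (for example via this genetlink family, or via bpf_xdp_query() as shown further below):

#include <stdio.h>
#include <linux/netdev.h>

static void print_xdp_caps(unsigned long long feats)
{
	if (feats & NETDEV_XDP_ACT_BASIC)
		printf("basic XDP (ABORTED/DROP/PASS/TX)\n");
	if (feats & NETDEV_XDP_ACT_REDIRECT)
		printf("XDP_REDIRECT\n");
	if (feats & NETDEV_XDP_ACT_XSK_ZEROCOPY)
		printf("AF_XDP zero-copy\n");
	if (feats & NETDEV_XDP_ACT_RX_SG)
		printf("multi-buffer RX\n");
}
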
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 9aff98f42a3d..e750b6f5fcc3 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -1044,6 +1044,26 @@ int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
return libbpf_err_errno(err);
}
+int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len)
+{
+ return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
+}
+
+int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len)
+{
+ return bpf_obj_get_info_by_fd(map_fd, info, info_len);
+}
+
+int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len)
+{
+ return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
+}
+
+int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len)
+{
+ return bpf_obj_get_info_by_fd(link_fd, info, info_len);
+}
+
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 7468978d3c27..9ed9bceb4111 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -386,6 +386,15 @@ LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id,
const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
+/* Type-safe variants of bpf_obj_get_info_by_fd(). The caller still needs to
+ * pass info_len, which should normally be
+ * sizeof(struct bpf_{prog,map,btf,link}_info), in order to be compatible with
+ * different libbpf and kernel versions.
+ */
+LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len);
+LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
+LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len);
+LIBBPF_API int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len);
struct bpf_prog_query_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
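
Usage is the same as before, only type-checked; a minimal sketch:

#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

static int print_prog_name(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, len);
	/* info_len is still passed for forward/backward compatibility */
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
	if (err)
		return err;

	printf("prog %s, id %u\n", info.name, info.id);
	return 0;
}
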
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 496e6a8ee0dc..1ac57bb7ac55 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -364,7 +364,7 @@ enum bpf_enum_value_kind {
/* Non-CO-RE variant of BPF_CORE_READ_INTO() */
#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \
- ___core_read(bpf_probe_read, bpf_probe_read, \
+ ___core_read(bpf_probe_read_kernel, bpf_probe_read_kernel, \
dst, (src), a, ##__VA_ARGS__) \
})
@@ -400,7 +400,7 @@ enum bpf_enum_value_kind {
/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */
#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \
- ___core_read(bpf_probe_read_str, bpf_probe_read, \
+ ___core_read(bpf_probe_read_kernel_str, bpf_probe_read_kernel, \
dst, (src), a, ##__VA_ARGS__) \
})
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index d37c4fe2849d..5ec1871acb2f 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -109,7 +109,7 @@
* This is a variable-specific variant of more global barrier().
*/
#ifndef barrier_var
-#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
+#define barrier_var(var) asm volatile("" : "+r"(var))
#endif
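
A typical (hedged) use is pinning a bounds-checked index so the compiler can't transform the check into a form the verifier rejects; buf and BUF_SZ are illustrative:

#define BUF_SZ 64

static __always_inline int read_byte(const char *buf, __u32 off)
{
	if (off >= BUF_SZ)
		return -1;
	barrier_var(off); /* keep the checked value in its register */
	return buf[off];
}
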
/*
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index 2972dc25ff72..6db88f41fa0d 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -32,6 +32,9 @@
#elif defined(__TARGET_ARCH_arc)
#define bpf_target_arc
#define bpf_target_defined
+#elif defined(__TARGET_ARCH_loongarch)
+ #define bpf_target_loongarch
+ #define bpf_target_defined
#else
/* Fall back to what the compiler says */
@@ -62,6 +65,9 @@
#elif defined(__arc__)
#define bpf_target_arc
#define bpf_target_defined
+#elif defined(__loongarch__)
+ #define bpf_target_loongarch
+ #define bpf_target_defined
#endif /* no compiler target */
#endif
@@ -72,6 +78,10 @@
#if defined(bpf_target_x86)
+/*
+ * https://en.wikipedia.org/wiki/X86_calling_conventions#System_V_AMD64_ABI
+ */
+
#if defined(__KERNEL__) || defined(__VMLINUX_H__)
#define __PT_PARM1_REG di
@@ -79,25 +89,40 @@
#define __PT_PARM3_REG dx
#define __PT_PARM4_REG cx
#define __PT_PARM5_REG r8
+#define __PT_PARM6_REG r9
+/*
+ * Syscall uses r10 for PARM4. See arch/x86/entry/entry_64.S:entry_SYSCALL_64
+ * comments in the Linux sources, and refer to the syscall(2) manpage.
+ */
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG r10
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
#define __PT_RET_REG sp
#define __PT_FP_REG bp
#define __PT_RC_REG ax
#define __PT_SP_REG sp
#define __PT_IP_REG ip
-/* syscall uses r10 for PARM4 */
-#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
-#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
#else
#ifdef __i386__
+/* i386 kernel is built with -mregparm=3 */
#define __PT_PARM1_REG eax
#define __PT_PARM2_REG edx
#define __PT_PARM3_REG ecx
-/* i386 kernel is built with -mregparm=3 */
-#define __PT_PARM4_REG __unsupported__
-#define __PT_PARM5_REG __unsupported__
+/* i386 syscall ABI is very different, refer to syscall(2) manpage */
+#define __PT_PARM1_SYSCALL_REG ebx
+#define __PT_PARM2_SYSCALL_REG ecx
+#define __PT_PARM3_SYSCALL_REG edx
+#define __PT_PARM4_SYSCALL_REG esi
+#define __PT_PARM5_SYSCALL_REG edi
+#define __PT_PARM6_SYSCALL_REG ebp
+
#define __PT_RET_REG esp
#define __PT_FP_REG ebp
#define __PT_RC_REG eax
@@ -111,14 +136,20 @@
#define __PT_PARM3_REG rdx
#define __PT_PARM4_REG rcx
#define __PT_PARM5_REG r8
+#define __PT_PARM6_REG r9
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG r10
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
#define __PT_RET_REG rsp
#define __PT_FP_REG rbp
#define __PT_RC_REG rax
#define __PT_SP_REG rsp
#define __PT_IP_REG rip
-/* syscall uses r10 for PARM4 */
-#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
-#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
#endif /* __i386__ */
@@ -126,6 +157,10 @@
#elif defined(bpf_target_s390)
+/*
+ * https://github.com/IBM/s390x-abi/releases/download/v1.6/lzsabi_s390x.pdf
+ */
+
struct pt_regs___s390 {
unsigned long orig_gpr2;
};
@@ -137,21 +172,41 @@ struct pt_regs___s390 {
#define __PT_PARM3_REG gprs[4]
#define __PT_PARM4_REG gprs[5]
#define __PT_PARM5_REG gprs[6]
-#define __PT_RET_REG grps[14]
+
+#define __PT_PARM1_SYSCALL_REG orig_gpr2
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG gprs[7]
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) \
+ BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
+
+#define __PT_RET_REG gprs[14]
#define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG gprs[2]
#define __PT_SP_REG gprs[15]
#define __PT_IP_REG psw.addr
-#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
-#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
#elif defined(bpf_target_arm)
+/*
+ * https://github.com/ARM-software/abi-aa/blob/main/aapcs32/aapcs32.rst#machine-registers
+ */
+
#define __PT_PARM1_REG uregs[0]
#define __PT_PARM2_REG uregs[1]
#define __PT_PARM3_REG uregs[2]
#define __PT_PARM4_REG uregs[3]
-#define __PT_PARM5_REG uregs[4]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG uregs[4]
+#define __PT_PARM6_SYSCALL_REG uregs[5]
+#define __PT_PARM7_SYSCALL_REG uregs[6]
+
#define __PT_RET_REG uregs[14]
#define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG uregs[0]
@@ -160,6 +215,10 @@ struct pt_regs___s390 {
#elif defined(bpf_target_arm64)
+/*
+ * https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#machine-registers
+ */
+
struct pt_regs___arm64 {
unsigned long orig_x0;
};
@@ -171,21 +230,49 @@ struct pt_regs___arm64 {
#define __PT_PARM3_REG regs[2]
#define __PT_PARM4_REG regs[3]
#define __PT_PARM5_REG regs[4]
+#define __PT_PARM6_REG regs[5]
+#define __PT_PARM7_REG regs[6]
+#define __PT_PARM8_REG regs[7]
+
+#define __PT_PARM1_SYSCALL_REG orig_x0
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) \
+ BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
+
#define __PT_RET_REG regs[30]
#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG regs[0]
#define __PT_SP_REG sp
#define __PT_IP_REG pc
-#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
-#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
#elif defined(bpf_target_mips)
+/*
+ * N64 ABI is assumed right now.
+ * https://en.wikipedia.org/wiki/MIPS_architecture#Calling_conventions
+ */
+
#define __PT_PARM1_REG regs[4]
#define __PT_PARM2_REG regs[5]
#define __PT_PARM3_REG regs[6]
#define __PT_PARM4_REG regs[7]
#define __PT_PARM5_REG regs[8]
+#define __PT_PARM6_REG regs[9]
+#define __PT_PARM7_REG regs[10]
+#define __PT_PARM8_REG regs[11]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG /* only N32/N64 */
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG /* only N32/N64 */
+
#define __PT_RET_REG regs[31]
#define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG regs[2]
@@ -194,26 +281,58 @@ struct pt_regs___arm64 {
#elif defined(bpf_target_powerpc)
+/*
+ * http://refspecs.linux-foundation.org/elf/elfspec_ppc.pdf (page 3-14,
+ * section "Function Calling Sequence")
+ */
+
#define __PT_PARM1_REG gpr[3]
#define __PT_PARM2_REG gpr[4]
#define __PT_PARM3_REG gpr[5]
#define __PT_PARM4_REG gpr[6]
#define __PT_PARM5_REG gpr[7]
+#define __PT_PARM6_REG gpr[8]
+#define __PT_PARM7_REG gpr[9]
+#define __PT_PARM8_REG gpr[10]
+
+/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG orig_gpr3
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+#if !defined(__arch64__)
+#define __PT_PARM7_SYSCALL_REG __PT_PARM7_REG /* only powerpc (not powerpc64) */
+#endif
+
#define __PT_RET_REG regs[31]
#define __PT_FP_REG __unsupported__
#define __PT_RC_REG gpr[3]
#define __PT_SP_REG sp
#define __PT_IP_REG nip
-/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
-#define PT_REGS_SYSCALL_REGS(ctx) ctx
#elif defined(bpf_target_sparc)
+/*
+ * https://en.wikipedia.org/wiki/Calling_convention#SPARC
+ */
+
#define __PT_PARM1_REG u_regs[UREG_I0]
#define __PT_PARM2_REG u_regs[UREG_I1]
#define __PT_PARM3_REG u_regs[UREG_I2]
#define __PT_PARM4_REG u_regs[UREG_I3]
#define __PT_PARM5_REG u_regs[UREG_I4]
+#define __PT_PARM6_REG u_regs[UREG_I5]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
#define __PT_RET_REG u_regs[UREG_I7]
#define __PT_FP_REG __unsupported__
#define __PT_RC_REG u_regs[UREG_I0]
@@ -227,22 +346,42 @@ struct pt_regs___arm64 {
#elif defined(bpf_target_riscv)
+/*
+ * https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
+ */
+
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG a0
#define __PT_PARM2_REG a1
#define __PT_PARM3_REG a2
#define __PT_PARM4_REG a3
#define __PT_PARM5_REG a4
+#define __PT_PARM6_REG a5
+#define __PT_PARM7_REG a6
+#define __PT_PARM8_REG a7
+
+/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
#define __PT_RET_REG ra
#define __PT_FP_REG s0
#define __PT_RC_REG a0
#define __PT_SP_REG sp
#define __PT_IP_REG pc
-/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
-#define PT_REGS_SYSCALL_REGS(ctx) ctx
#elif defined(bpf_target_arc)
+/*
+ * Section "Function Calling Sequence" (page 24):
+ * https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf
+ */
+
/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG scratch.r0
@@ -250,13 +389,55 @@ struct pt_regs___arm64 {
#define __PT_PARM3_REG scratch.r2
#define __PT_PARM4_REG scratch.r3
#define __PT_PARM5_REG scratch.r4
+#define __PT_PARM6_REG scratch.r5
+#define __PT_PARM7_REG scratch.r6
+#define __PT_PARM8_REG scratch.r7
+
+/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
#define __PT_RET_REG scratch.blink
-#define __PT_FP_REG __unsupported__
+#define __PT_FP_REG scratch.fp
#define __PT_RC_REG scratch.r0
#define __PT_SP_REG scratch.sp
#define __PT_IP_REG scratch.ret
-/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+
+#elif defined(bpf_target_loongarch)
+
+/*
+ * https://docs.kernel.org/loongarch/introduction.html
+ * https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
+ */
+
+#define __PT_PARM1_REG regs[4]
+#define __PT_PARM2_REG regs[5]
+#define __PT_PARM3_REG regs[6]
+#define __PT_PARM4_REG regs[7]
+#define __PT_PARM5_REG regs[8]
+#define __PT_PARM6_REG regs[9]
+#define __PT_PARM7_REG regs[10]
+#define __PT_PARM8_REG regs[11]
+
+/* loongarch does not select ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG regs[1]
+#define __PT_FP_REG regs[22]
+#define __PT_RC_REG regs[4]
+#define __PT_SP_REG regs[3]
+#define __PT_IP_REG csr_era
#endif
@@ -264,16 +445,49 @@ struct pt_regs___arm64 {
struct pt_regs;
-/* allow some architecutres to override `struct pt_regs` */
+/* allow some architectures to override `struct pt_regs` */
#ifndef __PT_REGS_CAST
#define __PT_REGS_CAST(x) (x)
#endif
+/*
+ * Different architectures support a different number of arguments passed
+ * through registers. i386 supports just 3, while some arches support up to 8.
+ */
+#ifndef __PT_PARM4_REG
+#define __PT_PARM4_REG __unsupported__
+#endif
+#ifndef __PT_PARM5_REG
+#define __PT_PARM5_REG __unsupported__
+#endif
+#ifndef __PT_PARM6_REG
+#define __PT_PARM6_REG __unsupported__
+#endif
+#ifndef __PT_PARM7_REG
+#define __PT_PARM7_REG __unsupported__
+#endif
+#ifndef __PT_PARM8_REG
+#define __PT_PARM8_REG __unsupported__
+#endif
+/*
+ * Similarly, syscall conventions might differ from the function call
+ * conventions within each architecture. All supported architectures pass
+ * either 6 or 7 syscall arguments in registers.
+ *
+ * See the syscall(2) manpage for a succinct table with information on each arch.
+ */
+#ifndef __PT_PARM7_SYSCALL_REG
+#define __PT_PARM7_SYSCALL_REG __unsupported__
+#endif
+
#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
+#define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->__PT_PARM6_REG)
+#define PT_REGS_PARM7(x) (__PT_REGS_CAST(x)->__PT_PARM7_REG)
+#define PT_REGS_PARM8(x) (__PT_REGS_CAST(x)->__PT_PARM8_REG)
#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
@@ -285,6 +499,9 @@ struct pt_regs;
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
+#define PT_REGS_PARM6_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_REG)
+#define PT_REGS_PARM7_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_REG)
+#define PT_REGS_PARM8_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM8_REG)
#define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
#define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
#define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
@@ -311,24 +528,33 @@ struct pt_regs;
#endif
#ifndef PT_REGS_PARM1_SYSCALL
-#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x)
+#define PT_REGS_PARM1_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM1_SYSCALL_REG)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM2_SYSCALL
+#define PT_REGS_PARM2_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM2_SYSCALL_REG)
+#define PT_REGS_PARM2_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM3_SYSCALL
+#define PT_REGS_PARM3_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM3_SYSCALL_REG)
+#define PT_REGS_PARM3_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_SYSCALL_REG)
#endif
-#define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x)
-#define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x)
#ifndef PT_REGS_PARM4_SYSCALL
-#define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x)
+#define PT_REGS_PARM4_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM4_SYSCALL_REG)
+#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_SYSCALL_REG)
#endif
-#define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x)
-
-#ifndef PT_REGS_PARM1_CORE_SYSCALL
-#define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x)
+#ifndef PT_REGS_PARM5_SYSCALL
+#define PT_REGS_PARM5_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM5_SYSCALL_REG)
+#define PT_REGS_PARM5_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_SYSCALL_REG)
#endif
-#define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x)
-#define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x)
-#ifndef PT_REGS_PARM4_CORE_SYSCALL
-#define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x)
+#ifndef PT_REGS_PARM6_SYSCALL
+#define PT_REGS_PARM6_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM6_SYSCALL_REG)
+#define PT_REGS_PARM6_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM7_SYSCALL
+#define PT_REGS_PARM7_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM7_SYSCALL_REG)
+#define PT_REGS_PARM7_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_SYSCALL_REG)
#endif
-#define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x)
#else /* defined(bpf_target_defined) */
@@ -337,6 +563,9 @@ struct pt_regs;
#define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM8(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
@@ -348,6 +577,9 @@ struct pt_regs;
#define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM8_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
@@ -362,12 +594,16 @@ struct pt_regs;
#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#endif /* defined(bpf_target_defined) */
@@ -553,6 +789,9 @@ struct pt_regs;
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (void *)PT_REGS_PARM6(ctx)
+#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (void *)PT_REGS_PARM7(ctx)
+#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (void *)PT_REGS_PARM8(ctx)
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
/*
@@ -609,6 +848,8 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
+#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (void *)PT_REGS_PARM6_SYSCALL(regs)
+#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (void *)PT_REGS_PARM7_SYSCALL(regs)
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
@@ -618,6 +859,8 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (void *)PT_REGS_PARM6_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (void *)PT_REGS_PARM7_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
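
Programs consume all of this through BPF_KSYSCALL, which now covers up to the 6th (or, where available, 7th) syscall argument; a sketch:

SEC("ksyscall/openat")
int BPF_KSYSCALL(probe_openat, int dfd, const char *pathname, int flags)
{
	char name[64];

	bpf_probe_read_user_str(name, sizeof(name), pathname);
	bpf_printk("openat(%d, %s, %d)", dfd, name, flags);
	return 0;
}
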
/*
@@ -667,4 +910,11 @@ ____##name(struct pt_regs *ctx, ##args)
#define BPF_KPROBE_SYSCALL BPF_KSYSCALL
+/* BPF_UPROBE and BPF_URETPROBE are identical to BPF_KPROBE and BPF_KRETPROBE,
+ * but are named way less confusingly for SEC("uprobe") and SEC("uretprobe")
+ * use cases.
+ */
+#define BPF_UPROBE(name, args...) BPF_KPROBE(name, ##args)
+#define BPF_URETPROBE(name, args...) BPF_KRETPROBE(name, ##args)
+
#endif
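
A hedged example of the new aliases (the libc path is illustrative and distro-specific):

SEC("uprobe//usr/lib/x86_64-linux-gnu/libc.so.6:malloc")
int BPF_UPROBE(trace_malloc, size_t size)
{
	bpf_printk("malloc(%lu)", (unsigned long)size);
	return 0;
}

SEC("uretprobe//usr/lib/x86_64-linux-gnu/libc.so.6:malloc")
int BPF_URETPROBE(trace_malloc_ret, void *ptr)
{
	bpf_printk("malloc -> %p", ptr);
	return 0;
}
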
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 71e165b09ed5..9181d36118d2 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -688,8 +688,21 @@ int btf__align_of(const struct btf *btf, __u32 id)
if (align <= 0)
return libbpf_err(align);
max_align = max(max_align, align);
+
+ /* if field offset isn't aligned according to field
+ * type's alignment, then struct must be packed
+ */
+ if (btf_member_bitfield_size(t, i) == 0 &&
+ (m->offset % (8 * align)) != 0)
+ return 1;
}
+ /* if struct/union size isn't a multiple of its alignment,
+ * then struct must be packed
+ */
+ if ((t->size % max_align) != 0)
+ return 1;
+
return max_align;
}
default:
@@ -990,7 +1003,8 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
err = 0;
if (!btf_data) {
- err = -ENOENT;
+ pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
+ err = -ENODATA;
goto done;
}
btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
@@ -1336,9 +1350,9 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
void *ptr;
int err;
- /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
+ /* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
* let's start with a sane default - 4KiB here - and resize it only if
- * bpf_obj_get_info_by_fd() needs a bigger buffer.
+ * bpf_btf_get_info_by_fd() needs a bigger buffer.
*/
last_size = 4096;
ptr = malloc(last_size);
@@ -1348,7 +1362,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
memset(&btf_info, 0, sizeof(btf_info));
btf_info.btf = ptr_to_u64(ptr);
btf_info.btf_size = last_size;
- err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
if (!err && btf_info.btf_size > last_size) {
void *temp_ptr;
@@ -1366,7 +1380,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
btf_info.btf = ptr_to_u64(ptr);
btf_info.btf_size = last_size;
- err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
}
if (err || btf_info.btf_size > last_size) {
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index deb2bc9a0a7b..580985ee5545 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -13,6 +13,7 @@
#include <ctype.h>
#include <endian.h>
#include <errno.h>
+#include <limits.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <linux/kernel.h>
@@ -833,14 +834,9 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
const struct btf_type *t)
{
const struct btf_member *m;
- int align, i, bit_sz;
+ int max_align = 1, align, i, bit_sz;
__u16 vlen;
- align = btf__align_of(btf, id);
- /* size of a non-packed struct has to be a multiple of its alignment*/
- if (align && t->size % align)
- return true;
-
m = btf_members(t);
vlen = btf_vlen(t);
/* all non-bitfield fields have to be naturally aligned */
@@ -849,8 +845,11 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
bit_sz = btf_member_bitfield_size(t, i);
if (align && bit_sz == 0 && m->offset % (8 * align) != 0)
return true;
+ max_align = max(align, max_align);
}
-
+ /* size of a non-packed struct has to be a multiple of its alignment */
+ if (t->size % max_align != 0)
+ return true;
/*
* if original struct was marked as packed, but its layout is
* naturally aligned, we'll detect that it's not packed
@@ -858,44 +857,97 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
return false;
}
-static int chip_away_bits(int total, int at_most)
-{
- return total % at_most ? : at_most;
-}
-
static void btf_dump_emit_bit_padding(const struct btf_dump *d,
- int cur_off, int m_off, int m_bit_sz,
- int align, int lvl)
+ int cur_off, int next_off, int next_align,
+ bool in_bitfield, int lvl)
{
- int off_diff = m_off - cur_off;
- int ptr_bits = d->ptr_sz * 8;
+ const struct {
+ const char *name;
+ int bits;
+ } pads[] = {
+ {"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
+ };
+ int new_off, pad_bits, bits, i;
+ const char *pad_type;
+
+ if (cur_off >= next_off)
+ return; /* no gap */
+
+ /* For filling out padding we want to take advantage of
+ * natural alignment rules to minimize unnecessary explicit
+ * padding. First, we find the largest type (among long, int,
+ * short, or char) that can be used to force naturally aligned
+ * boundary. Once determined, we'll use such type to fill in
+ * the remaining padding gap. In some cases we can rely on
+ * compiler filling some gaps, but sometimes we need to force
+ * alignment to close natural alignment with markers like
+ * `long: 0` (this is always the case for bitfields). Note
+ * that even if struct itself has, let's say 4-byte alignment
+ * (i.e., it only uses up to int-aligned types), using `long:
+ * X;` explicit padding doesn't actually change struct's
+ * overall alignment requirements, but compiler does take into
+ * account that type's (long, in this example) natural
+ * alignment requirements when adding implicit padding. We use
+ * this fact heavily and don't worry about ruining correct
+ * struct alignment requirement.
+ */
+ for (i = 0; i < ARRAY_SIZE(pads); i++) {
+ pad_bits = pads[i].bits;
+ pad_type = pads[i].name;
- if (off_diff <= 0)
- /* no gap */
- return;
- if (m_bit_sz == 0 && off_diff < align * 8)
- /* natural padding will take care of a gap */
- return;
+ new_off = roundup(cur_off, pad_bits);
+ if (new_off <= next_off)
+ break;
+ }
- while (off_diff > 0) {
- const char *pad_type;
- int pad_bits;
-
- if (ptr_bits > 32 && off_diff > 32) {
- pad_type = "long";
- pad_bits = chip_away_bits(off_diff, ptr_bits);
- } else if (off_diff > 16) {
- pad_type = "int";
- pad_bits = chip_away_bits(off_diff, 32);
- } else if (off_diff > 8) {
- pad_type = "short";
- pad_bits = chip_away_bits(off_diff, 16);
- } else {
- pad_type = "char";
- pad_bits = chip_away_bits(off_diff, 8);
+ if (new_off > cur_off && new_off <= next_off) {
+ /* We need explicit `<type>: 0` aligning mark if next
+ * field is right on alignment offset and its
+ * alignment requirement is less strict than <type>'s
+ * alignment (so compiler won't naturally align to the
+ * offset we expect), or if the subsequent `<type>: X`
+ * will actually completely fit in the remaining hole,
+ * making compiler basically ignore `<type>: X`
+ * completely.
+ */
+ if (in_bitfield ||
+ (new_off == next_off && roundup(cur_off, next_align * 8) != new_off) ||
+ (new_off != next_off && next_off - new_off <= new_off - cur_off))
+ /* but for bitfields we'll emit explicit bit count */
+ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type,
+ in_bitfield ? new_off - cur_off : 0);
+ cur_off = new_off;
+ }
+
+ /* Now we know we start at naturally aligned offset for a chosen
+ * padding type (long, int, short, or char), and so the rest is just
+ * a straightforward filling of remaining padding gap with full
+ * `<type>: sizeof(<type>);` markers, except for the last one, which
+ * might need smaller than sizeof(<type>) padding.
+ */
+ while (cur_off != next_off) {
+ bits = min(next_off - cur_off, pad_bits);
+ if (bits == pad_bits) {
+ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
+ cur_off += bits;
+ continue;
+ }
+ /* For the remainder padding that doesn't cover entire
+ * pad_type bit length, we pick the smallest necessary type.
+ * This is pure aesthetics, we could have just used `long`,
+ * but having smallest necessary one communicates better the
+ * scale of the padding gap.
+ */
+ for (i = ARRAY_SIZE(pads) - 1; i >= 0; i--) {
+ pad_type = pads[i].name;
+ pad_bits = pads[i].bits;
+ if (pad_bits < bits)
+ continue;
+
+ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, bits);
+ cur_off += bits;
+ break;
}
- btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
- off_diff -= pad_bits;
}
}
@@ -915,9 +967,11 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
{
const struct btf_member *m = btf_members(t);
bool is_struct = btf_is_struct(t);
- int align, i, packed, off = 0;
+ bool packed, prev_bitfield = false;
+ int align, i, off = 0;
__u16 vlen = btf_vlen(t);
+ align = btf__align_of(d->btf, id);
packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
btf_dump_printf(d, "%s%s%s {",
@@ -927,41 +981,47 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
for (i = 0; i < vlen; i++, m++) {
const char *fname;
- int m_off, m_sz;
+ int m_off, m_sz, m_align;
+ bool in_bitfield;
fname = btf_name_of(d, m->name_off);
m_sz = btf_member_bitfield_size(t, i);
m_off = btf_member_bit_offset(t, i);
- align = packed ? 1 : btf__align_of(d->btf, m->type);
+ m_align = packed ? 1 : btf__align_of(d->btf, m->type);
+
+ in_bitfield = prev_bitfield && m_sz != 0;
- btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
+ btf_dump_emit_bit_padding(d, off, m_off, m_align, in_bitfield, lvl + 1);
btf_dump_printf(d, "\n%s", pfx(lvl + 1));
btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
if (m_sz) {
btf_dump_printf(d, ": %d", m_sz);
off = m_off + m_sz;
+ prev_bitfield = true;
} else {
m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
off = m_off + m_sz * 8;
+ prev_bitfield = false;
}
+
btf_dump_printf(d, ";");
}
/* pad at the end, if necessary */
- if (is_struct) {
- align = packed ? 1 : btf__align_of(d->btf, id);
- btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
- lvl + 1);
- }
+ if (is_struct)
+ btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1);
/*
* Keep `struct empty {}` on a single line,
* only print newline when there are regular or padding fields.
*/
- if (vlen || t->size)
+ if (vlen || t->size) {
btf_dump_printf(d, "\n");
- btf_dump_printf(d, "%s}", pfx(lvl));
+ btf_dump_printf(d, "%s}", pfx(lvl));
+ } else {
+ btf_dump_printf(d, "}");
+ }
if (packed)
btf_dump_printf(d, " __attribute__((packed))");
}
@@ -1073,6 +1133,43 @@ static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
else
btf_dump_emit_enum64_val(d, t, lvl, vlen);
btf_dump_printf(d, "\n%s}", pfx(lvl));
+
+ /* special case enums with special sizes */
+ if (t->size == 1) {
+ /* one-byte enums can be forced with mode(byte) attribute */
+ btf_dump_printf(d, " __attribute__((mode(byte)))");
+ } else if (t->size == 8 && d->ptr_sz == 8) {
+ /* enum can be 8-byte sized if one of the enumerator values
+ * doesn't fit in 32-bit integer, or by adding mode(word)
+ * attribute (but probably only on 64-bit architectures); do
+ * our best here to try to satisfy the contract without adding
+ * unnecessary attributes
+ */
+ bool needs_word_mode;
+
+ if (btf_is_enum(t)) {
+ /* enum can't represent 64-bit values, so we need word mode */
+ needs_word_mode = true;
+ } else {
+ /* enum64 needs mode(word) if none of its values has
+ * non-zero upper 32-bits (which means that all values
+ * fit in 32-bit integers and won't cause compiler to
+ * bump the enum to be 64-bit naturally)
+ */
+ int i;
+
+ needs_word_mode = true;
+ for (i = 0; i < vlen; i++) {
+ if (btf_enum64(t)[i].val_hi32 != 0) {
+ needs_word_mode = false;
+ break;
+ }
+ }
+ }
+ if (needs_word_mode)
+ btf_dump_printf(d, " __attribute__((mode(word)))");
+ }
+
}
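
So, roughly, a 1-byte enum would now be dumped as (a sketch of the expected output):

enum tiny {
	TINY_A = 0,
	TINY_B = 1,
} __attribute__((mode(byte)));
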
static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2a82f49ce16f..05c4db355f28 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -34,7 +34,6 @@
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
-#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -870,42 +869,6 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return 0;
}
-__u32 get_kernel_version(void)
-{
- /* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
- * but Ubuntu provides /proc/version_signature file, as described at
- * https://ubuntu.com/kernel, with an example contents below, which we
- * can use to get a proper LINUX_VERSION_CODE.
- *
- * Ubuntu 5.4.0-12.15-generic 5.4.8
- *
- * In the above, 5.4.8 is what kernel is actually expecting, while
- * uname() call will return 5.4.0 in info.release.
- */
- const char *ubuntu_kver_file = "/proc/version_signature";
- __u32 major, minor, patch;
- struct utsname info;
-
- if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) == 0) {
- FILE *f;
-
- f = fopen(ubuntu_kver_file, "r");
- if (f) {
- if (fscanf(f, "%*s %*s %d.%d.%d\n", &major, &minor, &patch) == 3) {
- fclose(f);
- return KERNEL_VERSION(major, minor, patch);
- }
- fclose(f);
- }
- /* something went wrong, fall back to uname() approach */
- }
-
- uname(&info);
- if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
- return 0;
- return KERNEL_VERSION(major, minor, patch);
-}
-
static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
@@ -4382,7 +4345,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
char *new_name;
memset(&info, 0, len);
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
if (err && errno == EINVAL)
err = bpf_get_map_info_from_fdinfo(fd, &info);
if (err)
@@ -4766,7 +4729,7 @@ static int probe_module_btf(void)
* kernel's module BTF support coincides with support for
* name/name_len fields in struct bpf_btf_info.
*/
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
close(fd);
return !err;
}
@@ -4929,7 +4892,7 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
int err;
memset(&map_info, 0, map_info_len);
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
+ err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
if (err && errno == EINVAL)
err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
if (err) {
@@ -5474,7 +5437,7 @@ static int load_module_btfs(struct bpf_object *obj)
info.name = ptr_to_u64(name);
info.name_len = sizeof(name);
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
if (err) {
err = -errno;
pr_warn("failed to get BTF object #%d info: %d\n", id, err);
@@ -7355,7 +7318,7 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
if (!bpf_map__is_internal(m))
continue;
if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
- m->def.map_flags ^= BPF_F_MMAPABLE;
+ m->def.map_flags &= ~BPF_F_MMAPABLE;
}
return 0;
@@ -8605,6 +8568,7 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
+ SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
};
@@ -9066,9 +9030,9 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
int err;
memset(&info, 0, info_len);
- err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
if (err) {
- pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
+ pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
attach_prog_fd, err);
return err;
}
@@ -9903,7 +9867,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
char errmsg[STRERR_BUFSIZE];
int type, pfd;
- if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
+ if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
return -EINVAL;
memset(&attr, 0, attr_sz);
@@ -9994,9 +9958,16 @@ static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
const char *kfunc_name, size_t offset)
{
static int index = 0;
+ int i;
snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
__sync_fetch_and_add(&index, 1));
+
+ /* sanitize kfunc_name in the probe name */
+ for (i = 0; buf[i]; i++) {
+ if (!isalnum(buf[i]))
+ buf[i] = '_';
+ }
}
static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
@@ -11702,17 +11673,22 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_buffer_params p = {};
struct perf_event_attr attr;
+ __u32 sample_period;
if (!OPTS_VALID(opts, perf_buffer_opts))
return libbpf_err_ptr(-EINVAL);
+ sample_period = OPTS_GET(opts, sample_period, 1);
+ if (!sample_period)
+ sample_period = 1;
+
memset(&attr, 0, attr_sz);
attr.size = attr_sz;
attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
+ attr.sample_period = sample_period;
+ attr.wakeup_events = sample_period;
p.attr = &attr;
p.sample_cb = sample_cb;
@@ -11765,7 +11741,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
/* best-effort sanity checks */
memset(&map, 0, sizeof(map));
map_info_len = sizeof(map);
- err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
+ err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
if (err) {
err = -errno;
/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index eee883f007f9..2efd80f6f7b9 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -96,6 +96,12 @@ enum libbpf_print_level {
typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
const char *, va_list ap);
+/**
+ * @brief **libbpf_set_print()** sets user-provided log callback function to
+ * be used for libbpf warnings and informational messages.
+ * @param fn The log print function. If NULL, libbpf won't print anything.
+ * @return Pointer to old print function.
+ */
LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
/* Hide internal to user */
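
A sketch of a callback that drops debug-level output and forwards the rest to stderr:

#include <stdarg.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static int print_fn(enum libbpf_print_level level,
		    const char *fmt, va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0; /* silence debug chatter */
	return vfprintf(stderr, fmt, args);
}

/* somewhere early in main(): */
/* libbpf_print_fn_t old = libbpf_set_print(print_fn); */
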
@@ -174,6 +180,14 @@ struct bpf_object_open_opts {
};
#define bpf_object_open_opts__last_field kernel_log_level
+/**
+ * @brief **bpf_object__open()** creates a bpf_object by opening
+ * the BPF ELF object file pointed to by the passed path and loading it
+ * into memory.
+ * @param path BPF object file path.
+ * @return pointer to the new bpf_object; NULL is returned on error,
+ * with the error code stored in errno
+ */
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
/**
@@ -203,16 +217,46 @@ LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts);
-/* Load/unload object into/from kernel */
+/**
+ * @brief **bpf_object__load()** loads BPF object into kernel.
+ * @param obj Pointer to a valid BPF object instance returned by
+ * **bpf_object__open*()** APIs
+ * @return 0, on success; negative error code, otherwise (the error
+ * code is also stored in errno)
+ */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
-LIBBPF_API void bpf_object__close(struct bpf_object *object);
+/**
+ * @brief **bpf_object__close()** closes a BPF object and releases all
+ * resources.
+ * @param obj Pointer to a valid BPF object
+ */
+LIBBPF_API void bpf_object__close(struct bpf_object *obj);
-/* pin_maps and unpin_maps can both be called with a NULL path, in which case
- * they will use the pin_path attribute of each map (and ignore all maps that
- * don't have a pin_path set).
+/**
+ * @brief **bpf_object__pin_maps()** pins each map contained within
+ * the BPF object at the passed directory.
+ * @param obj Pointer to a valid BPF object
+ * @param path A directory where maps should be pinned.
+ * @return 0, on success; negative error code, otherwise
+ *
+ * If `path` is NULL, `bpf_map__pin` (which is used on each map)
+ * will use the pin_path attribute of each map. In this case, maps that
+ * don't have a pin_path set will be ignored.
*/
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
+
+/**
+ * @brief **bpf_object__unpin_maps()** unpins each map contained within
+ * the BPF object found in the passed directory.
+ * @param obj Pointer to a valid BPF object
+ * @param path A directory where pinned maps should be searched for.
+ * @return 0, on success; negative error code, otherwise
+ *
+ * If `path` is NULL, `bpf_map__unpin` (which is used on each map)
+ * will use the pin_path attribute of each map. In this case, maps that
+ * don't have a pin_path set will be ignored.
+ */
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
const char *path);
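
Taken together, a minimal open/load/pin flow looks roughly like this (the object and bpffs paths are illustrative):

#include <errno.h>
#include <bpf/libbpf.h>

static int load_and_pin(void)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open("prog.bpf.o");
	if (!obj)
		return -errno;

	err = bpf_object__load(obj);
	if (!err)
		err = bpf_object__pin_maps(obj, "/sys/fs/bpf/myapp");

	bpf_object__close(obj);
	return err;
}
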
LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
@@ -823,10 +867,57 @@ LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize
* @return true, if the map is an internal map; false, otherwise
*/
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__set_pin_path()** sets the path attribute that tells where the
+ * BPF map should be pinned. This does not actually create the 'pin'.
+ * @param map The bpf_map
+ * @param path The path
+ * @return 0, on success; negative error, otherwise
+ */
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
+
+/**
+ * @brief **bpf_map__pin_path()** gets the path attribute that tells where the
+ * BPF map should be pinned.
+ * @param map The bpf_map
+ * @return The path string, which can be NULL
+ */
LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__is_pinned()** tells the caller whether or not the
+ * passed map has been pinned via a 'pin' file.
+ * @param map The bpf_map
+ * @return true, if the map is pinned; false, otherwise
+ */
LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__pin()** creates a file that serves as a 'pin'
+ * for the BPF map. This increments the reference count on the
+ * BPF map which will keep the BPF map loaded even after the
+ * userspace process which loaded it has exited.
+ * @param map The bpf_map to pin
+ * @param path A file path for the 'pin'
+ * @return 0, on success; negative error, otherwise
+ *
+ * If `path` is NULL, the map's `pin_path` attribute will be used. If this is
+ * also NULL, an error will be returned and the map will not be pinned.
+ */
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
+
+/**
+ * @brief **bpf_map__unpin()** removes the file that serves as a
+ * 'pin' for the BPF map.
+ * @param map The bpf_map to unpin
+ * @param path A file path for the 'pin'
+ * @return 0, on success; negative error, otherwise
+ *
+ * The `path` parameter can be NULL, in which case the `pin_path`
+ * map attribute is unpinned. If both the `path` parameter and
+ * `pin_path` map attribute are set, they must be equal.
+ */
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
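
A hedged sketch of an explicit pin/unpin round trip on a loaded map (the bpffs path is illustrative):

static int pin_cycle(struct bpf_map *map)
{
	int err;

	err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
	if (err)
		return err;

	/* passing NULL here would fall back to the map's pin_path */
	return bpf_map__unpin(map, "/sys/fs/bpf/my_map");
}
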
LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
@@ -957,9 +1048,10 @@ struct bpf_xdp_query_opts {
__u32 hw_prog_id; /* output */
__u32 skb_prog_id; /* output */
__u8 attach_mode; /* output */
+ __u64 feature_flags; /* output */
size_t :0;
};
-#define bpf_xdp_query_opts__last_field attach_mode
+#define bpf_xdp_query_opts__last_field feature_flags
LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags,
const struct bpf_xdp_attach_opts *opts);
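
With the new output field, checking a device capability becomes a short sketch:

#include <net/if.h>
#include <bpf/libbpf.h>
#include <linux/netdev.h>

static int dev_supports_xsk_zc(const char *ifname)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int err;

	err = bpf_xdp_query(if_nametoindex(ifname), 0, &opts);
	if (err)
		return err;

	return !!(opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY);
}
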
@@ -1039,7 +1131,8 @@ struct user_ring_buffer_opts {
#define user_ring_buffer_opts__last_field sz
-/* @brief **user_ring_buffer__new()** creates a new instance of a user ring
+/**
+ * @brief **user_ring_buffer__new()** creates a new instance of a user ring
* buffer.
*
* @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map.
@@ -1050,7 +1143,8 @@ struct user_ring_buffer_opts {
LIBBPF_API struct user_ring_buffer *
user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts);
-/* @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the
+/**
+ * @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the
* user ring buffer.
* @param rb A pointer to a user ring buffer.
* @param size The size of the sample, in bytes.
@@ -1070,7 +1164,8 @@ user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts);
*/
LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size);
-/* @brief **user_ring_buffer__reserve_blocking()** reserves a record in the
+/**
+ * @brief **user_ring_buffer__reserve_blocking()** reserves a record in the
* ring buffer, possibly blocking for up to @timeout_ms until a sample becomes
* available.
* @param rb The user ring buffer.
@@ -1114,7 +1209,8 @@ LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
__u32 size,
int timeout_ms);
-/* @brief **user_ring_buffer__submit()** submits a previously reserved sample
+/**
+ * @brief **user_ring_buffer__submit()** submits a previously reserved sample
* into the ring buffer.
* @param rb The user ring buffer.
* @param sample A reserved sample.
@@ -1124,7 +1220,8 @@ LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
*/
LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample);
-/* @brief **user_ring_buffer__discard()** discards a previously reserved sample.
+/**
+ * @brief **user_ring_buffer__discard()** discards a previously reserved sample.
* @param rb The user ring buffer.
* @param sample A reserved sample.
*
@@ -1133,7 +1230,8 @@ LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *samp
*/
LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample);
-/* @brief **user_ring_buffer__free()** frees a ring buffer that was previously
+/**
+ * @brief **user_ring_buffer__free()** frees a ring buffer that was previously
* created with **user_ring_buffer__new()**.
* @param rb The user ring buffer being freed.
*/
@@ -1149,8 +1247,10 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
/* common use perf buffer options */
struct perf_buffer_opts {
size_t sz;
+ __u32 sample_period;
+ size_t :0;
};
-#define perf_buffer_opts__last_field sz
+#define perf_buffer_opts__last_field sample_period
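
A sketch of the new knob; handle_event/handle_lost are the user's callbacks and map_fd an existing BPF_MAP_TYPE_PERF_EVENT_ARRAY fd:

static void handle_event(void *ctx, int cpu, void *data, __u32 size);
static void handle_lost(void *ctx, int cpu, __u64 cnt);

static struct perf_buffer *open_pb(int map_fd)
{
	/* wake the consumer only every 64 samples */
	LIBBPF_OPTS(perf_buffer_opts, pb_opts,
		.sample_period = 64,
	);

	return perf_buffer__new(map_fd, 8 /* pages per CPU ring */,
				handle_event, handle_lost, NULL, &pb_opts);
}
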
/**
* @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 71bf5691a689..50dde1f6521e 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -382,3 +382,11 @@ LIBBPF_1.1.0 {
user_ring_buffer__reserve_blocking;
user_ring_buffer__submit;
} LIBBPF_1.0.0;
+
+LIBBPF_1.2.0 {
+ global:
+ bpf_btf_get_info_by_fd;
+ bpf_link_get_info_by_fd;
+ bpf_map_get_info_by_fd;
+ bpf_prog_get_info_by_fd;
+} LIBBPF_1.1.0;
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
index 96f67a772a1b..6b180172ec6b 100644
--- a/tools/lib/bpf/libbpf_errno.c
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -39,14 +39,14 @@ static const char *libbpf_strerror_table[NR_ERRNO] = {
int libbpf_strerror(int err, char *buf, size_t size)
{
+ int ret;
+
if (!buf || !size)
return libbpf_err(-EINVAL);
err = err > 0 ? err : -err;
if (err < __LIBBPF_ERRNO__START) {
- int ret;
-
ret = strerror_r(err, buf, size);
buf[size - 1] = '\0';
return libbpf_err_errno(ret);
@@ -56,12 +56,20 @@ int libbpf_strerror(int err, char *buf, size_t size)
const char *msg;
msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
- snprintf(buf, size, "%s", msg);
+ ret = snprintf(buf, size, "%s", msg);
buf[size - 1] = '\0';
+ /* snprintf() returns the length the full message would need;
+ * it can be negative only when size exceeds INT_MAX, which is
+ * not going to happen here.
+ */
+ if (ret >= size)
+ return libbpf_err(-ERANGE);
return 0;
}
- snprintf(buf, size, "Unknown libbpf error %d", err);
+ ret = snprintf(buf, size, "Unknown libbpf error %d", err);
buf[size - 1] = '\0';
+ if (ret >= size)
+ return libbpf_err(-ERANGE);
return libbpf_err(-ENOENT);
}
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 377642ff51fc..fbaf68335394 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -20,8 +20,8 @@
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-/* prevent accidental re-addition of reallocarray() */
-#pragma GCC poison reallocarray
+/* prevent accidental re-addition of reallocarray()/strlcpy() */
+#pragma GCC poison reallocarray strlcpy
#include "libbpf.h"
#include "btf.h"
@@ -543,6 +543,7 @@ static inline int ensure_good_fd(int fd)
fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
saved_errno = errno;
close(old_fd);
+ errno = saved_errno;
if (fd < 0) {
pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
errno = saved_errno;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index b44fcbb4b42e..4f3bc968ff8e 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -12,11 +12,94 @@
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
+#include <linux/version.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
+/* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
+ * but Ubuntu provides /proc/version_signature file, as described at
+ * https://ubuntu.com/kernel, with an example contents below, which we
+ * can use to get a proper LINUX_VERSION_CODE.
+ *
+ * Ubuntu 5.4.0-12.15-generic 5.4.8
+ *
+ * In the above, 5.4.8 is what kernel is actually expecting, while
+ * uname() call will return 5.4.0 in info.release.
+ */
+static __u32 get_ubuntu_kernel_version(void)
+{
+ const char *ubuntu_kver_file = "/proc/version_signature";
+ __u32 major, minor, patch;
+ int ret;
+ FILE *f;
+
+ if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) != 0)
+ return 0;
+
+ f = fopen(ubuntu_kver_file, "r");
+ if (!f)
+ return 0;
+
+ ret = fscanf(f, "%*s %*s %u.%u.%u\n", &major, &minor, &patch);
+ fclose(f);
+ if (ret != 3)
+ return 0;
+
+ return KERNEL_VERSION(major, minor, patch);
+}
+
+/* On Debian LINUX_VERSION_CODE doesn't correspond to info.release.
+ * Instead, it is provided in info.version. Example contents on
+ * Debian 10 look like the below.
+ *
+ * utsname::release 4.19.0-22-amd64
+ * utsname::version #1 SMP Debian 4.19.260-1 (2022-09-29)
+ *
+ * In the above, 4.19.260 is what the kernel is actually expecting, while
+ * the uname() call will return 4.19.0 in info.release.
+ */
+static __u32 get_debian_kernel_version(struct utsname *info)
+{
+ __u32 major, minor, patch;
+ char *p;
+
+ p = strstr(info->version, "Debian ");
+ if (!p) {
+ /* This is not a Debian kernel. */
+ return 0;
+ }
+
+ if (sscanf(p, "Debian %u.%u.%u", &major, &minor, &patch) != 3)
+ return 0;
+
+ return KERNEL_VERSION(major, minor, patch);
+}
+
+__u32 get_kernel_version(void)
+{
+ __u32 major, minor, patch, version;
+ struct utsname info;
+
+ /* Check if this is an Ubuntu kernel. */
+ version = get_ubuntu_kernel_version();
+ if (version != 0)
+ return version;
+
+ uname(&info);
+
+ /* Check if this is a Debian kernel. */
+ version = get_debian_kernel_version(&info);
+ if (version != 0)
+ return version;
+
+ if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
+ return 0;
+
+ return KERNEL_VERSION(major, minor, patch);
+}
+
static int probe_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insns_cnt,
char *log_buf, size_t log_buf_sz)
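
The distro-specific fallbacks above are easy to test outside libbpf. A rough Python sketch of the same heuristic (illustrative only, not part of the patch): prefer Ubuntu's /proc/version_signature, then the "Debian x.y.z" marker in the uname version string, then plain uname release.

import re
import platform

def kernel_version():
    # Ubuntu: /proc/version_signature ends with the upstream version.
    try:
        with open("/proc/version_signature") as f:
            m = re.search(r"(\d+)\.(\d+)\.(\d+)\s*$", f.read())
            if m:
                return tuple(int(x) for x in m.groups())
    except OSError:
        pass
    # Debian: the real version hides in utsname.version.
    m = re.search(r"Debian (\d+)\.(\d+)\.(\d+)", platform.version())
    if not m:
        # Everyone else: parse utsname.release directly.
        m = re.match(r"(\d+)\.(\d+)\.(\d+)", platform.release())
    return tuple(int(x) for x in m.groups()) if m else None

print(kernel_version())
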
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index e944f5bce728..1fd2eeac5cfc 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -4,6 +4,6 @@
#define __LIBBPF_VERSION_H
#define LIBBPF_MAJOR_VERSION 1
-#define LIBBPF_MINOR_VERSION 1
+#define LIBBPF_MINOR_VERSION 2
#endif /* __LIBBPF_VERSION_H */
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 35104580870c..1653e7a8b0a1 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -9,6 +9,7 @@
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
+#include <linux/netdev.h>
#include <sys/socket.h>
#include <errno.h>
#include <time.h>
@@ -39,9 +40,15 @@ struct xdp_id_md {
int ifindex;
__u32 flags;
struct xdp_link_info info;
+ __u64 feature_flags;
};
-static int libbpf_netlink_open(__u32 *nl_pid)
+struct xdp_features_md {
+ int ifindex;
+ __u64 flags;
+};
+
+static int libbpf_netlink_open(__u32 *nl_pid, int proto)
{
struct sockaddr_nl sa;
socklen_t addrlen;
@@ -51,7 +58,7 @@ static int libbpf_netlink_open(__u32 *nl_pid)
memset(&sa, 0, sizeof(sa));
sa.nl_family = AF_NETLINK;
- sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, proto);
if (sock < 0)
return -errno;
@@ -212,14 +219,14 @@ done:
}
static int libbpf_netlink_send_recv(struct libbpf_nla_req *req,
- __dump_nlmsg_t parse_msg,
+ int proto, __dump_nlmsg_t parse_msg,
libbpf_dump_nlmsg_t parse_attr,
void *cookie)
{
__u32 nl_pid = 0;
int sock, ret;
- sock = libbpf_netlink_open(&nl_pid);
+ sock = libbpf_netlink_open(&nl_pid, proto);
if (sock < 0)
return sock;
@@ -238,6 +245,43 @@ out:
return ret;
}
+static int parse_genl_family_id(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
+{
+ struct genlmsghdr *gnl = NLMSG_DATA(nh);
+ struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN);
+ struct nlattr *tb[CTRL_ATTR_FAMILY_ID + 1];
+ __u16 *id = cookie;
+
+ libbpf_nla_parse(tb, CTRL_ATTR_FAMILY_ID, na,
+ NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL);
+ if (!tb[CTRL_ATTR_FAMILY_ID])
+ return NL_CONT;
+
+ *id = libbpf_nla_getattr_u16(tb[CTRL_ATTR_FAMILY_ID]);
+ return NL_DONE;
+}
+
+static int libbpf_netlink_resolve_genl_family_id(const char *name,
+ __u16 len, __u16 *id)
+{
+ struct libbpf_nla_req req = {
+ .nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN),
+ .nh.nlmsg_type = GENL_ID_CTRL,
+ .nh.nlmsg_flags = NLM_F_REQUEST,
+ .gnl.cmd = CTRL_CMD_GETFAMILY,
+ .gnl.version = 2,
+ };
+ int err;
+
+ err = nlattr_add(&req, CTRL_ATTR_FAMILY_NAME, name, len);
+ if (err < 0)
+ return err;
+
+ return libbpf_netlink_send_recv(&req, NETLINK_GENERIC,
+ parse_genl_family_id, NULL, id);
+}
+
static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
__u32 flags)
{
@@ -271,7 +315,7 @@ static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
}
nlattr_end_nested(&req, nla);
- return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts)
@@ -357,6 +401,29 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
return 0;
}
+static int parse_xdp_features(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
+{
+ struct genlmsghdr *gnl = NLMSG_DATA(nh);
+ struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN);
+ struct nlattr *tb[NETDEV_CMD_MAX + 1];
+ struct xdp_features_md *md = cookie;
+ __u32 ifindex;
+
+ libbpf_nla_parse(tb, NETDEV_CMD_MAX, na,
+ NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL);
+
+ if (!tb[NETDEV_A_DEV_IFINDEX] || !tb[NETDEV_A_DEV_XDP_FEATURES])
+ return NL_CONT;
+
+ ifindex = libbpf_nla_getattr_u32(tb[NETDEV_A_DEV_IFINDEX]);
+ if (ifindex != md->ifindex)
+ return NL_CONT;
+
+ md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]);
+ return NL_DONE;
+}
+
int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
{
struct libbpf_nla_req req = {
@@ -366,6 +433,10 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
.ifinfo.ifi_family = AF_PACKET,
};
struct xdp_id_md xdp_id = {};
+ struct xdp_features_md md = {
+ .ifindex = ifindex,
+ };
+ __u16 id;
int err;
if (!OPTS_VALID(opts, bpf_xdp_query_opts))
@@ -382,7 +453,7 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
xdp_id.ifindex = ifindex;
xdp_id.flags = xdp_flags;
- err = libbpf_netlink_send_recv(&req, __dump_link_nlmsg,
+ err = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, __dump_link_nlmsg,
get_xdp_info, &xdp_id);
if (err)
return libbpf_err(err);
@@ -393,6 +464,31 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
OPTS_SET(opts, skb_prog_id, xdp_id.info.skb_prog_id);
OPTS_SET(opts, attach_mode, xdp_id.info.attach_mode);
+ if (!OPTS_HAS(opts, feature_flags))
+ return 0;
+
+ err = libbpf_netlink_resolve_genl_family_id("netdev", sizeof("netdev"), &id);
+ if (err < 0)
+ return libbpf_err(err);
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = id;
+ req.gnl.cmd = NETDEV_CMD_DEV_GET;
+ req.gnl.version = 2;
+
+ err = nlattr_add(&req, NETDEV_A_DEV_IFINDEX, &ifindex, sizeof(ifindex));
+ if (err < 0)
+ return libbpf_err(err);
+
+ err = libbpf_netlink_send_recv(&req, NETLINK_GENERIC,
+ parse_xdp_features, NULL, &md);
+ if (err)
+ return libbpf_err(err);
+
+ opts->feature_flags = md.flags;
+
return 0;
}
@@ -493,7 +589,7 @@ static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
if (ret < 0)
return ret;
- return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
@@ -593,7 +689,7 @@ static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd)
int len, ret;
memset(&info, 0, info_len);
- ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ ret = bpf_prog_get_info_by_fd(fd, &info, &info_len);
if (ret < 0)
return ret;
@@ -673,7 +769,8 @@ int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
info.opts = opts;
- ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+ ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+ &info);
if (ret < 0)
return libbpf_err(ret);
if (!info.processed)
@@ -739,7 +836,7 @@ static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
return ret;
}
- return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
int bpf_tc_detach(const struct bpf_tc_hook *hook,
@@ -804,7 +901,8 @@ int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
info.opts = opts;
- ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+ ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+ &info);
if (ret < 0)
return libbpf_err(ret);
if (!info.processed)
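
The generic netlink plumbing added above (resolve the "netdev" family id via nlctrl, then issue NETDEV_CMD_DEV_GET for the XDP feature flags) can be mimicked over a raw AF_NETLINK socket. A minimal Python sketch of the family-id lookup only, with no error handling, assuming a Linux host:

import socket
import struct

NETLINK_GENERIC = 16
GENL_ID_CTRL = 0x10                 # nlctrl family
CTRL_CMD_GETFAMILY = 3
CTRL_ATTR_FAMILY_ID = 1
CTRL_ATTR_FAMILY_NAME = 2
NLM_F_REQUEST = 1

def resolve_family_id(name):
    with socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, NETLINK_GENERIC) as s:
        # CTRL_ATTR_FAMILY_NAME attribute: u16 len, u16 type,
        # NUL-terminated string, padded to 4 bytes.
        payload = name.encode() + b"\0"
        attr = struct.pack("HH", 4 + len(payload), CTRL_ATTR_FAMILY_NAME) + payload
        attr += b"\0" * ((4 - len(attr) % 4) % 4)
        # genlmsghdr (cmd, version, reserved) followed by the attribute.
        body = struct.pack("BBH", CTRL_CMD_GETFAMILY, 1, 0) + attr
        # nlmsghdr: len, type, flags, seq, pid.
        s.send(struct.pack("IHHII", 16 + len(body), GENL_ID_CTRL,
                           NLM_F_REQUEST, 1, 0) + body)
        reply = s.recv(64 * 1024)
        off = 16 + 4                # skip nlmsghdr + genlmsghdr of the reply
        while off < len(reply):
            alen, atype = struct.unpack_from("HH", reply, off)
            if atype == CTRL_ATTR_FAMILY_ID:
                return struct.unpack_from("H", reply, off + 4)[0]
            off += (alen + 3) & ~3

print(resolve_family_id("netdev"))
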
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 3900d052ed19..975e265eab3b 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -178,7 +178,7 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
hlen += nlmsg_len(&err->msg);
attr = (struct nlattr *) ((void *) err + hlen);
- alen = nlh->nlmsg_len - hlen;
+ alen = (void *)nlh + nlh->nlmsg_len - (void *)attr;
if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
extack_policy) != 0) {
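
The one-line nlattr.c change recomputes the extack TLV length from pointers; the old arithmetic never subtracted the outer nlmsghdr, so it overran the attribute area by 16 bytes. A quick numeric sketch with illustrative values:

NLMSG_HDRLEN = 16                      # outer struct nlmsghdr
nlmsg_len = 64                         # total message length, header included
hlen = 20                              # sizeof(struct nlmsgerr), capped ack

old_alen = nlmsg_len - hlen                  # 44: still counts the outer header
new_alen = nlmsg_len - NLMSG_HDRLEN - hlen   # 28: only the extack TLVs
print(old_alen, new_alen)
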
diff --git a/tools/lib/bpf/nlattr.h b/tools/lib/bpf/nlattr.h
index 4d15ae2ff812..d92d1c1de700 100644
--- a/tools/lib/bpf/nlattr.h
+++ b/tools/lib/bpf/nlattr.h
@@ -14,6 +14,7 @@
#include <errno.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
+#include <linux/genetlink.h>
/* avoid multiple definition of netlink features */
#define __LINUX_NETLINK_H
@@ -58,6 +59,7 @@ struct libbpf_nla_req {
union {
struct ifinfomsg ifinfo;
struct tcmsg tc;
+ struct genlmsghdr gnl;
};
char buf[128];
};
@@ -89,11 +91,21 @@ static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla)
return *(uint8_t *)libbpf_nla_data(nla);
}
+static inline uint16_t libbpf_nla_getattr_u16(const struct nlattr *nla)
+{
+ return *(uint16_t *)libbpf_nla_data(nla);
+}
+
static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla)
{
return *(uint32_t *)libbpf_nla_data(nla);
}
+static inline uint64_t libbpf_nla_getattr_u64(const struct nlattr *nla)
+{
+ return *(uint64_t *)libbpf_nla_data(nla);
+}
+
static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla)
{
return (const char *)libbpf_nla_data(nla);
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 47855af25f3b..02199364db13 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -83,7 +83,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
+ err = bpf_map_get_info_by_fd(map_fd, &info, &len);
if (err) {
err = -errno;
pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
@@ -359,7 +359,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
+ err = bpf_map_get_info_by_fd(map_fd, &info, &len);
if (err) {
err = -errno;
pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h
index fdfd235e52c4..0bd4c135acc2 100644
--- a/tools/lib/bpf/usdt.bpf.h
+++ b/tools/lib/bpf/usdt.bpf.h
@@ -130,7 +130,10 @@ int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
if (!spec)
return -ESRCH;
- if (arg_num >= BPF_USDT_MAX_ARG_CNT || arg_num >= spec->arg_cnt)
+ if (arg_num >= BPF_USDT_MAX_ARG_CNT)
+ return -ENOENT;
+ barrier_var(arg_num);
+ if (arg_num >= spec->arg_cnt)
return -ENOENT;
arg_spec = &spec->args[arg_num];
diff --git a/tools/net/ynl/cli.py b/tools/net/ynl/cli.py
new file mode 100755
index 000000000000..db410b74d539
--- /dev/null
+++ b/tools/net/ynl/cli.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: BSD-3-Clause
+
+import argparse
+import json
+import pprint
+import time
+
+from lib import YnlFamily
+
+
+def main():
+ parser = argparse.ArgumentParser(description='YNL CLI sample')
+ parser.add_argument('--spec', dest='spec', type=str, required=True)
+ parser.add_argument('--schema', dest='schema', type=str)
+ parser.add_argument('--no-schema', action='store_true')
+ parser.add_argument('--json', dest='json_text', type=str)
+ parser.add_argument('--do', dest='do', type=str)
+ parser.add_argument('--dump', dest='dump', type=str)
+ parser.add_argument('--sleep', dest='sleep', type=int)
+ parser.add_argument('--subscribe', dest='ntf', type=str)
+ args = parser.parse_args()
+
+ if args.no_schema:
+ args.schema = ''
+
+ attrs = {}
+ if args.json_text:
+ attrs = json.loads(args.json_text)
+
+ ynl = YnlFamily(args.spec, args.schema)
+
+ if args.ntf:
+ ynl.ntf_subscribe(args.ntf)
+
+ if args.sleep:
+ time.sleep(args.sleep)
+
+ if args.do:
+ reply = ynl.do(args.do, attrs)
+ pprint.PrettyPrinter().pprint(reply)
+ if args.dump:
+ reply = ynl.dump(args.dump, attrs)
+ pprint.PrettyPrinter().pprint(reply)
+
+ if args.ntf:
+ ynl.check_ntf()
+ pprint.PrettyPrinter().pprint(ynl.async_msg_queue)
+
+
+if __name__ == "__main__":
+ main()
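
For reference, a hypothetical use of the sample CLI and of YnlFamily directly; the spec path, operation, and attribute values are illustrative, and tools/net/ynl is assumed to be the working directory:

# ./cli.py --spec Documentation/netlink/specs/netdev.yaml --dump dev-get
from lib import YnlFamily

ynl = YnlFamily("Documentation/netlink/specs/netdev.yaml")
# Operations become bound methods named after the op, '-' mapped to '_'.
print(ynl.dev_get({"ifindex": 1}))
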
diff --git a/tools/net/ynl/lib/__init__.py b/tools/net/ynl/lib/__init__.py
new file mode 100644
index 000000000000..3c73f59eabab
--- /dev/null
+++ b/tools/net/ynl/lib/__init__.py
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+from .nlspec import SpecAttr, SpecAttrSet, SpecFamily, SpecOperation
+from .ynl import YnlFamily
+
+__all__ = ["SpecAttr", "SpecAttrSet", "SpecFamily", "SpecOperation",
+ "YnlFamily"]
diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py
new file mode 100644
index 000000000000..e204679ad8b7
--- /dev/null
+++ b/tools/net/ynl/lib/nlspec.py
@@ -0,0 +1,310 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+import collections
+import importlib
+import os
+import traceback
+import yaml
+
+
+# To be loaded dynamically as needed
+jsonschema = None
+
+
+class SpecElement:
+ """Netlink spec element.
+
+ Abstract element of the Netlink spec. Implements the dictionary interface
+ for access to the raw spec. Supports iterative resolution of dependencies
+ across elements and class inheritance levels. The elements of the spec
+ may refer to each other, and although loops should be very rare, having
+ to maintain correct ordering of instantiation is painful, so the resolve()
+ method should be used to perform parts of init which require access to
+ other parts of the spec.
+
+ Attributes:
+ yaml raw spec as loaded from the spec file
+ family back reference to the full family
+
+ name name of the entity as listed in the spec (optional)
+ ident_name name which can be safely used as an identifier in code (optional)
+ """
+ def __init__(self, family, yaml):
+ self.yaml = yaml
+ self.family = family
+
+ if 'name' in self.yaml:
+ self.name = self.yaml['name']
+ self.ident_name = self.name.replace('-', '_')
+
+ self._super_resolved = False
+ family.add_unresolved(self)
+
+ def __getitem__(self, key):
+ return self.yaml[key]
+
+ def __contains__(self, key):
+ return key in self.yaml
+
+ def get(self, key, default=None):
+ return self.yaml.get(key, default)
+
+ def resolve_up(self, up):
+ if not self._super_resolved:
+ up.resolve()
+ self._super_resolved = True
+
+ def resolve(self):
+ pass
+
+
+class SpecAttr(SpecElement):
+ """ Single Netlink atttribute type
+
+ Represents a single attribute type within an attr space.
+
+ Attributes:
+ value numerical ID when serialized
+ attr_set Attribute Set containing this attr
+ """
+ def __init__(self, family, attr_set, yaml, value):
+ super().__init__(family, yaml)
+
+ self.value = value
+ self.attr_set = attr_set
+ self.is_multi = yaml.get('multi-attr', False)
+
+
+class SpecAttrSet(SpecElement):
+ """ Netlink Attribute Set class.
+
+ Represents an ID space of attributes within Netlink.
+
+ Note that unlike other elements, which expose contents of the raw spec
+ via the dictionary interface, the Attribute Set exposes attributes by name.
+
+ Attributes:
+ attrs ordered dict of all attributes (indexed by name)
+ attrs_by_val ordered dict of all attributes (indexed by value)
+ subset_of parent set if this is a subset, otherwise None
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.subset_of = self.yaml.get('subset-of', None)
+
+ self.attrs = collections.OrderedDict()
+ self.attrs_by_val = collections.OrderedDict()
+
+ val = 0
+ for elem in self.yaml['attributes']:
+ if 'value' in elem:
+ val = elem['value']
+
+ attr = self.new_attr(elem, val)
+ self.attrs[attr.name] = attr
+ self.attrs_by_val[attr.value] = attr
+ val += 1
+
+ def new_attr(self, elem, value):
+ return SpecAttr(self.family, self, elem, value)
+
+ def __getitem__(self, key):
+ return self.attrs[key]
+
+ def __contains__(self, key):
+ return key in self.attrs
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def items(self):
+ return self.attrs.items()
+
+
+class SpecOperation(SpecElement):
+ """Netlink Operation
+
+ Information about a single Netlink operation.
+
+ Attributes:
+ value numerical ID when serialized, None if req/rsp values differ
+
+ req_value numerical ID when serialized, user -> kernel
+ rsp_value numerical ID when serialized, user <- kernel
+ is_call bool, whether the operation is a call
+ is_async bool, whether the operation is a notification
+ is_resv bool, whether the operation does not exist (it's just a reserved ID)
+ attr_set attribute set name
+
+ yaml raw spec as loaded from the spec file
+ """
+ def __init__(self, family, yaml, req_value, rsp_value):
+ super().__init__(family, yaml)
+
+ self.value = req_value if req_value == rsp_value else None
+ self.req_value = req_value
+ self.rsp_value = rsp_value
+
+ self.is_call = 'do' in yaml or 'dump' in yaml
+ self.is_async = 'notify' in yaml or 'event' in yaml
+ self.is_resv = not self.is_async and not self.is_call
+
+ # Added by resolve:
+ self.attr_set = None
+ delattr(self, "attr_set")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if 'attribute-set' in self.yaml:
+ attr_set_name = self.yaml['attribute-set']
+ elif 'notify' in self.yaml:
+ msg = self.family.msgs[self.yaml['notify']]
+ attr_set_name = msg['attribute-set']
+ elif self.is_resv:
+ attr_set_name = ''
+ else:
+ raise Exception(f"Can't resolve attribute set for op '{self.name}'")
+ if attr_set_name:
+ self.attr_set = self.family.attr_sets[attr_set_name]
+
+
+class SpecFamily(SpecElement):
+ """ Netlink Family Spec class.
+
+ Netlink family information loaded from a spec (e.g. in YAML).
+ Takes care of unfolding implicit information which can be skipped
+ in the spec itself for brevity.
+
+ The class can be used like a dictionary to access the raw spec
+ elements but that's usually a bad idea.
+
+ Attributes:
+ proto protocol type (e.g. genetlink)
+
+ attr_sets dict of attribute sets
+ msgs dict of all messages (indexed by name)
+ req_by_value / rsp_by_value dicts of all messages (indexed by value)
+ ops dict of all valid requests / responses
+ """
+ def __init__(self, spec_path, schema_path=None):
+ with open(spec_path, "r") as stream:
+ spec = yaml.safe_load(stream)
+
+ self._resolution_list = []
+
+ super().__init__(self, spec)
+
+ self.proto = self.yaml.get('protocol', 'genetlink')
+
+ if schema_path is None:
+ schema_path = os.path.dirname(os.path.dirname(spec_path)) + f'/{self.proto}.yaml'
+ if schema_path:
+ global jsonschema
+
+ with open(schema_path, "r") as stream:
+ schema = yaml.safe_load(stream)
+
+ if jsonschema is None:
+ jsonschema = importlib.import_module("jsonschema")
+
+ jsonschema.validate(self.yaml, schema)
+
+ self.attr_sets = collections.OrderedDict()
+ self.msgs = collections.OrderedDict()
+ self.req_by_value = collections.OrderedDict()
+ self.rsp_by_value = collections.OrderedDict()
+ self.ops = collections.OrderedDict()
+
+ last_exception = None
+ while len(self._resolution_list) > 0:
+ resolved = []
+ unresolved = self._resolution_list
+ self._resolution_list = []
+
+ for elem in unresolved:
+ try:
+ elem.resolve()
+ except (KeyError, AttributeError) as e:
+ self._resolution_list.append(elem)
+ last_exception = e
+ continue
+
+ resolved.append(elem)
+
+ if len(resolved) == 0:
+ traceback.print_exception(last_exception)
+ raise Exception("Could not resolve any spec element, infinite loop?")
+
+ def new_attr_set(self, elem):
+ return SpecAttrSet(self, elem)
+
+ def new_operation(self, elem, req_val, rsp_val):
+ return SpecOperation(self, elem, req_val, rsp_val)
+
+ def add_unresolved(self, elem):
+ self._resolution_list.append(elem)
+
+ def _dictify_ops_unified(self):
+ val = 0
+ for elem in self.yaml['operations']['list']:
+ if 'value' in elem:
+ val = elem['value']
+
+ op = self.new_operation(elem, val, val)
+ val += 1
+
+ self.msgs[op.name] = op
+
+ def _dictify_ops_directional(self):
+ req_val = rsp_val = 0
+ for elem in self.yaml['operations']['list']:
+ if 'notify' in elem:
+ if 'value' in elem:
+ rsp_val = elem['value']
+ req_val_next = req_val
+ rsp_val_next = rsp_val + 1
+ req_val = None
+ elif 'do' in elem or 'dump' in elem:
+ mode = elem['do'] if 'do' in elem else elem['dump']
+
+ v = mode.get('request', {}).get('value', None)
+ if v:
+ req_val = v
+ v = mode.get('reply', {}).get('value', None)
+ if v:
+ rsp_val = v
+
+ rsp_inc = 1 if 'reply' in mode else 0
+ req_val_next = req_val + 1
+ rsp_val_next = rsp_val + rsp_inc
+ else:
+ raise Exception("Can't parse directional ops")
+
+ op = self.new_operation(elem, req_val, rsp_val)
+ req_val = req_val_next
+ rsp_val = rsp_val_next
+
+ self.msgs[op.name] = op
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ for elem in self.yaml['attribute-sets']:
+ attr_set = self.new_attr_set(elem)
+ self.attr_sets[elem['name']] = attr_set
+
+ msg_id_model = self.yaml['operations'].get('enum-model', 'unified')
+ if msg_id_model == 'unified':
+ self._dictify_ops_unified()
+ elif msg_id_model == 'directional':
+ self._dictify_ops_directional()
+
+ for op in self.msgs.values():
+ if op.req_value is not None:
+ self.req_by_value[op.req_value] = op
+ if op.rsp_value is not None:
+ self.rsp_by_value[op.rsp_value] = op
+ if not op.is_async and 'attribute-set' in op:
+ self.ops[op.name] = op
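
A short sketch of how the classes above are meant to be driven; the spec path is illustrative, any in-tree family spec works, and the jsonschema module plus the matching protocol schema are assumed to be available:

from lib import SpecFamily

spec = SpecFamily("Documentation/netlink/specs/netdev.yaml")
print(spec.proto)                        # 'genetlink' unless overridden

# Operations and attribute sets are wired up by the resolve() loop above.
for name, op in spec.ops.items():
    print(name, op.req_value, op.attr_set.name)

for aset_name, aset in spec.attr_sets.items():
    for attr_name, attr in aset.items():
        print(aset_name, attr_name, attr.value)
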
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
new file mode 100644
index 000000000000..1c7411ee04dc
--- /dev/null
+++ b/tools/net/ynl/lib/ynl.py
@@ -0,0 +1,528 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+import functools
+import os
+import random
+import socket
+import struct
+import yaml
+
+from .nlspec import SpecFamily
+
+#
+# Generic Netlink code which should really be in some library, but I can't quickly find one.
+#
+
+
+class Netlink:
+ # Netlink socket
+ SOL_NETLINK = 270
+
+ NETLINK_ADD_MEMBERSHIP = 1
+ NETLINK_CAP_ACK = 10
+ NETLINK_EXT_ACK = 11
+
+ # Netlink message
+ NLMSG_ERROR = 2
+ NLMSG_DONE = 3
+
+ NLM_F_REQUEST = 1
+ NLM_F_ACK = 4
+ NLM_F_ROOT = 0x100
+ NLM_F_MATCH = 0x200
+ NLM_F_APPEND = 0x800
+
+ NLM_F_CAPPED = 0x100
+ NLM_F_ACK_TLVS = 0x200
+
+ NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH
+
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+
+ NLA_TYPE_MASK = NLA_F_NESTED | NLA_F_NET_BYTEORDER
+
+ # Genetlink defines
+ NETLINK_GENERIC = 16
+
+ GENL_ID_CTRL = 0x10
+
+ # nlctrl
+ CTRL_CMD_GETFAMILY = 3
+
+ CTRL_ATTR_FAMILY_ID = 1
+ CTRL_ATTR_FAMILY_NAME = 2
+ CTRL_ATTR_MAXATTR = 5
+ CTRL_ATTR_MCAST_GROUPS = 7
+
+ CTRL_ATTR_MCAST_GRP_NAME = 1
+ CTRL_ATTR_MCAST_GRP_ID = 2
+
+ # Extack types
+ NLMSGERR_ATTR_MSG = 1
+ NLMSGERR_ATTR_OFFS = 2
+ NLMSGERR_ATTR_COOKIE = 3
+ NLMSGERR_ATTR_POLICY = 4
+ NLMSGERR_ATTR_MISS_TYPE = 5
+ NLMSGERR_ATTR_MISS_NEST = 6
+
+
+class NlAttr:
+ def __init__(self, raw, offset):
+ self._len, self._type = struct.unpack("HH", raw[offset:offset + 4])
+ self.type = self._type & ~Netlink.NLA_TYPE_MASK
+ self.payload_len = self._len
+ self.full_len = (self.payload_len + 3) & ~3
+ self.raw = raw[offset + 4:offset + self.payload_len]
+
+ def as_u8(self):
+ return struct.unpack("B", self.raw)[0]
+
+ def as_u16(self):
+ return struct.unpack("H", self.raw)[0]
+
+ def as_u32(self):
+ return struct.unpack("I", self.raw)[0]
+
+ def as_u64(self):
+ return struct.unpack("Q", self.raw)[0]
+
+ def as_strz(self):
+ return self.raw.decode('ascii')[:-1]
+
+ def as_bin(self):
+ return self.raw
+
+ def __repr__(self):
+ return f"[type:{self.type} len:{self._len}] {self.raw}"
+
+
+class NlAttrs:
+ def __init__(self, msg):
+ self.attrs = []
+
+ offset = 0
+ while offset < len(msg):
+ attr = NlAttr(msg, offset)
+ offset += attr.full_len
+ self.attrs.append(attr)
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def __repr__(self):
+ msg = ''
+ for a in self.attrs:
+ if msg:
+ msg += '\n'
+ msg += repr(a)
+ return msg
+
+
+class NlMsg:
+ def __init__(self, msg, offset, attr_space=None):
+ self.hdr = msg[offset:offset + 16]
+
+ self.nl_len, self.nl_type, self.nl_flags, self.nl_seq, self.nl_portid = \
+ struct.unpack("IHHII", self.hdr)
+
+ self.raw = msg[offset + 16:offset + self.nl_len]
+
+ self.error = 0
+ self.done = 0
+
+ extack_off = None
+ if self.nl_type == Netlink.NLMSG_ERROR:
+ self.error = struct.unpack("i", self.raw[0:4])[0]
+ self.done = 1
+ extack_off = 20
+ elif self.nl_type == Netlink.NLMSG_DONE:
+ self.done = 1
+ extack_off = 4
+
+ self.extack = None
+ if self.nl_flags & Netlink.NLM_F_ACK_TLVS and extack_off:
+ self.extack = dict()
+ extack_attrs = NlAttrs(self.raw[extack_off:])
+ for extack in extack_attrs:
+ if extack.type == Netlink.NLMSGERR_ATTR_MSG:
+ self.extack['msg'] = extack.as_strz()
+ elif extack.type == Netlink.NLMSGERR_ATTR_MISS_TYPE:
+ self.extack['miss-type'] = extack.as_u32()
+ elif extack.type == Netlink.NLMSGERR_ATTR_MISS_NEST:
+ self.extack['miss-nest'] = extack.as_u32()
+ elif extack.type == Netlink.NLMSGERR_ATTR_OFFS:
+ self.extack['bad-attr-offs'] = extack.as_u32()
+ else:
+ if 'unknown' not in self.extack:
+ self.extack['unknown'] = []
+ self.extack['unknown'].append(extack)
+
+ if attr_space:
+ # We don't have the ability to parse nests yet, so only do global
+ if 'miss-type' in self.extack and 'miss-nest' not in self.extack:
+ miss_type = self.extack['miss-type']
+ if miss_type in attr_space.attrs_by_val:
+ spec = attr_space.attrs_by_val[miss_type]
+ desc = spec['name']
+ if 'doc' in spec:
+ desc += f" ({spec['doc']})"
+ self.extack['miss-type'] = desc
+
+ def __repr__(self):
+ msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}\n"
+ if self.error:
+ msg += '\terror: ' + str(self.error)
+ if self.extack:
+ msg += '\textack: ' + repr(self.extack)
+ return msg
+
+
+class NlMsgs:
+ def __init__(self, data, attr_space=None):
+ self.msgs = []
+
+ offset = 0
+ while offset < len(data):
+ msg = NlMsg(data, offset, attr_space=attr_space)
+ offset += msg.nl_len
+ self.msgs.append(msg)
+
+ def __iter__(self):
+ yield from self.msgs
+
+
+genl_family_name_to_id = None
+
+
+def _genl_msg(nl_type, nl_flags, genl_cmd, genl_version, seq=None):
+ # we prepend length in _genl_msg_finalize()
+ if seq is None:
+ seq = random.randint(1, 1024)
+ nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
+ genlmsg = struct.pack("bbH", genl_cmd, genl_version, 0)
+ return nlmsg + genlmsg
+
+
+def _genl_msg_finalize(msg):
+ return struct.pack("I", len(msg) + 4) + msg
+
+
+def _genl_load_families():
+ with socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, Netlink.NETLINK_GENERIC) as sock:
+ sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
+
+ msg = _genl_msg(Netlink.GENL_ID_CTRL,
+ Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK | Netlink.NLM_F_DUMP,
+ Netlink.CTRL_CMD_GETFAMILY, 1)
+ msg = _genl_msg_finalize(msg)
+
+ sock.send(msg, 0)
+
+ global genl_family_name_to_id
+ genl_family_name_to_id = dict()
+
+ while True:
+ reply = sock.recv(128 * 1024)
+ nms = NlMsgs(reply)
+ for nl_msg in nms:
+ if nl_msg.error:
+ print("Netlink error:", nl_msg.error)
+ return
+ if nl_msg.done:
+ return
+
+ gm = GenlMsg(nl_msg)
+ fam = dict()
+ for attr in gm.raw_attrs:
+ if attr.type == Netlink.CTRL_ATTR_FAMILY_ID:
+ fam['id'] = attr.as_u16()
+ elif attr.type == Netlink.CTRL_ATTR_FAMILY_NAME:
+ fam['name'] = attr.as_strz()
+ elif attr.type == Netlink.CTRL_ATTR_MAXATTR:
+ fam['maxattr'] = attr.as_u32()
+ elif attr.type == Netlink.CTRL_ATTR_MCAST_GROUPS:
+ fam['mcast'] = dict()
+ for entry in NlAttrs(attr.raw):
+ mcast_name = None
+ mcast_id = None
+ for entry_attr in NlAttrs(entry.raw):
+ if entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_NAME:
+ mcast_name = entry_attr.as_strz()
+ elif entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_ID:
+ mcast_id = entry_attr.as_u32()
+ if mcast_name and mcast_id is not None:
+ fam['mcast'][mcast_name] = mcast_id
+ if 'name' in fam and 'id' in fam:
+ genl_family_name_to_id[fam['name']] = fam
+
+
+class GenlMsg:
+ def __init__(self, nl_msg):
+ self.nl = nl_msg
+
+ self.hdr = nl_msg.raw[0:4]
+ self.raw = nl_msg.raw[4:]
+
+ self.genl_cmd, self.genl_version, _ = struct.unpack("bbH", self.hdr)
+
+ self.raw_attrs = NlAttrs(self.raw)
+
+ def __repr__(self):
+ msg = repr(self.nl)
+ msg += f"\tgenl_cmd = {self.genl_cmd} genl_ver = {self.genl_version}\n"
+ for a in self.raw_attrs:
+ msg += '\t\t' + repr(a) + '\n'
+ return msg
+
+
+class GenlFamily:
+ def __init__(self, family_name):
+ self.family_name = family_name
+
+ global genl_family_name_to_id
+ if genl_family_name_to_id is None:
+ _genl_load_families()
+
+ self.genl_family = genl_family_name_to_id[family_name]
+ self.family_id = genl_family_name_to_id[family_name]['id']
+
+
+#
+# YNL implementation details.
+#
+
+
+class YnlFamily(SpecFamily):
+ def __init__(self, def_path, schema=None):
+ super().__init__(def_path, schema)
+
+ self.include_raw = False
+
+ self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, Netlink.NETLINK_GENERIC)
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1)
+
+ self._types = dict()
+
+ for elem in self.yaml.get('definitions', []):
+ self._types[elem['name']] = elem
+
+ self.async_msg_ids = set()
+ self.async_msg_queue = []
+
+ for msg in self.msgs.values():
+ if msg.is_async:
+ self.async_msg_ids.add(msg.rsp_value)
+
+ for op_name, op in self.ops.items():
+ bound_f = functools.partial(self._op, op_name)
+ setattr(self, op.ident_name, bound_f)
+
+ self.family = GenlFamily(self.yaml['name'])
+
+ def ntf_subscribe(self, mcast_name):
+ if mcast_name not in self.family.genl_family['mcast']:
+ raise Exception(f'Multicast group "{mcast_name}" not present in the family')
+
+ self.sock.bind((0, 0))
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_ADD_MEMBERSHIP,
+ self.family.genl_family['mcast'][mcast_name])
+
+ def _add_attr(self, space, name, value):
+ attr = self.attr_sets[space][name]
+ nl_type = attr.value
+ if attr["type"] == 'nest':
+ nl_type |= Netlink.NLA_F_NESTED
+ attr_payload = b''
+ for subname, subvalue in value.items():
+ attr_payload += self._add_attr(attr['nested-attributes'], subname, subvalue)
+ elif attr["type"] == 'flag':
+ attr_payload = b''
+ elif attr["type"] == 'u32':
+ attr_payload = struct.pack("I", int(value))
+ elif attr["type"] == 'string':
+ attr_payload = str(value).encode('ascii') + b'\x00'
+ elif attr["type"] == 'binary':
+ attr_payload = value
+ else:
+ raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
+
+ pad = b'\x00' * ((4 - len(attr_payload) % 4) % 4)
+ return struct.pack('HH', len(attr_payload) + 4, nl_type) + attr_payload + pad
+
+ def _decode_enum(self, rsp, attr_spec):
+ raw = rsp[attr_spec['name']]
+ enum = self._types[attr_spec['enum']]
+ i = attr_spec.get('value-start', 0)
+ if 'enum-as-flags' in attr_spec and attr_spec['enum-as-flags']:
+ value = set()
+ while raw:
+ if raw & 1:
+ value.add(enum['entries'][i])
+ raw >>= 1
+ i += 1
+ else:
+ value = enum['entries'][raw - i]
+ rsp[attr_spec['name']] = value
+
+ def _decode(self, attrs, space):
+ attr_space = self.attr_sets[space]
+ rsp = dict()
+ for attr in attrs:
+ attr_spec = attr_space.attrs_by_val[attr.type]
+ if attr_spec["type"] == 'nest':
+ subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'])
+ decoded = subdict
+ elif attr_spec['type'] == 'u8':
+ decoded = attr.as_u8()
+ elif attr_spec['type'] == 'u32':
+ decoded = attr.as_u32()
+ elif attr_spec['type'] == 'u64':
+ decoded = attr.as_u64()
+ elif attr_spec["type"] == 'string':
+ decoded = attr.as_strz()
+ elif attr_spec["type"] == 'binary':
+ decoded = attr.as_bin()
+ elif attr_spec["type"] == 'flag':
+ decoded = True
+ else:
+ raise Exception(f'Unknown {attr.type} {attr_spec["name"]} {attr_spec["type"]}')
+
+ if not attr_spec.is_multi:
+ rsp[attr_spec['name']] = decoded
+ elif attr_spec.name in rsp:
+ rsp[attr_spec.name].append(decoded)
+ else:
+ rsp[attr_spec.name] = [decoded]
+
+ if 'enum' in attr_spec:
+ self._decode_enum(rsp, attr_spec)
+ return rsp
+
+ def _decode_extack_path(self, attrs, attr_set, offset, target):
+ for attr in attrs:
+ attr_spec = attr_set.attrs_by_val[attr.type]
+ if offset > target:
+ break
+ if offset == target:
+ return '.' + attr_spec.name
+
+ if offset + attr.full_len <= target:
+ offset += attr.full_len
+ continue
+ if attr_spec['type'] != 'nest':
+ raise Exception(f"Can't dive into {attr.type} ({attr_spec['name']}) for extack")
+ offset += 4
+ subpath = self._decode_extack_path(NlAttrs(attr.raw),
+ self.attr_sets[attr_spec['nested-attributes']],
+ offset, target)
+ if subpath is None:
+ return None
+ return '.' + attr_spec.name + subpath
+
+ return None
+
+ def _decode_extack(self, request, attr_space, extack):
+ if 'bad-attr-offs' not in extack:
+ return
+
+ genl_req = GenlMsg(NlMsg(request, 0, attr_space=attr_space))
+ path = self._decode_extack_path(genl_req.raw_attrs, attr_space,
+ 20, extack['bad-attr-offs'])
+ if path:
+ del extack['bad-attr-offs']
+ extack['bad-attr'] = path
+
+ def handle_ntf(self, nl_msg, genl_msg):
+ msg = dict()
+ if self.include_raw:
+ msg['nlmsg'] = nl_msg
+ msg['genlmsg'] = genl_msg
+ op = self.rsp_by_value[genl_msg.genl_cmd]
+ msg['name'] = op['name']
+ msg['msg'] = self._decode(genl_msg.raw_attrs, op.attr_set.name)
+ self.async_msg_queue.append(msg)
+
+ def check_ntf(self):
+ while True:
+ try:
+ reply = self.sock.recv(128 * 1024, socket.MSG_DONTWAIT)
+ except BlockingIOError:
+ return
+
+ nms = NlMsgs(reply)
+ for nl_msg in nms:
+ if nl_msg.error:
+ print("Netlink error in ntf!?", os.strerror(-nl_msg.error))
+ print(nl_msg)
+ continue
+ if nl_msg.done:
+ print("Netlink done while checking for ntf!?")
+ continue
+
+ gm = GenlMsg(nl_msg)
+ if gm.genl_cmd not in self.async_msg_ids:
+ print("Unexpected msg id done while checking for ntf", gm)
+ continue
+
+ self.handle_ntf(nl_msg, gm)
+
+ def _op(self, method, vals, dump=False):
+ op = self.ops[method]
+
+ nl_flags = Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK
+ if dump:
+ nl_flags |= Netlink.NLM_F_DUMP
+
+ req_seq = random.randint(1024, 65535)
+ msg = _genl_msg(self.family.family_id, nl_flags, op.req_value, 1, req_seq)
+ for name, value in vals.items():
+ msg += self._add_attr(op.attr_set.name, name, value)
+ msg = _genl_msg_finalize(msg)
+
+ self.sock.send(msg, 0)
+
+ done = False
+ rsp = []
+ while not done:
+ reply = self.sock.recv(128 * 1024)
+ nms = NlMsgs(reply, attr_space=op.attr_set)
+ for nl_msg in nms:
+ if nl_msg.extack:
+ self._decode_extack(msg, op.attr_set, nl_msg.extack)
+
+ if nl_msg.error:
+ print("Netlink error:", os.strerror(-nl_msg.error))
+ print(nl_msg)
+ return
+ if nl_msg.done:
+ if nl_msg.extack:
+ print("Netlink warning:")
+ print(nl_msg)
+ done = True
+ break
+
+ gm = GenlMsg(nl_msg)
+ # Check if this is a reply to our request
+ if nl_msg.nl_seq != req_seq or gm.genl_cmd != op.rsp_value:
+ if gm.genl_cmd in self.async_msg_ids:
+ self.handle_ntf(nl_msg, gm)
+ continue
+ else:
+ print('Unexpected message: ' + repr(gm))
+ continue
+
+ rsp.append(self._decode(gm.raw_attrs, op.attr_set.name))
+
+ if not rsp:
+ return None
+ if not dump and len(rsp) == 1:
+ return rsp[0]
+ return rsp
+
+ def do(self, method, vals):
+ return self._op(method, vals)
+
+ def dump(self, method, vals):
+ return self._op(method, vals, dump=True)
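
The attribute encoding and decoding above boil down to the classic netlink TLV layout. A self-contained miniature (illustrative, standalone) of what NlAttr/NlAttrs and _add_attr() do:

import struct

def pack_attr(nla_type, payload):
    # u16 nla_len (header included), u16 nla_type, payload padded to 4 bytes.
    pad = b"\0" * ((4 - len(payload) % 4) % 4)
    return struct.pack("HH", len(payload) + 4, nla_type) + payload + pad

raw = pack_attr(1, struct.pack("I", 7)) + pack_attr(2, b"eth0\0")

off = 0
while off < len(raw):
    alen, atype = struct.unpack_from("HH", raw, off)
    print(atype, raw[off + 4:off + alen])
    off += (alen + 3) & ~3
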
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
new file mode 100755
index 000000000000..3942f24b9163
--- /dev/null
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -0,0 +1,2357 @@
+#!/usr/bin/env python3
+
+import argparse
+import collections
+import os
+import yaml
+
+from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation
+
+
+def c_upper(name):
+ return name.upper().replace('-', '_')
+
+
+def c_lower(name):
+ return name.lower().replace('-', '_')
+
+
+class BaseNlLib:
+ def get_family_id(self):
+ return 'ys->family_id'
+
+ def parse_cb_run(self, cb, data, is_dump=False, indent=1):
+ ind = '\n\t\t' + '\t' * indent + ' '
+ if is_dump:
+ return f"mnl_cb_run2(ys->rx_buf, len, 0, 0, {cb}, {data},{ind}ynl_cb_array, NLMSG_MIN_TYPE)"
+ else:
+ return f"mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,{ind}{cb}, {data},{ind}" + \
+ "ynl_cb_array, NLMSG_MIN_TYPE)"
+
+
+class Type(SpecAttr):
+ def __init__(self, family, attr_set, attr, value):
+ super().__init__(family, attr_set, attr, value)
+
+ self.attr = attr
+ self.attr_set = attr_set
+ self.type = attr['type']
+ self.checks = attr.get('checks', {})
+
+ if 'len' in attr:
+ self.len = attr['len']
+ if 'nested-attributes' in attr:
+ self.nested_attrs = attr['nested-attributes']
+ if self.nested_attrs == family.name:
+ self.nested_render_name = f"{family.name}"
+ else:
+ self.nested_render_name = f"{family.name}_{c_lower(self.nested_attrs)}"
+
+ self.c_name = c_lower(self.name)
+ if self.c_name in _C_KW:
+ self.c_name += '_'
+
+ # Added by resolve():
+ self.enum_name = None
+ delattr(self, "enum_name")
+
+ def resolve(self):
+ self.enum_name = f"{self.attr_set.name_prefix}{self.name}"
+ self.enum_name = c_upper(self.enum_name)
+
+ def is_multi_val(self):
+ return None
+
+ def is_scalar(self):
+ return self.type in {'u8', 'u16', 'u32', 'u64', 's32', 's64'}
+
+ def presence_type(self):
+ return 'bit'
+
+ def presence_member(self, space, type_filter):
+ if self.presence_type() != type_filter:
+ return
+
+ if self.presence_type() == 'bit':
+ pfx = '__' if space == 'user' else ''
+ return f"{pfx}u32 {self.c_name}:1;"
+
+ if self.presence_type() == 'len':
+ pfx = '__' if space == 'user' else ''
+ return f"{pfx}u32 {self.c_name}_len;"
+
+ def _complex_member_type(self, ri):
+ return None
+
+ def free_needs_iter(self):
+ return False
+
+ def free(self, ri, var, ref):
+ if self.is_multi_val() or self.presence_type() == 'len':
+ ri.cw.p(f'free({var}->{ref}{self.c_name});')
+
+ def arg_member(self, ri):
+ member = self._complex_member_type(ri)
+ if member:
+ return [member + ' *' + self.c_name]
+ raise Exception(f"Struct member not implemented for class type {self.type}")
+
+ def struct_member(self, ri):
+ if self.is_multi_val():
+ ri.cw.p(f"unsigned int n_{self.c_name};")
+ member = self._complex_member_type(ri)
+ if member:
+ ptr = '*' if self.is_multi_val() else ''
+ ri.cw.p(f"{member} {ptr}{self.c_name};")
+ return
+ members = self.arg_member(ri)
+ for one in members:
+ ri.cw.p(one + ';')
+
+ def _attr_policy(self, policy):
+ return '{ .type = ' + policy + ', }'
+
+ def attr_policy(self, cw):
+ policy = c_upper('nla-' + self.attr['type'])
+
+ spec = self._attr_policy(policy)
+ cw.p(f"\t[{self.enum_name}] = {spec},")
+
+ def _attr_typol(self):
+ raise Exception(f"Type policy not implemented for class type {self.type}")
+
+ def attr_typol(self, cw):
+ typol = self._attr_typol()
+ cw.p(f'[{self.enum_name}] = {"{"} .name = "{self.name}", {typol}{"}"},')
+
+ def _attr_put_line(self, ri, var, line):
+ if self.presence_type() == 'bit':
+ ri.cw.p(f"if ({var}->_present.{self.c_name})")
+ elif self.presence_type() == 'len':
+ ri.cw.p(f"if ({var}->_present.{self.c_name}_len)")
+ ri.cw.p(f"{line};")
+
+ def _attr_put_simple(self, ri, var, put_type):
+ line = f"mnl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name})"
+ self._attr_put_line(ri, var, line)
+
+ def attr_put(self, ri, var):
+ raise Exception(f"Put not implemented for class type {self.type}")
+
+ def _attr_get(self, ri, var):
+ raise Exception(f"Attr get not implemented for class type {self.type}")
+
+ def attr_get(self, ri, var, first):
+ lines, init_lines, local_vars = self._attr_get(ri, var)
+ if type(lines) is str:
+ lines = [lines]
+ if type(init_lines) is str:
+ init_lines = [init_lines]
+
+ kw = 'if' if first else 'else if'
+ ri.cw.block_start(line=f"{kw} (mnl_attr_get_type(attr) == {self.enum_name})")
+ if local_vars:
+ for local in local_vars:
+ ri.cw.p(local)
+ ri.cw.nl()
+
+ if not self.is_multi_val():
+ ri.cw.p("if (ynl_attr_validate(yarg, attr))")
+ ri.cw.p("return MNL_CB_ERROR;")
+ if self.presence_type() == 'bit':
+ ri.cw.p(f"{var}->_present.{self.c_name} = 1;")
+
+ if init_lines:
+ ri.cw.nl()
+ for line in init_lines:
+ ri.cw.p(line)
+
+ for line in lines:
+ ri.cw.p(line)
+ ri.cw.block_end()
+
+ def _setter_lines(self, ri, member, presence):
+ raise Exception(f"Setter not implemented for class type {self.type}")
+
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ ref = (ref if ref else []) + [self.c_name]
+ var = "req"
+ member = f"{var}->{'.'.join(ref)}"
+
+ code = []
+ presence = ''
+ for i in range(0, len(ref)):
+ presence = f"{var}->{'.'.join(ref[:i] + [''])}_present.{ref[i]}"
+ if self.presence_type() == 'bit':
+ code.append(presence + ' = 1;')
+ code += self._setter_lines(ri, member, presence)
+
+ ri.cw.write_func('static inline void',
+ f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}",
+ body=code,
+ args=[f'{type_name(ri, direction, deref=deref)} *{var}'] + self.arg_member(ri))
+
+
+class TypeUnused(Type):
+ def presence_type(self):
+ return ''
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_REJECT, '
+
+ def attr_policy(self, cw):
+ pass
+
+
+class TypePad(Type):
+ def presence_type(self):
+ return ''
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_REJECT, '
+
+ def attr_policy(self, cw):
+ pass
+
+
+class TypeScalar(Type):
+ def __init__(self, family, attr_set, attr, value):
+ super().__init__(family, attr_set, attr, value)
+
+ self.byte_order_comment = ''
+ if 'byte-order' in attr:
+ self.byte_order_comment = f" /* {attr['byte-order']} */"
+
+ # Added by resolve():
+ self.is_bitfield = None
+ delattr(self, "is_bitfield")
+ self.type_name = None
+ delattr(self, "type_name")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if 'enum-as-flags' in self.attr and self.attr['enum-as-flags']:
+ self.is_bitfield = True
+ elif 'enum' in self.attr:
+ self.is_bitfield = self.family.consts[self.attr['enum']]['type'] == 'flags'
+ else:
+ self.is_bitfield = False
+
+ if 'enum' in self.attr and not self.is_bitfield:
+ self.type_name = f"enum {self.family.name}_{c_lower(self.attr['enum'])}"
+ else:
+ self.type_name = '__' + self.type
+
+ def _mnl_type(self):
+ t = self.type
+ # mnl does not have a helper for signed types
+ if t[0] == 's':
+ t = 'u' + t[1:]
+ return t
+
+ def _attr_policy(self, policy):
+ if 'flags-mask' in self.checks or self.is_bitfield:
+ if self.is_bitfield:
+ mask = self.family.consts[self.attr['enum']].get_mask()
+ else:
+ flags = self.family.consts[self.checks['flags-mask']]
+ flag_cnt = len(flags['entries'])
+ mask = (1 << flag_cnt) - 1
+ return f"NLA_POLICY_MASK({policy}, 0x{mask:x})"
+ elif 'min' in self.checks:
+ return f"NLA_POLICY_MIN({policy}, {self.checks['min']})"
+ elif 'enum' in self.attr:
+ enum = self.family.consts[self.attr['enum']]
+ cnt = len(enum['entries'])
+ return f"NLA_POLICY_MAX({policy}, {cnt - 1})"
+ return super()._attr_policy(policy)
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_U{self.type[1:]}, '
+
+ def arg_member(self, ri):
+ return [f'{self.type_name} {self.c_name}{self.byte_order_comment}']
+
+ def attr_put(self, ri, var):
+ self._attr_put_simple(ri, var, self._mnl_type())
+
+ def _attr_get(self, ri, var):
+ return f"{var}->{self.c_name} = mnl_attr_get_{self._mnl_type()}(attr);", None, None
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"{member} = {self.c_name};"]
+
+
+class TypeFlag(Type):
+ def arg_member(self, ri):
+ return []
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_FLAG, '
+
+ def attr_put(self, ri, var):
+ self._attr_put_line(ri, var, f"mnl_attr_put(nlh, {self.enum_name}, 0, NULL)")
+
+ def _attr_get(self, ri, var):
+ return [], None, None
+
+ def _setter_lines(self, ri, member, presence):
+ return []
+
+
+class TypeString(Type):
+ def arg_member(self, ri):
+ return [f"const char *{self.c_name}"]
+
+ def presence_type(self):
+ return 'len'
+
+ def struct_member(self, ri):
+ ri.cw.p(f"char *{self.c_name};")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NUL_STR, '
+
+ def _attr_policy(self, policy):
+ mem = '{ .type = ' + policy
+ if 'max-len' in self.checks:
+ mem += ', .len = ' + str(self.checks['max-len'])
+ mem += ', }'
+ return mem
+
+ def attr_policy(self, cw):
+ if self.checks.get('unterminated-ok', False):
+ policy = 'NLA_STRING'
+ else:
+ policy = 'NLA_NUL_STRING'
+
+ spec = self._attr_policy(policy)
+ cw.p(f"\t[{self.enum_name}] = {spec},")
+
+ def attr_put(self, ri, var):
+ self._attr_put_simple(ri, var, 'strz')
+
+ def _attr_get(self, ri, var):
+ len_mem = var + '->_present.' + self.c_name + '_len'
+ return [f"{len_mem} = len;",
+ f"{var}->{self.c_name} = malloc(len + 1);",
+ f"memcpy({var}->{self.c_name}, mnl_attr_get_str(attr), len);",
+ f"{var}->{self.c_name}[len] = 0;"], \
+ ['len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));'], \
+ ['unsigned int len;']
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"free({member});",
+ f"{presence}_len = strlen({self.c_name});",
+ f"{member} = malloc({presence}_len + 1);",
+ f'memcpy({member}, {self.c_name}, {presence}_len);',
+ f'{member}[{presence}_len] = 0;']
+
+
+class TypeBinary(Type):
+ def arg_member(self, ri):
+ return [f"const void *{self.c_name}", 'size_t len']
+
+ def presence_type(self):
+ return 'len'
+
+ def struct_member(self, ri):
+ ri.cw.p(f"void *{self.c_name};")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_BINARY,'
+
+ def _attr_policy(self, policy):
+ mem = '{ '
+ if len(self.checks) == 1 and 'min-len' in self.checks:
+ mem += '.len = ' + str(self.checks['min-len'])
+ elif len(self.checks) == 0:
+ mem += '.type = NLA_BINARY'
+ else:
+ raise Exception('One or more binary type checks not implemented yet')
+ mem += ', }'
+ return mem
+
+ def attr_put(self, ri, var):
+ self._attr_put_line(ri, var, f"mnl_attr_put(nlh, {self.enum_name}, " +
+ f"{var}->_present.{self.c_name}_len, {var}->{self.c_name})")
+
+ def _attr_get(self, ri, var):
+ len_mem = var + '->_present.' + self.c_name + '_len'
+ return [f"{len_mem} = len;",
+ f"{var}->{self.c_name} = malloc(len);",
+ f"memcpy({var}->{self.c_name}, mnl_attr_get_payload(attr), len);"], \
+ ['len = mnl_attr_get_payload_len(attr);'], \
+ ['unsigned int len;']
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"free({member});",
+ f"{member} = malloc({presence}_len);",
+ f'memcpy({member}, {self.c_name}, {presence}_len);']
+
+
+class TypeNest(Type):
+ def _complex_member_type(self, ri):
+ return f"struct {self.nested_render_name}"
+
+ def free(self, ri, var, ref):
+ ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name});')
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_policy(self, policy):
+ return 'NLA_POLICY_NESTED(' + self.nested_render_name + '_nl_policy)'
+
+ def attr_put(self, ri, var):
+ self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
+ f"{self.enum_name}, &{var}->{self.c_name})")
+
+ def _attr_get(self, ri, var):
+ get_lines = [f"{self.nested_render_name}_parse(&parg, attr);"]
+ init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
+ f"parg.data = &{var}->{self.c_name};"]
+ return get_lines, init_lines, None
+
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ ref = (ref if ref else []) + [self.c_name]
+
+ for _, attr in ri.family.pure_nested_structs[self.nested_attrs].member_list():
+ attr.setter(ri, self.nested_attrs, direction, deref=deref, ref=ref)
+
+
+class TypeMultiAttr(Type):
+ def is_multi_val(self):
+ return True
+
+ def presence_type(self):
+ return 'count'
+
+ def _complex_member_type(self, ri):
+ if 'type' not in self.attr or self.attr['type'] == 'nest':
+ return f"struct {self.nested_render_name}"
+ elif self.attr['type'] in scalars:
+ scalar_pfx = '__' if ri.ku_space == 'user' else ''
+ return scalar_pfx + self.attr['type']
+ else:
+ raise Exception(f"Sub-type {self.attr['type']} not supported yet")
+
+ def free_needs_iter(self):
+ return 'type' not in self.attr or self.attr['type'] == 'nest'
+
+ def free(self, ri, var, ref):
+ if 'type' not in self.attr or self.attr['type'] == 'nest':
+ ri.cw.p(f"for (i = 0; i < {var}->{ref}n_{self.c_name}; i++)")
+ ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name}[i]);')
+
+ def _attr_typol(self):
+ if 'type' not in self.attr or self.attr['type'] == 'nest':
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+ elif self.attr['type'] in scalars:
+ return f".type = YNL_PT_U{self.attr['type'][1:]}, "
+ else:
+ raise Exception(f"Sub-type {self.attr['type']} not supported yet")
+
+ def _attr_get(self, ri, var):
+ return f'{var}->n_{self.c_name}++;', None, None
+
+
+class TypeArrayNest(Type):
+ def is_multi_val(self):
+ return True
+
+ def presence_type(self):
+ return 'count'
+
+ def _complex_member_type(self, ri):
+ if 'sub-type' not in self.attr or self.attr['sub-type'] == 'nest':
+ return f"struct {self.nested_render_name}"
+ elif self.attr['sub-type'] in scalars:
+ scalar_pfx = '__' if ri.ku_space == 'user' else ''
+ return scalar_pfx + self.attr['sub-type']
+ else:
+ raise Exception(f"Sub-type {self.attr['sub-type']} not supported yet")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_get(self, ri, var):
+ local_vars = ['const struct nlattr *attr2;']
+ get_lines = [f'attr_{self.c_name} = attr;',
+ 'mnl_attr_for_each_nested(attr2, attr)',
+ f'\t{var}->n_{self.c_name}++;']
+ return get_lines, None, local_vars
+
+
+class TypeNestTypeValue(Type):
+ def _complex_member_type(self, ri):
+ return f"struct {self.nested_render_name}"
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_get(self, ri, var):
+ prev = 'attr'
+ tv_args = ''
+ get_lines = []
+ local_vars = []
+ init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
+ f"parg.data = &{var}->{self.c_name};"]
+ if 'type-value' in self.attr:
+ tv_names = [c_lower(x) for x in self.attr["type-value"]]
+ local_vars += [f'const struct nlattr *attr_{", *attr_".join(tv_names)};']
+ local_vars += [f'__u32 {", ".join(tv_names)};']
+ for level in self.attr["type-value"]:
+ level = c_lower(level)
+ get_lines += [f'attr_{level} = mnl_attr_get_payload({prev});']
+ get_lines += [f'{level} = mnl_attr_get_type(attr_{level});']
+ prev = 'attr_' + level
+
+ tv_args = f", {', '.join(tv_names)}"
+
+ get_lines += [f"{self.nested_render_name}_parse(&parg, {prev}{tv_args});"]
+ return get_lines, init_lines, local_vars
+
+
+class Struct:
+ def __init__(self, family, space_name, type_list=None, inherited=None):
+ self.family = family
+ self.space_name = space_name
+ self.attr_set = family.attr_sets[space_name]
+ # Use list to catch comparisons with empty sets
+ self._inherited = inherited if inherited is not None else []
+ self.inherited = []
+
+ self.nested = type_list is None
+ if family.name == c_lower(space_name):
+ self.render_name = f"{family.name}"
+ else:
+ self.render_name = f"{family.name}_{c_lower(space_name)}"
+ self.struct_name = 'struct ' + self.render_name
+ self.ptr_name = self.struct_name + ' *'
+
+ self.request = False
+ self.reply = False
+
+ self.attr_list = []
+ self.attrs = dict()
+ if type_list:
+ for t in type_list:
+ self.attr_list.append((t, self.attr_set[t]),)
+ else:
+ for t in self.attr_set:
+ self.attr_list.append((t, self.attr_set[t]),)
+
+ max_val = 0
+ self.attr_max_val = None
+ for name, attr in self.attr_list:
+ if attr.value > max_val:
+ max_val = attr.value
+ self.attr_max_val = attr
+ self.attrs[name] = attr
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def __getitem__(self, key):
+ return self.attrs[key]
+
+ def member_list(self):
+ return self.attr_list
+
+ def set_inherited(self, new_inherited):
+ if self._inherited != new_inherited:
+ raise Exception("Inheriting different members not supported")
+ self.inherited = [c_lower(x) for x in sorted(self._inherited)]
+
+
+class EnumEntry:
+ def __init__(self, enum_set, yaml, prev, value_start):
+ if isinstance(yaml, str):
+ self.name = yaml
+ yaml = {}
+ self.doc = ''
+ else:
+ self.name = yaml['name']
+ self.doc = yaml.get('doc', '')
+
+ self.yaml = yaml
+ self.enum_set = enum_set
+ self.c_name = c_upper(enum_set.value_pfx + self.name)
+
+ if 'value' in yaml:
+ self.value = yaml['value']
+ if prev:
+ self.value_change = (self.value != prev.value + 1)
+ elif prev:
+ self.value_change = False
+ self.value = prev.value + 1
+ else:
+ self.value = value_start
+ self.value_change = (self.value != 0)
+
+ self.value_change = self.value_change or self.enum_set['type'] == 'flags'
+
+ def __getitem__(self, key):
+ return self.yaml[key]
+
+ def __contains__(self, key):
+ return key in self.yaml
+
+ def has_doc(self):
+ return bool(self.doc)
+
+ # raw value, i.e. the id in the enum, unlike user value which is a mask for flags
+ def raw_value(self):
+ return self.value
+
+ # user value, same as raw value for enums, for flags it's the mask
+ def user_value(self):
+ if self.enum_set['type'] == 'flags':
+ return 1 << self.value
+ else:
+ return self.value
+
+
+class EnumSet:
+ def __init__(self, family, yaml):
+ self.yaml = yaml
+ self.family = family
+
+ self.render_name = c_lower(family.name + '-' + yaml['name'])
+ self.enum_name = 'enum ' + self.render_name
+
+ self.value_pfx = yaml.get('name-prefix', f"{family.name}-{yaml['name']}-")
+
+ self.type = yaml['type']
+
+ prev_entry = None
+ value_start = self.yaml.get('value-start', 0)
+ self.entries = {}
+ self.entry_list = []
+ for entry in self.yaml['entries']:
+ e = EnumEntry(self, entry, prev_entry, value_start)
+ self.entries[e.name] = e
+ self.entry_list.append(e)
+ prev_entry = e
+
+ def __getitem__(self, key):
+ return self.yaml[key]
+
+ def __contains__(self, key):
+ return key in self.yaml
+
+ def has_doc(self):
+ if 'doc' in self.yaml:
+ return True
+ for entry in self.entry_list:
+ if entry.has_doc():
+ return True
+ return False
+
+ def get_mask(self):
+ mask = 0
+ idx = self.yaml.get('value-start', 0)
+ for _ in self.entry_list:
+ mask |= 1 << idx
+ idx += 1
+ return mask
+
+
+class AttrSet(SpecAttrSet):
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ if self.subset_of is None:
+ if 'name-prefix' in yaml:
+ pfx = yaml['name-prefix']
+ elif self.name == family.name:
+ pfx = family.name + '-a-'
+ else:
+ pfx = f"{family.name}-a-{self.name}-"
+ self.name_prefix = c_upper(pfx)
+ self.max_name = c_upper(self.yaml.get('attr-max-name', f"{self.name_prefix}max"))
+ else:
+ self.name_prefix = family.attr_sets[self.subset_of].name_prefix
+ self.max_name = family.attr_sets[self.subset_of].max_name
+
+ # Added by resolve:
+ self.c_name = None
+ delattr(self, "c_name")
+
+ def resolve(self):
+ self.c_name = c_lower(self.name)
+ if self.c_name in _C_KW:
+ self.c_name += '_'
+ if self.c_name == self.family.c_name:
+ self.c_name = ''
+
+ def new_attr(self, elem, value):
+ if 'multi-attr' in elem and elem['multi-attr']:
+ return TypeMultiAttr(self.family, self, elem, value)
+ elif elem['type'] in scalars:
+ return TypeScalar(self.family, self, elem, value)
+ elif elem['type'] == 'unused':
+ return TypeUnused(self.family, self, elem, value)
+ elif elem['type'] == 'pad':
+ return TypePad(self.family, self, elem, value)
+ elif elem['type'] == 'flag':
+ return TypeFlag(self.family, self, elem, value)
+ elif elem['type'] == 'string':
+ return TypeString(self.family, self, elem, value)
+ elif elem['type'] == 'binary':
+ return TypeBinary(self.family, self, elem, value)
+ elif elem['type'] == 'nest':
+ return TypeNest(self.family, self, elem, value)
+ elif elem['type'] == 'array-nest':
+ return TypeArrayNest(self.family, self, elem, value)
+ elif elem['type'] == 'nest-type-value':
+ return TypeNestTypeValue(self.family, self, elem, value)
+ else:
+ raise Exception(f"No typed class for type {elem['type']}")
+
+
+class Operation(SpecOperation):
+ def __init__(self, family, yaml, req_value, rsp_value):
+ super().__init__(family, yaml, req_value, rsp_value)
+
+ if req_value != rsp_value:
+ raise Exception("Directional messages not supported by codegen")
+
+ self.render_name = family.name + '_' + c_lower(self.name)
+
+ self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \
+ ('dump' in yaml and 'request' in yaml['dump'])
+
+ # Added by resolve:
+ self.enum_name = None
+ delattr(self, "enum_name")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if not self.is_async:
+ self.enum_name = self.family.op_prefix + c_upper(self.name)
+ else:
+ self.enum_name = self.family.async_op_prefix + c_upper(self.name)
+
+ def add_notification(self, op):
+ if 'notify' not in self.yaml:
+ self.yaml['notify'] = dict()
+ self.yaml['notify']['reply'] = self.yaml['do']['reply']
+ self.yaml['notify']['cmds'] = []
+ self.yaml['notify']['cmds'].append(op)
+
+
+class Family(SpecFamily):
+ def __init__(self, file_name):
+ # Added by resolve:
+ self.c_name = None
+ delattr(self, "c_name")
+ self.op_prefix = None
+ delattr(self, "op_prefix")
+ self.async_op_prefix = None
+ delattr(self, "async_op_prefix")
+ self.mcgrps = None
+ delattr(self, "mcgrps")
+ self.consts = None
+ delattr(self, "consts")
+ self.hooks = None
+ delattr(self, "hooks")
+
+ super().__init__(file_name)
+
+ self.fam_key = c_upper(self.yaml.get('c-family-name', self.yaml["name"] + '_FAMILY_NAME'))
+ self.ver_key = c_upper(self.yaml.get('c-version-name', self.yaml["name"] + '_FAMILY_VERSION'))
+
+ if 'definitions' not in self.yaml:
+ self.yaml['definitions'] = []
+
+ if 'uapi-header' in self.yaml:
+ self.uapi_header = self.yaml['uapi-header']
+ else:
+ self.uapi_header = f"linux/{self.name}.h"
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}:
+ raise Exception("Codegen only supported for genetlink")
+
+ self.c_name = c_lower(self.name)
+ if 'name-prefix' in self.yaml['operations']:
+ self.op_prefix = c_upper(self.yaml['operations']['name-prefix'])
+ else:
+ self.op_prefix = c_upper(self.yaml['name'] + '-cmd-')
+ if 'async-prefix' in self.yaml['operations']:
+ self.async_op_prefix = c_upper(self.yaml['operations']['async-prefix'])
+ else:
+ self.async_op_prefix = self.op_prefix
+
+ self.mcgrps = self.yaml.get('mcast-groups', {'list': []})
+
+ self.consts = dict()
+
+ self.hooks = dict()
+ for when in ['pre', 'post']:
+ self.hooks[when] = dict()
+ for op_mode in ['do', 'dump']:
+ self.hooks[when][op_mode] = dict()
+ self.hooks[when][op_mode]['set'] = set()
+ self.hooks[when][op_mode]['list'] = []
+
+ # dict space-name -> 'request': set(attrs), 'reply': set(attrs)
+ self.root_sets = dict()
+ # dict space-name -> Struct, with .request/.reply marking how it is used
+ self.pure_nested_structs = dict()
+ self.all_notify = dict()
+
+ self._mock_up_events()
+
+ self._dictify()
+ self._load_root_sets()
+ self._load_nested_sets()
+ self._load_all_notify()
+ self._load_hooks()
+
+ self.kernel_policy = self.yaml.get('kernel-policy', 'split')
+ if self.kernel_policy == 'global':
+ self._load_global_policy()
+
+ def new_attr_set(self, elem):
+ return AttrSet(self, elem)
+
+ def new_operation(self, elem, req_value, rsp_value):
+ return Operation(self, elem, req_value, rsp_value)
+
+ # Fake a 'do' equivalent of all events, so that we can render their response parsing
+ def _mock_up_events(self):
+ for op in self.yaml['operations']['list']:
+ if 'event' in op:
+ op['do'] = {
+ 'reply': {
+ 'attributes': op['event']['attributes']
+ }
+ }
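+
+ # E.g. 'event: { attributes: [a, b] }' is mocked up as
+ # 'do: { reply: { attributes: [a, b] } }'.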
+
+ def _dictify(self):
+ for elem in self.yaml['definitions']:
+ if elem['type'] == 'enum' or elem['type'] == 'flags':
+ self.consts[elem['name']] = EnumSet(self, elem)
+ else:
+ self.consts[elem['name']] = elem
+
+ ntf = []
+ for msg in self.msgs.values():
+ if 'notify' in msg:
+ ntf.append(msg)
+ for n in ntf:
+ self.ops[n['notify']].add_notification(n)
+
+ def _load_root_sets(self):
+ for op_name, op in self.ops.items():
+ if 'attribute-set' not in op:
+ continue
+
+ req_attrs = set()
+ rsp_attrs = set()
+ for op_mode in ['do', 'dump']:
+ if op_mode in op and 'request' in op[op_mode]:
+ req_attrs.update(set(op[op_mode]['request']['attributes']))
+ if op_mode in op and 'reply' in op[op_mode]:
+ rsp_attrs.update(set(op[op_mode]['reply']['attributes']))
+
+ if op['attribute-set'] not in self.root_sets:
+ self.root_sets[op['attribute-set']] = {'request': req_attrs, 'reply': rsp_attrs}
+ else:
+ self.root_sets[op['attribute-set']]['request'].update(req_attrs)
+ self.root_sets[op['attribute-set']]['reply'].update(rsp_attrs)
+
+ def _load_nested_sets(self):
+ for root_set, rs_members in self.root_sets.items():
+ for attr, spec in self.attr_sets[root_set].items():
+ if 'nested-attributes' in spec:
+ inherit = set()
+ nested = spec['nested-attributes']
+ if nested not in self.root_sets:
+ self.pure_nested_structs[nested] = Struct(self, nested, inherited=inherit)
+ if attr in rs_members['request']:
+ self.pure_nested_structs[nested].request = True
+ if attr in rs_members['reply']:
+ self.pure_nested_structs[nested].reply = True
+
+ if 'type-value' in spec:
+ if nested in self.root_sets:
+ raise Exception("Inheriting members to a space used as root not supported")
+ inherit.update(set(spec['type-value']))
+ elif spec['type'] == 'array-nest':
+ inherit.add('idx')
+ self.pure_nested_structs[nested].set_inherited(inherit)
+
+ def _load_all_notify(self):
+ for op_name, op in self.ops.items():
+ if not op:
+ continue
+
+ if 'notify' in op:
+ self.all_notify[op_name] = op['notify']['cmds']
+
+ def _load_global_policy(self):
+ global_set = set()
+ attr_set_name = None
+ for op_name, op in self.ops.items():
+ if not op:
+ continue
+ if 'attribute-set' not in op:
+ continue
+
+ if attr_set_name is None:
+ attr_set_name = op['attribute-set']
+ if attr_set_name != op['attribute-set']:
+ raise Exception('For a global policy all ops must use the same set')
+
+ for op_mode in ['do', 'dump']:
+ if op_mode in op:
+ global_set.update(op[op_mode].get('request', []))
+
+ self.global_policy = []
+ self.global_policy_set = attr_set_name
+ for attr in self.attr_sets[attr_set_name]:
+ if attr in global_set:
+ self.global_policy.append(attr)
+
+ def _load_hooks(self):
+ for op in self.ops.values():
+ for op_mode in ['do', 'dump']:
+ if op_mode not in op:
+ continue
+ for when in ['pre', 'post']:
+ if when not in op[op_mode]:
+ continue
+ name = op[op_mode][when]
+ if name in self.hooks[when][op_mode]['set']:
+ continue
+ self.hooks[when][op_mode]['set'].add(name)
+ self.hooks[when][op_mode]['list'].append(name)
+
+
+class RenderInfo:
+ def __init__(self, cw, family, ku_space, op, op_name, op_mode, attr_set=None):
+ self.family = family
+ self.nl = cw.nlib
+ self.ku_space = ku_space
+ self.op = op
+ self.op_name = op_name
+ self.op_mode = op_mode
+
+ # Check whether 'do' and 'dump' replies match, so response parsing can be shared
+ if op_mode != 'do' and 'dump' in op and 'do' in op and 'reply' in op['do'] and \
+ op["do"]["reply"] == op["dump"]["reply"]:
+ self.type_consistent = True
+ else:
+ self.type_consistent = op_mode == 'event'
+
+ self.attr_set = attr_set
+ if not self.attr_set:
+ self.attr_set = op['attribute-set']
+
+ if op:
+ self.type_name = c_lower(op_name)
+ else:
+ self.type_name = c_lower(attr_set)
+
+ self.cw = cw
+
+ self.struct = dict()
+ for op_dir in ['request', 'reply']:
+ if op and op_dir in op[op_mode]:
+ self.struct[op_dir] = Struct(family, self.attr_set,
+ type_list=op[op_mode][op_dir]['attributes'])
+ if op_mode == 'event':
+ self.struct['reply'] = Struct(family, self.attr_set, type_list=op['event']['attributes'])
+
+
+class CodeWriter:
+ def __init__(self, nlib, out_file):
+ self.nlib = nlib
+
+ self._nl = False
+ self._silent_block = False
+ self._ind = 0
+ self._out = out_file
+
+ @classmethod
+ def _is_cond(cls, line):
+ return line.startswith('if') or line.startswith('while') or line.startswith('for')
+
+ def p(self, line, add_ind=0):
+ if self._nl:
+ self._out.write('\n')
+ self._nl = False
+ ind = self._ind
+ if line[-1] == ':':
+ ind -= 1
+ if self._silent_block:
+ ind += 1
+ self._silent_block = line.endswith(')') and CodeWriter._is_cond(line)
+ if add_ind:
+ ind += add_ind
+ self._out.write('\t' * ind + line + '\n')
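+
+ # Note: a conditional line ending in ')' (if/while/for) arms a one-off
+ # extra indent for the line that follows it, e.g.:
+ #   if (err < 0)
+ #           return -1;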
+
+ def nl(self):
+ self._nl = True
+
+ def block_start(self, line=''):
+ if line:
+ line = line + ' '
+ self.p(line + '{')
+ self._ind += 1
+
+ def block_end(self, line=''):
+ if line and line[0] not in {';', ','}:
+ line = ' ' + line
+ self._ind -= 1
+ self.p('}' + line)
+
+ def write_doc_line(self, doc, indent=True):
+ words = doc.split()
+ line = ' *'
+ for word in words:
+ if len(line) + len(word) >= 79:
+ self.p(line)
+ line = ' *'
+ if indent:
+ line += ' '
+ line += ' ' + word
+ self.p(line)
+
+ def write_func_prot(self, qual_ret, name, args=None, doc=None, suffix=''):
+ if not args:
+ args = ['void']
+
+ if doc:
+ self.p('/*')
+ self.p(' * ' + doc)
+ self.p(' */')
+
+ oneline = qual_ret
+ if qual_ret[-1] != '*':
+ oneline += ' '
+ oneline += f"{name}({', '.join(args)}){suffix}"
+
+ if len(oneline) < 80:
+ self.p(oneline)
+ return
+
+ v = qual_ret
+ if len(v) > 3:
+ self.p(v)
+ v = ''
+ elif qual_ret[-1] != '*':
+ v += ' '
+ v += name + '('
+ ind = '\t' * (len(v) // 8) + ' ' * (len(v) % 8)
+ delta_ind = len(v) - len(ind)
+ v += args[0]
+ i = 1
+ while i < len(args):
+ next_len = len(v) + len(args[i])
+ if v[0] == '\t':
+ next_len += delta_ind
+ if next_len > 76:
+ self.p(v + ',')
+ v = ind
+ else:
+ v += ', '
+ v += args[i]
+ i += 1
+ self.p(v + ')' + suffix)
+
+ def write_func_lvar(self, local_vars):
+ if not local_vars:
+ return
+
+ if type(local_vars) is str:
+ local_vars = [local_vars]
+
+ local_vars.sort(key=len, reverse=True)
+ for var in local_vars:
+ self.p(var)
+ self.nl()
+
+ def write_func(self, qual_ret, name, body, args=None, local_vars=None):
+ self.write_func_prot(qual_ret=qual_ret, name=name, args=args)
+ self.write_func_lvar(local_vars=local_vars)
+
+ self.block_start()
+ for line in body:
+ self.p(line)
+ self.block_end()
+
+ def writes_defines(self, defines):
+ longest = 0
+ for define in defines:
+ if len(define[0]) > longest:
+ longest = len(define[0])
+ longest = ((longest + 8) // 8) * 8
+ for define in defines:
+ line = '#define ' + define[0]
+ line += '\t' * ((longest - len(define[0]) + 7) // 8)
+ if type(define[1]) is int:
+ line += str(define[1])
+ elif type(define[1]) is str:
+ line += '"' + define[1] + '"'
+ self.p(line)
+
+ def write_struct_init(self, members):
+ longest = max([len(x[0]) for x in members])
+ longest += 1 # because we prepend a .
+ longest = ((longest + 8) // 8) * 8
+ for one in members:
+ line = '.' + one[0]
+ line += '\t' * ((longest - len(one[0]) - 1 + 7) // 8)
+ line += '= ' + one[1] + ','
+ self.p(line)
+
+
+scalars = {'u8', 'u16', 'u32', 'u64', 's32', 's64'}
+
+direction_to_suffix = {
+ 'reply': '_rsp',
+ 'request': '_req',
+ '': ''
+}
+
+op_mode_to_wrapper = {
+ 'do': '',
+ 'dump': '_list',
+ 'notify': '_ntf',
+ 'event': '',
+}
+
+_C_KW = {
+ 'do'
+}
+
+
+def rdir(direction):
+ if direction == 'reply':
+ return 'request'
+ if direction == 'request':
+ return 'reply'
+ return direction
+
+
+def op_prefix(ri, direction, deref=False):
+ suffix = f"_{ri.type_name}"
+
+ if not ri.op_mode or ri.op_mode == 'do':
+ suffix += f"{direction_to_suffix[direction]}"
+ else:
+ if direction == 'request':
+ suffix += '_req_dump'
+ else:
+ if ri.type_consistent:
+ if deref:
+ suffix += f"{direction_to_suffix[direction]}"
+ else:
+ suffix += op_mode_to_wrapper[ri.op_mode]
+ else:
+ suffix += '_rsp'
+ suffix += '_dump' if deref else '_list'
+
+ return f"{ri.family['name']}{suffix}"
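+
+# Illustrative outputs, assuming family 'netdev' and op 'dev-get':
+#   do, request  -> netdev_dev_get_req
+#   do, reply    -> netdev_dev_get_rsp
+#   dump, reply  -> netdev_dev_get_list   (when do/dump replies match)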
+
+
+def type_name(ri, direction, deref=False):
+ return f"struct {op_prefix(ri, direction, deref=deref)}"
+
+
+def print_prototype(ri, direction, terminate=True, doc=None):
+ suffix = ';' if terminate else ''
+
+ fname = ri.op.render_name
+ if ri.op_mode == 'dump':
+ fname += '_dump'
+
+ args = ['struct ynl_sock *ys']
+ if 'request' in ri.op[ri.op_mode]:
+ args.append(f"{type_name(ri, direction)} *" + f"{direction_to_suffix[direction][1:]}")
+
+ ret = 'int'
+ if 'reply' in ri.op[ri.op_mode]:
+ ret = f"{type_name(ri, rdir(direction))} *"
+
+ ri.cw.write_func_prot(ret, fname, args, doc=doc, suffix=suffix)
+
+
+def print_req_prototype(ri):
+ print_prototype(ri, "request", doc=ri.op['doc'])
+
+
+def print_dump_prototype(ri):
+ print_prototype(ri, "request")
+
+
+def put_typol_fwd(cw, struct):
+ cw.p(f'extern struct ynl_policy_nest {struct.render_name}_nest;')
+
+
+def put_typol(cw, struct):
+ type_max = struct.attr_set.max_name
+ cw.block_start(line=f'struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
+
+ for _, arg in struct.member_list():
+ arg.attr_typol(cw)
+
+ cw.block_end(line=';')
+ cw.nl()
+
+ cw.block_start(line=f'struct ynl_policy_nest {struct.render_name}_nest =')
+ cw.p(f'.max_attr = {type_max},')
+ cw.p(f'.table = {struct.render_name}_policy,')
+ cw.block_end(line=';')
+ cw.nl()
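+
+# The above emits roughly (names illustrative):
+#   struct ynl_policy_attr <name>_policy[<MAX> + 1] = { ... };
+#   struct ynl_policy_nest <name>_nest = {
+#           .max_attr = <MAX>,
+#           .table = <name>_policy,
+#   };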
+
+
+def put_req_nested(ri, struct):
+ func_args = ['struct nlmsghdr *nlh',
+ 'unsigned int attr_type',
+ f'{struct.ptr_name}obj']
+
+ ri.cw.write_func_prot('int', f'{struct.render_name}_put', func_args)
+ ri.cw.block_start()
+ ri.cw.write_func_lvar('struct nlattr *nest;')
+
+ ri.cw.p("nest = mnl_attr_nest_start(nlh, attr_type);")
+
+ for _, arg in struct.member_list():
+ arg.attr_put(ri, "obj")
+
+ ri.cw.p("mnl_attr_nest_end(nlh, nest);")
+
+ ri.cw.nl()
+ ri.cw.p('return 0;')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def _multi_parse(ri, struct, init_lines, local_vars):
+ if struct.nested:
+ iter_line = "mnl_attr_for_each_nested(attr, nested)"
+ else:
+ iter_line = "mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr))"
+
+ array_nests = set()
+ multi_attrs = set()
+ needs_parg = False
+ for arg, aspec in struct.member_list():
+ if aspec['type'] == 'array-nest':
+ local_vars.append(f'const struct nlattr *attr_{aspec.c_name};')
+ array_nests.add(arg)
+ if 'multi-attr' in aspec:
+ multi_attrs.add(arg)
+ needs_parg |= 'nested-attributes' in aspec
+ if array_nests or multi_attrs:
+ local_vars.append('int i;')
+ if needs_parg:
+ local_vars.append('struct ynl_parse_arg parg;')
+ init_lines.append('parg.ys = yarg->ys;')
+
+ ri.cw.block_start()
+ ri.cw.write_func_lvar(local_vars)
+
+ for line in init_lines:
+ ri.cw.p(line)
+ ri.cw.nl()
+
+ for arg in struct.inherited:
+ ri.cw.p(f'dst->{arg} = {arg};')
+
+ ri.cw.nl()
+ ri.cw.block_start(line=iter_line)
+
+ first = True
+ for _, arg in struct.member_list():
+ arg.attr_get(ri, 'dst', first=first)
+ first = False
+
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ for anest in sorted(array_nests):
+ aspec = struct[anest]
+
+ ri.cw.block_start(line=f"if (dst->n_{aspec.c_name})")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(dst->n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p('i = 0;')
+ ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
+ ri.cw.block_start(line=f"mnl_attr_for_each_nested(attr, attr_{aspec.c_name})")
+ ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
+ ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, mnl_attr_get_type(attr)))")
+ ri.cw.p('return MNL_CB_ERROR;')
+ ri.cw.p('i++;')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ for anest in sorted(multi_attrs):
+ aspec = struct[anest]
+ ri.cw.block_start(line=f"if (dst->n_{aspec.c_name})")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(dst->n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p('i = 0;')
+ if 'nested-attributes' in aspec:
+ ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
+ ri.cw.block_start(line=iter_line)
+ ri.cw.block_start(line=f"if (mnl_attr_get_type(attr) == {aspec.enum_name})")
+ if 'nested-attributes' in aspec:
+ ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
+ ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr))")
+ ri.cw.p('return MNL_CB_ERROR;')
+ elif aspec['type'] in scalars:
+ t = aspec['type']
+ if t[0] == 's':
+ t = 'u' + t[1:]
+ ri.cw.p(f"dst->{aspec.c_name}[i] = mnl_attr_get_{t}(attr);")
+ else:
+ raise Exception('Nest parsing type not supported yet')
+ ri.cw.p('i++;')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ if struct.nested:
+ ri.cw.p('return 0;')
+ else:
+ ri.cw.p('return MNL_CB_OK;')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def parse_rsp_nested(ri, struct):
+ func_args = ['struct ynl_parse_arg *yarg',
+ 'const struct nlattr *nested']
+ for arg in struct.inherited:
+ func_args.append('__u32 ' + arg)
+
+ local_vars = ['const struct nlattr *attr;',
+ f'{struct.ptr_name}dst = yarg->data;']
+ init_lines = []
+
+ ri.cw.write_func_prot('int', f'{struct.render_name}_parse', func_args)
+
+ _multi_parse(ri, struct, init_lines, local_vars)
+
+
+def parse_rsp_msg(ri, deref=False):
+ if 'reply' not in ri.op[ri.op_mode] and ri.op_mode != 'event':
+ return
+
+ func_args = ['const struct nlmsghdr *nlh',
+ 'void *data']
+
+ local_vars = [f'{type_name(ri, "reply", deref=deref)} *dst;',
+ 'struct ynl_parse_arg *yarg = data;',
+ 'const struct nlattr *attr;']
+ init_lines = ['dst = yarg->data;']
+
+ ri.cw.write_func_prot('int', f'{op_prefix(ri, "reply", deref=deref)}_parse', func_args)
+
+ _multi_parse(ri, ri.struct["reply"], init_lines, local_vars)
+
+
+def print_req(ri):
+ ret_ok = '0'
+ ret_err = '-1'
+ direction = "request"
+ local_vars = ['struct nlmsghdr *nlh;',
+ 'int len, err;']
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ret_ok = 'rsp'
+ ret_err = 'NULL'
+ local_vars += [f'{type_name(ri, rdir(direction))} *rsp;',
+ 'struct ynl_parse_arg yarg = { .ys = ys, };']
+
+ print_prototype(ri, direction, terminate=False)
+ ri.cw.block_start()
+ ri.cw.write_func_lvar(local_vars)
+
+ ri.cw.p(f"nlh = ynl_gemsg_start_req(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
+
+ ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.nl()
+ for _, attr in ri.struct["request"].member_list():
+ attr.attr_put(ri, "req")
+ ri.cw.nl()
+
+ ri.cw.p('err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);')
+ ri.cw.p('if (err < 0)')
+ ri.cw.p(f"return {ret_err};")
+ ri.cw.nl()
+ ri.cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
+ ri.cw.p('if (len < 0)')
+ ri.cw.p(f"return {ret_err};")
+ ri.cw.nl()
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p('rsp = calloc(1, sizeof(*rsp));')
+ ri.cw.p('yarg.data = rsp;')
+ ri.cw.nl()
+ ri.cw.p(f"err = {ri.nl.parse_cb_run(op_prefix(ri, 'reply') + '_parse', '&yarg', False)};")
+ ri.cw.p('if (err < 0)')
+ ri.cw.p('goto err_free;')
+ ri.cw.nl()
+
+ ri.cw.p('err = ynl_recv_ack(ys, err);')
+ ri.cw.p('if (err)')
+ ri.cw.p('goto err_free;')
+ ri.cw.nl()
+ ri.cw.p(f"return {ret_ok};")
+ ri.cw.nl()
+ ri.cw.p('err_free:')
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p(f"{call_free(ri, rdir(direction), 'rsp')}")
+ ri.cw.p(f"return {ret_err};")
+ ri.cw.block_end()
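+
+# Sketch of the C a reply-bearing 'do' op generates (abridged):
+#   nlh = ynl_gemsg_start_req(ys, <family id>, <CMD>, 1);
+#   ... put request attrs, sendto, recvfrom ...
+#   rsp = calloc(1, sizeof(*rsp));
+#   err = <parse reply into rsp>; if (err < 0) goto err_free;
+#   err = ynl_recv_ack(ys, err); if (err) goto err_free;
+#   return rsp;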
+
+
+def print_dump(ri):
+ direction = "request"
+ print_prototype(ri, direction, terminate=False)
+ ri.cw.block_start()
+ local_vars = ['struct ynl_dump_state yds = {};',
+ 'struct nlmsghdr *nlh;',
+ 'int len, err;']
+
+ for var in local_vars:
+ ri.cw.p(f'{var}')
+ ri.cw.nl()
+
+ ri.cw.p('yds.ys = ys;')
+ ri.cw.p(f"yds.alloc_sz = sizeof({type_name(ri, rdir(direction))});")
+ ri.cw.p(f"yds.cb = {op_prefix(ri, 'reply', deref=True)}_parse;")
+ ri.cw.p(f"yds.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.nl()
+ ri.cw.p(f"nlh = ynl_gemsg_start_dump(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
+
+ if "request" in ri.op[ri.op_mode]:
+ ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
+ ri.cw.nl()
+ for _, attr in ri.struct["request"].member_list():
+ attr.attr_put(ri, "req")
+ ri.cw.nl()
+
+ ri.cw.p('err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);')
+ ri.cw.p('if (err < 0)')
+ ri.cw.p('return NULL;')
+ ri.cw.nl()
+
+ ri.cw.block_start(line='do')
+ ri.cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
+ ri.cw.p('if (len < 0)')
+ ri.cw.p('goto free_list;')
+ ri.cw.nl()
+ ri.cw.p(f"err = {ri.nl.parse_cb_run('ynl_dump_trampoline', '&yds', False, indent=2)};")
+ ri.cw.p('if (err < 0)')
+ ri.cw.p('goto free_list;')
+ ri.cw.block_end(line='while (err > 0);')
+ ri.cw.nl()
+
+ ri.cw.p('return yds.first;')
+ ri.cw.nl()
+ ri.cw.p('free_list:')
+ ri.cw.p(call_free(ri, rdir(direction), 'yds.first'))
+ ri.cw.p('return NULL;')
+ ri.cw.block_end()
+
+
+def call_free(ri, direction, var):
+ return f"{op_prefix(ri, direction)}_free({var});"
+
+
+def free_arg_name(direction):
+ if direction:
+ return direction_to_suffix[direction][1:]
+ return 'obj'
+
+
+def print_free_prototype(ri, direction, suffix=';'):
+ name = op_prefix(ri, direction)
+ arg = free_arg_name(direction)
+ ri.cw.write_func_prot('void', f"{name}_free", [f"struct {name} *{arg}"], suffix=suffix)
+
+
+def _print_type(ri, direction, struct):
+ suffix = f'_{ri.type_name}{direction_to_suffix[direction]}'
+
+ if ri.op_mode == 'dump':
+ suffix += '_dump'
+
+ ri.cw.block_start(line=f"struct {ri.family['name']}{suffix}")
+
+ meta_started = False
+ for _, attr in struct.member_list():
+ for type_filter in ['len', 'bit']:
+ line = attr.presence_member(ri.ku_space, type_filter)
+ if line:
+ if not meta_started:
+ ri.cw.block_start(line='struct')
+ meta_started = True
+ ri.cw.p(line)
+ if meta_started:
+ ri.cw.block_end(line='_present;')
+ ri.cw.nl()
+
+ for arg in struct.inherited:
+ ri.cw.p(f"__u32 {arg};")
+
+ for _, attr in struct.member_list():
+ attr.struct_member(ri)
+
+ ri.cw.block_end(line=';')
+ ri.cw.nl()
+
+
+def print_type(ri, direction):
+ _print_type(ri, direction, ri.struct[direction])
+
+
+def print_type_full(ri, struct):
+ _print_type(ri, "", struct)
+
+
+def print_type_helpers(ri, direction, deref=False):
+ print_free_prototype(ri, direction)
+
+ if ri.ku_space == 'user' and direction == 'request':
+ for _, attr in ri.struct[direction].member_list():
+ attr.setter(ri, ri.attr_set, direction, deref=deref)
+ ri.cw.nl()
+
+
+def print_req_type_helpers(ri):
+ print_type_helpers(ri, "request")
+
+
+def print_rsp_type_helpers(ri):
+ if 'reply' not in ri.op[ri.op_mode]:
+ return
+ print_type_helpers(ri, "reply")
+
+
+def print_parse_prototype(ri, direction, terminate=True):
+ suffix = "_rsp" if direction == "reply" else "_req"
+ term = ';' if terminate else ''
+
+ ri.cw.write_func_prot('void', f"{ri.op.render_name}{suffix}_parse",
+ ['const struct nlattr **tb',
+ f"struct {ri.op.render_name}{suffix} *req"],
+ suffix=term)
+
+
+def print_req_type(ri):
+ print_type(ri, "request")
+
+
+def print_rsp_type(ri):
+ if (ri.op_mode == 'do' or ri.op_mode == 'dump') and 'reply' in ri.op[ri.op_mode]:
+ direction = 'reply'
+ elif ri.op_mode == 'event':
+ direction = 'reply'
+ else:
+ return
+ print_type(ri, direction)
+
+
+def print_wrapped_type(ri):
+ ri.cw.block_start(line=f"{type_name(ri, 'reply')}")
+ if ri.op_mode == 'dump':
+ ri.cw.p(f"{type_name(ri, 'reply')} *next;")
+ elif ri.op_mode == 'notify' or ri.op_mode == 'event':
+ ri.cw.p('__u16 family;')
+ ri.cw.p('__u8 cmd;')
+ ri.cw.p(f"void (*free)({type_name(ri, 'reply')} *ntf);")
+ ri.cw.p(f"{type_name(ri, 'reply', deref=True)} obj __attribute__ ((aligned (8)));")
+ ri.cw.block_end(line=';')
+ ri.cw.nl()
+ print_free_prototype(ri, 'reply')
+ ri.cw.nl()
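+
+# E.g. for a dump the wrapper is a singly linked list node:
+#   struct <name>_list { struct <name>_list *next; <obj> obj; };
+# while notify/event wrappers carry family, cmd and a free callback.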
+
+
+def _free_type_members_iter(ri, struct):
+ for _, attr in struct.member_list():
+ if attr.free_needs_iter():
+ ri.cw.p('unsigned int i;')
+ ri.cw.nl()
+ break
+
+
+def _free_type_members(ri, var, struct, ref=''):
+ for _, attr in struct.member_list():
+ attr.free(ri, var, ref)
+
+
+def _free_type(ri, direction, struct):
+ var = free_arg_name(direction)
+
+ print_free_prototype(ri, direction, suffix='')
+ ri.cw.block_start()
+ _free_type_members_iter(ri, struct)
+ _free_type_members(ri, var, struct)
+ if direction:
+ ri.cw.p(f'free({var});')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def free_rsp_nested(ri, struct):
+ _free_type(ri, "", struct)
+
+
+def print_rsp_free(ri):
+ if 'reply' not in ri.op[ri.op_mode]:
+ return
+ _free_type(ri, 'reply', ri.struct['reply'])
+
+
+def print_dump_type_free(ri):
+ sub_type = type_name(ri, 'reply')
+
+ print_free_prototype(ri, 'reply', suffix='')
+ ri.cw.block_start()
+ ri.cw.p(f"{sub_type} *next = rsp;")
+ ri.cw.nl()
+ ri.cw.block_start(line='while (next)')
+ _free_type_members_iter(ri, ri.struct['reply'])
+ ri.cw.p('rsp = next;')
+ ri.cw.p('next = rsp->next;')
+ ri.cw.nl()
+
+ _free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
+ ri.cw.p('free(rsp);')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def print_ntf_type_free(ri):
+ print_free_prototype(ri, 'reply', suffix='')
+ ri.cw.block_start()
+ _free_type_members_iter(ri, ri.struct['reply'])
+ _free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
+ ri.cw.p('free(rsp);')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def print_ntf_parse_prototype(family, cw, suffix=';'):
+ cw.write_func_prot('struct ynl_ntf_base_type *', f"{family['name']}_ntf_parse",
+ ['struct ynl_sock *ys'], suffix=suffix)
+
+
+def print_ntf_type_parse(family, cw, ku_mode):
+ print_ntf_parse_prototype(family, cw, suffix='')
+ cw.block_start()
+ cw.write_func_lvar(['struct genlmsghdr *genlh;',
+ 'struct nlmsghdr *nlh;',
+ 'struct ynl_parse_arg yarg = { .ys = ys, };',
+ 'struct ynl_ntf_base_type *rsp;',
+ 'int len, err;',
+ 'mnl_cb_t parse;'])
+ cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
+ cw.p('if (len < (ssize_t)(sizeof(*nlh) + sizeof(*genlh)))')
+ cw.p('return NULL;')
+ cw.nl()
+ cw.p('nlh = (struct nlmsghdr *)ys->rx_buf;')
+ cw.p('genlh = mnl_nlmsg_get_payload(nlh);')
+ cw.nl()
+ cw.block_start(line='switch (genlh->cmd)')
+ for ntf_op in sorted(family.all_notify.keys()):
+ op = family.ops[ntf_op]
+ ri = RenderInfo(cw, family, ku_mode, op, ntf_op, "notify")
+ for ntf in op['notify']['cmds']:
+ cw.p(f"case {ntf.enum_name}:")
+ cw.p(f"rsp = calloc(1, sizeof({type_name(ri, 'notify')}));")
+ cw.p(f"parse = {op_prefix(ri, 'reply', deref=True)}_parse;")
+ cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ cw.p(f"rsp->free = (void *){op_prefix(ri, 'notify')}_free;")
+ cw.p('break;')
+ for op_name, op in family.ops.items():
+ if 'event' not in op:
+ continue
+ ri = RenderInfo(cw, family, ku_mode, op, op_name, "event")
+ cw.p(f"case {op.enum_name}:")
+ cw.p(f"rsp = calloc(1, sizeof({type_name(ri, 'event')}));")
+ cw.p(f"parse = {op_prefix(ri, 'reply', deref=True)}_parse;")
+ cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ cw.p(f"rsp->free = (void *){op_prefix(ri, 'notify')}_free;")
+ cw.p('break;')
+ cw.p('default:')
+ cw.p('ynl_error_unknown_notification(ys, genlh->cmd);')
+ cw.p('return NULL;')
+ cw.block_end()
+ cw.nl()
+ cw.p('yarg.data = rsp->data;')
+ cw.nl()
+ cw.p(f"err = {cw.nlib.parse_cb_run('parse', '&yarg', True)};")
+ cw.p('if (err < 0)')
+ cw.p('goto err_free;')
+ cw.nl()
+ cw.p('rsp->family = nlh->nlmsg_type;')
+ cw.p('rsp->cmd = genlh->cmd;')
+ cw.p('return rsp;')
+ cw.nl()
+ cw.p('err_free:')
+ cw.p('free(rsp);')
+ cw.p('return NULL;')
+ cw.block_end()
+ cw.nl()
+
+
+def print_req_policy_fwd(cw, struct, ri=None, terminate=True):
+ if terminate and ri and kernel_can_gen_family_struct(struct.family):
+ return
+
+ if terminate:
+ prefix = 'extern '
+ else:
+ if kernel_can_gen_family_struct(struct.family) and ri:
+ prefix = 'static '
+ else:
+ prefix = ''
+
+ suffix = ';' if terminate else ' = {'
+
+ max_attr = struct.attr_max_val
+ if ri:
+ name = ri.op.render_name
+ if ri.op.dual_policy:
+ name += '_' + ri.op_mode
+ else:
+ name = struct.render_name
+ cw.p(f"{prefix}const struct nla_policy {name}_nl_policy[{max_attr.enum_name} + 1]{suffix}")
+
+
+def print_req_policy(cw, struct, ri=None):
+ print_req_policy_fwd(cw, struct, ri=ri, terminate=False)
+ for _, arg in struct.member_list():
+ arg.attr_policy(cw)
+ cw.p("};")
+
+
+def kernel_can_gen_family_struct(family):
+ return family.proto == 'genetlink'
+
+
+def print_kernel_op_table_fwd(family, cw, terminate):
+ exported = not kernel_can_gen_family_struct(family)
+
+ if not terminate or exported:
+ cw.p(f"/* Ops table for {family.name} */")
+
+ pol_to_struct = {'global': 'genl_small_ops',
+ 'per-op': 'genl_ops',
+ 'split': 'genl_split_ops'}
+ struct_type = pol_to_struct[family.kernel_policy]
+
+ if family.kernel_policy == 'split':
+ cnt = 0
+ for op in family.ops.values():
+ if 'do' in op:
+ cnt += 1
+ if 'dump' in op:
+ cnt += 1
+ else:
+ cnt = len(family.ops)
+
+ qual = 'static const' if not exported else 'const'
+ line = f"{qual} struct {struct_type} {family.name}_nl_ops[{cnt}]"
+ if terminate:
+ cw.p(f"extern {line};")
+ else:
+ cw.block_start(line=line + ' =')
+
+ if not terminate:
+ return
+
+ cw.nl()
+ for name in family.hooks['pre']['do']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['const struct genl_split_ops *ops',
+ 'struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+ for name in family.hooks['post']['do']['list']:
+ cw.write_func_prot('void', c_lower(name),
+ ['const struct genl_split_ops *ops',
+ 'struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+ for name in family.hooks['pre']['dump']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['struct netlink_callback *cb'], suffix=';')
+ for name in family.hooks['post']['dump']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['struct netlink_callback *cb'], suffix=';')
+
+ cw.nl()
+
+ for op_name, op in family.ops.items():
+ if op.is_async:
+ continue
+
+ if 'do' in op:
+ name = c_lower(f"{family.name}-nl-{op_name}-doit")
+ cw.write_func_prot('int', name,
+ ['struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+
+ if 'dump' in op:
+ name = c_lower(f"{family.name}-nl-{op_name}-dumpit")
+ cw.write_func_prot('int', name,
+ ['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';')
+ cw.nl()
+
+
+def print_kernel_op_table_hdr(family, cw):
+ print_kernel_op_table_fwd(family, cw, terminate=True)
+
+
+def print_kernel_op_table(family, cw):
+ print_kernel_op_table_fwd(family, cw, terminate=False)
+ if family.kernel_policy == 'global' or family.kernel_policy == 'per-op':
+ for op_name, op in family.ops.items():
+ if op.is_async:
+ continue
+
+ cw.block_start()
+ members = [('cmd', op.enum_name)]
+ if 'dont-validate' in op:
+ members.append(('validate',
+ ' | '.join([c_upper('genl-dont-validate-' + x)
+ for x in op['dont-validate']])), )
+ for op_mode in ['do', 'dump']:
+ if op_mode in op:
+ name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
+ members.append((op_mode + 'it', name))
+ if family.kernel_policy == 'per-op':
+ struct = Struct(family, op['attribute-set'],
+ type_list=op['do']['request']['attributes'])
+
+ name = c_lower(f"{family.name}-{op_name}-nl-policy")
+ members.append(('policy', name))
+ members.append(('maxattr', struct.attr_max_val.enum_name))
+ if 'flags' in op:
+ members.append(('flags', ' | '.join([c_upper('genl-' + x) for x in op['flags']])))
+ cw.write_struct_init(members)
+ cw.block_end(line=',')
+ elif family.kernel_policy == 'split':
+ cb_names = {'do': {'pre': 'pre_doit', 'post': 'post_doit'},
+ 'dump': {'pre': 'start', 'post': 'done'}}
+
+ for op_name, op in family.ops.items():
+ for op_mode in ['do', 'dump']:
+ if op.is_async or op_mode not in op:
+ continue
+
+ cw.block_start()
+ members = [('cmd', op.enum_name)]
+ if 'dont-validate' in op:
+ members.append(('validate',
+ ' | '.join([c_upper('genl-dont-validate-' + x)
+ for x in op['dont-validate']])), )
+ name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
+ if 'pre' in op[op_mode]:
+ members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))
+ members.append((op_mode + 'it', name))
+ if 'post' in op[op_mode]:
+ members.append((cb_names[op_mode]['post'], c_lower(op[op_mode]['post'])))
+ if 'request' in op[op_mode]:
+ struct = Struct(family, op['attribute-set'],
+ type_list=op[op_mode]['request']['attributes'])
+
+ if op.dual_policy:
+ name = c_lower(f"{family.name}-{op_name}-{op_mode}-nl-policy")
+ else:
+ name = c_lower(f"{family.name}-{op_name}-nl-policy")
+ members.append(('policy', name))
+ members.append(('maxattr', struct.attr_max_val.enum_name))
+ flags = (op['flags'] if 'flags' in op else []) + ['cmd-cap-' + op_mode]
+ members.append(('flags', ' | '.join([c_upper('genl-' + x) for x in flags])))
+ cw.write_struct_init(members)
+ cw.block_end(line=',')
+
+ cw.block_end(line=';')
+ cw.nl()
+
+
+def print_kernel_mcgrp_hdr(family, cw):
+ if not family.mcgrps['list']:
+ return
+
+ cw.block_start('enum')
+ for grp in family.mcgrps['list']:
+ grp_id = c_upper(f"{family.name}-nlgrp-{grp['name']}")
+ cw.p(grp_id + ',')
+ cw.block_end(';')
+ cw.nl()
+
+
+def print_kernel_mcgrp_src(family, cw):
+ if not family.mcgrps['list']:
+ return
+
+ cw.block_start('static const struct genl_multicast_group ' + family.name + '_nl_mcgrps[] =')
+ for grp in family.mcgrps['list']:
+ name = grp['name']
+ grp_id = c_upper(f"{family.name}-nlgrp-{name}")
+ cw.p('[' + grp_id + '] = { "' + name + '", },')
+ cw.block_end(';')
+ cw.nl()
+
+
+def print_kernel_family_struct_hdr(family, cw):
+ if not kernel_can_gen_family_struct(family):
+ return
+
+ cw.p(f"extern struct genl_family {family.name}_nl_family;")
+ cw.nl()
+
+
+def print_kernel_family_struct_src(family, cw):
+ if not kernel_can_gen_family_struct(family):
+ return
+
+ cw.block_start(f"struct genl_family {family.name}_nl_family __ro_after_init =")
+ cw.p('.name\t\t= ' + family.fam_key + ',')
+ cw.p('.version\t= ' + family.ver_key + ',')
+ cw.p('.netnsok\t= true,')
+ cw.p('.parallel_ops\t= true,')
+ cw.p('.module\t\t= THIS_MODULE,')
+ if family.kernel_policy == 'per-op':
+ cw.p(f'.ops\t\t= {family.name}_nl_ops,')
+ cw.p(f'.n_ops\t\t= ARRAY_SIZE({family.name}_nl_ops),')
+ elif family.kernel_policy == 'split':
+ cw.p(f'.split_ops\t= {family.name}_nl_ops,')
+ cw.p(f'.n_split_ops\t= ARRAY_SIZE({family.name}_nl_ops),')
+ if family.mcgrps['list']:
+ cw.p(f'.mcgrps\t\t= {family.name}_nl_mcgrps,')
+ cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.name}_nl_mcgrps),')
+ cw.block_end(';')
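+
+# Emits, e.g. for a family named 'netdev' (illustrative):
+#   struct genl_family netdev_nl_family __ro_after_init = {
+#           .name           = NETDEV_FAMILY_NAME,
+#           .version        = NETDEV_FAMILY_VERSION,
+#           .netnsok        = true,
+#           ...
+#   };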
+
+
+def uapi_enum_start(family, cw, obj, ckey='', enum_name='enum-name'):
+ start_line = 'enum'
+ if enum_name in obj:
+ if obj[enum_name]:
+ start_line = 'enum ' + c_lower(obj[enum_name])
+ elif ckey and ckey in obj:
+ start_line = 'enum ' + family.name + '_' + c_lower(obj[ckey])
+ cw.block_start(line=start_line)
+
+
+def render_uapi(family, cw):
+ hdr_prot = f"_UAPI_LINUX_{family.name.upper()}_H"
+ cw.p('#ifndef ' + hdr_prot)
+ cw.p('#define ' + hdr_prot)
+ cw.nl()
+
+ defines = [(family.fam_key, family["name"]),
+ (family.ver_key, family.get('version', 1))]
+ cw.writes_defines(defines)
+ cw.nl()
+
+ defines = []
+ for const in family['definitions']:
+ if const['type'] != 'const':
+ cw.writes_defines(defines)
+ defines = []
+ cw.nl()
+
+ # Write kdoc for enum and flags (one day maybe also structs)
+ if const['type'] == 'enum' or const['type'] == 'flags':
+ enum = family.consts[const['name']]
+
+ if enum.has_doc():
+ cw.p('/**')
+ doc = ''
+ if 'doc' in enum:
+ doc = ' - ' + enum['doc']
+ cw.write_doc_line(enum.enum_name + doc)
+ for entry in enum.entry_list:
+ if entry.has_doc():
+ doc = '@' + entry.c_name + ': ' + entry['doc']
+ cw.write_doc_line(doc)
+ cw.p(' */')
+
+ uapi_enum_start(family, cw, const, 'name')
+ name_pfx = const.get('name-prefix', f"{family.name}-{const['name']}-")
+ for entry in enum.entry_list:
+ suffix = ','
+ if entry.value_change:
+ suffix = f" = {entry.user_value()}" + suffix
+ cw.p(entry.c_name + suffix)
+
+ if const.get('render-max', False):
+ cw.nl()
+ max_name = c_upper(name_pfx + 'max')
+ cw.p('__' + max_name + ',')
+ cw.p(max_name + ' = (__' + max_name + ' - 1)')
+ cw.block_end(line=';')
+ cw.nl()
+ elif const['type'] == 'const':
+ defines.append([c_upper(family.get('c-define-name',
+ f"{family.name}-{const['name']}")),
+ const['value']])
+
+ if defines:
+ cw.writes_defines(defines)
+ cw.nl()
+
+ max_by_define = family.get('max-by-define', False)
+
+ for _, attr_set in family.attr_sets.items():
+ if attr_set.subset_of:
+ continue
+
+ cnt_name = c_upper(family.get('attr-cnt-name', f"__{attr_set.name_prefix}MAX"))
+ max_value = f"({cnt_name} - 1)"
+
+ val = 0
+ uapi_enum_start(family, cw, attr_set.yaml, 'enum-name')
+ for _, attr in attr_set.items():
+ suffix = ','
+ if attr.value != val:
+ suffix = f" = {attr.value},"
+ val = attr.value
+ val += 1
+ cw.p(attr.enum_name + suffix)
+ cw.nl()
+ cw.p(cnt_name + ('' if max_by_define else ','))
+ if not max_by_define:
+ cw.p(f"{attr_set.max_name} = {max_value}")
+ cw.block_end(line=';')
+ if max_by_define:
+ cw.p(f"#define {attr_set.max_name} {max_value}")
+ cw.nl()
+
+ # Commands
+ separate_ntf = 'async-prefix' in family['operations']
+
+ max_name = c_upper(family.get('cmd-max-name', f"{family.op_prefix}MAX"))
+ cnt_name = c_upper(family.get('cmd-cnt-name', f"__{family.op_prefix}MAX"))
+ max_value = f"({cnt_name} - 1)"
+
+ uapi_enum_start(family, cw, family['operations'], 'enum-name')
+ for op in family.msgs.values():
+ if separate_ntf and ('notify' in op or 'event' in op):
+ continue
+
+ suffix = ','
+ if 'value' in op:
+ suffix = f" = {op['value']},"
+ cw.p(op.enum_name + suffix)
+ cw.nl()
+ cw.p(cnt_name + ('' if max_by_define else ','))
+ if not max_by_define:
+ cw.p(f"{max_name} = {max_value}")
+ cw.block_end(line=';')
+ if max_by_define:
+ cw.p(f"#define {max_name} {max_value}")
+ cw.nl()
+
+ if separate_ntf:
+ uapi_enum_start(family, cw, family['operations'], enum_name='async-enum')
+ for op in family.msgs.values():
+ if separate_ntf and not ('notify' in op or 'event' in op):
+ continue
+
+ suffix = ','
+ if 'value' in op:
+ suffix = f" = {op['value']},"
+ cw.p(op.enum_name + suffix)
+ cw.block_end(line=';')
+ cw.nl()
+
+ # Multicast
+ defines = []
+ for grp in family.mcgrps['list']:
+ name = grp['name']
+ defines.append([c_upper(grp.get('c-define-name', f"{family.name}-mcgrp-{name}")),
+ f'{name}'])
+ cw.nl()
+ if defines:
+ cw.writes_defines(defines)
+ cw.nl()
+
+ cw.p(f'#endif /* {hdr_prot} */')
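+
+# The rendered header is guarded and laid out as (illustrative):
+#   #ifndef _UAPI_LINUX_NETDEV_H
+#   #define _UAPI_LINUX_NETDEV_H
+#   ...defines, attribute enums, command enum, mcast group defines...
+#   #endif /* _UAPI_LINUX_NETDEV_H */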
+
+
+def find_kernel_root(full_path):
+ sub_path = ''
+ while True:
+ sub_path = os.path.join(os.path.basename(full_path), sub_path)
+ full_path = os.path.dirname(full_path)
+ maintainers = os.path.join(full_path, "MAINTAINERS")
+ if os.path.exists(maintainers):
+ return full_path, sub_path[:-1]
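+
+# E.g. (illustrative) '/src/linux/Documentation/netlink/specs/netdev.yaml'
+# yields ('/src/linux', 'Documentation/netlink/specs/netdev.yaml'), walking
+# up until the directory holding MAINTAINERS is found.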
+
+
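+# Example invocation (paths illustrative):
+#   ynl-gen-c.py --mode kernel --header \
+#       --spec Documentation/netlink/specs/netdev.yaml -o netdev-genl-gen.h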
+def main():
+ parser = argparse.ArgumentParser(description='Netlink simple parsing generator')
+ parser.add_argument('--mode', dest='mode', type=str, required=True)
+ parser.add_argument('--spec', dest='spec', type=str, required=True)
+ parser.add_argument('--header', dest='header', action='store_true', default=None)
+ parser.add_argument('--source', dest='header', action='store_false')
+ parser.add_argument('--user-header', nargs='+', default=[])
+ parser.add_argument('-o', dest='out_file', type=str)
+ args = parser.parse_args()
+
+ out_file = open(args.out_file, 'w+') if args.out_file else os.sys.stdout
+
+ if args.header is None:
+ parser.error("--header or --source is required")
+
+ try:
+ parsed = Family(args.spec)
+ except yaml.YAMLError as exc:
+ print(exc)
+ os.sys.exit(1)
+ return
+
+ cw = CodeWriter(BaseNlLib(), out_file)
+
+ _, spec_kernel = find_kernel_root(args.spec)
+ if args.mode == 'uapi':
+ cw.p('/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */')
+ else:
+ if args.header:
+ cw.p('/* SPDX-License-Identifier: BSD-3-Clause */')
+ else:
+ cw.p('// SPDX-License-Identifier: BSD-3-Clause')
+ cw.p("/* Do not edit directly, auto-generated from: */")
+ cw.p(f"/*\t{spec_kernel} */")
+ cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
+ cw.nl()
+
+ if args.mode == 'uapi':
+ render_uapi(parsed, cw)
+ return
+
+ hdr_prot = f"_LINUX_{parsed.name.upper()}_GEN_H"
+ if args.header:
+ cw.p('#ifndef ' + hdr_prot)
+ cw.p('#define ' + hdr_prot)
+ cw.nl()
+
+ if args.mode == 'kernel':
+ cw.p('#include <net/netlink.h>')
+ cw.p('#include <net/genetlink.h>')
+ cw.nl()
+ if not args.header:
+ if args.out_file:
+ cw.p(f'#include "{os.path.basename(args.out_file[:-2])}.h"')
+ cw.nl()
+ headers = [parsed.uapi_header]
+ for definition in parsed['definitions']:
+ if 'header' in definition:
+ headers.append(definition['header'])
+ for one in headers:
+ cw.p(f"#include <{one}>")
+ cw.nl()
+
+ if args.mode == "user":
+ if not args.header:
+ cw.p("#include <stdlib.h>")
+ cw.p("#include <stdio.h>")
+ cw.p("#include <string.h>")
+ cw.p("#include <libmnl/libmnl.h>")
+ cw.p("#include <linux/genetlink.h>")
+ cw.nl()
+ for one in args.user_header:
+ cw.p(f'#include "{one}"')
+ else:
+ cw.p('struct ynl_sock;')
+ cw.nl()
+
+ if args.mode == "kernel":
+ if args.header:
+ for _, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ cw.p('/* Common nested types */')
+ break
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ print_req_policy_fwd(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy == 'global':
+ cw.p(f"/* Global operation policy for {parsed.name} */")
+
+ struct = Struct(parsed, parsed.global_policy_set, type_list=parsed.global_policy)
+ print_req_policy_fwd(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy in {'per-op', 'split'}:
+ for op_name, op in parsed.ops.items():
+ if 'do' in op and 'event' not in op:
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ print_req_policy_fwd(cw, ri.struct['request'], ri=ri)
+ cw.nl()
+
+ print_kernel_op_table_hdr(parsed, cw)
+ print_kernel_mcgrp_hdr(parsed, cw)
+ print_kernel_family_struct_hdr(parsed, cw)
+ else:
+ for _, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ cw.p('/* Common nested types */')
+ break
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ print_req_policy(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy == 'global':
+ cw.p(f"/* Global operation policy for {parsed.name} */")
+
+ struct = Struct(parsed, parsed.global_policy_set, type_list=parsed.global_policy)
+ print_req_policy(cw, struct)
+ cw.nl()
+
+ for op_name, op in parsed.ops.items():
+ if parsed.kernel_policy in {'per-op', 'split'}:
+ for op_mode in ['do', 'dump']:
+ if op_mode in op and 'request' in op[op_mode]:
+ cw.p(f"/* {op.enum_name} - {op_mode} */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, op_mode)
+ print_req_policy(cw, ri.struct['request'], ri=ri)
+ cw.nl()
+
+ print_kernel_op_table(parsed, cw)
+ print_kernel_mcgrp_src(parsed, cw)
+ print_kernel_family_struct_src(parsed, cw)
+
+ if args.mode == "user":
+ has_ntf = False
+ if args.header:
+ cw.p('/* Common nested types */')
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ ri = RenderInfo(cw, parsed, args.mode, "", "", "", attr_set)
+ print_type_full(ri, struct)
+
+ for op_name, op in parsed.ops.items():
+ cw.p(f"/* ============== {op.enum_name} ============== */")
+
+ if 'do' in op and 'event' not in op:
+ cw.p(f"/* {op.enum_name} - do */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ print_req_type(ri)
+ print_req_type_helpers(ri)
+ cw.nl()
+ print_rsp_type(ri)
+ print_rsp_type_helpers(ri)
+ cw.nl()
+ print_req_prototype(ri)
+ cw.nl()
+
+ if 'dump' in op:
+ cw.p(f"/* {op.enum_name} - dump */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'dump')
+ if 'request' in op['dump']:
+ print_req_type(ri)
+ print_req_type_helpers(ri)
+ if not ri.type_consistent:
+ print_rsp_type(ri)
+ print_wrapped_type(ri)
+ print_dump_prototype(ri)
+ cw.nl()
+
+ if 'notify' in op:
+ cw.p(f"/* {op.enum_name} - notify */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'notify')
+ has_ntf = True
+ if not ri.type_consistent:
+ raise Exception('Only notifications with consistent types supported')
+ print_wrapped_type(ri)
+
+ if 'event' in op:
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'event')
+ cw.p(f"/* {op.enum_name} - event */")
+ print_rsp_type(ri)
+ cw.nl()
+ print_wrapped_type(ri)
+
+ if has_ntf:
+ cw.p('/* --------------- Common notification parsing --------------- */')
+ print_ntf_parse_prototype(parsed, cw)
+ cw.nl()
+ else:
+ cw.p('/* Policies */')
+ for name, _ in parsed.attr_sets.items():
+ struct = Struct(parsed, name)
+ put_typol_fwd(cw, struct)
+ cw.nl()
+
+ for name, _ in parsed.attr_sets.items():
+ struct = Struct(parsed, name)
+ put_typol(cw, struct)
+
+ cw.p('/* Common nested types */')
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ ri = RenderInfo(cw, parsed, args.mode, "", "", "", attr_set)
+
+ free_rsp_nested(ri, struct)
+ if struct.request:
+ put_req_nested(ri, struct)
+ if struct.reply:
+ parse_rsp_nested(ri, struct)
+
+ for op_name, op in parsed.ops.items():
+ cw.p(f"/* ============== {op.enum_name} ============== */")
+ if 'do' in op and 'event' not in op:
+ cw.p(f"/* {op.enum_name} - do */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ print_rsp_free(ri)
+ parse_rsp_msg(ri)
+ print_req(ri)
+ cw.nl()
+
+ if 'dump' in op:
+ cw.p(f"/* {op.enum_name} - dump */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "dump")
+ if not ri.type_consistent:
+ parse_rsp_msg(ri, deref=True)
+ print_dump_type_free(ri)
+ print_dump(ri)
+ cw.nl()
+
+ if 'notify' in op:
+ cw.p(f"/* {op.enum_name} - notify */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'notify')
+ has_ntf = True
+ if not ri.type_consistent:
+ raise Exception('Only notifications with consistent types supported')
+ print_ntf_type_free(ri)
+
+ if 'event' in op:
+ cw.p(f"/* {op.enum_name} - event */")
+ has_ntf = True
+
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ parse_rsp_msg(ri)
+
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "event")
+ print_ntf_type_free(ri)
+
+ if has_ntf:
+ cw.p('/* --------------- Common notification parsing --------------- */')
+ print_ntf_type_parse(parsed, cw, args.mode)
+
+ if args.header:
+ cw.p(f'#endif /* {hdr_prot} */')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/net/ynl/ynl-regen.sh b/tools/net/ynl/ynl-regen.sh
new file mode 100755
index 000000000000..43989ae48ed0
--- /dev/null
+++ b/tools/net/ynl/ynl-regen.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# SPDX-License-Identifier: BSD-3-Clause
+
+TOOL=$(dirname $(realpath $0))/ynl-gen-c.py
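+
+# Generated files carry a marker header of the form (path illustrative):
+#   /*	Documentation/netlink/specs/netdev.yaml */
+#   /* YNL-GEN kernel source */
+# which the git grep below keys off; -B1 also captures the spec path line.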
+
+force=
+
+while [ ! -z "$1" ]; do
+ case "$1" in
+ -f ) force=yes; shift ;;
+ * ) echo "Unrecognized option '$1'"; exit 1 ;;
+ esac
+done
+
+KDIR=$(dirname $(dirname $(dirname $(dirname $(realpath $0)))))
+
+files=$(git grep --files-with-matches '^/\* YNL-GEN \(kernel\|uapi\)')
+for f in $files; do
+ # params: 0 1 2 3
+ # $YAML YNL-GEN kernel $mode
+ params=( $(git grep -B1 -h '/\* YNL-GEN' $f | sed 's@/\*\(.*\)\*/@\1@') )
+
+ if [ $f -nt ${params[0]} -a -z "$force" ]; then
+ echo -e "\tSKIP $f"
+ continue
+ fi
+
+ echo -e "\tGEN ${params[2]}\t$f"
+ $TOOL --mode ${params[2]} --${params[3]} --spec $KDIR/${params[0]} -o $f
+done
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 401a75844cc0..116fecf80ca1 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -47,3 +47,5 @@ test_cpp
xskxceiver
xdp_redirect_multi
xdp_synproxy
+xdp_hw_metadata
+xdp_features
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 3fc3e54b19aa..b89eb87034e4 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -1,89 +1,24 @@
# TEMPORARY
# Alphabetical order
-atomics # attach(add): actual -524 <= expected 0 (trampoline)
bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
bpf_cookie # failed to open_and_load program: -524 (trampoline)
-bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc)
bpf_loop # attaches to __x64_sys_nanosleep
-bpf_mod_race # BPF trampoline
-bpf_nf # JIT does not support calling kernel function
-bpf_tcp_ca # JIT does not support calling kernel function (kfunc)
-cb_refs # expected error message unexpected error: -524 (trampoline)
-cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc)
-cgrp_kfunc # JIT does not support calling kernel function
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
-core_read_macros # unknown func bpf_probe_read#4 (overlapping)
-d_path # failed to auto-attach program 'prog_stat': -524 (trampoline)
-decap_sanity # JIT does not support calling kernel function (kfunc)
-deny_namespace # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-dummy_st_ops # test_run unexpected error: -524 (errno 524) (trampoline)
-fentry_fexit # fentry attach failed: -524 (trampoline)
-fentry_test # fentry_first_attach unexpected error: -524 (trampoline)
-fexit_bpf2bpf # freplace_attach_trace unexpected error: -524 (trampoline)
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
-fexit_stress # fexit attach failed prog 0 failed: -524 (trampoline)
-fexit_test # fexit_first_attach unexpected error: -524 (trampoline)
-get_func_args_test # trampoline
-get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (trampoline)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
-htab_update # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-kfree_skb # attach fentry unexpected error: -524 (trampoline)
-kfunc_call # 'bpf_prog_active': not found in kernel BTF (?)
-kfunc_dynptr_param # JIT does not support calling kernel function (kfunc)
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test # relies on fentry
ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
-libbpf_get_fd_by_id_opts # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-linked_list # JIT does not support calling kernel function (kfunc)
-lookup_key # JIT does not support calling kernel function (kfunc)
-lru_bug # prog 'printk': failed to auto-attach: -524
-map_kptr # failed to open_and_load program: -524 (trampoline)
-modify_return # modify_return attach failed: -524 (trampoline)
module_attach # skel_attach skeleton attach failed: -524 (trampoline)
-mptcp
-netcnt # failed to load BPF skeleton 'netcnt_prog': -7 (?)
-probe_user # check_kprobe_res wrong kprobe res from probe read (?)
-rcu_read_lock # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
-recursion # skel_attach unexpected error: -524 (trampoline)
ringbuf # skel_load skeleton load failed (?)
-select_reuseport # intermittently fails on new s390x setup
-send_signal # intermittently fails to receive signal
-setget_sockopt # attach unexpected error: -524 (trampoline)
-sk_assign # Can't read on server: Invalid argument (?)
-sk_lookup # endianness problem
-sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (trampoline)
-skc_to_unix_sock # could not attach BPF object unexpected error: -524 (trampoline)
-socket_cookie # prog_attach unexpected error: -524 (trampoline)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
-tailcalls # tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls (?)
-task_kfunc # JIT does not support calling kernel function
-task_local_storage # failed to auto-attach program 'trace_exit_creds': -524 (trampoline)
-test_bpffs # bpffs test failed 255 (iterator)
-test_bprm_opts # failed to auto-attach program 'secure_exec': -524 (trampoline)
-test_ima # failed to auto-attach program 'ima': -524 (trampoline)
-test_local_storage # failed to auto-attach program 'unlink_hook': -524 (trampoline)
test_lsm # attach unexpected error: -524 (trampoline)
-test_overhead # attach_fentry unexpected error: -524 (trampoline)
-test_profiler # unknown func bpf_probe_read_str#45 (overlapping)
-timer # failed to auto-attach program 'test1': -524 (trampoline)
-timer_crash # trampoline
-timer_mim # failed to auto-attach program 'test1': -524 (trampoline)
-trace_ext # failed to auto-attach program 'test_pkt_md_access_new': -524 (trampoline)
trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?)
trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?)
-tracing_struct # failed to auto-attach: -524 (trampoline)
-trampoline_count # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-type_cast # JIT does not support calling kernel function
unpriv_bpf_disabled # fentry
user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?)
verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
-verify_pkcs7_sig # JIT does not support calling kernel function (kfunc)
-vmlinux # failed to auto-attach program 'handle__fentry': -524 (trampoline)
-xdp_adjust_tail # case-128 err 0 errno 28 retval 1 size 128 expect-size 3520 (?)
xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
-xdp_bpf2bpf # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
-xdp_do_redirect # prog_run_max_size unexpected error: -22 (errno 22)
-xdp_synproxy # JIT does not support calling kernel function (kfunc)
-xfrm_info # JIT does not support calling kernel function (kfunc)
+xdp_metadata # JIT does not support calling kernel function (kfunc)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index c22c43bbee19..b677dcd0b77a 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -22,10 +22,11 @@ endif
BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
+SAN_LDFLAGS ?= $(SAN_CFLAGS)
CFLAGS += -g -O0 -rdynamic -Wall -Werror $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
-LDFLAGS += $(SAN_CFLAGS)
+LDFLAGS += $(SAN_LDFLAGS)
LDLIBS += -lelf -lz -lrt -lpthread
# Silence some warnings when compiled with clang
@@ -73,7 +74,8 @@ TEST_PROGS := test_kmod.sh \
test_bpftool.sh \
test_bpftool_metadata.sh \
test_doc_build.sh \
- test_xsk.sh
+ test_xsk.sh \
+ test_xdp_features.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh ima_setup.sh verify_sig_setup.sh \
@@ -83,7 +85,8 @@ TEST_PROGS_EXTENDED := with_addr.sh \
TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
- xskxceiver xdp_redirect_multi xdp_synproxy veristat
+ xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
+ xdp_features
TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read $(OUTPUT)/sign-file
TEST_GEN_FILES += liburandom_read.so
@@ -149,14 +152,13 @@ endif
# NOTE: Semicolon at the end is critical to override lib.mk's default static
# rule for binaries.
$(notdir $(TEST_GEN_PROGS) \
- $(TEST_PROGS) \
- $(TEST_PROGS_EXTENDED) \
$(TEST_GEN_PROGS_EXTENDED) \
$(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
# sort removes libbpf duplicates when not cross-building
-MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
- $(HOST_BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/resolve_btfids \
+MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
+ $(BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/bpftool \
+ $(HOST_BUILD_DIR)/resolve_btfids \
$(RUNQSLOWER_OUTPUT) $(INCLUDE_DIR))
$(MAKE_DIRS):
$(call msg,MKDIR,,$@)
@@ -181,14 +183,15 @@ endif
# do not fail. Static builds leave urandom_read relying on system-wide shared libraries.
$(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
$(call msg,LIB,,$@)
- $(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $^ $(LDLIBS) \
+ $(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) \
+ $^ $(filter-out -static,$(LDLIBS)) \
-fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
-fPIC -shared -o $@
$(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
$(call msg,BINARY,,$@)
$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
- liburandom_read.so $(LDLIBS) \
+ -lurandom_read $(filter-out -static,$(LDLIBS)) -L$(OUTPUT) \
-fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
-Wl,-rpath=. -o $@
@@ -205,16 +208,26 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_tes
$(Q)cp bpf_testmod/bpf_testmod.ko $@
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
+ifneq ($(CROSS_COMPILE),)
+CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
+TRUNNER_BPFTOOL := $(CROSS_BPFTOOL)
+USE_BOOTSTRAP := ""
+else
+TRUNNER_BPFTOOL := $(DEFAULT_BPFTOOL)
+USE_BOOTSTRAP := "bootstrap/"
+endif
$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \
BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \
- BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
+ BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) \
+ EXTRA_CFLAGS='-g -O0 $(SAN_CFLAGS)' \
+ EXTRA_LDFLAGS='$(SAN_LDFLAGS)' && \
cp $(RUNQSLOWER_OUTPUT)runqslower $@
-TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL)
+TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL)
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
@@ -240,19 +253,30 @@ $(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
$(OUTPUT)/test_maps: $(TESTING_HELPERS)
$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
$(OUTPUT)/xsk.o: $(BPFOBJ)
-$(OUTPUT)/xskxceiver: $(OUTPUT)/xsk.o
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
- ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
+ ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \
EXTRA_CFLAGS='-g -O0' \
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \
prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install-bin
+ifneq ($(CROSS_COMPILE),)
+$(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
+ $(BPFOBJ) | $(BUILD_DIR)/bpftool
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
+ ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \
+ EXTRA_CFLAGS='-g -O0' \
+ OUTPUT=$(BUILD_DIR)/bpftool/ \
+ LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \
+ LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \
+ prefix= DESTDIR=$(SCRATCH_DIR)/ install-bin
+endif
+
all: docs
docs:
@@ -269,7 +293,8 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
- EXTRA_CFLAGS='-g -O0' \
+ EXTRA_CFLAGS='-g -O0 $(SAN_CFLAGS)' \
+ EXTRA_LDFLAGS='$(SAN_LDFLAGS)' \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
ifneq ($(BPFOBJ),$(HOST_BPFOBJ))
@@ -278,7 +303,8 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
| $(HOST_BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
EXTRA_CFLAGS='-g -O0' ARCH= CROSS_COMPILE= \
- OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
+ OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
+ CC="$(HOSTCC)" LD="$(HOSTLD)" \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
endif
@@ -299,7 +325,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
$(TOOLSDIR)/lib/ctype.c \
$(TOOLSDIR)/lib/str_error_r.c
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \
- CC=$(HOSTCC) LD=$(HOSTLD) AR=$(HOSTAR) \
+ CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \
LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \
OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ)
@@ -383,6 +409,9 @@ linked_maps.skel.h-deps := linked_maps1.bpf.o linked_maps2.bpf.o
test_subskeleton.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton_lib.bpf.o test_subskeleton.bpf.o
test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton_lib.bpf.o
test_usdt.skel.h-deps := test_usdt.bpf.o test_usdt_multispec.bpf.o
+xsk_xdp_progs.skel.h-deps := xsk_xdp_progs.bpf.o
+xdp_hw_metadata.skel.h-deps := xdp_hw_metadata.bpf.o
+xdp_features.skel.h-deps := xdp_features.bpf.o
LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
@@ -513,11 +542,13 @@ endif
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
$(RESOLVE_BTFIDS) \
+ $(TRUNNER_BPFTOOL) \
| $(TRUNNER_BINARY)-extras
$$(call msg,BINARY,,$$@)
$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@
- $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool $(if $2,$2/)bpftool
+ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \
+ $(OUTPUT)/$(if $2,$2/)bpftool
endef
@@ -527,7 +558,7 @@ TRUNNER_BPF_PROGS_DIR := progs
TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
network_helpers.c testing_helpers.c \
btf_helpers.c flow_dissector_load.h \
- cap_helpers.c test_loader.c
+ cap_helpers.c test_loader.c xsk.c
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
$(OUTPUT)/liburandom_read.so \
$(OUTPUT)/xdp_synproxy \
@@ -576,6 +607,18 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+$(OUTPUT)/xskxceiver: xskxceiver.c $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+
+$(OUTPUT)/xdp_hw_metadata: xdp_hw_metadata.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xsk.o $(OUTPUT)/xdp_hw_metadata.skel.h | $(OUTPUT)
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+
+$(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp_features.skel.h | $(OUTPUT)
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+
# Make sure we are able to include and link libbpf against c++.
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
$(call msg,CXX,,$@)
@@ -595,6 +638,7 @@ $(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h
$(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h
$(OUTPUT)/bench_local_storage.o: $(OUTPUT)/local_storage_bench.skel.h
$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tasks_trace_bench.skel.h
+$(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
$(OUTPUT)/bench: LDLIBS += -lm
$(OUTPUT)/bench: $(OUTPUT)/bench.o \
@@ -609,7 +653,9 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
$(OUTPUT)/bench_strncmp.o \
$(OUTPUT)/bench_bpf_hashmap_full_update.o \
$(OUTPUT)/bench_local_storage.o \
- $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o
+ $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o \
+ $(OUTPUT)/bench_bpf_hashmap_lookup.o \
+ #
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
@@ -626,3 +672,6 @@ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
liburandom_read.so)
.PHONY: docs docs-clean
+
+# Delete partially updated (corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index c1f20a147462..0b2a53bb8460 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -16,6 +16,7 @@ struct env env = {
.warmup_sec = 1,
.duration_sec = 5,
.affinity = false,
+ .quiet = false,
.consumer_cnt = 1,
.producer_cnt = 1,
};
@@ -262,6 +263,7 @@ static const struct argp_option opts[] = {
{ "consumers", 'c', "NUM", 0, "Number of consumer threads"},
{ "verbose", 'v', NULL, 0, "Verbose debug output"},
{ "affinity", 'a', NULL, 0, "Set consumer/producer thread affinity"},
+ { "quiet", 'q', NULL, 0, "Be more quiet"},
{ "prod-affinity", ARG_PROD_AFFINITY_SET, "CPUSET", 0,
"Set of CPUs for producer threads; implies --affinity"},
{ "cons-affinity", ARG_CONS_AFFINITY_SET, "CPUSET", 0,
@@ -275,6 +277,7 @@ extern struct argp bench_bpf_loop_argp;
extern struct argp bench_local_storage_argp;
extern struct argp bench_local_storage_rcu_tasks_trace_argp;
extern struct argp bench_strncmp_argp;
+extern struct argp bench_hashmap_lookup_argp;
static const struct argp_child bench_parsers[] = {
{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
@@ -284,13 +287,15 @@ static const struct argp_child bench_parsers[] = {
{ &bench_strncmp_argp, 0, "bpf_strncmp helper benchmark", 0 },
{ &bench_local_storage_rcu_tasks_trace_argp, 0,
"local_storage RCU Tasks Trace slowdown benchmark", 0 },
+ { &bench_hashmap_lookup_argp, 0, "Hashmap lookup benchmark", 0 },
{},
};
+/* Make pos_args global, so that we can run argp_parse twice, if necessary */
+static int pos_args;
+
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
- static int pos_args;
-
switch (key) {
case 'v':
env.verbose = true;
@@ -329,6 +334,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
case 'a':
env.affinity = true;
break;
+ case 'q':
+ env.quiet = true;
+ break;
case ARG_PROD_AFFINITY_SET:
env.affinity = true;
if (parse_num_list(arg, &env.prod_cpus.cpus,
@@ -359,7 +367,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return 0;
}
-static void parse_cmdline_args(int argc, char **argv)
+static void parse_cmdline_args_init(int argc, char **argv)
{
static const struct argp argp = {
.options = opts,
@@ -369,9 +377,25 @@ static void parse_cmdline_args(int argc, char **argv)
};
if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
exit(1);
- if (!env.list && !env.bench_name) {
- argp_help(&argp, stderr, ARGP_HELP_DOC, "bench");
- exit(1);
+}
+
+static void parse_cmdline_args_final(int argc, char **argv)
+{
+ struct argp_child bench_parsers[2] = {};
+ const struct argp argp = {
+ .options = opts,
+ .parser = parse_arg,
+ .doc = argp_program_doc,
+ .children = bench_parsers,
+ };
+
+ /* Parse arguments the second time with the correct set of parsers */
+ if (bench->argp) {
+ bench_parsers[0].argp = bench->argp;
+ bench_parsers[0].header = bench->name;
+ pos_args = 0;
+ if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
+ exit(1);
}
}
@@ -490,6 +514,7 @@ extern const struct bench bench_local_storage_cache_seq_get;
extern const struct bench bench_local_storage_cache_interleaved_get;
extern const struct bench bench_local_storage_cache_hashmap_control;
extern const struct bench bench_local_storage_tasks_trace;
+extern const struct bench bench_bpf_hashmap_lookup;
static const struct bench *benchs[] = {
&bench_count_global,
@@ -529,17 +554,17 @@ static const struct bench *benchs[] = {
&bench_local_storage_cache_interleaved_get,
&bench_local_storage_cache_hashmap_control,
&bench_local_storage_tasks_trace,
+ &bench_bpf_hashmap_lookup,
};
-static void setup_benchmark()
+static void find_benchmark(void)
{
- int i, err;
+ int i;
if (!env.bench_name) {
fprintf(stderr, "benchmark name is not specified\n");
exit(1);
}
-
for (i = 0; i < ARRAY_SIZE(benchs); i++) {
if (strcmp(benchs[i]->name, env.bench_name) == 0) {
bench = benchs[i];
@@ -550,8 +575,14 @@ static void setup_benchmark()
fprintf(stderr, "benchmark '%s' not found\n", env.bench_name);
exit(1);
}
+}
- printf("Setting up benchmark '%s'...\n", bench->name);
+static void setup_benchmark(void)
+{
+ int i, err;
+
+ if (!env.quiet)
+ printf("Setting up benchmark '%s'...\n", bench->name);
state.producers = calloc(env.producer_cnt, sizeof(*state.producers));
state.consumers = calloc(env.consumer_cnt, sizeof(*state.consumers));
@@ -597,7 +628,8 @@ static void setup_benchmark()
next_cpu(&env.prod_cpus));
}
- printf("Benchmark '%s' started.\n", bench->name);
+ if (!env.quiet)
+ printf("Benchmark '%s' started.\n", bench->name);
}
static pthread_mutex_t bench_done_mtx = PTHREAD_MUTEX_INITIALIZER;
@@ -621,7 +653,7 @@ static void collect_measurements(long delta_ns) {
int main(int argc, char **argv)
{
- parse_cmdline_args(argc, argv);
+ parse_cmdline_args_init(argc, argv);
if (env.list) {
int i;
@@ -633,6 +665,9 @@ int main(int argc, char **argv)
return 0;
}
+ find_benchmark();
+ parse_cmdline_args_final(argc, argv);
+
setup_benchmark();
setup_timer();
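
Since bench.c now parses argv twice, a tiny standalone glibc program (ours, not from the patch; all option and benchmark names are invented) may help show the shape of the trick: discover the benchmark name on the first pass, then re-run argp_parse() after resetting the positional-argument state, with the chosen benchmark's parser attached as an argp child.

#include <argp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *bench_name;
static int loops = 1;

static const struct argp_option loop_opts[] = {
	{ "loops", 'l', "N", 0, "Number of loops" },
	{},
};

static error_t parse_loop(int key, char *arg, struct argp_state *state)
{
	if (key == 'l') {
		loops = atoi(arg);
		return 0;
	}
	return ARGP_ERR_UNKNOWN;
}

static const struct argp loop_argp = { loop_opts, parse_loop };

static error_t parse_top(int key, char *arg, struct argp_state *state)
{
	if (key == ARGP_KEY_ARG && !bench_name) {
		bench_name = arg;
		return 0;
	}
	return ARGP_ERR_UNKNOWN;
}

int main(int argc, char **argv)
{
	struct argp_child children[2] = {
		{ .argp = &loop_argp, .header = "loop benchmark" },
		{},
	};
	struct argp argp = { NULL, parse_top, "BENCH", NULL, children };
	int selected;

	/* pass 1: only needs to discover the benchmark name */
	if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
		return 1;

	selected = bench_name && !strcmp(bench_name, "loop");
	if (selected) {
		bench_name = NULL;	/* mirrors bench.c's pos_args reset */
		if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
			return 1;
	}

	printf("bench=%s loops=%d\n", selected ? "loop" : "(none)", loops);
	return 0;
}
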
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index d748255877e2..402729c6a3ac 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -24,6 +24,7 @@ struct env {
bool verbose;
bool list;
bool affinity;
+ bool quiet;
int consumer_cnt;
int producer_cnt;
struct cpu_set prod_cpus;
@@ -47,6 +48,7 @@ struct bench_res {
struct bench {
const char *name;
+ const struct argp *argp;
void (*validate)(void);
void (*setup)(void);
void *(*producer_thread)(void *ctx);
diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
index 5bcb8a8cdeb2..7c8ccc108313 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
@@ -428,6 +428,7 @@ static void *consumer(void *input)
const struct bench bench_bloom_lookup = {
.name = "bloom-lookup",
+ .argp = &bench_bloom_map_argp,
.validate = validate,
.setup = bloom_lookup_setup,
.producer_thread = producer,
@@ -439,6 +440,7 @@ const struct bench bench_bloom_lookup = {
const struct bench bench_bloom_update = {
.name = "bloom-update",
+ .argp = &bench_bloom_map_argp,
.validate = validate,
.setup = bloom_update_setup,
.producer_thread = producer,
@@ -450,6 +452,7 @@ const struct bench bench_bloom_update = {
const struct bench bench_bloom_false_positive = {
.name = "bloom-false-positive",
+ .argp = &bench_bloom_map_argp,
.validate = validate,
.setup = false_positive_setup,
.producer_thread = producer,
@@ -461,6 +464,7 @@ const struct bench bench_bloom_false_positive = {
const struct bench bench_hashmap_without_bloom = {
.name = "hashmap-without-bloom",
+ .argp = &bench_bloom_map_argp,
.validate = validate,
.setup = hashmap_no_bloom_setup,
.producer_thread = producer,
@@ -472,6 +476,7 @@ const struct bench bench_hashmap_without_bloom = {
const struct bench bench_hashmap_with_bloom = {
.name = "hashmap-with-bloom",
+ .argp = &bench_bloom_map_argp,
.validate = validate,
.setup = hashmap_with_bloom_setup,
.producer_thread = producer,
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
index cec51e0ff4b8..75abe8137b6c 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bytedance */
-#include <argp.h>
#include "bench.h"
#include "bpf_hashmap_full_update_bench.skel.h"
#include "bpf_util.h"
@@ -68,7 +67,7 @@ static void setup(void)
bpf_map_update_elem(map_fd, &i, &i, BPF_ANY);
}
-void hashmap_report_final(struct bench_res res[], int res_cnt)
+static void hashmap_report_final(struct bench_res res[], int res_cnt)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
int i;
@@ -85,7 +84,7 @@ void hashmap_report_final(struct bench_res res[], int res_cnt)
}
const struct bench bench_bpf_hashmap_full_update = {
- .name = "bpf-hashmap-ful-update",
+ .name = "bpf-hashmap-full-update",
.validate = validate,
.setup = setup,
.producer_thread = producer,
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c
new file mode 100644
index 000000000000..8dbb02f75cff
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+
+#include <sys/random.h>
+#include <argp.h>
+#include "bench.h"
+#include "bpf_hashmap_lookup.skel.h"
+#include "bpf_util.h"
+
+/* BPF triggering benchmarks */
+static struct ctx {
+ struct bpf_hashmap_lookup *skel;
+} ctx;
+
+/* only available to kernel, so define it here */
+#define BPF_MAX_LOOPS (1<<23)
+
+#define MAX_KEY_SIZE 1024 /* the size of the key map */
+
+static struct {
+ __u32 key_size;
+ __u32 map_flags;
+ __u32 max_entries;
+ __u32 nr_entries;
+ __u32 nr_loops;
+} args = {
+ .key_size = 4,
+ .map_flags = 0,
+ .max_entries = 1000,
+ .nr_entries = 500,
+ .nr_loops = 1000000,
+};
+
+enum {
+ ARG_KEY_SIZE = 8001,
+ ARG_MAP_FLAGS,
+ ARG_MAX_ENTRIES,
+ ARG_NR_ENTRIES,
+ ARG_NR_LOOPS,
+};
+
+static const struct argp_option opts[] = {
+ { "key_size", ARG_KEY_SIZE, "KEY_SIZE", 0,
+ "The hashmap key size (max 1024)"},
+ { "map_flags", ARG_MAP_FLAGS, "MAP_FLAGS", 0,
+ "The hashmap flags passed to BPF_MAP_CREATE"},
+ { "max_entries", ARG_MAX_ENTRIES, "MAX_ENTRIES", 0,
+ "The hashmap max entries"},
+ { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
+ "The number of entries to insert/lookup"},
+ { "nr_loops", ARG_NR_LOOPS, "NR_LOOPS", 0,
+ "The number of loops for the benchmark"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_KEY_SIZE:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > MAX_KEY_SIZE) {
+ fprintf(stderr, "invalid key_size");
+ argp_usage(state);
+ }
+ args.key_size = ret;
+ break;
+ case ARG_MAP_FLAGS:
+ ret = strtol(arg, NULL, 0);
+ if (ret < 0 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid map_flags");
+ argp_usage(state);
+ }
+ args.map_flags = ret;
+ break;
+ case ARG_MAX_ENTRIES:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid max_entries");
+ argp_usage(state);
+ }
+ args.max_entries = ret;
+ break;
+ case ARG_NR_ENTRIES:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid nr_entries");
+ argp_usage(state);
+ }
+ args.nr_entries = ret;
+ break;
+ case ARG_NR_LOOPS:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > BPF_MAX_LOOPS) {
+ fprintf(stderr, "invalid nr_loops: %ld (min=1 max=%u)\n",
+ ret, BPF_MAX_LOOPS);
+ argp_usage(state);
+ }
+ args.nr_loops = ret;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_hashmap_lookup_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+static void validate(void)
+{
+ if (env.consumer_cnt != 1) {
+ fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ exit(1);
+ }
+
+ if (args.nr_entries > args.max_entries) {
+ fprintf(stderr, "args.nr_entries is too big! (max %u, got %u)\n",
+ args.max_entries, args.nr_entries);
+ exit(1);
+ }
+}
+
+static void *producer(void *input)
+{
+ while (true) {
+ /* trigger the bpf program */
+ syscall(__NR_getpgid);
+ }
+ return NULL;
+}
+
+static void *consumer(void *input)
+{
+ return NULL;
+}
+
+static void measure(struct bench_res *res)
+{
+}
+
+static inline void patch_key(u32 i, u32 *key)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ *key = i + 1;
+#else
+ *key = __builtin_bswap32(i + 1);
+#endif
+ /* the rest of key is random */
+}
+
+static void setup(void)
+{
+ struct bpf_link *link;
+ int map_fd;
+ int ret;
+ int i;
+
+ setup_libbpf();
+
+ ctx.skel = bpf_hashmap_lookup__open();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ bpf_map__set_max_entries(ctx.skel->maps.hash_map_bench, args.max_entries);
+ bpf_map__set_key_size(ctx.skel->maps.hash_map_bench, args.key_size);
+ bpf_map__set_value_size(ctx.skel->maps.hash_map_bench, 8);
+ bpf_map__set_map_flags(ctx.skel->maps.hash_map_bench, args.map_flags);
+
+ ctx.skel->bss->nr_entries = args.nr_entries;
+ ctx.skel->bss->nr_loops = args.nr_loops / args.nr_entries;
+
+ if (args.key_size > 4) {
+ for (i = 1; i < args.key_size/4; i++)
+ ctx.skel->bss->key[i] = 2654435761 * i;
+ }
+
+ ret = bpf_hashmap_lookup__load(ctx.skel);
+ if (ret) {
+ bpf_hashmap_lookup__destroy(ctx.skel);
+ fprintf(stderr, "failed to load map: %s", strerror(-ret));
+ exit(1);
+ }
+
+ /* fill in the hash_map */
+ map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
+ for (u64 i = 0; i < args.nr_entries; i++) {
+ patch_key(i, ctx.skel->bss->key);
+ bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY);
+ }
+
+ link = bpf_program__attach(ctx.skel->progs.benchmark);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+}
+
+static inline double events_from_time(u64 time)
+{
+ if (time)
+ return args.nr_loops * 1000000000llu / time / 1000000.0L;
+
+ return 0;
+}
+
+static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time)
+{
+ int i, n = 0;
+
+ *events_mean = 0;
+ *events_stddev = 0;
+ *mean_time = 0;
+
+ for (i = 0; i < 32; i++) {
+ if (!times[i])
+ break;
+ *mean_time += times[i];
+ *events_mean += events_from_time(times[i]);
+ n += 1;
+ }
+ if (!n)
+ return 0;
+
+ *mean_time /= n;
+ *events_mean /= n;
+
+ if (n > 1) {
+ for (i = 0; i < n; i++) {
+ double events_i = *events_mean - events_from_time(times[i]);
+ *events_stddev += events_i * events_i / (n - 1);
+ }
+ *events_stddev = sqrt(*events_stddev);
+ }
+
+ return n;
+}
+
+static void hashmap_report_final(struct bench_res res[], int res_cnt)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ double events_mean, events_stddev;
+ u64 mean_time;
+ int i, n;
+
+ for (i = 0; i < nr_cpus; i++) {
+ n = compute_events(ctx.skel->bss->percpu_times[i], &events_mean,
+ &events_stddev, &mean_time);
+ if (n == 0)
+ continue;
+
+ if (env.quiet) {
+ /* we expect only one cpu to be present */
+ if (env.affinity)
+ printf("%.3lf\n", events_mean);
+ else
+ printf("cpu%02d %.3lf\n", i, events_mean);
+ } else {
+ printf("cpu%02d: lookup %.3lfM ± %.3lfM events/sec"
+ " (approximated from %d samples of ~%lums)\n",
+ i, events_mean, 2*events_stddev,
+ n, mean_time / 1000000);
+ }
+ }
+}
+
+const struct bench bench_bpf_hashmap_lookup = {
+ .name = "bpf-hashmap-lookup",
+ .argp = &bench_hashmap_lookup_argp,
+ .validate = validate,
+ .setup = setup,
+ .producer_thread = producer,
+ .consumer_thread = consumer,
+ .measure = measure,
+ .report_progress = NULL,
+ .report_final = hashmap_report_final,
+};
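
For reference, the statistics printed by hashmap_report_final() follow directly from compute_events() above; in our notation (not the patch's), with $t_i$ the $i$-th per-CPU sample time in nanoseconds and $n \le 32$ valid samples:

$$e_i = \frac{\mathrm{nr\_loops} \cdot 10^9}{t_i \cdot 10^6}\ \text{M events/s}, \qquad
\bar{e} = \frac{1}{n}\sum_{i=1}^{n} e_i, \qquad
\sigma = \sqrt{\frac{1}{n-1}\sum_{i=1}^{n}\bigl(\bar{e} - e_i\bigr)^2}$$

and the verbose report line prints $\bar{e} \pm 2\sigma$, i.e. roughly a 95% interval if the samples were normally distributed.
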
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c
index d0a6572bfab6..d8a0394e10b1 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c
@@ -95,6 +95,7 @@ static void setup(void)
const struct bench bench_bpf_loop = {
.name = "bpf-loop",
+ .argp = &bench_bpf_loop_argp,
.validate = validate,
.setup = setup,
.producer_thread = producer,
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage.c b/tools/testing/selftests/bpf/benchs/bench_local_storage.c
index 5a378c84e81f..d4b2817306d4 100644
--- a/tools/testing/selftests/bpf/benchs/bench_local_storage.c
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage.c
@@ -255,6 +255,7 @@ static void *producer(void *input)
*/
const struct bench bench_local_storage_cache_seq_get = {
.name = "local-storage-cache-seq-get",
+ .argp = &bench_local_storage_argp,
.validate = validate,
.setup = local_storage_cache_get_setup,
.producer_thread = producer,
@@ -266,6 +267,7 @@ const struct bench bench_local_storage_cache_seq_get = {
const struct bench bench_local_storage_cache_interleaved_get = {
.name = "local-storage-cache-int-get",
+ .argp = &bench_local_storage_argp,
.validate = validate,
.setup = local_storage_cache_get_interleaved_setup,
.producer_thread = producer,
@@ -277,6 +279,7 @@ const struct bench bench_local_storage_cache_interleaved_get = {
const struct bench bench_local_storage_cache_hashmap_control = {
.name = "local-storage-cache-hashmap-control",
+ .argp = &bench_local_storage_argp,
.validate = validate,
.setup = hashmap_setup,
.producer_thread = producer,
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
index 43f109d93130..d5eb5587f2aa 100644
--- a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
@@ -12,17 +12,14 @@
static struct {
__u32 nr_procs;
__u32 kthread_pid;
- bool quiet;
} args = {
.nr_procs = 1000,
.kthread_pid = 0,
- .quiet = false,
};
enum {
ARG_NR_PROCS = 7000,
ARG_KTHREAD_PID = 7001,
- ARG_QUIET = 7002,
};
static const struct argp_option opts[] = {
@@ -30,8 +27,6 @@ static const struct argp_option opts[] = {
"Set number of user processes to spin up"},
{ "kthread_pid", ARG_KTHREAD_PID, "PID", 0,
"Pid of rcu_tasks_trace kthread for ticks tracking"},
- { "quiet", ARG_QUIET, "{0,1}", 0,
- "If true, don't report progress"},
{},
};
@@ -56,14 +51,6 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
args.kthread_pid = ret;
break;
- case ARG_QUIET:
- ret = strtol(arg, NULL, 10);
- if (ret < 0 || ret > 1) {
- fprintf(stderr, "invalid quiet %ld\n", ret);
- argp_usage(state);
- }
- args.quiet = ret;
- break;
break;
default:
return ARGP_ERR_UNKNOWN;
@@ -230,7 +217,7 @@ static void report_progress(int iter, struct bench_res *res, long delta_ns)
exit(1);
}
- if (args.quiet)
+ if (env.quiet)
return;
printf("Iter %d\t avg tasks_trace grace period latency\t%lf ns\n",
@@ -271,6 +258,7 @@ static void report_final(struct bench_res res[], int res_cnt)
*/
const struct bench bench_local_storage_tasks_trace = {
.name = "local-storage-tasks-trace",
+ .argp = &bench_local_storage_rcu_tasks_trace_argp,
.validate = validate,
.setup = local_storage_tasks_trace_setup,
.producer_thread = producer,
diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
index c2554f9695ff..fc91fdac4faa 100644
--- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
+++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
@@ -518,6 +518,7 @@ static void *perfbuf_custom_consumer(void *input)
const struct bench bench_rb_libbpf = {
.name = "rb-libbpf",
+ .argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = ringbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
@@ -529,6 +530,7 @@ const struct bench bench_rb_libbpf = {
const struct bench bench_rb_custom = {
.name = "rb-custom",
+ .argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = ringbuf_custom_setup,
.producer_thread = bufs_sample_producer,
@@ -540,6 +542,7 @@ const struct bench bench_rb_custom = {
const struct bench bench_pb_libbpf = {
.name = "pb-libbpf",
+ .argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = perfbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
@@ -551,6 +554,7 @@ const struct bench bench_pb_libbpf = {
const struct bench bench_pb_custom = {
.name = "pb-custom",
+ .argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = perfbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
diff --git a/tools/testing/selftests/bpf/benchs/bench_strncmp.c b/tools/testing/selftests/bpf/benchs/bench_strncmp.c
index 494b591c0289..d3fad2ba6916 100644
--- a/tools/testing/selftests/bpf/benchs/bench_strncmp.c
+++ b/tools/testing/selftests/bpf/benchs/bench_strncmp.c
@@ -140,6 +140,7 @@ static void strncmp_measure(struct bench_res *res)
const struct bench bench_strncmp_no_helper = {
.name = "strncmp-no-helper",
+ .argp = &bench_strncmp_argp,
.validate = strncmp_validate,
.setup = strncmp_no_helper_setup,
.producer_thread = strncmp_producer,
@@ -151,6 +152,7 @@ const struct bench bench_strncmp_no_helper = {
const struct bench bench_strncmp_helper = {
.name = "strncmp-helper",
+ .argp = &bench_strncmp_argp,
.validate = strncmp_validate,
.setup = strncmp_helper_setup,
.producer_thread = strncmp_producer,
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh b/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
index 1e2de838f9fa..cd2efd3fdef3 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
@@ -6,6 +6,6 @@ source ./benchs/run_common.sh
set -eufo pipefail
nr_threads=`expr $(cat /proc/cpuinfo | grep "processor"| wc -l) - 1`
-summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-ful-update)
+summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-full-update)
printf "$summary"
printf "\n"
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh b/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh
index 5dac1f02892c..3e8a969f2096 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh
@@ -8,4 +8,4 @@ if [ -z $kthread_pid ]; then
exit 1
fi
-./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet 1 local-storage-tasks-trace
+./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet local-storage-tasks-trace
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 424f7bbbfe9b..dbd2c729781a 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -65,4 +65,28 @@ extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ks
*/
extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
+/* Description
+ * Remove 'node' from rbtree with root 'root'
+ * Returns
+ * Pointer to the removed node, or NULL if 'root' didn't contain 'node'
+ */
+extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
+ struct bpf_rb_node *node) __ksym;
+
+/* Description
+ * Add 'node' to rbtree with root 'root' using comparator 'less'
+ * Returns
+ * Nothing
+ */
+extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
+ bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) __ksym;
+
+/* Description
+ * Return the first (leftmost) node in input tree
+ * Returns
+ * Pointer to the node, which is _not_ removed from the tree. If the tree
+ * contains no nodes, returns NULL.
+ */
+extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
+
#endif
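
To make the contract above concrete, here is a minimal BPF-side sketch modeled on the kernel's rbtree selftests rather than taken from this patch; the decl-tag and section macros are repeated locally for self-containment, and the locking discipline (the kfuncs called under the bpf_spin_lock that owns the tree) is an assumption carried over from those tests.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

char _license[] SEC("license") = "GPL";

/* These mirror what the selftests get from bpf_misc.h / bpf_experimental.h;
 * repeated here only to keep the sketch self-contained.
 */
#ifndef container_of
#define container_of(ptr, type, member)					\
	((type *)((void *)(ptr) - __builtin_offsetof(type, member)))
#endif
#ifndef private
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
#endif
#ifndef __contains
#define __contains(name, node)						\
	__attribute__((btf_decl_tag("contains:" #name ":" #node)))
#endif

struct node_data {
	long key;
	struct bpf_rb_node node;
};

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *na = container_of(a, struct node_data, node);
	struct node_data *nb = container_of(b, struct node_data, node);

	return na->key < nb->key;
}

SEC("tc")
long rbtree_add_and_peek(void *ctx)
{
	struct bpf_rb_node *res;
	struct node_data *n;
	long first_key = -1;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 42;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	/* non-owning reference: only valid while the lock is held */
	res = bpf_rbtree_first(&groot);
	if (res)
		first_key = container_of(res, struct node_data, node)->key;
	bpf_spin_unlock(&glock);

	return first_key == 42 ? 0 : 1;
}
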
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 5085fea3cac5..46500636d8cd 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -59,7 +59,7 @@ bpf_testmod_test_struct_arg_5(void) {
return bpf_testmod_test_struct_arg_result;
}
-noinline void
+__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
diff --git a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
index f472d28ad11a..16f1671e4bde 100644
--- a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
+++ b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
@@ -18,7 +18,7 @@ static __u32 get_map_id_from_fd(int map_fd)
uint32_t info_len = sizeof(map_info);
int ret;
- ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ ret = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
CHECK(ret < 0, "Finding map info failed", "error:%s\n",
strerror(errno));
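
The bpf_obj_get_info_by_fd() call above is the first of many this series converts to the new type-specific libbpf wrappers (bpf_map_/bpf_prog_/bpf_btf_/bpf_link_get_info_by_fd). A minimal sketch of the new shape (the helper name print_map_id is ours):

#include <bpf/bpf.h>
#include <stdio.h>

/* Same semantics as bpf_obj_get_info_by_fd(), but the typed signature
 * lets the compiler flag a bpf_prog_info passed for a map fd, and so on.
 */
static int print_map_id(int map_fd)
{
	struct bpf_map_info info = {};
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_map_get_info_by_fd(map_fd, &info, &info_len);
	if (err)
		return err;

	printf("map id: %u\n", info.id);
	return 0;
}
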
diff --git a/tools/testing/selftests/bpf/netcnt_common.h b/tools/testing/selftests/bpf/netcnt_common.h
index 0ab1c88041cd..2d4a58e4e39c 100644
--- a/tools/testing/selftests/bpf/netcnt_common.h
+++ b/tools/testing/selftests/bpf/netcnt_common.h
@@ -8,11 +8,11 @@
/* sizeof(struct bpf_local_storage_elem):
*
- * It really is about 128 bytes on x86_64, but allocate more to account for
- * possible layout changes, different architectures, etc.
+ * It is about 128 bytes on x86_64 and 512 bytes on s390x, but allocate more to
+ * account for possible layout changes, different architectures, etc.
* The kernel will wrap up to PAGE_SIZE internally anyway.
*/
-#define SIZEOF_BPF_LOCAL_STORAGE_ELEM 256
+#define SIZEOF_BPF_LOCAL_STORAGE_ELEM 768
/* Try to estimate kernel's BPF_LOCAL_STORAGE_MAX_VALUE_SIZE: */
#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE (0xFFFF - \
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 9566d9d2f6ee..56374c8b5436 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -33,8 +33,8 @@ void test_attach_probe(void)
struct test_attach_probe* skel;
ssize_t uprobe_offset, ref_ctr_offset;
struct bpf_link *uprobe_err_link;
+ FILE *devnull;
bool legacy;
- char *mem;
/* Check if new-style kprobe/uprobe API is supported.
* Kernels that support new FD-based kprobe and uprobe BPF attachment
@@ -147,7 +147,7 @@ void test_attach_probe(void)
/* test attach by name for a library function, using the library
* as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
*/
- uprobe_opts.func_name = "malloc";
+ uprobe_opts.func_name = "fopen";
uprobe_opts.retprobe = false;
skel->links.handle_uprobe_byname2 =
bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
@@ -157,7 +157,7 @@ void test_attach_probe(void)
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
goto cleanup;
- uprobe_opts.func_name = "free";
+ uprobe_opts.func_name = "fclose";
uprobe_opts.retprobe = true;
skel->links.handle_uretprobe_byname2 =
bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
@@ -195,8 +195,8 @@ void test_attach_probe(void)
usleep(1);
/* trigger & validate shared library u[ret]probes attached by name */
- mem = malloc(1);
- free(mem);
+ devnull = fopen("/dev/null", "r");
+ fclose(devnull);
/* trigger & validate uprobe & uretprobe */
trigger_func();
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 2be2d61954bc..26b2d1bffdfd 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -472,6 +472,7 @@ static void lsm_subtest(struct test_bpf_cookie *skel)
int prog_fd;
int lsm_fd = -1;
LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ int err;
skel->bss->lsm_res = 0;
@@ -482,8 +483,9 @@ static void lsm_subtest(struct test_bpf_cookie *skel)
if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
goto cleanup;
- stack_mprotect();
- if (!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ err = stack_mprotect();
+ if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
+ !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
goto cleanup;
usleep(1);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 3af6450763e9..1f02168103dd 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -195,8 +195,8 @@ static void check_bpf_link_info(const struct bpf_program *prog)
return;
info_len = sizeof(info);
- err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
- ASSERT_OK(err, "bpf_obj_get_info_by_fd");
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
+ ASSERT_OK(err, "bpf_link_get_info_by_fd");
ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
bpf_link__destroy(link);
@@ -684,13 +684,13 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
/* setup filtering map_id in bpf program */
map_info_len = sizeof(map_info);
- err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
+ err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
if (CHECK(err, "get_map_info", "get map info failed: %s\n",
strerror(errno)))
goto free_map2;
skel->bss->map1_id = map_info.id;
- err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
+ err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
if (CHECK(err, "get_map_info", "get map info failed: %s\n",
strerror(errno)))
goto free_map2;
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
index e1c1e521cca2..675b90b15280 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -44,7 +44,7 @@ void serial_test_bpf_obj_id(void)
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno);
- /* Check bpf_obj_get_info_by_fd() */
+ /* Check bpf_map_get_info_by_fd() */
bzero(zeros, sizeof(zeros));
for (i = 0; i < nr_iters; i++) {
now = time(NULL);
@@ -79,7 +79,7 @@ void serial_test_bpf_obj_id(void)
/* Check getting map info */
info_len = sizeof(struct bpf_map_info) * 2;
bzero(&map_infos[i], info_len);
- err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
+ err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i],
&info_len);
if (CHECK(err ||
map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
@@ -118,8 +118,8 @@ void serial_test_bpf_obj_id(void)
err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
if (CHECK_FAIL(err))
goto done;
- err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
- &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i],
+ &info_len);
load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ (prog_infos[i].load_time / nsec_per_sec);
if (CHECK(err ||
@@ -161,8 +161,8 @@ void serial_test_bpf_obj_id(void)
bzero(&link_infos[i], info_len);
link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
- err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
- &link_infos[i], &info_len);
+ err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]),
+ &link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
@@ -217,7 +217,7 @@ void serial_test_bpf_obj_id(void)
* prog_info.map_ids = NULL
*/
prog_info.nr_map_ids = 1;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
if (CHECK(!err || errno != EFAULT,
"get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
err, errno, EFAULT))
@@ -228,7 +228,7 @@ void serial_test_bpf_obj_id(void)
saved_map_id = *(int *)((long)prog_infos[i].map_ids);
prog_info.map_ids = prog_infos[i].map_ids;
prog_info.nr_map_ids = 2;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
prog_infos[i].jited_prog_insns = 0;
prog_infos[i].xlated_prog_insns = 0;
CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
@@ -277,7 +277,7 @@ void serial_test_bpf_obj_id(void)
if (CHECK_FAIL(err))
goto done;
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
CHECK(err || info_len != sizeof(struct bpf_map_info) ||
memcmp(&map_info, &map_infos[i], info_len) ||
array_value != array_magic_value,
@@ -322,7 +322,7 @@ void serial_test_bpf_obj_id(void)
nr_id_found++;
- err = bpf_obj_get_info_by_fd(link_fd, &link_info, &info_len);
+ err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len);
cmp_res = memcmp(&link_info, &link_infos[i],
offsetof(struct bpf_link_info, raw_tracepoint));
CHECK(err || info_len != sizeof(link_info) || cmp_res,
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index de1b5b9eb93a..cbb600be943d 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -4422,7 +4422,7 @@ static int test_big_btf_info(unsigned int test_num)
info->btf = ptr_to_u64(user_btf);
info->btf_size = raw_btf_size;
- err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(!err, "!err")) {
err = -1;
goto done;
@@ -4435,7 +4435,7 @@ static int test_big_btf_info(unsigned int test_num)
* to userspace.
*/
info_garbage.garbage = 0;
- err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(err || info_len != sizeof(*info),
"err:%d errno:%d info_len:%u sizeof(*info):%zu",
err, errno, info_len, sizeof(*info))) {
@@ -4499,7 +4499,7 @@ static int test_btf_id(unsigned int test_num)
/* Test BPF_OBJ_GET_INFO_BY_ID on btf_id */
info_len = sizeof(info[0]);
- err = bpf_obj_get_info_by_fd(btf_fd[0], &info[0], &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd[0], &info[0], &info_len);
if (CHECK(err, "errno:%d", errno)) {
err = -1;
goto done;
@@ -4512,7 +4512,7 @@ static int test_btf_id(unsigned int test_num)
}
ret = 0;
- err = bpf_obj_get_info_by_fd(btf_fd[1], &info[1], &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd[1], &info[1], &info_len);
if (CHECK(err || info[0].id != info[1].id ||
info[0].btf_size != info[1].btf_size ||
(ret = memcmp(user_btf[0], user_btf[1], info[0].btf_size)),
@@ -4535,7 +4535,7 @@ static int test_btf_id(unsigned int test_num)
}
info_len = sizeof(map_info);
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
if (CHECK(err || map_info.btf_id != info[0].id ||
map_info.btf_key_type_id != 1 || map_info.btf_value_type_id != 2,
"err:%d errno:%d info.id:%u btf_id:%u btf_key_type_id:%u btf_value_type_id:%u",
@@ -4638,7 +4638,7 @@ static void do_test_get_info(unsigned int test_num)
info.btf_size = user_btf_size;
ret = 0;
- err = bpf_obj_get_info_by_fd(btf_fd, &info, &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
if (CHECK(err || !info.id || info_len != sizeof(info) ||
info.btf_size != raw_btf_size ||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
@@ -4755,7 +4755,7 @@ static void do_test_file(unsigned int test_num)
/* get necessary program info */
info_len = sizeof(struct bpf_prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
@@ -4787,7 +4787,7 @@ static void do_test_file(unsigned int test_num)
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
@@ -6405,7 +6405,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
/* get necessary lens */
info_len = sizeof(struct bpf_prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
@@ -6435,7 +6435,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
info.nr_func_info = nr_func_info;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
@@ -6499,7 +6499,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
nr_jited_func_lens = nr_jited_ksyms;
info_len = sizeof(struct bpf_prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "err:%d errno:%d", err, errno)) {
err = -1;
goto done;
@@ -6573,7 +6573,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
info.jited_func_lens = ptr_to_u64(jited_func_lens);
}
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
/*
* Only recheck the info.*line_info* fields.
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
index eb90a6b8850d..a8b53b8736f0 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
@@ -14,7 +14,7 @@ static __u32 bpf_map_id(struct bpf_map *map)
int err;
memset(&info, 0, info_len);
- err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
+ err = bpf_map_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
if (err)
return 0;
return info.id;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
index 973f0c5af965..b3f7985c8504 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
@@ -8,9 +8,6 @@
#include "cgrp_kfunc_failure.skel.h"
#include "cgrp_kfunc_success.skel.h"
-static size_t log_buf_sz = 1 << 20; /* 1 MB */
-static char obj_log_buf[1048576];
-
static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void)
{
struct cgrp_kfunc_success *skel;
@@ -89,65 +86,6 @@ static const char * const success_tests[] = {
"test_cgrp_get_ancestors",
};
-static struct {
- const char *prog_name;
- const char *expected_err_msg;
-} failure_tests[] = {
- {"cgrp_kfunc_acquire_untrusted", "R1 must be referenced or trusted"},
- {"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"},
- {"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"},
- {"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"},
- {"cgrp_kfunc_acquire_null", "arg#0 pointer type STRUCT cgroup must point"},
- {"cgrp_kfunc_acquire_unreleased", "Unreleased reference"},
- {"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
- {"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
- {"cgrp_kfunc_get_null", "arg#0 expected pointer to map value"},
- {"cgrp_kfunc_xchg_unreleased", "Unreleased reference"},
- {"cgrp_kfunc_get_unreleased", "Unreleased reference"},
- {"cgrp_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"},
- {"cgrp_kfunc_release_fp", "arg#0 pointer type STRUCT cgroup must point"},
- {"cgrp_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
- {"cgrp_kfunc_release_unacquired", "release kernel function bpf_cgroup_release expects"},
-};
-
-static void verify_fail(const char *prog_name, const char *expected_err_msg)
-{
- LIBBPF_OPTS(bpf_object_open_opts, opts);
- struct cgrp_kfunc_failure *skel;
- int err, i;
-
- opts.kernel_log_buf = obj_log_buf;
- opts.kernel_log_size = log_buf_sz;
- opts.kernel_log_level = 1;
-
- skel = cgrp_kfunc_failure__open_opts(&opts);
- if (!ASSERT_OK_PTR(skel, "cgrp_kfunc_failure__open_opts"))
- goto cleanup;
-
- for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
- struct bpf_program *prog;
- const char *curr_name = failure_tests[i].prog_name;
-
- prog = bpf_object__find_program_by_name(skel->obj, curr_name);
- if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
- goto cleanup;
-
- bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name));
- }
-
- err = cgrp_kfunc_failure__load(skel);
- if (!ASSERT_ERR(err, "unexpected load success"))
- goto cleanup;
-
- if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
- fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
- fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
- }
-
-cleanup:
- cgrp_kfunc_failure__destroy(skel);
-}
-
void test_cgrp_kfunc(void)
{
int i, err;
@@ -163,12 +101,7 @@ void test_cgrp_kfunc(void)
run_success_test(success_tests[i]);
}
- for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
- if (!test__start_subtest(failure_tests[i].prog_name))
- continue;
-
- verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
- }
+ RUN_TESTS(cgrp_kfunc_failure);
cleanup:
cleanup_cgroup_environment();
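
With verify_fail() gone, the expected-verifier-error table moves into the BPF object itself, where RUN_TESTS() (the test_loader.c machinery) picks it up. A hypothetical failure program in that style is sketched below: the __failure/__msg annotations are the real bpf_misc.h ones RUN_TESTS consumes, while the program body is our illustration, not copied from cgrp_kfunc_failure.c.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

extern struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __ksym;

/* RUN_TESTS(cgrp_kfunc_failure) loads each annotated prog and checks
 * that the verifier rejects it with the expected message.
 */
SEC("tp_btf/cgroup_mkdir")
__failure __msg("Unreleased reference")
int BPF_PROG(cgrp_kfunc_example_unreleased, struct cgroup *cgrp, const char *path)
{
	struct cgroup *acquired;

	acquired = bpf_cgroup_acquire(cgrp);
	/* reference deliberately leaked: acquired is never released */
	return 0;
}
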
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
index 33a2776737e7..2cc759956e3b 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
@@ -16,7 +16,7 @@
struct socket_cookie {
__u64 cookie_key;
- __u32 cookie_value;
+ __u64 cookie_value;
};
static void test_tp_btf(int cgroup_fd)
diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
index 12f4395f18b3..5338d2ea0460 100644
--- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c
+++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
@@ -59,7 +59,7 @@ static void test_check_mtu_xdp_attach(void)
memset(&link_info, 0, sizeof(link_info));
fd = bpf_link__fd(link);
- err = bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len);
if (CHECK(err, "link_info", "failed: %d\n", err))
goto out;
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
new file mode 100644
index 000000000000..5fbe457c4ebe
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "cpumask_failure.skel.h"
+#include "cpumask_success.skel.h"
+
+static const char * const cpumask_success_testcases[] = {
+ "test_alloc_free_cpumask",
+ "test_set_clear_cpu",
+ "test_setall_clear_cpu",
+ "test_first_firstzero_cpu",
+ "test_test_and_set_clear",
+ "test_and_or_xor",
+ "test_intersects_subset",
+ "test_copy_any_anyand",
+ "test_insert_leave",
+ "test_insert_remove_release",
+ "test_insert_kptr_get_release",
+};
+
+static void verify_success(const char *prog_name)
+{
+ struct cpumask_success *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link = NULL;
+ pid_t child_pid;
+ int status;
+
+ skel = cpumask_success__open();
+ if (!ASSERT_OK_PTR(skel, "cpumask_success__open"))
+ return;
+
+ skel->bss->pid = getpid();
+ skel->bss->nr_cpus = libbpf_num_possible_cpus();
+
+ cpumask_success__load(skel);
+ if (!ASSERT_OK_PTR(skel, "cpumask_success__load"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+ goto cleanup;
+
+ child_pid = fork();
+ if (!ASSERT_GT(child_pid, -1, "child_pid"))
+ goto cleanup;
+ if (child_pid == 0)
+ _exit(0);
+ waitpid(child_pid, &status, 0);
+ ASSERT_OK(skel->bss->err, "post_wait_err");
+
+cleanup:
+ bpf_link__destroy(link);
+ cpumask_success__destroy(skel);
+}
+
+void test_cpumask(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cpumask_success_testcases); i++) {
+ if (!test__start_subtest(cpumask_success_testcases[i]))
+ continue;
+
+ verify_success(cpumask_success_testcases[i]);
+ }
+
+ RUN_TESTS(cpumask_failure);
+}
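
As with the other kfunc suites, a success-style program is just an attachable prog that records errors in globals the userspace side checks after waitpid(). A rough sketch (the kfunc prototypes are transcribed from the cpumask series; the program body and filtering are our reconstruction, not copied from cpumask_success.c):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

extern struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
extern void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
extern void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
extern bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;

int err;
int pid;

SEC("tp_btf/task_newtask")
int BPF_PROG(test_set_and_test, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	/* only react to the fork() issued by the test process */
	if ((bpf_get_current_pid_tgid() >> 32) != pid)
		return 0;

	mask = bpf_cpumask_create();
	if (!mask) {
		err = 1;
		return 0;
	}

	bpf_cpumask_set_cpu(0, mask);
	if (!bpf_cpumask_test_cpu(0, (const struct cpumask *)mask))
		err = 2;

	bpf_cpumask_release(mask);
	return 0;
}
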
diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
index 0b2f73b88c53..2853883b7cbb 100644
--- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
@@ -80,6 +80,6 @@ fail:
bpf_tc_hook_destroy(&qdisc_hook);
close_netns(nstoken);
}
- system("ip netns del " NS_TEST " >& /dev/null");
+ system("ip netns del " NS_TEST " &> /dev/null");
decap_sanity__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
index c11832657d2b..f43fcb13d2c4 100644
--- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include <test_progs.h>
-#include "dummy_st_ops.skel.h"
+#include "dummy_st_ops_success.skel.h"
+#include "dummy_st_ops_fail.skel.h"
#include "trace_dummy_st_ops.skel.h"
/* Need to keep consistent with definition in include/linux/bpf.h */
@@ -11,17 +12,17 @@ struct bpf_dummy_ops_state {
static void test_dummy_st_ops_attach(void)
{
- struct dummy_st_ops *skel;
+ struct dummy_st_ops_success *skel;
struct bpf_link *link;
- skel = dummy_st_ops__open_and_load();
+ skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
link = bpf_map__attach_struct_ops(skel->maps.dummy_1);
ASSERT_EQ(libbpf_get_error(link), -EOPNOTSUPP, "dummy_st_ops_attach");
- dummy_st_ops__destroy(skel);
+ dummy_st_ops_success__destroy(skel);
}
static void test_dummy_init_ret_value(void)
@@ -31,10 +32,10 @@ static void test_dummy_init_ret_value(void)
.ctx_in = args,
.ctx_size_in = sizeof(args),
);
- struct dummy_st_ops *skel;
+ struct dummy_st_ops_success *skel;
int fd, err;
- skel = dummy_st_ops__open_and_load();
+ skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
@@ -43,7 +44,7 @@ static void test_dummy_init_ret_value(void)
ASSERT_OK(err, "test_run");
ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret");
- dummy_st_ops__destroy(skel);
+ dummy_st_ops_success__destroy(skel);
}
static void test_dummy_init_ptr_arg(void)
@@ -58,10 +59,10 @@ static void test_dummy_init_ptr_arg(void)
.ctx_size_in = sizeof(args),
);
struct trace_dummy_st_ops *trace_skel;
- struct dummy_st_ops *skel;
+ struct dummy_st_ops_success *skel;
int fd, err;
- skel = dummy_st_ops__open_and_load();
+ skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
@@ -91,7 +92,7 @@ static void test_dummy_init_ptr_arg(void)
ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val");
done:
- dummy_st_ops__destroy(skel);
+ dummy_st_ops_success__destroy(skel);
trace_dummy_st_ops__destroy(trace_skel);
}
@@ -102,12 +103,12 @@ static void test_dummy_multiple_args(void)
.ctx_in = args,
.ctx_size_in = sizeof(args),
);
- struct dummy_st_ops *skel;
+ struct dummy_st_ops_success *skel;
int fd, err;
size_t i;
char name[8];
- skel = dummy_st_ops__open_and_load();
+ skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
@@ -119,7 +120,28 @@ static void test_dummy_multiple_args(void)
ASSERT_EQ(skel->bss->test_2_args[i], args[i], name);
}
- dummy_st_ops__destroy(skel);
+ dummy_st_ops_success__destroy(skel);
+}
+
+static void test_dummy_sleepable(void)
+{
+ __u64 args[1] = {0};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ );
+ struct dummy_st_ops_success *skel;
+ int fd, err;
+
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ return;
+
+ fd = bpf_program__fd(skel->progs.test_sleepable);
+ err = bpf_prog_test_run_opts(fd, &attr);
+ ASSERT_OK(err, "test_run");
+
+ dummy_st_ops_success__destroy(skel);
}
void test_dummy_st_ops(void)
@@ -132,4 +154,8 @@ void test_dummy_st_ops(void)
test_dummy_init_ptr_arg();
if (test__start_subtest("dummy_multiple_args"))
test_dummy_multiple_args();
+ if (test__start_subtest("dummy_sleepable"))
+ test_dummy_sleepable();
+
+ RUN_TESTS(dummy_st_ops_fail);
}
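
On the BPF side, the new subtest presumably needs only a sleepable struct_ops slot; a sketch of what the dummy_st_ops_success.c addition might look like (the section name and test_sleepable member are inferred from the userspace code above, the rest is our reconstruction):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* the ".s" suffix marks the struct_ops program as sleepable */
SEC("struct_ops.s/test_sleepable")
int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state)
{
	return 0;
}

SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_sleepable = (void *)test_sleepable,
};
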
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index 7faaf6d9e0d4..b99264ec0d9c 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -5,14 +5,10 @@
#include "dynptr_fail.skel.h"
#include "dynptr_success.skel.h"
-static struct {
- const char *prog_name;
- const char *expected_err_msg;
-} dynptr_tests[] = {
- /* success cases */
- {"test_read_write", NULL},
- {"test_data_slice", NULL},
- {"test_ringbuf", NULL},
+static const char * const success_tests[] = {
+ "test_read_write",
+ "test_data_slice",
+ "test_ringbuf",
};
static void verify_success(const char *prog_name)
@@ -53,11 +49,11 @@ void test_dynptr(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(dynptr_tests); i++) {
- if (!test__start_subtest(dynptr_tests[i].prog_name))
+ for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+ if (!test__start_subtest(success_tests[i]))
continue;
- verify_success(dynptr_tests[i].prog_name);
+ verify_success(success_tests[i]);
}
RUN_TESTS(dynptr_fail);
diff --git a/tools/testing/selftests/bpf/prog_tests/enable_stats.c b/tools/testing/selftests/bpf/prog_tests/enable_stats.c
index 2cb2085917e7..75f85d0fe74a 100644
--- a/tools/testing/selftests/bpf/prog_tests/enable_stats.c
+++ b/tools/testing/selftests/bpf/prog_tests/enable_stats.c
@@ -28,7 +28,7 @@ void test_enable_stats(void)
prog_fd = bpf_program__fd(skel->progs.test_enable_stats);
memset(&info, 0, info_len);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err, "get_prog_info",
"failed to get bpf_prog_info for fd %d\n", prog_fd))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 20f5fa0fcec9..8ec73fdfcdab 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -79,7 +79,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
return;
info_len = sizeof(prog_info);
- err = bpf_obj_get_info_by_fd(tgt_fd, &prog_info, &info_len);
+ err = bpf_prog_get_info_by_fd(tgt_fd, &prog_info, &info_len);
if (!ASSERT_OK(err, "tgt_fd_get_info"))
goto close_prog;
@@ -136,8 +136,8 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
info_len = sizeof(link_info);
memset(&link_info, 0, sizeof(link_info));
- err = bpf_obj_get_info_by_fd(bpf_link__fd(link[i]),
- &link_info, &info_len);
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link[i]),
+ &link_info, &info_len);
ASSERT_OK(err, "link_fd_get_info");
ASSERT_EQ(link_info.tracing.attach_type,
bpf_program__expected_attach_type(prog[i]),
@@ -417,7 +417,7 @@ static int find_prog_btf_id(const char *name, __u32 attach_prog_fd)
struct btf *btf;
int ret;
- ret = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
+ ret = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
if (ret)
return ret;
@@ -483,12 +483,12 @@ static void test_fentry_to_cgroup_bpf(void)
if (!ASSERT_GE(fentry_fd, 0, "load_fentry"))
goto cleanup;
- /* Make sure bpf_obj_get_info_by_fd works correctly when attaching
+ /* Make sure bpf_prog_get_info_by_fd works correctly when attaching
* to another BPF program.
*/
- ASSERT_OK(bpf_obj_get_info_by_fd(fentry_fd, &info, &info_len),
- "bpf_obj_get_info_by_fd");
+ ASSERT_OK(bpf_prog_get_info_by_fd(fentry_fd, &info, &info_len),
+ "bpf_prog_get_info_by_fd");
ASSERT_EQ(info.btf_id, 0, "info.btf_id");
ASSERT_EQ(info.attach_btf_id, btf_id, "info.attach_btf_id");
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 5a7e6011f6bf..596536def43d 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -2,14 +2,19 @@
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-/* that's kernel internal BPF_MAX_TRAMP_PROGS define */
-#define CNT 38
-
void serial_test_fexit_stress(void)
{
- int fexit_fd[CNT] = {};
- int link_fd[CNT] = {};
- int err, i;
+ int bpf_max_tramp_links, err, i;
+ int *fd, *fexit_fd, *link_fd;
+
+ bpf_max_tramp_links = get_bpf_max_tramp_links();
+ if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
+ return;
+ fd = calloc(bpf_max_tramp_links * 2, sizeof(*fd));
+ if (!ASSERT_OK_PTR(fd, "fd"))
+ return;
+ fexit_fd = fd;
+ link_fd = fd + bpf_max_tramp_links;
const struct bpf_insn trace_program[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -28,7 +33,7 @@ void serial_test_fexit_stress(void)
goto out;
trace_opts.attach_btf_id = err;
- for (i = 0; i < CNT; i++) {
+ for (i = 0; i < bpf_max_tramp_links; i++) {
fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
trace_program,
sizeof(trace_program) / sizeof(struct bpf_insn),
@@ -44,10 +49,11 @@ void serial_test_fexit_stress(void)
ASSERT_OK(err, "bpf_prog_test_run_opts");
out:
- for (i = 0; i < CNT; i++) {
+ for (i = 0; i < bpf_max_tramp_links; i++) {
if (link_fd[i])
close(link_fd[i]);
if (fexit_fd[i])
close(fexit_fd[i]);
}
+ free(fd);
}
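get_bpf_max_tramp_links() replaces the stale hard-coded 38 by asking the running kernel for its limit. A sketch of how such a lookup can work, assuming BPF_MAX_TRAMP_LINKS is visible as an enumerator in vmlinux BTF (the real helper lives in testing_helpers.c):

#include <string.h>
#include <bpf/btf.h>

static int read_bpf_max_tramp_links(void)
{
	const struct btf_type *t;
	const struct btf_enum *e;
	struct btf *vmlinux_btf;
	int i, j, n, ret = -1;

	vmlinux_btf = btf__load_vmlinux_btf();
	if (!vmlinux_btf)
		return -1;

	n = btf__type_cnt(vmlinux_btf);
	for (i = 1; i < n && ret < 0; i++) {
		t = btf__type_by_id(vmlinux_btf, i);
		if (!btf_is_enum(t))
			continue;
		e = btf_enum(t);
		for (j = 0; j < btf_vlen(t); j++, e++) {
			const char *name;

			name = btf__name_by_offset(vmlinux_btf, e->name_off);
			if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS")) {
				ret = e->val;
				break;
			}
		}
	}

	btf__free(vmlinux_btf);
	return ret;
}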
diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
new file mode 100644
index 000000000000..61ccddccf485
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/types.h>
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "fib_lookup.skel.h"
+
+#define SYS(fmt, ...) \
+ ({ \
+ char cmd[1024]; \
+ snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ if (!ASSERT_OK(system(cmd), cmd)) \
+ goto fail; \
+ })
+
+#define NS_TEST "fib_lookup_ns"
+#define IPV6_IFACE_ADDR "face::face"
+#define IPV6_NUD_FAILED_ADDR "face::1"
+#define IPV6_NUD_STALE_ADDR "face::2"
+#define IPV4_IFACE_ADDR "10.0.0.254"
+#define IPV4_NUD_FAILED_ADDR "10.0.0.1"
+#define IPV4_NUD_STALE_ADDR "10.0.0.2"
+#define DMAC "11:11:11:11:11:11"
+#define DMAC_INIT { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, }
+
+struct fib_lookup_test {
+ const char *desc;
+ const char *daddr;
+ int expected_ret;
+ int lookup_flags;
+ __u8 dmac[6];
+};
+
+static const struct fib_lookup_test tests[] = {
+ { .desc = "IPv6 failed neigh",
+ .daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, },
+ { .desc = "IPv6 stale neigh",
+ .daddr = IPV6_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .dmac = DMAC_INIT, },
+ { .desc = "IPv6 skip neigh",
+ .daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv4 failed neigh",
+ .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, },
+ { .desc = "IPv4 stale neigh",
+ .daddr = IPV4_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .dmac = DMAC_INIT, },
+ { .desc = "IPv4 skip neigh",
+ .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
+};
+
+static int ifindex;
+
+static int setup_netns(void)
+{
+ int err;
+
+ SYS("ip link add veth1 type veth peer name veth2");
+ SYS("ip link set dev veth1 up");
+
+ SYS("ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR);
+ SYS("ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR);
+ SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC);
+
+ SYS("ip addr add %s/24 dev veth1 nodad", IPV4_IFACE_ADDR);
+ SYS("ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR);
+ SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC);
+
+ err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1");
+ if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)"))
+ goto fail;
+
+ err = write_sysctl("/proc/sys/net/ipv6/conf/veth1/forwarding", "1");
+ if (!ASSERT_OK(err, "write_sysctl(net.ipv6.conf.veth1.forwarding)"))
+ goto fail;
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr)
+{
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+
+ params->l4_protocol = IPPROTO_TCP;
+ params->ifindex = ifindex;
+
+ if (inet_pton(AF_INET6, daddr, params->ipv6_dst) == 1) {
+ params->family = AF_INET6;
+ ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src);
+ if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)"))
+ return -1;
+ return 0;
+ }
+
+ ret = inet_pton(AF_INET, daddr, &params->ipv4_dst);
+ if (!ASSERT_EQ(ret, 1, "convert IP[46] address"))
+ return -1;
+ params->family = AF_INET;
+ ret = inet_pton(AF_INET, IPV4_IFACE_ADDR, &params->ipv4_src);
+ if (!ASSERT_EQ(ret, 1, "inet_pton(IPV4_IFACE_ADDR)"))
+ return -1;
+
+ return 0;
+}
+
+static void mac_str(char *b, const __u8 *mac)
+{
+ sprintf(b, "%02X:%02X:%02X:%02X:%02X:%02X",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+}
+
+void test_fib_lookup(void)
+{
+ struct bpf_fib_lookup *fib_params;
+ struct nstoken *nstoken = NULL;
+ struct __sk_buff skb = { };
+ struct fib_lookup *skel;
+ int prog_fd, err, ret, i;
+
+ /* The test does not use skb->data, so
+ * use pkt_v6 for both the v6 and v4 tests.
+ */
+ LIBBPF_OPTS(bpf_test_run_opts, run_opts,
+ .data_in = &pkt_v6,
+ .data_size_in = sizeof(pkt_v6),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ skel = fib_lookup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+ return;
+ prog_fd = bpf_program__fd(skel->progs.fib_lookup);
+
+ SYS("ip netns add %s", NS_TEST);
+
+ nstoken = open_netns(NS_TEST);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto fail;
+
+ if (setup_netns())
+ goto fail;
+
+ ifindex = if_nametoindex("veth1");
+ skb.ifindex = ifindex;
+ fib_params = &skel->bss->fib_params;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ printf("Testing %s\n", tests[i].desc);
+
+ if (set_lookup_params(fib_params, tests[i].daddr))
+ continue;
+ skel->bss->fib_lookup_ret = -1;
+ skel->bss->lookup_flags = BPF_FIB_LOOKUP_OUTPUT |
+ tests[i].lookup_flags;
+
+ err = bpf_prog_test_run_opts(prog_fd, &run_opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ continue;
+
+ ASSERT_EQ(tests[i].expected_ret, skel->bss->fib_lookup_ret,
+ "fib_lookup_ret");
+
+ ret = memcmp(tests[i].dmac, fib_params->dmac, sizeof(tests[i].dmac));
+ if (!ASSERT_EQ(ret, 0, "dmac not match")) {
+ char expected[18], actual[18];
+
+ mac_str(expected, tests[i].dmac);
+ mac_str(actual, fib_params->dmac);
+ printf("dmac expected %s actual %s\n", expected, actual);
+ }
+ }
+
+fail:
+ if (nstoken)
+ close_netns(nstoken);
+ system("ip netns del " NS_TEST " &> /dev/null");
+ fib_lookup__destroy(skel);
+}
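For context, a minimal sketch of the BPF side this runner drives (the real program lives in progs/fib_lookup.c and is not part of this hunk): the globals mirror the skel->bss fields accessed above, and the program simply forwards them to bpf_fib_lookup():

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Written and read back by the runner through the skeleton's .bss */
struct bpf_fib_lookup fib_params;
int fib_lookup_ret;
int lookup_flags;

SEC("tc")
int fib_lookup(struct __sk_buff *skb)
{
	fib_lookup_ret = bpf_fib_lookup(skb, &fib_params,
					sizeof(fib_params), lookup_flags);
	return 0;
}

char _license[] SEC("license") = "GPL";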
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
index 7c79462d2702..9333f7346d15 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
@@ -60,9 +60,9 @@ static __u32 query_prog_id(int prog)
__u32 info_len = sizeof(info);
int err;
- err = bpf_obj_get_info_by_fd(prog, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog, &info, &info_len);
if (CHECK_FAIL(err || info_len != sizeof(info))) {
- perror("bpf_obj_get_info_by_fd");
+ perror("bpf_prog_get_info_by_fd");
return 0;
}
@@ -497,7 +497,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
}
info_len = sizeof(info);
- err = bpf_obj_get_info_by_fd(link, &info, &info_len);
+ err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
perror("bpf_obj_get_info");
goto out_unlink;
@@ -521,7 +521,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
link_id = info.id;
info_len = sizeof(info);
- err = bpf_obj_get_info_by_fd(link, &info, &info_len);
+ err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
perror("bpf_obj_get_info");
goto out_unlink;
@@ -546,7 +546,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
netns = -1;
info_len = sizeof(info);
- err = bpf_obj_get_info_by_fd(link, &info, &info_len);
+ err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
perror("bpf_obj_get_info");
goto out_unlink;
diff --git a/tools/testing/selftests/bpf/prog_tests/htab_reuse.c b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
new file mode 100644
index 000000000000..a742dd994d60
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdbool.h>
+#include <test_progs.h>
+#include "htab_reuse.skel.h"
+
+struct htab_op_ctx {
+ int fd;
+ int loop;
+ bool stop;
+};
+
+struct htab_val {
+ unsigned int lock;
+ unsigned int data;
+};
+
+static void *htab_lookup_fn(void *arg)
+{
+ struct htab_op_ctx *ctx = arg;
+ int i = 0;
+
+ while (i++ < ctx->loop && !ctx->stop) {
+ struct htab_val value;
+ unsigned int key;
+
+ /* Use BPF_F_LOCK to take the spin-lock in the map value. */
+ key = 7;
+ bpf_map_lookup_elem_flags(ctx->fd, &key, &value, BPF_F_LOCK);
+ }
+
+ return NULL;
+}
+
+static void *htab_update_fn(void *arg)
+{
+ struct htab_op_ctx *ctx = arg;
+ int i = 0;
+
+ while (i++ < ctx->loop && !ctx->stop) {
+ struct htab_val value;
+ unsigned int key;
+
+ key = 7;
+ value.lock = 0;
+ value.data = key;
+ bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK);
+ bpf_map_delete_elem(ctx->fd, &key);
+
+ key = 24;
+ value.lock = 0;
+ value.data = key;
+ bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK);
+ bpf_map_delete_elem(ctx->fd, &key);
+ }
+
+ return NULL;
+}
+
+void test_htab_reuse(void)
+{
+ unsigned int i, wr_nr = 1, rd_nr = 4;
+ pthread_t tids[wr_nr + rd_nr];
+ struct htab_reuse *skel;
+ struct htab_op_ctx ctx;
+ int err;
+
+ skel = htab_reuse__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "htab_reuse__open_and_load"))
+ return;
+
+ ctx.fd = bpf_map__fd(skel->maps.htab);
+ ctx.loop = 500;
+ ctx.stop = false;
+
+ memset(tids, 0, sizeof(tids));
+ for (i = 0; i < wr_nr; i++) {
+ err = pthread_create(&tids[i], NULL, htab_update_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create")) {
+ ctx.stop = true;
+ goto reap;
+ }
+ }
+ for (i = 0; i < rd_nr; i++) {
+ err = pthread_create(&tids[i + wr_nr], NULL, htab_lookup_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create")) {
+ ctx.stop = true;
+ goto reap;
+ }
+ }
+
+reap:
+ for (i = 0; i < wr_nr + rd_nr; i++) {
+ if (!tids[i])
+ continue;
+ pthread_join(tids[i], NULL);
+ }
+ htab_reuse__destroy(skel);
+}
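The BPF_F_LOCK lookups and updates above require the map value to embed a struct bpf_spin_lock, which the plain "lock" word in the user-space struct htab_val shadows. A sketch of the matching map definition, assumed to correspond to progs/htab_reuse.c (entry count and flags are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct htab_val {
	struct bpf_spin_lock lock;
	unsigned int data;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, unsigned int);
	__type(value, struct htab_val);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} htab SEC(".maps");

char _license[] SEC("license") = "GPL";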
diff --git a/tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c b/tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c
new file mode 100644
index 000000000000..5639428607e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "jit_probe_mem.skel.h"
+
+void test_jit_probe_mem(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct jit_probe_mem *skel;
+ int ret;
+
+ skel = jit_probe_mem__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "jit_probe_mem__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_jit_probe_mem), &opts);
+ ASSERT_OK(ret, "jit_probe_mem ret");
+ ASSERT_OK(opts.retval, "jit_probe_mem opts.retval");
+ ASSERT_EQ(skel->data->total_sum, 192, "jit_probe_mem total_sum");
+
+ jit_probe_mem__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
index 73579370bfbd..c07991544a78 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -36,7 +36,7 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
"cb32_0 %x != %x\n",
meta->cb32_0, cb.cb32[0]))
return;
- if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth",
+ if (CHECK(pkt_v6->eth.h_proto != htons(ETH_P_IPV6), "check_eth",
"h_proto %x\n", pkt_v6->eth.h_proto))
return;
if (CHECK(pkt_v6->iph.nexthdr != 6, "check_ip",
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index 5af1ee8f0e6e..a543742cd7bd 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -72,10 +72,12 @@ static struct kfunc_test_params kfunc_tests[] = {
/* success cases */
TC_TEST(kfunc_call_test1, 12),
TC_TEST(kfunc_call_test2, 3),
+ TC_TEST(kfunc_call_test4, -1234),
TC_TEST(kfunc_call_test_ref_btf_id, 0),
TC_TEST(kfunc_call_test_get_mem, 42),
SYSCALL_TEST(kfunc_syscall_test, 0),
SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
+ TC_TEST(kfunc_call_test_static_unused_arg, 0),
};
struct syscall_test_args {
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
index a9229260a6ce..8cd298b78e44 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
@@ -10,17 +10,11 @@
#include <test_progs.h>
#include "test_kfunc_dynptr_param.skel.h"
-static size_t log_buf_sz = 1048576; /* 1 MB */
-static char obj_log_buf[1048576];
-
static struct {
const char *prog_name;
- const char *expected_verifier_err_msg;
int expected_runtime_err;
} kfunc_dynptr_tests[] = {
- {"not_valid_dynptr", "Expected an initialized dynptr as arg #1", 0},
- {"not_ptr_to_stack", "arg#0 expected pointer to stack or dynptr_ptr", 0},
- {"dynptr_data_null", NULL, -EBADMSG},
+ {"dynptr_data_null", -EBADMSG},
};
static bool kfunc_not_supported;
@@ -38,29 +32,15 @@ static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
return 0;
}
-static void verify_fail(const char *prog_name, const char *expected_err_msg)
+static bool has_pkcs7_kfunc_support(void)
{
struct test_kfunc_dynptr_param *skel;
- LIBBPF_OPTS(bpf_object_open_opts, opts);
libbpf_print_fn_t old_print_cb;
- struct bpf_program *prog;
int err;
- opts.kernel_log_buf = obj_log_buf;
- opts.kernel_log_size = log_buf_sz;
- opts.kernel_log_level = 1;
-
- skel = test_kfunc_dynptr_param__open_opts(&opts);
- if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open_opts"))
- goto cleanup;
-
- prog = bpf_object__find_program_by_name(skel->obj, prog_name);
- if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
- goto cleanup;
-
- bpf_program__set_autoload(prog, true);
-
- bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());
+ skel = test_kfunc_dynptr_param__open();
+ if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open"))
+ return false;
kfunc_not_supported = false;
@@ -72,26 +52,18 @@ static void verify_fail(const char *prog_name, const char *expected_err_msg)
fprintf(stderr,
"%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
__func__);
- test__skip();
- goto cleanup;
- }
-
- if (!ASSERT_ERR(err, "unexpected load success"))
- goto cleanup;
-
- if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
- fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
- fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+ test_kfunc_dynptr_param__destroy(skel);
+ return false;
}
-cleanup:
test_kfunc_dynptr_param__destroy(skel);
+
+ return true;
}
static void verify_success(const char *prog_name, int expected_runtime_err)
{
struct test_kfunc_dynptr_param *skel;
- libbpf_print_fn_t old_print_cb;
struct bpf_program *prog;
struct bpf_link *link;
__u32 next_id;
@@ -103,21 +75,7 @@ static void verify_success(const char *prog_name, int expected_runtime_err)
skel->bss->pid = getpid();
- bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());
-
- kfunc_not_supported = false;
-
- old_print_cb = libbpf_set_print(libbpf_print_cb);
err = test_kfunc_dynptr_param__load(skel);
- libbpf_set_print(old_print_cb);
-
- if (err < 0 && kfunc_not_supported) {
- fprintf(stderr,
- "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
- __func__);
- test__skip();
- goto cleanup;
- }
if (!ASSERT_OK(err, "test_kfunc_dynptr_param__load"))
goto cleanup;
@@ -147,15 +105,15 @@ void test_kfunc_dynptr_param(void)
{
int i;
+ if (!has_pkcs7_kfunc_support())
+ return;
+
for (i = 0; i < ARRAY_SIZE(kfunc_dynptr_tests); i++) {
if (!test__start_subtest(kfunc_dynptr_tests[i].prog_name))
continue;
- if (kfunc_dynptr_tests[i].expected_verifier_err_msg)
- verify_fail(kfunc_dynptr_tests[i].prog_name,
- kfunc_dynptr_tests[i].expected_verifier_err_msg);
- else
- verify_success(kfunc_dynptr_tests[i].prog_name,
- kfunc_dynptr_tests[i].expected_runtime_err);
+ verify_success(kfunc_dynptr_tests[i].prog_name,
+ kfunc_dynptr_tests[i].expected_runtime_err);
}
+ RUN_TESTS(test_kfunc_dynptr_param);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index c6f37e825f11..113dba349a57 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -322,7 +322,7 @@ static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
return strcmp((const char *) key1, (const char *) key2) == 0;
}
-static int get_syms(char ***symsp, size_t *cntp)
+static int get_syms(char ***symsp, size_t *cntp, bool kernel)
{
size_t cap = 0, cnt = 0, i;
char *name = NULL, **syms = NULL;
@@ -349,8 +349,9 @@ static int get_syms(char ***symsp, size_t *cntp)
}
while (fgets(buf, sizeof(buf), f)) {
- /* skip modules */
- if (strchr(buf, '['))
+ if (kernel && strchr(buf, '['))
+ continue;
+ if (!kernel && !strchr(buf, '['))
continue;
free(name);
@@ -404,7 +405,7 @@ error:
return err;
}
-void serial_test_kprobe_multi_bench_attach(void)
+static void test_kprobe_multi_bench_attach(bool kernel)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct kprobe_multi_empty *skel = NULL;
@@ -415,7 +416,7 @@ void serial_test_kprobe_multi_bench_attach(void)
char **syms = NULL;
size_t cnt = 0, i;
- if (!ASSERT_OK(get_syms(&syms, &cnt), "get_syms"))
+ if (!ASSERT_OK(get_syms(&syms, &cnt, kernel), "get_syms"))
return;
skel = kprobe_multi_empty__open_and_load();
@@ -453,6 +454,14 @@ cleanup:
}
}
+void serial_test_kprobe_multi_bench_attach(void)
+{
+ if (test__start_subtest("kernel"))
+ test_kprobe_multi_bench_attach(true);
+ if (test__start_subtest("modules"))
+ test_kprobe_multi_bench_attach(false);
+}
+
void test_kprobe_multi_test(void)
{
if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
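The kernel/module split above keys off the bracketed module tag in /proc/kallsyms: a module symbol line reads like "ffffffffc0a01020 t e1000_clean_rx_irq [e1000e]" while core-kernel lines carry no bracket, so a strchr() for '[' classifies each line:

#include <stdbool.h>
#include <string.h>

/* One line of /proc/kallsyms: "<addr> <type> <name>[\t[module]]\n" */
static bool kallsyms_line_is_module_sym(const char *line)
{
	return strchr(line, '[') != NULL;
}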
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c
index 25e5dfa9c315..a3f238f51d05 100644
--- a/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c
@@ -29,9 +29,9 @@ void test_libbpf_get_fd_by_id_opts(void)
if (!ASSERT_OK(ret, "test_libbpf_get_fd_by_id_opts__attach"))
goto close_prog;
- ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.data_input),
+ ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.data_input),
&info_m, &len);
- if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd"))
+ if (!ASSERT_OK(ret, "bpf_map_get_info_by_fd"))
goto close_prog;
fd = bpf_map_get_fd_by_id(info_m.id);
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 9a7d4c47af63..0ed8132ce1c3 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -58,12 +58,12 @@ static struct {
TEST(inner_map, pop_front)
TEST(inner_map, pop_back)
#undef TEST
- { "map_compat_kprobe", "tracing progs cannot use bpf_list_head yet" },
- { "map_compat_kretprobe", "tracing progs cannot use bpf_list_head yet" },
- { "map_compat_tp", "tracing progs cannot use bpf_list_head yet" },
- { "map_compat_perf", "tracing progs cannot use bpf_list_head yet" },
- { "map_compat_raw_tp", "tracing progs cannot use bpf_list_head yet" },
- { "map_compat_raw_tp_w", "tracing progs cannot use bpf_list_head yet" },
+ { "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
{ "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
{ "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
@@ -78,8 +78,6 @@ static struct {
{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
- { "write_after_push_front", "only read is supported" },
- { "write_after_push_back", "only read is supported" },
{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
{ "double_push_front", "arg#1 expected pointer to allocated object" },
@@ -717,6 +715,43 @@ static void test_btf(void)
btf__free(btf);
break;
}
+
+ while (test__start_subtest("btf: list_node and rb_node in same struct")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+
+ id = btf__add_struct(btf, "bpf_rb_node", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node"))
+ break;
+ id = btf__add_struct(btf, "bar", 40);
+ if (!ASSERT_EQ(id, 6, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "c", 5, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+
+ id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
}
void test_linked_list(void)
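In C terms, the BTF hand-built by the new subtest describes the structs below; loading it must fail with -EINVAL because bar carries both graph node types. The bit offsets passed to btf__add_field() translate to the byte offsets shown (128 bits == byte 16):

struct bar {				/* btf__add_struct(btf, "bar", 40) */
	struct bpf_list_node a;		/* 16 bytes at offset 0 */
	struct bpf_rb_node c;		/* 24 bytes at offset 16 */
};

struct foo {				/* btf__add_struct(btf, "foo", 20) */
	struct bpf_list_head a;		/* contains:bar:a */
	struct bpf_spin_lock b;		/* 4 bytes at offset 16 */
};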
diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
index f117bfef68a1..130a3b21e467 100644
--- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
+++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
@@ -47,7 +47,8 @@ static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)
fd = bpf_prog_get_fd_by_id(p.prog_ids[i]);
ASSERT_GE(fd, 0, "prog_get_fd_by_id");
- ASSERT_OK(bpf_obj_get_info_by_fd(fd, &info, &info_len), "prog_info_by_fd");
+ ASSERT_OK(bpf_prog_get_info_by_fd(fd, &info, &info_len),
+ "prog_info_by_fd");
close(fd);
if (info.attach_btf_id ==
diff --git a/tools/testing/selftests/bpf/prog_tests/metadata.c b/tools/testing/selftests/bpf/prog_tests/metadata.c
index 2c53eade88e3..8b67dfc10f5c 100644
--- a/tools/testing/selftests/bpf/prog_tests/metadata.c
+++ b/tools/testing/selftests/bpf/prog_tests/metadata.c
@@ -16,7 +16,7 @@ static int duration;
static int prog_holds_map(int prog_fd, int map_fd)
{
struct bpf_prog_info prog_info = {};
- struct bpf_prog_info map_info = {};
+ struct bpf_map_info map_info = {};
__u32 prog_info_len;
__u32 map_info_len;
__u32 *map_ids;
@@ -25,12 +25,12 @@ static int prog_holds_map(int prog_fd, int map_fd)
int i;
map_info_len = sizeof(map_info);
- ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
+ ret = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
if (ret)
return -errno;
prog_info_len = sizeof(prog_info);
- ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret)
return -errno;
@@ -44,7 +44,7 @@ static int prog_holds_map(int prog_fd, int map_fd)
prog_info.map_ids = ptr_to_u64(map_ids);
prog_info_len = sizeof(prog_info);
- ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret) {
ret = -errno;
goto free_map_ids;
diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
index eb2feaac81fe..653b0a20fab9 100644
--- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
@@ -488,7 +488,7 @@ static void run_test(struct migrate_reuseport_test_case *test_case,
goto close_servers;
}
- /* Tie requests to the first four listners */
+ /* Tie requests to the first four listeners */
err = start_clients(test_case);
if (!ASSERT_OK(err, "start_clients"))
goto close_clients;
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
index 37b002ca1167..a271d5a0f7ab 100644
--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -64,7 +64,7 @@ void test_mmap(void)
/* get map's ID */
memset(&map_info, 0, map_info_sz);
- err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
+ err = bpf_map_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
if (CHECK(err, "map_get_info", "failed %d\n", errno))
goto cleanup;
data_map_id = map_info.id;
diff --git a/tools/testing/selftests/bpf/prog_tests/nested_trust.c b/tools/testing/selftests/bpf/prog_tests/nested_trust.c
new file mode 100644
index 000000000000..39886f58924e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/nested_trust.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "nested_trust_failure.skel.h"
+#include "nested_trust_success.skel.h"
+
+void test_nested_trust(void)
+{
+ RUN_TESTS(nested_trust_success);
+ RUN_TESTS(nested_trust_failure);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
index 224eba6fef2e..3a25f1c743a1 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c
@@ -54,7 +54,7 @@ void serial_test_perf_link(void)
goto cleanup;
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len);
+ err = bpf_link_get_info_by_fd(link_fd, &info, &info_len);
if (!ASSERT_OK(err, "link_get_info"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c
index d95cee5867b7..c799a3c5ad1f 100644
--- a/tools/testing/selftests/bpf/prog_tests/pinning.c
+++ b/tools/testing/selftests/bpf/prog_tests/pinning.c
@@ -18,7 +18,7 @@ __u32 get_map_id(struct bpf_object *obj, const char *name)
if (CHECK(!map, "find map", "NULL map"))
return 0;
- err = bpf_obj_get_info_by_fd(bpf_map__fd(map),
+ err = bpf_map_get_info_by_fd(bpf_map__fd(map),
&map_info, &map_info_len);
CHECK(err, "get map info", "err %d errno %d", err, errno);
return map_info.id;
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c
index 1ccd2bdf8fa8..01f1d1b6715a 100644
--- a/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c
@@ -12,7 +12,7 @@ static void check_run_cnt(int prog_fd, __u64 run_cnt)
__u32 info_len = sizeof(info);
int err;
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c
new file mode 100644
index 000000000000..156fa95c42f6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "rbtree.skel.h"
+#include "rbtree_fail.skel.h"
+#include "rbtree_btf_fail__wrong_node_type.skel.h"
+#include "rbtree_btf_fail__add_wrong_type.skel.h"
+
+static void test_rbtree_add_nodes(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
+ ASSERT_OK(ret, "rbtree_add_nodes run");
+ ASSERT_OK(opts.retval, "rbtree_add_nodes retval");
+ ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran");
+
+ rbtree__destroy(skel);
+}
+
+static void test_rbtree_add_and_remove(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts);
+ ASSERT_OK(ret, "rbtree_add_and_remove");
+ ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");
+ ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key");
+
+ rbtree__destroy(skel);
+}
+
+static void test_rbtree_first_and_remove(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts);
+ ASSERT_OK(ret, "rbtree_first_and_remove");
+ ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");
+ ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()");
+ ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key");
+ ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()");
+
+ rbtree__destroy(skel);
+}
+
+void test_rbtree_success(void)
+{
+ if (test__start_subtest("rbtree_add_nodes"))
+ test_rbtree_add_nodes();
+ if (test__start_subtest("rbtree_add_and_remove"))
+ test_rbtree_add_and_remove();
+ if (test__start_subtest("rbtree_first_and_remove"))
+ test_rbtree_first_and_remove();
+}
+
+#define BTF_FAIL_TEST(suffix) \
+void test_rbtree_btf_fail__##suffix(void) \
+{ \
+ struct rbtree_btf_fail__##suffix *skel; \
+ \
+ skel = rbtree_btf_fail__##suffix##__open_and_load(); \
+ if (!ASSERT_ERR_PTR(skel, \
+ "rbtree_btf_fail__" #suffix "__open_and_load unexpected success")) \
+ rbtree_btf_fail__##suffix##__destroy(skel); \
+}
+
+#define RUN_BTF_FAIL_TEST(suffix) \
+ if (test__start_subtest("rbtree_btf_fail__" #suffix)) \
+ test_rbtree_btf_fail__##suffix();
+
+BTF_FAIL_TEST(wrong_node_type);
+BTF_FAIL_TEST(add_wrong_type);
+
+void test_rbtree_btf_fail(void)
+{
+ RUN_BTF_FAIL_TEST(wrong_node_type);
+ RUN_BTF_FAIL_TEST(add_wrong_type);
+}
+
+void test_rbtree_fail(void)
+{
+ RUN_TESTS(rbtree_fail);
+}
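For context, a minimal sketch of the BPF side behind rbtree_add_nodes (the real programs live in progs/rbtree.c, not in this hunk): a bpf_rb_root paired with a bpf_spin_lock, nodes allocated with bpf_obj_new() and ordered through a less() callback, using the kfunc declarations from bpf_experimental.h:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((void *)(ptr) - __builtin_offsetof(type, member)))
#endif

struct node_data {
	long key;
	struct bpf_rb_node node;
};

/* Read back by the runner via the skeleton */
long less_callback_ran;

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a, *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	less_callback_ran = 1;

	return node_a->key < node_b->key;
}

SEC("tc")
long rbtree_add_nodes(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 5;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);
	return 0;
}

char _license[] SEC("license") = "GPL";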
diff --git a/tools/testing/selftests/bpf/prog_tests/recursion.c b/tools/testing/selftests/bpf/prog_tests/recursion.c
index f3af2627b599..23552d3e3365 100644
--- a/tools/testing/selftests/bpf/prog_tests/recursion.c
+++ b/tools/testing/selftests/bpf/prog_tests/recursion.c
@@ -31,8 +31,8 @@ void test_recursion(void)
bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key);
ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2");
- err = bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_delete),
- &prog_info, &prog_info_len);
+ err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.on_delete),
+ &prog_info, &prog_info_len);
if (!ASSERT_OK(err, "get_prog_info"))
goto out;
ASSERT_EQ(prog_info.recursion_misses, 2, "recursion_misses");
diff --git a/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c
index 018611e6b248..7d4a9b3d3722 100644
--- a/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c
+++ b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c
@@ -4,6 +4,7 @@
#define _GNU_SOURCE
#include <sched.h>
#include <linux/socket.h>
+#include <linux/tls.h>
#include <net/if.h>
#include "test_progs.h"
@@ -83,6 +84,76 @@ static void test_udp(int family)
ASSERT_EQ(bss->nr_binddev, 1, "nr_bind");
}
+static void test_ktls(int family)
+{
+ struct tls12_crypto_info_aes_gcm_128 aes128;
+ struct setget_sockopt__bss *bss = skel->bss;
+ int cfd = -1, sfd = -1, fd = -1, ret;
+ char buf;
+
+ memset(bss, 0, sizeof(*bss));
+
+ sfd = start_server(family, SOCK_STREAM,
+ family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
+ if (!ASSERT_GE(sfd, 0, "start_server"))
+ return;
+ fd = connect_to_fd(sfd, 0);
+ if (!ASSERT_GE(fd, 0, "connect_to_fd"))
+ goto err_out;
+
+ cfd = accept(sfd, NULL, 0);
+ if (!ASSERT_GE(cfd, 0, "accept"))
+ goto err_out;
+
+ close(sfd);
+ sfd = -1;
+
+ /* Setup KTLS */
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (!ASSERT_OK(ret, "setsockopt"))
+ goto err_out;
+ ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (!ASSERT_OK(ret, "setsockopt"))
+ goto err_out;
+
+ memset(&aes128, 0, sizeof(aes128));
+ aes128.info.version = TLS_1_2_VERSION;
+ aes128.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &aes128, sizeof(aes128));
+ if (!ASSERT_OK(ret, "setsockopt"))
+ goto err_out;
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &aes128, sizeof(aes128));
+ if (!ASSERT_OK(ret, "setsockopt"))
+ goto err_out;
+
+ /* KTLS is enabled */
+
+ close(fd);
+ /* At this point, the cfd socket is in the CLOSE_WAIT state
+ * and still runs the TLS protocol. The test for
+ * BPF_TCP_CLOSE_WAIT should run at this point.
+ */
+ ret = read(cfd, &buf, sizeof(buf));
+ ASSERT_EQ(ret, 0, "read");
+ close(cfd);
+
+ ASSERT_EQ(bss->nr_listen, 1, "nr_listen");
+ ASSERT_EQ(bss->nr_connect, 1, "nr_connect");
+ ASSERT_EQ(bss->nr_active, 1, "nr_active");
+ ASSERT_EQ(bss->nr_passive, 1, "nr_passive");
+ ASSERT_EQ(bss->nr_socket_post_create, 2, "nr_socket_post_create");
+ ASSERT_EQ(bss->nr_binddev, 2, "nr_bind");
+ ASSERT_EQ(bss->nr_fin_wait1, 1, "nr_fin_wait1");
+ return;
+
+err_out:
+ close(fd);
+ close(cfd);
+ close(sfd);
+}
+
void test_setget_sockopt(void)
{
cg_fd = test__join_cgroup(CG_NAME);
@@ -118,6 +189,8 @@ void test_setget_sockopt(void)
test_tcp(AF_INET);
test_udp(AF_INET6);
test_udp(AF_INET);
+ test_ktls(AF_INET6);
+ test_ktls(AF_INET);
done:
setget_sockopt__destroy(skel);
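The kTLS bring-up order used by test_ktls() is the one the TLS ULP requires: install the "tls" ULP via TCP_ULP on an established TCP socket first, then push crypto state with TLS_TX/TLS_RX. A standalone sketch for the transmit side (fallback defines added in case older userspace headers lack TCP_ULP or SOL_TLS):

#include <linux/tls.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_ktls_tx(int fd)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	/* zeroed key/iv/salt/seq suffice for a loopback smoke test */
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}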
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
index 3e190ed63976..1374b626a985 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
@@ -29,7 +29,23 @@ static int stop, duration;
static bool
configure_stack(void)
{
+ char tc_version[128];
char tc_cmd[BUFSIZ];
+ char *prog;
+ FILE *tc;
+
+ /* Check whether tc is built with libbpf. */
+ tc = popen("tc -V", "r");
+ if (CHECK_FAIL(!tc))
+ return false;
+ if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc)))
+ return false;
+ if (strstr(tc_version, ", libbpf "))
+ prog = "test_sk_assign_libbpf.bpf.o";
+ else
+ prog = "test_sk_assign.bpf.o";
+ if (CHECK_FAIL(pclose(tc)))
+ return false;
/* Move to a new networking namespace */
if (CHECK_FAIL(unshare(CLONE_NEWNET)))
@@ -46,8 +62,8 @@ configure_stack(void)
/* Load qdisc, BPF program */
if (CHECK_FAIL(system("tc qdisc add dev lo clsact")))
return false;
- sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
- "direct-action object-file ./test_sk_assign.bpf.o",
+ sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf",
+ "direct-action object-file", prog,
"section tc",
(env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
if (CHECK(system(tc_cmd), "BPF load failed;",
@@ -129,15 +145,12 @@ get_port(int fd)
static ssize_t
rcv_msg(int srv_client, int type)
{
- struct sockaddr_storage ss;
char buf[BUFSIZ];
- socklen_t slen;
if (type == SOCK_STREAM)
return read(srv_client, &buf, sizeof(buf));
else
- return recvfrom(srv_client, &buf, sizeof(buf), 0,
- (struct sockaddr *)&ss, &slen);
+ return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL);
}
static int
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 0aa088900699..0ce25a967481 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -299,9 +299,9 @@ static __u32 query_prog_id(int prog_fd)
__u32 info_len = sizeof(info);
int err;
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd") ||
- !ASSERT_EQ(info_len, sizeof(info), "bpf_obj_get_info_by_fd"))
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
+ !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
return 0;
return info.id;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
index 18848c31e36f..f79fa5bc9a8d 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
@@ -9,9 +9,6 @@
#include "task_kfunc_failure.skel.h"
#include "task_kfunc_success.skel.h"
-static size_t log_buf_sz = 1 << 20; /* 1 MB */
-static char obj_log_buf[1048576];
-
static struct task_kfunc_success *open_load_task_kfunc_skel(void)
{
struct task_kfunc_success *skel;
@@ -83,67 +80,6 @@ static const char * const success_tests[] = {
"test_task_from_pid_invalid",
};
-static struct {
- const char *prog_name;
- const char *expected_err_msg;
-} failure_tests[] = {
- {"task_kfunc_acquire_untrusted", "R1 must be referenced or trusted"},
- {"task_kfunc_acquire_fp", "arg#0 pointer type STRUCT task_struct must point"},
- {"task_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"},
- {"task_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"},
- {"task_kfunc_acquire_null", "arg#0 pointer type STRUCT task_struct must point"},
- {"task_kfunc_acquire_unreleased", "Unreleased reference"},
- {"task_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
- {"task_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
- {"task_kfunc_get_null", "arg#0 expected pointer to map value"},
- {"task_kfunc_xchg_unreleased", "Unreleased reference"},
- {"task_kfunc_get_unreleased", "Unreleased reference"},
- {"task_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"},
- {"task_kfunc_release_fp", "arg#0 pointer type STRUCT task_struct must point"},
- {"task_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
- {"task_kfunc_release_unacquired", "release kernel function bpf_task_release expects"},
- {"task_kfunc_from_pid_no_null_check", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
- {"task_kfunc_from_lsm_task_free", "reg type unsupported for arg#0 function"},
-};
-
-static void verify_fail(const char *prog_name, const char *expected_err_msg)
-{
- LIBBPF_OPTS(bpf_object_open_opts, opts);
- struct task_kfunc_failure *skel;
- int err, i;
-
- opts.kernel_log_buf = obj_log_buf;
- opts.kernel_log_size = log_buf_sz;
- opts.kernel_log_level = 1;
-
- skel = task_kfunc_failure__open_opts(&opts);
- if (!ASSERT_OK_PTR(skel, "task_kfunc_failure__open_opts"))
- goto cleanup;
-
- for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
- struct bpf_program *prog;
- const char *curr_name = failure_tests[i].prog_name;
-
- prog = bpf_object__find_program_by_name(skel->obj, curr_name);
- if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
- goto cleanup;
-
- bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name));
- }
-
- err = task_kfunc_failure__load(skel);
- if (!ASSERT_ERR(err, "unexpected load success"))
- goto cleanup;
-
- if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
- fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
- fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
- }
-
-cleanup:
- task_kfunc_failure__destroy(skel);
-}
-
void test_task_kfunc(void)
{
int i;
@@ -155,10 +91,5 @@ void test_task_kfunc(void)
run_success_test(success_tests[i]);
}
- for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
- if (!test__start_subtest(failure_tests[i].prog_name))
- continue;
-
- verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
- }
+ RUN_TESTS(task_kfunc_failure);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index a176bd75a748..ea8537c54413 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -119,19 +119,19 @@ static void test_recursion(void)
prog_fd = bpf_program__fd(skel->progs.on_lookup);
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
ASSERT_OK(err, "get prog info");
ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion");
prog_fd = bpf_program__fd(skel->progs.on_update);
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
ASSERT_OK(err, "get prog info");
ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion");
prog_fd = bpf_program__fd(skel->progs.on_enter);
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
ASSERT_OK(err, "get prog info");
ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion");
@@ -221,7 +221,7 @@ static void test_nodeadlock(void)
info_len = sizeof(info);
prog_fd = bpf_program__fd(skel->progs.socket_post_create);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
ASSERT_OK(err, "get prog info");
ASSERT_EQ(info.recursion_misses, 0, "prog recursion");
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
index 4a505a5adf4d..e873766276d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
@@ -29,8 +29,8 @@ static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd)
__u32 info_len = sizeof(info);
int ret;
- ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
- if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd"))
+ ret = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(ret, "bpf_prog_get_info_by_fd"))
return ret;
ret = bpf_tc_attach(hook, &opts);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
index c381faaae741..2900c5e9a016 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2022 Sony Group Corporation */
+#define _GNU_SOURCE
+#include <fcntl.h>
#include <sys/prctl.h>
#include <test_progs.h>
#include "bpf_syscall_macro.skel.h"
@@ -13,6 +15,8 @@ void test_bpf_syscall_macro(void)
unsigned long exp_arg3 = 13;
unsigned long exp_arg4 = 14;
unsigned long exp_arg5 = 15;
+ loff_t off_in, off_out;
+ ssize_t r;
/* check whether it can open program */
skel = bpf_syscall_macro__open();
@@ -33,6 +37,7 @@ void test_bpf_syscall_macro(void)
/* check whether args of syscall are copied correctly */
prctl(exp_arg1, exp_arg2, exp_arg3, exp_arg4, exp_arg5);
+
#if defined(__aarch64__) || defined(__s390__)
ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1");
#else
@@ -68,6 +73,18 @@ void test_bpf_syscall_macro(void)
ASSERT_EQ(skel->bss->arg4_syscall, exp_arg4, "BPF_KPROBE_SYSCALL_arg4");
ASSERT_EQ(skel->bss->arg5_syscall, exp_arg5, "BPF_KPROBE_SYSCALL_arg5");
+ r = splice(-42, &off_in, 42, &off_out, 0x12340000, SPLICE_F_NONBLOCK);
+ err = -errno;
+ ASSERT_EQ(r, -1, "splice_res");
+ ASSERT_EQ(err, -EBADF, "splice_err");
+
+ ASSERT_EQ(skel->bss->splice_fd_in, -42, "splice_arg1");
+ ASSERT_EQ(skel->bss->splice_off_in, (__u64)&off_in, "splice_arg2");
+ ASSERT_EQ(skel->bss->splice_fd_out, 42, "splice_arg3");
+ ASSERT_EQ(skel->bss->splice_off_out, (__u64)&off_out, "splice_arg4");
+ ASSERT_EQ(skel->bss->splice_len, 0x12340000, "splice_arg5");
+ ASSERT_EQ(skel->bss->splice_flags, SPLICE_F_NONBLOCK, "splice_arg6");
+
cleanup:
bpf_syscall_macro__destroy(skel);
}
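A sketch of the BPF counterpart recording the splice arguments checked above, assumed to mirror progs/bpf_syscall_macro.c: BPF_KSYSCALL (from bpf/bpf_tracing.h) hides the per-arch pt_regs and syscall-wrapper details that the earlier arg1..arg5 assertions have to special-case by hand:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

int splice_fd_in;
__u64 splice_off_in;
int splice_fd_out;
__u64 splice_off_out;
__u64 splice_len;
__u64 splice_flags;

SEC("ksyscall/splice")
int BPF_KSYSCALL(splice_enter, int fd_in, loff_t *off_in, int fd_out,
		 loff_t *off_out, size_t len, unsigned int flags)
{
	splice_fd_in = fd_in;
	splice_off_in = (__u64)off_in;
	splice_fd_out = fd_out;
	splice_off_out = (__u64)off_out;
	splice_len = len;
	splice_flags = flags;

	return 0;
}

char _license[] SEC("license") = "GPL";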
diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
index 7295cc60f724..e0879df38639 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
@@ -1,104 +1,43 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
-
-const char *err_str;
-bool found;
-
-static int libbpf_debug_print(enum libbpf_print_level level,
- const char *format, va_list args)
-{
- char *log_buf;
-
- if (level != LIBBPF_WARN ||
- strcmp(format, "libbpf: \n%s\n")) {
- vprintf(format, args);
- return 0;
- }
-
- log_buf = va_arg(args, char *);
- if (!log_buf)
- goto out;
- if (err_str && strstr(log_buf, err_str) == 0)
- found = true;
-out:
- printf(format, log_buf);
- return 0;
-}
-
-extern int extra_prog_load_log_flags;
-
-static int check_load(const char *file)
-{
- struct bpf_object *obj = NULL;
- struct bpf_program *prog;
- int err;
-
- found = false;
-
- obj = bpf_object__open_file(file, NULL);
- err = libbpf_get_error(obj);
- if (err)
- return err;
-
- prog = bpf_object__next_program(obj, NULL);
- if (!prog) {
- err = -ENOENT;
- goto err_out;
- }
-
- bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32);
- bpf_program__set_log_level(prog, extra_prog_load_log_flags);
-
- err = bpf_object__load(obj);
-
-err_out:
- bpf_object__close(obj);
- return err;
-}
-
-struct test_def {
- const char *file;
- const char *err_str;
-};
+#include "test_global_func1.skel.h"
+#include "test_global_func2.skel.h"
+#include "test_global_func3.skel.h"
+#include "test_global_func4.skel.h"
+#include "test_global_func5.skel.h"
+#include "test_global_func6.skel.h"
+#include "test_global_func7.skel.h"
+#include "test_global_func8.skel.h"
+#include "test_global_func9.skel.h"
+#include "test_global_func10.skel.h"
+#include "test_global_func11.skel.h"
+#include "test_global_func12.skel.h"
+#include "test_global_func13.skel.h"
+#include "test_global_func14.skel.h"
+#include "test_global_func15.skel.h"
+#include "test_global_func16.skel.h"
+#include "test_global_func17.skel.h"
+#include "test_global_func_ctx_args.skel.h"
void test_test_global_funcs(void)
{
- struct test_def tests[] = {
- { "test_global_func1.bpf.o", "combined stack size of 4 calls is 544" },
- { "test_global_func2.bpf.o" },
- { "test_global_func3.bpf.o", "the call stack of 8 frames" },
- { "test_global_func4.bpf.o" },
- { "test_global_func5.bpf.o", "expected pointer to ctx, but got PTR" },
- { "test_global_func6.bpf.o", "modified ctx ptr R2" },
- { "test_global_func7.bpf.o", "foo() doesn't return scalar" },
- { "test_global_func8.bpf.o" },
- { "test_global_func9.bpf.o" },
- { "test_global_func10.bpf.o", "invalid indirect read from stack" },
- { "test_global_func11.bpf.o", "Caller passes invalid args into func#1" },
- { "test_global_func12.bpf.o", "invalid mem access 'mem_or_null'" },
- { "test_global_func13.bpf.o", "Caller passes invalid args into func#1" },
- { "test_global_func14.bpf.o", "reference type('FWD S') size cannot be determined" },
- { "test_global_func15.bpf.o", "At program exit the register R0 has value" },
- { "test_global_func16.bpf.o", "invalid indirect read from stack" },
- { "test_global_func17.bpf.o", "Caller passes invalid args into func#1" },
- };
- libbpf_print_fn_t old_print_fn = NULL;
- int err, i, duration = 0;
-
- old_print_fn = libbpf_set_print(libbpf_debug_print);
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- const struct test_def *test = &tests[i];
-
- if (!test__start_subtest(test->file))
- continue;
-
- err_str = test->err_str;
- err = check_load(test->file);
- CHECK_FAIL(!!err ^ !!err_str);
- if (err_str)
- CHECK(found, "", "expected string '%s'", err_str);
- }
- libbpf_set_print(old_print_fn);
+ RUN_TESTS(test_global_func1);
+ RUN_TESTS(test_global_func2);
+ RUN_TESTS(test_global_func3);
+ RUN_TESTS(test_global_func4);
+ RUN_TESTS(test_global_func5);
+ RUN_TESTS(test_global_func6);
+ RUN_TESTS(test_global_func7);
+ RUN_TESTS(test_global_func8);
+ RUN_TESTS(test_global_func9);
+ RUN_TESTS(test_global_func10);
+ RUN_TESTS(test_global_func11);
+ RUN_TESTS(test_global_func12);
+ RUN_TESTS(test_global_func13);
+ RUN_TESTS(test_global_func14);
+ RUN_TESTS(test_global_func15);
+ RUN_TESTS(test_global_func16);
+ RUN_TESTS(test_global_func17);
+ RUN_TESTS(test_global_func_ctx_args);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
index 244c01125126..16175d579bc7 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -75,7 +75,8 @@ static int test_lsm(struct lsm *skel)
skel->bss->monitored_pid = getpid();
err = stack_mprotect();
- if (!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
+ !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
return err;
ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count");
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
index a479080533db..770fcc3bb1ba 100644
--- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
+++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
@@ -45,8 +45,9 @@ void serial_test_tp_attach_query(void)
prog_info.xlated_prog_len = 0;
prog_info.nr_map_ids = 0;
info_len = sizeof(prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
- if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+ err = bpf_prog_get_info_by_fd(prog_fd[i], &prog_info,
+ &info_len);
+ if (CHECK(err, "bpf_prog_get_info_by_fd", "err %d errno %d\n",
err, errno))
goto cleanup1;
saved_prog_ids[i] = prog_info.id;
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 564b75bc087f..e91d0d1769f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -2,8 +2,6 @@
#define _GNU_SOURCE
#include <test_progs.h>
-#define MAX_TRAMP_PROGS 38
-
struct inst {
struct bpf_object *obj;
struct bpf_link *link;
@@ -37,14 +35,21 @@ void serial_test_trampoline_count(void)
{
char *file = "test_trampoline_count.bpf.o";
char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" };
- struct inst inst[MAX_TRAMP_PROGS + 1] = {};
+ int bpf_max_tramp_links, err, i, prog_fd;
struct bpf_program *prog;
struct bpf_link *link;
- int prog_fd, err, i;
+ struct inst *inst;
LIBBPF_OPTS(bpf_test_run_opts, opts);
+ bpf_max_tramp_links = get_bpf_max_tramp_links();
+ if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
+ return;
+ inst = calloc(bpf_max_tramp_links + 1, sizeof(*inst));
+ if (!ASSERT_OK_PTR(inst, "inst"))
+ return;
+
/* attach 'allowed' trampoline programs */
- for (i = 0; i < MAX_TRAMP_PROGS; i++) {
+ for (i = 0; i < bpf_max_tramp_links; i++) {
prog = load_prog(file, progs[i % ARRAY_SIZE(progs)], &inst[i]);
if (!prog)
goto cleanup;
@@ -74,7 +79,7 @@ void serial_test_trampoline_count(void)
if (!ASSERT_EQ(link, NULL, "ptr_is_null"))
goto cleanup;
- /* and finaly execute the probe */
+ /* and finally execute the probe */
prog_fd = bpf_program__fd(prog);
if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd"))
goto cleanup;
@@ -91,4 +96,5 @@ cleanup:
bpf_link__destroy(inst[i].link);
bpf_object__close(inst[i].obj);
}
+ free(inst);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
index 1ed3cc2092db..8383a99f610f 100644
--- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
+++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
@@ -179,7 +179,7 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s
ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails");
ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails");
- if (ASSERT_OK(bpf_obj_get_info_by_fd(map_fds[0], &map_info, &map_info_len),
+ if (ASSERT_OK(bpf_map_get_info_by_fd(map_fds[0], &map_info, &map_info_len),
"obj_get_info_by_fd")) {
ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails");
ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM,
@@ -187,8 +187,8 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s
}
ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails");
- if (ASSERT_OK(bpf_obj_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter),
- &link_info, &link_info_len),
+ if (ASSERT_OK(bpf_link_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter),
+ &link_info, &link_info_len),
"obj_get_info_by_fd")) {
ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails");
ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM,
@@ -269,7 +269,7 @@ void test_unpriv_bpf_disabled(void)
}
prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter);
- ASSERT_OK(bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len),
+ ASSERT_OK(bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len),
"obj_get_info_by_fd");
prog_id = prog_info.id;
ASSERT_GT(prog_id, 0, "valid_prog_id");
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
index 35b87c7ba5be..6558c857e620 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
@@ -3,20 +3,23 @@
#include <test_progs.h>
#include "test_uprobe_autoattach.skel.h"
+#include "progs/bpf_misc.h"
/* uprobe attach point */
-static noinline int autoattach_trigger_func(int arg)
+static noinline int autoattach_trigger_func(int arg1, int arg2, int arg3,
+ int arg4, int arg5, int arg6,
+ int arg7, int arg8)
{
asm volatile ("");
- return arg + 1;
+ return arg1 + arg2 + arg3 + arg4 + arg5 + arg6 + arg7 + arg8 + 1;
}
void test_uprobe_autoattach(void)
{
+ const char *devnull_str = "/dev/null";
struct test_uprobe_autoattach *skel;
- int trigger_val = 100, trigger_ret;
- size_t malloc_sz = 1;
- char *mem;
+ int trigger_ret;
+ FILE *devnull;
skel = test_uprobe_autoattach__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
@@ -28,23 +31,45 @@ void test_uprobe_autoattach(void)
skel->bss->test_pid = getpid();
/* trigger & validate uprobe & uretprobe */
- trigger_ret = autoattach_trigger_func(trigger_val);
+ trigger_ret = autoattach_trigger_func(1, 2, 3, 4, 5, 6, 7, 8);
skel->bss->test_pid = getpid();
/* trigger & validate shared library u[ret]probes attached by name */
- mem = malloc(malloc_sz);
+ devnull = fopen(devnull_str, "r");
- ASSERT_EQ(skel->bss->uprobe_byname_parm1, trigger_val, "check_uprobe_byname_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname_parm1, 1, "check_uprobe_byname_parm1");
ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran");
ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname_ret, trigger_ret, "check_uretprobe_byname_ret");
ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran");
- ASSERT_EQ(skel->bss->uprobe_byname2_parm1, malloc_sz, "check_uprobe_byname2_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname2_parm1, (__u64)(long)devnull_str,
+ "check_uprobe_byname2_parm1");
ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran");
- ASSERT_EQ(skel->bss->uretprobe_byname2_rc, mem, "check_uretprobe_byname2_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_rc, (__u64)(long)devnull,
+ "check_uretprobe_byname2_rc");
ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran");
- free(mem);
+ ASSERT_EQ(skel->bss->a[0], 1, "arg1");
+ ASSERT_EQ(skel->bss->a[1], 2, "arg2");
+ ASSERT_EQ(skel->bss->a[2], 3, "arg3");
+#if FUNC_REG_ARG_CNT > 3
+ ASSERT_EQ(skel->bss->a[3], 4, "arg4");
+#endif
+#if FUNC_REG_ARG_CNT > 4
+ ASSERT_EQ(skel->bss->a[4], 5, "arg5");
+#endif
+#if FUNC_REG_ARG_CNT > 5
+ ASSERT_EQ(skel->bss->a[5], 6, "arg6");
+#endif
+#if FUNC_REG_ARG_CNT > 6
+ ASSERT_EQ(skel->bss->a[6], 7, "arg7");
+#endif
+#if FUNC_REG_ARG_CNT > 7
+ ASSERT_EQ(skel->bss->a[7], 8, "arg8");
+#endif
+
+ fclose(devnull);
cleanup:
test_uprobe_autoattach__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c
index 9ad9da0f215e..56ed1eb9b527 100644
--- a/tools/testing/selftests/bpf/prog_tests/usdt.c
+++ b/tools/testing/selftests/bpf/prog_tests/usdt.c
@@ -314,6 +314,7 @@ static FILE *urand_spawn(int *pid)
if (fscanf(f, "%d", pid) != 1) {
pclose(f);
+ errno = EINVAL;
return NULL;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
index dae68de285b9..3a13e102c149 100644
--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
@@ -19,8 +19,6 @@
#include "../progs/test_user_ringbuf.h"
-static size_t log_buf_sz = 1 << 20; /* 1 MB */
-static char obj_log_buf[1048576];
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
static const long c_ringbuf_size = 1 << 12; /* 1 small page */
static const long c_max_entries = c_ringbuf_size / c_sample_size;
@@ -663,23 +661,6 @@ cleanup:
user_ringbuf_success__destroy(skel);
}
-static struct {
- const char *prog_name;
- const char *expected_err_msg;
-} failure_tests[] = {
- /* failure cases */
- {"user_ringbuf_callback_bad_access1", "negative offset dynptr_ptr ptr"},
- {"user_ringbuf_callback_bad_access2", "dereference of modified dynptr_ptr ptr"},
- {"user_ringbuf_callback_write_forbidden", "invalid mem access 'dynptr_ptr'"},
- {"user_ringbuf_callback_null_context_write", "invalid mem access 'scalar'"},
- {"user_ringbuf_callback_null_context_read", "invalid mem access 'scalar'"},
- {"user_ringbuf_callback_discard_dynptr", "cannot release unowned const bpf_dynptr"},
- {"user_ringbuf_callback_submit_dynptr", "cannot release unowned const bpf_dynptr"},
- {"user_ringbuf_callback_invalid_return", "At callback return the register R0 has value"},
- {"user_ringbuf_callback_reinit_dynptr_mem", "Dynptr has to be an uninitialized dynptr"},
- {"user_ringbuf_callback_reinit_dynptr_ringbuf", "Dynptr has to be an uninitialized dynptr"},
-};
-
#define SUCCESS_TEST(_func) { _func, #_func }
static struct {
@@ -700,42 +681,6 @@ static struct {
SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
};
-static void verify_fail(const char *prog_name, const char *expected_err_msg)
-{
- LIBBPF_OPTS(bpf_object_open_opts, opts);
- struct bpf_program *prog;
- struct user_ringbuf_fail *skel;
- int err;
-
- opts.kernel_log_buf = obj_log_buf;
- opts.kernel_log_size = log_buf_sz;
- opts.kernel_log_level = 1;
-
- skel = user_ringbuf_fail__open_opts(&opts);
- if (!ASSERT_OK_PTR(skel, "dynptr_fail__open_opts"))
- goto cleanup;
-
- prog = bpf_object__find_program_by_name(skel->obj, prog_name);
- if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
- goto cleanup;
-
- bpf_program__set_autoload(prog, true);
-
- bpf_map__set_max_entries(skel->maps.user_ringbuf, getpagesize());
-
- err = user_ringbuf_fail__load(skel);
- if (!ASSERT_ERR(err, "unexpected load success"))
- goto cleanup;
-
- if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
- fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
- fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
- }
-
-cleanup:
- user_ringbuf_fail__destroy(skel);
-}
-
void test_user_ringbuf(void)
{
int i;
@@ -747,10 +692,5 @@ void test_user_ringbuf(void)
success_tests[i].test_callback();
}
- for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
- if (!test__start_subtest(failure_tests[i].prog_name))
- continue;
-
- verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
- }
+ RUN_TESTS(user_ringbuf_fail);
}
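
Note: the deleted verify_fail() plumbing is subsumed by the generic RUN_TESTS
loader, which reads its expectations from BTF decl tags placed on the BPF
programs themselves (the __failure/__msg attributes from progs/bpf_misc.h).
A minimal sketch of the declarative side, with the tag and message taken from
the removed table above and the program body purely illustrative:

SEC("?raw_tp")
__failure __msg("cannot release unowned const bpf_dynptr")
int user_ringbuf_callback_submit_dynptr(void *ctx)
{
	/* would submit a dynptr the callback does not own */
	return 0;
}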
diff --git a/tools/testing/selftests/bpf/prog_tests/verif_stats.c b/tools/testing/selftests/bpf/prog_tests/verif_stats.c
index a47e7c0e1ffd..af4b95f57ac1 100644
--- a/tools/testing/selftests/bpf/prog_tests/verif_stats.c
+++ b/tools/testing/selftests/bpf/prog_tests/verif_stats.c
@@ -16,8 +16,9 @@ void test_verif_stats(void)
if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
goto cleanup;
- err = bpf_obj_get_info_by_fd(skel->progs.sys_enter.prog_fd, &info, &len);
- if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+ err = bpf_prog_get_info_by_fd(skel->progs.sys_enter.prog_fd,
+ &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto cleanup;
if (!ASSERT_GT(info.verified_insns, 0, "verified_insns"))
diff --git a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
index 579d6ee83ce0..dd7f2bc70048 100644
--- a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
+++ b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
@@ -61,6 +61,9 @@ static bool kfunc_not_supported;
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
va_list args)
{
+ if (level == LIBBPF_WARN)
+ vprintf(fmt, args);
+
if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
return 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index 39973ea1ce43..f09505f8b038 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -76,10 +76,15 @@ static void test_xdp_adjust_tail_grow2(void)
{
const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
char buf[4096]; /* avoid segfault: large buf to hold grow results */
- int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/;
struct bpf_object *obj;
int err, cnt, i;
int max_grow, prog_fd;
+ /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
+#if defined(__s390x__)
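+ /* likely 512 here because s390x's 256-byte cache lines round SKB_DATA_ALIGN() up */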
+ int tailroom = 512;
+#else
+ int tailroom = 320;
+#endif
LIBBPF_OPTS(bpf_test_run_opts, tattr,
.repeat = 1,
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
index 062fbc8c8e5e..d4cd9f873c14 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
@@ -18,7 +18,7 @@ void serial_test_xdp_attach(void)
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
if (CHECK_FAIL(err))
return;
- err = bpf_obj_get_info_by_fd(fd1, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd1, &info, &len);
if (CHECK_FAIL(err))
goto out_1;
id1 = info.id;
@@ -28,7 +28,7 @@ void serial_test_xdp_attach(void)
goto out_1;
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(fd2, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd2, &info, &len);
if (CHECK_FAIL(err))
goto out_2;
id2 = info.id;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
index f775a1613833..481626a875d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
@@ -33,8 +33,8 @@ static void test_xdp_with_cpumap_helpers(void)
prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
map_fd = bpf_map__fd(skel->maps.cpu_map);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
- if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = prog_fd;
@@ -85,8 +85,8 @@ static void test_xdp_with_cpumap_frags_helpers(void)
frags_prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
map_fd = bpf_map__fd(skel->maps.cpu_map);
- err = bpf_obj_get_info_by_fd(frags_prog_fd, &info, &len);
- if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+ err = bpf_prog_get_info_by_fd(frags_prog_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = frags_prog_fd;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
index ead40016c324..ce6812558287 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
@@ -35,8 +35,8 @@ static void test_xdp_with_devmap_helpers(void)
dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
map_fd = bpf_map__fd(skel->maps.dm_ports);
- err = bpf_obj_get_info_by_fd(dm_fd, &info, &len);
- if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+ err = bpf_prog_get_info_by_fd(dm_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = dm_fd;
@@ -98,8 +98,8 @@ static void test_xdp_with_devmap_frags_helpers(void)
dm_fd_frags = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
map_fd = bpf_map__fd(skel->maps.dm_ports);
- err = bpf_obj_get_info_by_fd(dm_fd_frags, &info, &len);
- if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+ err = bpf_prog_get_info_by_fd(dm_fd_frags, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = dm_fd_frags;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index a50971c6cf4a..2666c84dbd01 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -4,10 +4,12 @@
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
+#include <linux/if_link.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
#include <linux/udp.h>
#include <bpf/bpf_endian.h>
+#include <uapi/linux/netdev.h>
#include "test_xdp_do_redirect.skel.h"
#define SYS(fmt, ...) \
@@ -65,7 +67,11 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
* sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
*/
+#if defined(__s390x__)
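+/* 192 bytes less than above: this matches skb_shared_info rounding to 512
+ * (instead of 320) under s390x's 256-byte cache lines
+ */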
+#define MAX_PKT_SIZE 3176
+#else
#define MAX_PKT_SIZE 3368
+#endif
static void test_max_pkt_size(int fd)
{
char data[MAX_PKT_SIZE + 1] = {};
@@ -92,7 +98,7 @@ void test_xdp_do_redirect(void)
struct test_xdp_do_redirect *skel = NULL;
struct nstoken *nstoken = NULL;
struct bpf_link *link;
-
+ LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
struct xdp_md ctx_in = { .data = sizeof(__u32),
.data_end = sizeof(data) };
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -153,6 +159,29 @@ void test_xdp_do_redirect(void)
!ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
goto out;
+ /* Check xdp features supported by veth driver */
+ err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_src bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "veth_src query_opts.feature_flags"))
+ goto out;
+
+ err = bpf_xdp_query(ifindex_dst, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_dst bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "veth_dst query_opts.feature_flags"))
+ goto out;
+
memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */
skel->rodata->ifindex_in = ifindex_src;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
index cd3aa340e65e..1dbddcab87a8 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
@@ -8,6 +8,7 @@ void serial_test_xdp_info(void)
{
__u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id;
const char *file = "./xdp_dummy.bpf.o";
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
struct bpf_prog_info info = {};
struct bpf_object *obj;
int err, prog_fd;
@@ -33,7 +34,7 @@ void serial_test_xdp_info(void)
if (CHECK_FAIL(err))
return;
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
if (CHECK(err, "get_prog_info", "errno=%d\n", errno))
goto out_close;
@@ -61,6 +62,13 @@ void serial_test_xdp_info(void)
if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id))
goto out;
+ /* Check xdp features supported by lo device */
+ opts.feature_flags = ~0;
+ err = bpf_xdp_query(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &opts);
+ if (!ASSERT_OK(err, "bpf_xdp_query"))
+ goto out;
+
+ ASSERT_EQ(opts.feature_flags, 0, "opts.feature_flags");
out:
bpf_xdp_detach(IFINDEX_LO, 0, NULL);
out_close:
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c
index 3e9d5c5521f0..e7e9f3c22edf 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c
@@ -29,13 +29,13 @@ void serial_test_xdp_link(void)
prog_fd2 = bpf_program__fd(skel2->progs.xdp_handler);
memset(&prog_info, 0, sizeof(prog_info));
- err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len);
if (!ASSERT_OK(err, "fd_info1"))
goto cleanup;
id1 = prog_info.id;
memset(&prog_info, 0, sizeof(prog_info));
- err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len);
if (!ASSERT_OK(err, "fd_info2"))
goto cleanup;
id2 = prog_info.id;
@@ -119,7 +119,8 @@ void serial_test_xdp_link(void)
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
- err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link),
+ &link_info, &link_info_len);
if (!ASSERT_OK(err, "link_info"))
goto cleanup;
@@ -137,7 +138,8 @@ void serial_test_xdp_link(void)
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
- err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link),
+ &link_info, &link_info_len);
ASSERT_OK(err, "link_info");
ASSERT_EQ(link_info.prog_id, id1, "link_prog_id");
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
new file mode 100644
index 000000000000..aa4beae99f4f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "xdp_metadata.skel.h"
+#include "xdp_metadata2.skel.h"
+#include "xdp_metadata.h"
+#include "xsk.h"
+
+#include <bpf/btf.h>
+#include <linux/errqueue.h>
+#include <linux/if_link.h>
+#include <linux/net_tstamp.h>
+#include <linux/udp.h>
+#include <sys/mman.h>
+#include <net/if.h>
+#include <poll.h>
+
+#define TX_NAME "veTX"
+#define RX_NAME "veRX"
+
+#define UDP_PAYLOAD_BYTES 4
+
+#define AF_XDP_SOURCE_PORT 1234
+#define AF_XDP_CONSUMER_PORT 8080
+
+#define UMEM_NUM 16
+#define UMEM_FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE
+#define UMEM_SIZE (UMEM_FRAME_SIZE * UMEM_NUM)
+#define XDP_FLAGS XDP_FLAGS_DRV_MODE
+#define QUEUE_ID 0
+
+#define TX_ADDR "10.0.0.1"
+#define RX_ADDR "10.0.0.2"
+#define PREFIX_LEN "8"
+#define FAMILY AF_INET
+
+#define SYS(cmd) ({ \
+ if (!ASSERT_OK(system(cmd), (cmd))) \
+ goto out; \
+})
+
+struct xsk {
+ void *umem_area;
+ struct xsk_umem *umem;
+ struct xsk_ring_prod fill;
+ struct xsk_ring_cons comp;
+ struct xsk_ring_prod tx;
+ struct xsk_ring_cons rx;
+ struct xsk_socket *socket;
+};
+
+static int open_xsk(int ifindex, struct xsk *xsk)
+{
+ int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
+ const struct xsk_socket_config socket_config = {
+ .rx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .bind_flags = XDP_COPY,
+ };
+ const struct xsk_umem_config umem_config = {
+ .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+ .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG,
+ };
+ __u32 idx;
+ u64 addr;
+ int ret;
+ int i;
+
+ xsk->umem_area = mmap(NULL, UMEM_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+ if (!ASSERT_NEQ(xsk->umem_area, MAP_FAILED, "mmap"))
+ return -1;
+
+ ret = xsk_umem__create(&xsk->umem,
+ xsk->umem_area, UMEM_SIZE,
+ &xsk->fill,
+ &xsk->comp,
+ &umem_config);
+ if (!ASSERT_OK(ret, "xsk_umem__create"))
+ return ret;
+
+ ret = xsk_socket__create(&xsk->socket, ifindex, QUEUE_ID,
+ xsk->umem,
+ &xsk->rx,
+ &xsk->tx,
+ &socket_config);
+ if (!ASSERT_OK(ret, "xsk_socket__create"))
+ return ret;
+
+ /* First half of umem is for TX. This way the frame address maps 1-to-1
+ * onto the completion queue index.
+ */
+
+ for (i = 0; i < UMEM_NUM / 2; i++) {
+ addr = i * UMEM_FRAME_SIZE;
+ printf("%p: tx_desc[%d] -> %lx\n", xsk, i, addr);
+ }
+
+ /* Second half of umem is for RX. */
+
+ ret = xsk_ring_prod__reserve(&xsk->fill, UMEM_NUM / 2, &idx);
+ if (!ASSERT_EQ(UMEM_NUM / 2, ret, "xsk_ring_prod__reserve"))
+ return ret;
+ if (!ASSERT_EQ(idx, 0, "fill idx != 0"))
+ return -1;
+
+ for (i = 0; i < UMEM_NUM / 2; i++) {
+ addr = (UMEM_NUM / 2 + i) * UMEM_FRAME_SIZE;
+ printf("%p: rx_desc[%d] -> %lx\n", xsk, i, addr);
+ *xsk_ring_prod__fill_addr(&xsk->fill, i) = addr;
+ }
+ xsk_ring_prod__submit(&xsk->fill, ret);
+
+ return 0;
+}
+
+static void close_xsk(struct xsk *xsk)
+{
+ /* Tear the socket down before its umem: xsk_umem__delete() returns
+ * -EBUSY while a socket still holds a reference, which would leak both.
+ */
+ if (xsk->socket)
+ xsk_socket__delete(xsk->socket);
+ if (xsk->umem)
+ xsk_umem__delete(xsk->umem);
+ munmap(xsk->umem_area, UMEM_SIZE);
+}
+
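+/* RFC 1071 Internet checksum over the IPv4 header: accumulate the header as
+ * 16-bit words, fold the carries back into the low 16 bits, and store the
+ * one's complement. Re-summing a header that includes a ->check produced
+ * this way folds to 0xffff, which makes for a cheap self-check.
+ */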
+static void ip_csum(struct iphdr *iph)
+{
+ __u32 sum = 0;
+ __u16 *p;
+ int i;
+
+ iph->check = 0;
+ p = (void *)iph;
+ for (i = 0; i < sizeof(*iph) / sizeof(*p); i++)
+ sum += p[i];
+
+ while (sum >> 16)
+ sum = (sum & 0xffff) + (sum >> 16);
+
+ iph->check = ~sum;
+}
+
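+/* Build one fixed Ethernet/IPv4/UDP frame in the TX half of the umem:
+ *
+ *   [ethhdr][iphdr][udphdr][UDP_PAYLOAD_BYTES x 0xAA]
+ *
+ * addressed 00:00:00:00:00:01/TX_ADDR -> 00:00:00:00:00:02/RX_ADDR so the
+ * veth peer delivers it to the RX AF_XDP socket, then kick the kernel with
+ * a zero-length sendto().
+ */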
+static int generate_packet(struct xsk *xsk, __u16 dst_port)
+{
+ struct xdp_desc *tx_desc;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ void *data;
+ __u32 idx;
+ int ret;
+
+ ret = xsk_ring_prod__reserve(&xsk->tx, 1, &idx);
+ if (!ASSERT_EQ(ret, 1, "xsk_ring_prod__reserve"))
+ return -1;
+
+ tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx);
+ tx_desc->addr = idx % (UMEM_NUM / 2) * UMEM_FRAME_SIZE;
+ printf("%p: tx_desc[%u]->addr=%llx\n", xsk, idx, tx_desc->addr);
+ data = xsk_umem__get_data(xsk->umem_area, tx_desc->addr);
+
+ eth = data;
+ iph = (void *)(eth + 1);
+ udph = (void *)(iph + 1);
+
+ memcpy(eth->h_dest, "\x00\x00\x00\x00\x00\x02", ETH_ALEN);
+ memcpy(eth->h_source, "\x00\x00\x00\x00\x00\x01", ETH_ALEN);
+ eth->h_proto = htons(ETH_P_IP);
+
+ iph->version = 0x4;
+ iph->ihl = 0x5;
+ iph->tos = 0x9;
+ iph->tot_len = htons(sizeof(*iph) + sizeof(*udph) + UDP_PAYLOAD_BYTES);
+ iph->id = 0;
+ iph->frag_off = 0;
+ iph->ttl = 0;
+ iph->protocol = IPPROTO_UDP;
+ ASSERT_EQ(inet_pton(FAMILY, TX_ADDR, &iph->saddr), 1, "inet_pton(TX_ADDR)");
+ ASSERT_EQ(inet_pton(FAMILY, RX_ADDR, &iph->daddr), 1, "inet_pton(RX_ADDR)");
+ ip_csum(iph);
+
+ udph->source = htons(AF_XDP_SOURCE_PORT);
+ udph->dest = htons(dst_port);
+ udph->len = htons(sizeof(*udph) + UDP_PAYLOAD_BYTES);
+ udph->check = 0;
+
+ memset(udph + 1, 0xAA, UDP_PAYLOAD_BYTES);
+
+ tx_desc->len = sizeof(*eth) + sizeof(*iph) + sizeof(*udph) + UDP_PAYLOAD_BYTES;
+ xsk_ring_prod__submit(&xsk->tx, 1);
+
+ ret = sendto(xsk_socket__fd(xsk->socket), NULL, 0, MSG_DONTWAIT, NULL, 0);
+ if (!ASSERT_GE(ret, 0, "sendto"))
+ return ret;
+
+ return 0;
+}
+
+static void complete_tx(struct xsk *xsk)
+{
+ __u32 idx;
+ __u64 addr;
+
+ if (ASSERT_EQ(xsk_ring_cons__peek(&xsk->comp, 1, &idx), 1, "xsk_ring_cons__peek")) {
+ addr = *xsk_ring_cons__comp_addr(&xsk->comp, idx);
+
+ printf("%p: complete tx idx=%u addr=%llx\n", xsk, idx, addr);
+ xsk_ring_cons__release(&xsk->comp, 1);
+ }
+}
+
+static void refill_rx(struct xsk *xsk, __u64 addr)
+{
+ __u32 idx;
+
+ if (ASSERT_EQ(xsk_ring_prod__reserve(&xsk->fill, 1, &idx), 1, "xsk_ring_prod__reserve")) {
+ printf("%p: complete idx=%u addr=%llx\n", xsk, idx, addr);
+ *xsk_ring_prod__fill_addr(&xsk->fill, idx) = addr;
+ xsk_ring_prod__submit(&xsk->fill, 1);
+ }
+}
+
+static int verify_xsk_metadata(struct xsk *xsk)
+{
+ const struct xdp_desc *rx_desc;
+ struct pollfd fds = {};
+ struct xdp_meta *meta;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ __u64 comp_addr;
+ void *data;
+ __u64 addr;
+ __u32 idx;
+ int ret;
+
+ ret = recvfrom(xsk_socket__fd(xsk->socket), NULL, 0, MSG_DONTWAIT, NULL, NULL);
+ if (!ASSERT_EQ(ret, 0, "recvfrom"))
+ return -1;
+
+ fds.fd = xsk_socket__fd(xsk->socket);
+ fds.events = POLLIN;
+
+ ret = poll(&fds, 1, 1000);
+ if (!ASSERT_GT(ret, 0, "poll"))
+ return -1;
+
+ ret = xsk_ring_cons__peek(&xsk->rx, 1, &idx);
+ if (!ASSERT_EQ(ret, 1, "xsk_ring_cons__peek"))
+ return -2;
+
+ rx_desc = xsk_ring_cons__rx_desc(&xsk->rx, idx);
+ comp_addr = xsk_umem__extract_addr(rx_desc->addr);
+ addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
+ printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
+ xsk, idx, rx_desc->addr, addr, comp_addr);
+ data = xsk_umem__get_data(xsk->umem_area, addr);
+
+ /* Make sure we got the packet offset correctly. */
+
+ eth = data;
+ ASSERT_EQ(eth->h_proto, htons(ETH_P_IP), "eth->h_proto");
+ iph = (void *)(eth + 1);
+ ASSERT_EQ((int)iph->version, 4, "iph->version");
+
+ /* custom metadata */
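+ /* The BPF side is expected to have reserved the metadata area right in
+ * front of the payload (via bpf_xdp_adjust_meta()), so it is read back
+ * at data - sizeof(struct xdp_meta).
+ */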
+
+ meta = data - sizeof(struct xdp_meta);
+
+ if (!ASSERT_NEQ(meta->rx_timestamp, 0, "rx_timestamp"))
+ return -1;
+
+ if (!ASSERT_NEQ(meta->rx_hash, 0, "rx_hash"))
+ return -1;
+
+ xsk_ring_cons__release(&xsk->rx, 1);
+ refill_rx(xsk, comp_addr);
+
+ return 0;
+}
+
+void test_xdp_metadata(void)
+{
+ struct xdp_metadata2 *bpf_obj2 = NULL;
+ struct xdp_metadata *bpf_obj = NULL;
+ struct bpf_program *new_prog, *prog;
+ struct nstoken *tok = NULL;
+ __u32 queue_id = QUEUE_ID;
+ struct bpf_map *prog_arr;
+ struct xsk tx_xsk = {};
+ struct xsk rx_xsk = {};
+ __u32 val, key = 0;
+ int retries = 10;
+ int rx_ifindex;
+ int tx_ifindex;
+ int sock_fd;
+ int ret;
+
+ /* Setup new networking namespace, with a veth pair. */
+
+ SYS("ip netns add xdp_metadata");
+ tok = open_netns("xdp_metadata");
+ SYS("ip link add numtxqueues 1 numrxqueues 1 " TX_NAME
+ " type veth peer " RX_NAME " numtxqueues 1 numrxqueues 1");
+ SYS("ip link set dev " TX_NAME " address 00:00:00:00:00:01");
+ SYS("ip link set dev " RX_NAME " address 00:00:00:00:00:02");
+ SYS("ip link set dev " TX_NAME " up");
+ SYS("ip link set dev " RX_NAME " up");
+ SYS("ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME);
+ SYS("ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME);
+
+ rx_ifindex = if_nametoindex(RX_NAME);
+ tx_ifindex = if_nametoindex(TX_NAME);
+
+ /* Setup separate AF_XDP for TX and RX interfaces. */
+
+ ret = open_xsk(tx_ifindex, &tx_xsk);
+ if (!ASSERT_OK(ret, "open_xsk(TX_NAME)"))
+ goto out;
+
+ ret = open_xsk(rx_ifindex, &rx_xsk);
+ if (!ASSERT_OK(ret, "open_xsk(RX_NAME)"))
+ goto out;
+
+ bpf_obj = xdp_metadata__open();
+ if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(bpf_obj->obj, "rx");
+ bpf_program__set_ifindex(prog, rx_ifindex);
+ bpf_program__set_flags(prog, BPF_F_XDP_DEV_BOUND_ONLY);
+
+ if (!ASSERT_OK(xdp_metadata__load(bpf_obj), "load skeleton"))
+ goto out;
+
+ /* Make sure we can't add dev-bound programs to prog maps. */
+ prog_arr = bpf_object__find_map_by_name(bpf_obj->obj, "prog_arr");
+ if (!ASSERT_OK_PTR(prog_arr, "no prog_arr map"))
+ goto out;
+
+ val = bpf_program__fd(prog);
+ if (!ASSERT_ERR(bpf_map__update_elem(prog_arr, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY),
+ "update prog_arr"))
+ goto out;
+
+ /* Attach BPF program to RX interface. */
+
+ ret = bpf_xdp_attach(rx_ifindex,
+ bpf_program__fd(bpf_obj->progs.rx),
+ XDP_FLAGS, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto out;
+
+ sock_fd = xsk_socket__fd(rx_xsk.socket);
+ ret = bpf_map_update_elem(bpf_map__fd(bpf_obj->maps.xsk), &queue_id, &sock_fd, 0);
+ if (!ASSERT_GE(ret, 0, "bpf_map_update_elem"))
+ goto out;
+
+ /* Send packet destined to RX AF_XDP socket. */
+ if (!ASSERT_GE(generate_packet(&tx_xsk, AF_XDP_CONSUMER_PORT), 0,
+ "generate AF_XDP_CONSUMER_PORT"))
+ goto out;
+
+ /* Verify AF_XDP RX packet has proper metadata. */
+ if (!ASSERT_GE(verify_xsk_metadata(&rx_xsk), 0,
+ "verify_xsk_metadata"))
+ goto out;
+
+ complete_tx(&tx_xsk);
+
+ /* Make sure freplace correctly picks up original bound device
+ * and doesn't crash.
+ */
+
+ bpf_obj2 = xdp_metadata2__open();
+ if (!ASSERT_OK_PTR(bpf_obj2, "open skeleton"))
+ goto out;
+
+ new_prog = bpf_object__find_program_by_name(bpf_obj2->obj, "freplace_rx");
+ bpf_program__set_attach_target(new_prog, bpf_program__fd(prog), "rx");
+
+ if (!ASSERT_OK(xdp_metadata2__load(bpf_obj2), "load freplace skeleton"))
+ goto out;
+
+ if (!ASSERT_OK(xdp_metadata2__attach(bpf_obj2), "attach freplace"))
+ goto out;
+
+ /* Send a packet to trigger the freplace program. */
+ if (!ASSERT_GE(generate_packet(&tx_xsk, AF_XDP_CONSUMER_PORT), 0,
+ "generate freplace packet"))
+ goto out;
+
+ while (retries--) {
+ if (bpf_obj2->bss->called)
+ break;
+ usleep(10);
+ }
+ ASSERT_GT(bpf_obj2->bss->called, 0, "not called");
+
+out:
+ close_xsk(&rx_xsk);
+ close_xsk(&tx_xsk);
+ xdp_metadata2__destroy(bpf_obj2);
+ xdp_metadata__destroy(bpf_obj);
+ if (tok)
+ close_netns(tok);
+ system("ip netns del xdp_metadata");
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
new file mode 100644
index 000000000000..1eb74ddca414
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+} hash_map_bench SEC(".maps");
+
+/* Number of per-CPU slots for storing measured times */
+#define NR_SLOTS 32
+#define NR_CPUS 256
+#define CPU_MASK (NR_CPUS-1)
+
+/* Configured by userspace */
+u64 nr_entries;
+u64 nr_loops;
+u32 __attribute__((__aligned__(8))) key[NR_CPUS];
+
+/* Filled by us */
+u64 __attribute__((__aligned__(256))) percpu_times_index[NR_CPUS];
+u64 __attribute__((__aligned__(256))) percpu_times[NR_CPUS][NR_SLOTS];
+
+static inline void patch_key(u32 i)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ key[0] = i + 1;
+#else
+ key[0] = __builtin_bswap32(i + 1);
+#endif
+ /* the rest of key is random and is configured by userspace */
+}
+
+static int lookup_callback(__u32 index, u32 *unused)
+{
+ patch_key(index);
+ return bpf_map_lookup_elem(&hash_map_bench, key) ? 0 : 1;
+}
+
+static int loop_lookup_callback(__u32 index, u32 *unused)
+{
+ return bpf_loop(nr_entries, lookup_callback, NULL, 0) ? 0 : 1;
+}
+
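+/* Attached to sys_getpgid: each trigger performs nr_loops x nr_entries hash
+ * lookups and records the elapsed nanoseconds in a per-CPU ring of NR_SLOTS
+ * samples, which the userspace benchmark harness is expected to drain.
+ */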
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int benchmark(void *ctx)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 times_index;
+ u64 start_time;
+
+ times_index = percpu_times_index[cpu & CPU_MASK] % NR_SLOTS;
+ start_time = bpf_ktime_get_ns();
+ bpf_loop(nr_loops, loop_lookup_callback, NULL, 0);
+ percpu_times[cpu & CPU_MASK][times_index] = bpf_ktime_get_ns() - start_time;
+ percpu_times_index[cpu & CPU_MASK] += 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index 4a01ea9113bf..14e28f991451 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -7,6 +7,13 @@
#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
+/* Convenience macros for use with 'asm volatile' blocks */
+#define __naked __attribute__((naked))
+#define __clobber_all "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory"
+#define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory"
+#define __imm(name) [name]"i"(name)
+#define __imm_addr(name) [name]"i"(&name)
+
#if defined(__TARGET_ARCH_x86)
#define SYSCALL_WRAPPER 1
#define SYS_PREFIX "__x64_"
@@ -21,4 +28,29 @@
#define SYS_PREFIX "__se_"
#endif
+/* How many arguments are passed to function in register */
+#if defined(__TARGET_ARCH_x86) || defined(__x86_64__)
+#define FUNC_REG_ARG_CNT 6
+#elif defined(__i386__)
+#define FUNC_REG_ARG_CNT 3
+#elif defined(__TARGET_ARCH_s390) || defined(__s390x__)
+#define FUNC_REG_ARG_CNT 5
+#elif defined(__TARGET_ARCH_arm) || defined(__arm__)
+#define FUNC_REG_ARG_CNT 4
+#elif defined(__TARGET_ARCH_arm64) || defined(__aarch64__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_mips) || defined(__mips__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_powerpc) || defined(__powerpc__) || defined(__powerpc64__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_sparc) || defined(__sparc__)
+#define FUNC_REG_ARG_CNT 6
+#elif defined(__TARGET_ARCH_riscv) || defined(__riscv__)
+#define FUNC_REG_ARG_CNT 8
+#else
+/* default to 5 for others */
+#define FUNC_REG_ARG_CNT 5
+#endif
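+
+/* Typical use: gate per-argument assertions in tests that read function
+ * arguments out of registers, as uprobe_autoattach.c does:
+ *
+ *   #if FUNC_REG_ARG_CNT > 3
+ *           ASSERT_EQ(skel->bss->a[3], 4, "arg4");
+ *   #endif
+ */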
+
#endif
diff --git a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
index e1e11897e99b..1a476d8ed354 100644
--- a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
+++ b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
@@ -81,4 +81,30 @@ int BPF_KSYSCALL(prctl_enter, int option, unsigned long arg2,
return 0;
}
+__u64 splice_fd_in;
+__u64 splice_off_in;
+__u64 splice_fd_out;
+__u64 splice_off_out;
+__u64 splice_len;
+__u64 splice_flags;
+
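+/* BPF_KSYSCALL is meant to unpack all six splice() arguments correctly both
+ * on arches with and without syscall wrappers (cf. SYSCALL_WRAPPER in
+ * bpf_misc.h); the program simply mirrors them into globals for userspace
+ * to compare against the values it passed.
+ */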
+SEC("ksyscall/splice")
+int BPF_KSYSCALL(splice_enter, int fd_in, loff_t *off_in, int fd_out,
+ loff_t *off_out, size_t len, unsigned int flags)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != filter_pid)
+ return 0;
+
+ splice_fd_in = fd_in;
+ splice_off_in = (__u64)off_in;
+ splice_fd_out = fd_out;
+ splice_off_out = (__u64)off_out;
+ splice_len = len;
+ splice_flags = flags;
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
index e5560a656030..e01690618e1e 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
@@ -53,7 +53,7 @@ struct bitfields_only_mixed_types {
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct bitfield_mixed_with_others {
- long: 4; /* char is enough as a backing field */
+ char: 4; /* char is enough as a backing field */
int a: 4;
/* 8-bit implicit padding */
short b; /* combined with previous bitfield */
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
index e304b6204bd9..7998f27df7dd 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
@@ -58,7 +58,81 @@ union jump_code_union {
} __attribute__((packed));
};
-/*------ END-EXPECTED-OUTPUT ------ */
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct nested_packed_but_aligned_struct {
+ * int x1;
+ * int x2;
+ *};
+ *
+ *struct outer_implicitly_packed_struct {
+ * char y1;
+ * struct nested_packed_but_aligned_struct y2;
+ *} __attribute__((packed));
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+struct nested_packed_but_aligned_struct {
+ int x1;
+ int x2;
+} __attribute__((packed));
+
+struct outer_implicitly_packed_struct {
+ char y1;
+ struct nested_packed_but_aligned_struct y2;
+};
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct usb_ss_ep_comp_descriptor {
+ * char: 8;
+ * char bDescriptorType;
+ * char bMaxBurst;
+ * short wBytesPerInterval;
+ *};
+ *
+ *struct usb_host_endpoint {
+ * long: 64;
+ * char: 8;
+ * struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ * long: 0;
+ *} __attribute__((packed));
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+struct usb_ss_ep_comp_descriptor {
+ char: 8;
+ char bDescriptorType;
+ char bMaxBurst;
+ int: 0;
+ short wBytesPerInterval;
+} __attribute__((packed));
+
+struct usb_host_endpoint {
+ long: 64;
+ char: 8;
+ struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ long: 0;
+};
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct nested_packed_struct {
+ int a;
+ char b;
+} __attribute__((packed));
+
+struct outer_nonpacked_struct {
+ short a;
+ struct nested_packed_struct b;
+};
+
+struct outer_packed_struct {
+ short a;
+ struct nested_packed_struct b;
+} __attribute__((packed));
+
+/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct {
struct packed_trailing_space _1;
@@ -69,6 +143,10 @@ int f(struct {
union union_is_never_packed _6;
union union_does_not_need_packing _7;
union jump_code_union _8;
+ struct outer_implicitly_packed_struct _9;
+ struct usb_host_endpoint _10;
+ struct outer_nonpacked_struct _11;
+ struct outer_packed_struct _12;
} *_)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
index 7cb522d22a66..79276fbe454a 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
@@ -19,7 +19,7 @@ struct padded_implicitly {
/*
*struct padded_explicitly {
* int a;
- * int: 32;
+ * long: 0;
* int b;
*};
*
@@ -28,41 +28,28 @@ struct padded_implicitly {
struct padded_explicitly {
int a;
- int: 1; /* algo will explicitly pad with full 32 bits here */
+ int: 1; /* algo will emit aligning `long: 0;` here */
int b;
};
/* ----- START-EXPECTED-OUTPUT ----- */
-/*
- *struct padded_a_lot {
- * int a;
- * long: 32;
- * long: 64;
- * long: 64;
- * int b;
- *};
- *
- */
-/* ------ END-EXPECTED-OUTPUT ------ */
-
struct padded_a_lot {
int a;
- /* 32 bit of implicit padding here, which algo will make explicit */
long: 64;
long: 64;
int b;
};
+/* ------ END-EXPECTED-OUTPUT ------ */
+
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padded_cache_line {
* int a;
- * long: 32;
* long: 64;
* long: 64;
* long: 64;
* int b;
- * long: 32;
* long: 64;
* long: 64;
* long: 64;
@@ -85,7 +72,7 @@ struct padded_cache_line {
*struct zone {
* int a;
* short b;
- * short: 16;
+ * long: 0;
* struct zone_padding __pad__;
*};
*
@@ -108,6 +95,131 @@ struct padding_wo_named_members {
long: 64;
};
+struct padding_weird_1 {
+ int a;
+ long: 64;
+ short: 16;
+ short b;
+};
+
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct padding_weird_2 {
+ * long: 56;
+ * char a;
+ * long: 56;
+ * char b;
+ * char: 8;
+ *};
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+struct padding_weird_2 {
+ int: 32; /* these paddings will be collapsed into `long: 56;` */
+ short: 16;
+ char: 8;
+ char a;
+ int: 32; /* these paddings will be collapsed into `long: 56;` */
+ short: 16;
+ char: 8;
+ char b;
+ char: 8;
+};
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct exact_1byte {
+ char x;
+};
+
+struct padded_1byte {
+ char: 8;
+};
+
+struct exact_2bytes {
+ short x;
+};
+
+struct padded_2bytes {
+ short: 16;
+};
+
+struct exact_4bytes {
+ int x;
+};
+
+struct padded_4bytes {
+ int: 32;
+};
+
+struct exact_8bytes {
+ long x;
+};
+
+struct padded_8bytes {
+ long: 64;
+};
+
+struct ff_periodic_effect {
+ int: 32;
+ short magnitude;
+ long: 0;
+ short phase;
+ long: 0;
+ int: 32;
+ int custom_len;
+ short *custom_data;
+};
+
+struct ib_wc {
+ long: 64;
+ long: 64;
+ int: 32;
+ int byte_len;
+ void *qp;
+ union {} ex;
+ long: 64;
+ int slid;
+ int wc_flags;
+ long: 64;
+ char smac[6];
+ long: 0;
+ char network_hdr_type;
+};
+
+struct acpi_object_method {
+ long: 64;
+ char: 8;
+ char type;
+ short reference_count;
+ char flags;
+ short: 0;
+ char: 8;
+ char sync_level;
+ long: 64;
+ void *node;
+ void *aml_start;
+ union {} dispatch;
+ long: 64;
+ int aml_length;
+};
+
+struct nested_unpacked {
+ int x;
+};
+
+struct nested_packed {
+ struct nested_unpacked a;
+ char c;
+} __attribute__((packed));
+
+struct outer_mixed_but_unpacked {
+ struct nested_packed b1;
+ short a1;
+ struct nested_packed b2;
+};
+
/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct {
@@ -117,6 +229,20 @@ int f(struct {
struct padded_cache_line _4;
struct zone _5;
struct padding_wo_named_members _6;
+ struct padding_weird_1 _7;
+ struct padding_weird_2 _8;
+ struct exact_1byte _100;
+ struct padded_1byte _101;
+ struct exact_2bytes _102;
+ struct padded_2bytes _103;
+ struct exact_4bytes _104;
+ struct padded_4bytes _105;
+ struct exact_8bytes _106;
+ struct padded_8bytes _107;
+ struct ff_periodic_effect _200;
+ struct ib_wc _201;
+ struct acpi_object_method _202;
+ struct outer_mixed_but_unpacked _203;
} *_)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
index 4ee4748133fe..ad21ee8c7e23 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
@@ -25,6 +25,39 @@ typedef enum {
H = 2,
} e3_t;
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *enum e_byte {
+ * EBYTE_1 = 0,
+ * EBYTE_2 = 1,
+ *} __attribute__((mode(byte)));
+ *
+ */
+/* ----- END-EXPECTED-OUTPUT ----- */
+enum e_byte {
+ EBYTE_1,
+ EBYTE_2,
+} __attribute__((mode(byte)));
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *enum e_word {
+ * EWORD_1 = 0LL,
+ * EWORD_2 = 1LL,
+ *} __attribute__((mode(word)));
+ *
+ */
+/* ----- END-EXPECTED-OUTPUT ----- */
+enum e_word {
+ EWORD_1,
+ EWORD_2,
+} __attribute__((mode(word))); /* force to use 8-byte backing for this enum */
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+enum e_big {
+ EBIG_1 = 1000000000000ULL,
+};
+
typedef int int_t;
typedef volatile const int * volatile const crazy_ptr_t;
@@ -51,7 +84,7 @@ typedef void (*printf_fn_t)(const char *, ...);
* typedef int (*fn_t)(int);
* typedef char * const * (*fn_ptr2_t)(s_t, fn_t);
*
- * - `fn_complext_t`: pointer to a function returning struct and accepting
+ * - `fn_complex_t`: pointer to a function returning struct and accepting
* union and struct. All structs and enum are anonymous and defined inline.
*
* - `signal_t: pointer to a function accepting a pointer to a function as an
@@ -224,6 +257,9 @@ struct root_struct {
enum e2 _2;
e2_t _2_1;
e3_t _2_2;
+ enum e_byte _100;
+ enum e_word _101;
+ enum e_big _102;
struct struct_w_typedefs _3;
anon_struct_t _7;
struct struct_fwd *_8;
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
index a1369b5ebcf8..4ad7fe24966d 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -5,6 +5,7 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
#include "cgrp_kfunc_common.h"
char _license[] SEC("license") = "GPL";
@@ -28,6 +29,7 @@ static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
@@ -45,6 +47,7 @@ int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 pointer type STRUCT cgroup must point")
int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path;
@@ -57,6 +60,7 @@ int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
}
SEC("kretprobe/cgroup_destroy_locked")
+__failure __msg("reg type unsupported for arg#0 function")
int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
{
struct cgroup *acquired;
@@ -69,6 +73,7 @@ int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("cgrp_kfunc_acquire_trusted_walked")
int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
@@ -80,8 +85,8 @@ int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char
return 0;
}
-
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
@@ -96,6 +101,7 @@ int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Unreleased reference")
int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
@@ -108,6 +114,7 @@ int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *pat
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 expected pointer to map value")
int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
@@ -123,6 +130,7 @@ int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *pat
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 expected pointer to map value")
int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr, *acquired;
@@ -141,6 +149,7 @@ int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 expected pointer to map value")
int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
@@ -156,6 +165,7 @@ int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Unreleased reference")
int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
@@ -175,6 +185,7 @@ int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Unreleased reference")
int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
@@ -194,6 +205,7 @@ int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket")
int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
{
struct __cgrps_kfunc_map_value *v;
@@ -209,6 +221,7 @@ int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 pointer type STRUCT cgroup must point")
int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired = (struct cgroup *)&path;
@@ -220,6 +233,7 @@ int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket")
int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
{
struct __cgrps_kfunc_map_value local, *v;
@@ -251,6 +265,7 @@ int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
+__failure __msg("release kernel function bpf_cgroup_release expects")
int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path)
{
/* Cannot release trusted cgroup pointer which was not acquired. */
diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h
new file mode 100644
index 000000000000..ad34f3b602be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CPUMASK_COMMON_H
+#define _CPUMASK_COMMON_H
+
+#include "errno.h"
+#include <stdbool.h>
+
+int err;
+
+struct __cpumask_map_value {
+ struct bpf_cpumask __kptr_ref * cpumask;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct __cpumask_map_value);
+ __uint(max_entries, 1);
+} __cpumask_map SEC(".maps");
+
+struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
+void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
+struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
+struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumask) __ksym;
+u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
+u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
+void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
+bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
+bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
+bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
+void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
+void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
+bool bpf_cpumask_and(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym;
+void bpf_cpumask_or(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym;
+void bpf_cpumask_xor(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym;
+bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
+bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
+void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
+u32 bpf_cpumask_any(const struct cpumask *src) __ksym;
+u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+
+static inline const struct cpumask *cast(struct bpf_cpumask *cpumask)
+{
+ return (const struct cpumask *)cpumask;
+}
+
+static inline struct bpf_cpumask *create_cpumask(void)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = bpf_cpumask_create();
+ if (!cpumask) {
+ err = 1;
+ return NULL;
+ }
+
+ if (!bpf_cpumask_empty(cast(cpumask))) {
+ err = 2;
+ bpf_cpumask_release(cpumask);
+ return NULL;
+ }
+
+ return cpumask;
+}
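+
+/* Every mask returned above carries an acquired reference; a program that
+ * fails to release it is rejected by the verifier with "Unreleased
+ * reference" (see cpumask_failure.c). Minimal balanced use:
+ *
+ *   struct bpf_cpumask *mask = create_cpumask();
+ *
+ *   if (!mask)
+ *           return 0;
+ *   bpf_cpumask_set_cpu(0, mask);
+ *   bpf_cpumask_release(mask);
+ */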
+
+static inline struct __cpumask_map_value *cpumask_map_value_lookup(void)
+{
+ u32 key = 0;
+
+ return bpf_map_lookup_elem(&__cpumask_map, &key);
+}
+
+static inline int cpumask_map_insert(struct bpf_cpumask *mask)
+{
+ struct __cpumask_map_value local, *v;
+ long status;
+ struct bpf_cpumask *old;
+ u32 key = 0;
+
+ local.cpumask = NULL;
+ status = bpf_map_update_elem(&__cpumask_map, &key, &local, 0);
+ if (status) {
+ bpf_cpumask_release(mask);
+ return status;
+ }
+
+ v = bpf_map_lookup_elem(&__cpumask_map, &key);
+ if (!v) {
+ bpf_cpumask_release(mask);
+ return -ENOENT;
+ }
+
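+ /* Ownership of @mask moves into the map value here; if a kptr was
+ * already stored we now own it and must release it ourselves.
+ */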
+ old = bpf_kptr_xchg(&v->cpumask, mask);
+ if (old) {
+ bpf_cpumask_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CPUMASK_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c
new file mode 100644
index 000000000000..33e8e86dd090
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "cpumask_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+
+ /* cpumask is never released. */
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("NULL pointer passed to trusted arg0")
+int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+
+ /* cpumask is released twice. */
+ bpf_cpumask_release(cpumask);
+ bpf_cpumask_release(cpumask);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("bpf_cpumask_acquire args#0 expected pointer to STRUCT bpf_cpumask")
+int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ /* Can't acquire a non-struct bpf_cpumask. */
+ cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
+int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ /* Can't set the CPU of a non-struct bpf_cpumask. */
+ bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+ struct __cpumask_map_value *v;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask))
+ return 0;
+
+ v = cpumask_map_value_lookup();
+ if (!v)
+ return 0;
+
+ cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
+
+ /* cpumask is never released. */
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(test_kptr_get_no_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+ struct __cpumask_map_value *v;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask))
+ return 0;
+
+ v = cpumask_map_value_lookup();
+ if (!v)
+ return 0;
+
+ cpumask = bpf_cpumask_kptr_get(&v->cpumask);
+
+ /* cpumask is never released. */
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("NULL pointer passed to trusted arg0")
+int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
+{
+ /* NULL passed to KF_TRUSTED_ARGS kfunc. */
+ bpf_cpumask_empty(NULL);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
new file mode 100644
index 000000000000..1d38bc65d4b0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "cpumask_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid, nr_cpus;
+
+static bool is_test_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ return pid == cur_pid;
+}
+
+static bool create_cpumask_set(struct bpf_cpumask **out1,
+ struct bpf_cpumask **out2,
+ struct bpf_cpumask **out3,
+ struct bpf_cpumask **out4)
+{
+ struct bpf_cpumask *mask1, *mask2, *mask3, *mask4;
+
+ mask1 = create_cpumask();
+ if (!mask1)
+ return false;
+
+ mask2 = create_cpumask();
+ if (!mask2) {
+ bpf_cpumask_release(mask1);
+ err = 3;
+ return false;
+ }
+
+ mask3 = create_cpumask();
+ if (!mask3) {
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ err = 4;
+ return false;
+ }
+
+ mask4 = create_cpumask();
+ if (!mask4) {
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(mask3);
+ err = 5;
+ return false;
+ }
+
+ *out1 = mask1;
+ *out2 = mask2;
+ *out3 = mask3;
+ *out4 = mask4;
+
+ return true;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_set_cpu(0, cpumask);
+ if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
+ err = 3;
+ goto release_exit;
+ }
+
+ bpf_cpumask_clear_cpu(0, cpumask);
+ if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
+ err = 4;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_setall(cpumask);
+ if (!bpf_cpumask_full(cast(cpumask))) {
+ err = 3;
+ goto release_exit;
+ }
+
+ bpf_cpumask_clear(cpumask);
+ if (!bpf_cpumask_empty(cast(cpumask))) {
+ err = 4;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
+ err = 3;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
+ bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask)));
+ err = 4;
+ goto release_exit;
+ }
+
+ bpf_cpumask_set_cpu(0, cpumask);
+ if (bpf_cpumask_first(cast(cpumask)) != 0) {
+ err = 5;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
+ err = 6;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
+ err = 3;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
+ err = 4;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
+ err = 5;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+
+ if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
+ err = 6;
+ goto release_exit;
+ }
+ if (!bpf_cpumask_empty(cast(dst1))) {
+ err = 7;
+ goto release_exit;
+ }
+
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
+ err = 8;
+ goto release_exit;
+ }
+ if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
+ err = 9;
+ goto release_exit;
+ }
+
+ bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
+ err = 10;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+ if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
+ err = 6;
+ goto release_exit;
+ }
+
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
+ err = 7;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
+ err = 8;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
+ err = 9;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+ u32 cpu;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+
+ cpu = bpf_cpumask_any(cast(mask1));
+ if (cpu != 0) {
+ err = 6;
+ goto release_exit;
+ }
+
+ cpu = bpf_cpumask_any(cast(dst2));
+ if (cpu < nr_cpus) {
+ err = 7;
+ goto release_exit;
+ }
+
+ bpf_cpumask_copy(dst2, cast(dst1));
+ if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
+ err = 8;
+ goto release_exit;
+ }
+
+ cpu = bpf_cpumask_any(cast(dst2));
+ if (cpu > 1) {
+ err = 9;
+ goto release_exit;
+ }
+
+ cpu = bpf_cpumask_any_and(cast(mask1), cast(mask2));
+ if (cpu < nr_cpus) {
+ err = 10;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask))
+ err = 3;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+ struct __cpumask_map_value *v;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask)) {
+ err = 3;
+ return 0;
+ }
+
+ v = cpumask_map_value_lookup();
+ if (!v) {
+ err = 4;
+ return 0;
+ }
+
+ cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
+ if (cpumask)
+ bpf_cpumask_release(cpumask);
+ else
+ err = 5;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_insert_kptr_get_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+ struct __cpumask_map_value *v;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask)) {
+ err = 3;
+ return 0;
+ }
+
+ v = cpumask_map_value_lookup();
+ if (!v) {
+ err = 4;
+ return 0;
+ }
+
+ cpumask = bpf_cpumask_kptr_get(&v->cpumask);
+ if (cpumask)
+ bpf_cpumask_release(cpumask);
+ else
+ err = 5;
+
+ return 0;
+}
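
The progs above all follow the same acquire/use/release discipline. For orientation, a minimal sketch of that lifecycle, assuming the kfunc prototypes that the suite's cpumask_common.h helper header declares, and that cast() is simply a (const struct cpumask *) cast of the bpf_cpumask pointer:

struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(cpumask_lifecycle_sketch, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	/* create() returns an owning reference; the verifier requires every
	 * program path to release it or transfer it (e.g. into a map kptr).
	 */
	mask = bpf_cpumask_create();
	if (!mask)
		return 0;

	bpf_cpumask_set_cpu(0, mask);

	/* Read-only kfuncs take const struct cpumask *, hence the cast. */
	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask);

	bpf_cpumask_release(mask);
	return 0;
}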
diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c
new file mode 100644
index 000000000000..0bf969a0b5ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops.s/test_2")
+__failure __msg("attach to unsupported member test_2 of struct bpf_dummy_ops")
+int BPF_PROG(test_unsupported_field_sleepable,
+ struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ char a3, unsigned long a4)
+{
+ /* Tries to mark a non-sleepable field in struct bpf_dummy_ops as sleepable. */
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_dummy_ops dummy_1 = {
+ .test_1 = NULL,
+ .test_2 = (void *)test_unsupported_field_sleepable,
+ .test_sleepable = (void *)NULL,
+};
diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
index ead87edb75e2..1efa746c25dc 100644
--- a/tools/testing/selftests/bpf/progs/dummy_st_ops.c
+++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
@@ -1,19 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-struct bpf_dummy_ops_state {
- int val;
-} __attribute__((preserve_access_index));
-
-struct bpf_dummy_ops {
- int (*test_1)(struct bpf_dummy_ops_state *state);
- int (*test_2)(struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
- char a3, unsigned long a4);
-};
-
char _license[] SEC("license") = "GPL";
SEC("struct_ops/test_1")
@@ -43,8 +33,15 @@ int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a
return 0;
}
+SEC("struct_ops.s/test_sleepable")
+int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state)
+{
+ return 0;
+}
+
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
.test_1 = (void *)test_1,
.test_2 = (void *)test_2,
+ .test_sleepable = (void *)test_sleepable,
};
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 78debc1b3820..aa5b69354b91 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -35,6 +35,13 @@ struct {
__type(value, __u32);
} array_map3 SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array_map4 SEC(".maps");
+
struct sample {
int pid;
long value;
@@ -67,7 +74,7 @@ static int get_map_val_dynptr(struct bpf_dynptr *ptr)
* bpf_ringbuf_submit/discard_dynptr call
*/
SEC("?raw_tp")
-__failure __msg("Unreleased reference id=1")
+__failure __msg("Unreleased reference id=2")
int ringbuf_missing_release1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -80,7 +87,7 @@ int ringbuf_missing_release1(void *ctx)
}
SEC("?raw_tp")
-__failure __msg("Unreleased reference id=2")
+__failure __msg("Unreleased reference id=4")
int ringbuf_missing_release2(void *ctx)
{
struct bpf_dynptr ptr1, ptr2;
@@ -382,7 +389,7 @@ int invalid_helper1(void *ctx)
/* A dynptr can't be passed into a helper function at a non-zero offset */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #3")
+__failure __msg("cannot pass in dynptr at an offset=-8")
int invalid_helper2(void *ctx)
{
struct bpf_dynptr ptr;
@@ -420,7 +427,7 @@ int invalid_write1(void *ctx)
* offset
*/
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #3")
+__failure __msg("cannot overwrite referenced dynptr")
int invalid_write2(void *ctx)
{
struct bpf_dynptr ptr;
@@ -444,7 +451,7 @@ int invalid_write2(void *ctx)
* non-const offset
*/
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #1")
+__failure __msg("cannot overwrite referenced dynptr")
int invalid_write3(void *ctx)
{
struct bpf_dynptr ptr;
@@ -476,7 +483,7 @@ static int invalid_write4_callback(__u32 index, void *data)
* be invalidated as a dynptr
*/
SEC("?raw_tp")
-__failure __msg("arg 1 is an unacquired reference")
+__failure __msg("cannot overwrite referenced dynptr")
int invalid_write4(void *ctx)
{
struct bpf_dynptr ptr;
@@ -584,7 +591,7 @@ int invalid_read4(void *ctx)
/* Initializing a dynptr on an offset should fail */
SEC("?raw_tp")
-__failure __msg("invalid write to stack")
+__failure __msg("cannot pass in dynptr at an offset=0")
int invalid_offset(void *ctx)
{
struct bpf_dynptr ptr;
@@ -623,7 +630,7 @@ static int release_twice_callback_fn(__u32 index, void *data)
}
/* Test that releasing a dynptr twice, where one of the releases happens
- * within a calback function, fails
+ * within a callback function, fails
*/
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
@@ -653,3 +660,435 @@ int dynptr_from_mem_invalid_api(void *ctx)
return 0;
}
+
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_pruning_overwrite(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r9 = 0xeB9F; \
+ r6 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto pjmp1; \
+ goto pjmp2; \
+ pjmp1: \
+ *(u64 *)(r10 - 16) = r9; \
+ pjmp2: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
+SEC("?tc")
+__success __msg("12: safe") __log_level(2)
+int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r9 = 0xeB9F; \
+ r6 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto stjmp1; \
+ goto stjmp2; \
+ stjmp1: \
+ r9 = r9; \
+ stjmp2: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r6 = %[array_map4] ll; \
+ r7 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = r10; \
+ r2 += -8; \
+ r9 = 0; \
+ *(u64 *)(r2 + 0) = r9; \
+ r3 = r10; \
+ r3 += -24; \
+ r9 = 0xeB9FeB9F; \
+ *(u64 *)(r10 - 16) = r9; \
+ *(u64 *)(r10 - 24) = r9; \
+ r9 = 0; \
+ r4 = 0; \
+ r8 = r2; \
+ call %[bpf_map_update_elem]; \
+ r1 = r6; \
+ r2 = r8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto tjmp1; \
+ exit; \
+ tjmp1: \
+ r8 = r0; \
+ r1 = r7; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ r0 = *(u64 *)(r0 + 0); \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto tjmp2; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ goto tjmp3; \
+ tjmp2: \
+ *(u64 *)(r10 - 8) = r9; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r8; \
+ r1 += 8; \
+ r2 = 0; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_dynptr_from_mem]; \
+ tjmp3: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_map_update_elem),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_dynptr_from_mem),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(array_map4),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("dynptr has to be at a constant offset") __log_level(2)
+int dynptr_var_off_overwrite(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r9 = 16; \
+ *(u32 *)(r10 - 4) = r9; \
+ r8 = *(u32 *)(r10 - 4); \
+ if r8 >= 0 goto vjmp1; \
+ r0 = 1; \
+ exit; \
+ vjmp1: \
+ if r8 <= 16 goto vjmp2; \
+ r0 = 1; \
+ exit; \
+ vjmp2: \
+ r8 &= 16; \
+ r1 = %[ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -32; \
+ r4 += r8; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ r9 = 0xeB9F; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r10; \
+ r1 += -32; \
+ r1 += r8; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r6 = %[ringbuf] ll; \
+ r7 = %[array_map4] ll; \
+ r1 = r7; \
+ r2 = r10; \
+ r2 += -8; \
+ r9 = 0; \
+ *(u64 *)(r2 + 0) = r9; \
+ r3 = r2; \
+ r4 = 0; \
+ r8 = r2; \
+ call %[bpf_map_update_elem]; \
+ r1 = r7; \
+ r2 = r8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto sjmp1; \
+ exit; \
+ sjmp1: \
+ r7 = r0; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -24; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r7; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_dynptr_from_mem]; \
+ r1 = r10; \
+ r1 += -512; \
+ r2 = 488; \
+ r3 = r10; \
+ r3 += -24; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_dynptr_read]; \
+ r8 = 1; \
+ if r0 != 0 goto sjmp2; \
+ r8 = 0; \
+ sjmp2: \
+ r1 = r10; \
+ r1 += -24; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_map_update_elem),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm(bpf_dynptr_from_mem),
+ __imm(bpf_dynptr_read),
+ __imm_addr(ringbuf),
+ __imm_addr(array_map4)
+ : __clobber_all
+ );
+ return 0;
+}
+
+/* Test that it is allowed to overwrite an unreferenced dynptr. */
+SEC("?raw_tp")
+__success
+int dynptr_overwrite_unref(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ return 0;
+}
+
+/* Test that slices are invalidated on reinitializing a dynptr. */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int dynptr_invalidate_slice_reinit(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u8 *p;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ p = bpf_dynptr_data(&ptr, 0, 1);
+ if (!p)
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ /* this should fail */
+ return *p;
+}
+
+/* Invalidation of dynptr slices on destruction of dynptr should not miss
+ * mem_or_null pointers.
+ */
+SEC("?raw_tp")
+__failure __msg("R1 type=scalar expected=percpu_ptr_")
+int dynptr_invalidate_slice_or_null(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u8 *p;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ p = bpf_dynptr_data(&ptr, 0, 1);
+ *(__u8 *)&ptr = 0;
+ /* this should fail */
+ bpf_this_cpu_ptr(p);
+ return 0;
+}
+
+/* Destruction of a dynptr should also invalidate any slices obtained from it */
+SEC("?raw_tp")
+__failure __msg("R7 invalid mem access 'scalar'")
+int dynptr_invalidate_slice_failure(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u8 *p1, *p2;
+
+ if (get_map_val_dynptr(&ptr1))
+ return 0;
+ if (get_map_val_dynptr(&ptr2))
+ return 0;
+
+ p1 = bpf_dynptr_data(&ptr1, 0, 1);
+ if (!p1)
+ return 0;
+ p2 = bpf_dynptr_data(&ptr2, 0, 1);
+ if (!p2)
+ return 0;
+
+ *(__u8 *)&ptr1 = 0;
+ /* this should fail */
+ return *p1;
+}
+
+/* Invalidation of slices should be scoped and should not prevent dereferencing
+ * slices of another dynptr after destroying an unrelated dynptr
+ */
+SEC("?raw_tp")
+__success
+int dynptr_invalidate_slice_success(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u8 *p1, *p2;
+
+ if (get_map_val_dynptr(&ptr1))
+ return 1;
+ if (get_map_val_dynptr(&ptr2))
+ return 1;
+
+ p1 = bpf_dynptr_data(&ptr1, 0, 1);
+ if (!p1)
+ return 1;
+ p2 = bpf_dynptr_data(&ptr2, 0, 1);
+ if (!p2)
+ return 1;
+
+ *(__u8 *)&ptr1 = 0;
+ return *p2;
+}
+
+/* Overwriting referenced dynptr should be rejected */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int dynptr_overwrite_ref(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+ /* this should fail */
+ if (get_map_val_dynptr(&ptr))
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Reject writes to dynptr slot from bpf_dynptr_read */
+SEC("?raw_tp")
+__failure __msg("potential write to dynptr at off=-16")
+int dynptr_read_into_slot(void *ctx)
+{
+ union {
+ struct {
+ char _pad[48];
+ struct bpf_dynptr ptr;
+ };
+ char buf[64];
+ } data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr);
+ /* this should fail */
+ bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0);
+
+ return 0;
+}
+
+/* Reject writes to dynptr slot for uninit arg */
+SEC("?raw_tp")
+__failure __msg("potential write to dynptr at off=-16")
+int uninit_write_into_slot(void *ctx)
+{
+ struct {
+ char buf[64];
+ struct bpf_dynptr ptr;
+ } data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr);
+ /* this should fail */
+ bpf_get_current_comm(data.buf, 80);
+
+ return 0;
+}
+
+static int callback(__u32 index, void *data)
+{
+ *(__u32 *)data = 123;
+
+ return 0;
+}
+
+/* If the dynptr is written into in a callback function, its data
+ * slices should be invalidated as well.
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int invalid_data_slices(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u32 *slice;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
+ if (!slice)
+ return 0;
+
+ bpf_loop(10, callback, &ptr, 0);
+
+ /* this should fail */
+ *slice = 1;
+
+ return 0;
+}
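
For contrast with the failure cases above, a sketch of the well-formed lifecycle these checks enforce: every reserve is paired with exactly one submit or discard, and the stack slots of a referenced dynptr are never overwritten in between. This reuses the ringbuf map declared earlier in the file:

SEC("?raw_tp")
__success
int dynptr_wellformed_lifecycle(void *ctx)
{
	struct bpf_dynptr ptr;
	__u32 val = 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(val), 0, &ptr)) {
		/* A failed reserve still leaves the dynptr slot tracked as
		 * referenced, so this path must discard it too (a runtime
		 * no-op here).
		 */
		bpf_ringbuf_discard_dynptr(&ptr, 0);
		return 0;
	}

	bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0);
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}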
diff --git a/tools/testing/selftests/bpf/progs/fib_lookup.c b/tools/testing/selftests/bpf/progs/fib_lookup.c
new file mode 100644
index 000000000000..c4514dd58c62
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fib_lookup.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_tracing_net.h"
+
+struct bpf_fib_lookup fib_params = {};
+int fib_lookup_ret = 0;
+int lookup_flags = 0;
+
+SEC("tc")
+int fib_lookup(struct __sk_buff *skb)
+{
+ fib_lookup_ret = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params),
+ lookup_flags);
+
+ return TC_ACT_SHOT;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/htab_reuse.c b/tools/testing/selftests/bpf/progs/htab_reuse.c
new file mode 100644
index 000000000000..7f7368cb3095
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_reuse.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct htab_val {
+ struct bpf_spin_lock lock;
+ unsigned int data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, unsigned int);
+ __type(value, struct htab_val);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} htab SEC(".maps");
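
htab_reuse.c itself only declares the map; the element reuse is presumably driven from the userspace half of the test. As illustration (not part of the test), a sketch of how a program would serialize updates through the per-value bpf_spin_lock; tc is used since spin locks are not available to tracing programs:

SEC("tc")
int htab_lock_sketch(struct __sk_buff *skb)
{
	unsigned int key = 0;
	struct htab_val *v;

	v = bpf_map_lookup_elem(&htab, &key);
	if (!v)
		return 0;

	/* The lock lives inside the value, so data updates are atomic with
	 * respect to other programs touching the same element.
	 */
	bpf_spin_lock(&v->lock);
	v->data++;
	bpf_spin_unlock(&v->lock);
	return 0;
}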
diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
new file mode 100644
index 000000000000..2d2e61470794
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+static struct prog_test_ref_kfunc __kptr_ref *v;
+long total_sum = -1;
+
+extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+
+SEC("tc")
+int test_jit_probe_mem(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long zero = 0, sum;
+
+ p = bpf_kfunc_call_test_acquire(&zero);
+ if (!p)
+ return 1;
+
+ p = bpf_kptr_xchg(&v, p);
+ if (p)
+ goto release_out;
+
+ /* Direct map value access of kptr, should be PTR_UNTRUSTED */
+ p = v;
+ if (!p)
+ return 1;
+
+ asm volatile (
+ "r9 = %[p];"
+ "%[sum] = 0;"
+
+ /* r8 = p->a */
+ "r8 = *(u32 *)(r9 + 0);"
+ "%[sum] += r8;"
+
+ /* r8 = p->b */
+ "r8 = *(u32 *)(r9 + 4);"
+ "%[sum] += r8;"
+
+ "r9 += 8;"
+ /* r9 = p->a */
+ "r9 = *(u32 *)(r9 - 8);"
+ "%[sum] += r9;"
+
+ : [sum] "=r"(sum)
+ : [p] "r"(p)
+ : "r8", "r9"
+ );
+
+ total_sum = sum;
+ return 0;
+release_out:
+ bpf_kfunc_call_test_release(p);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
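
The hand-written asm in test_jit_probe_mem pins down the exact load offsets the JIT must guard. In plain C the computation is simply the following, where each dereference of the untrusted kptr p may fault and the PROBE_MEM checks are expected to make it read back 0 rather than crash:

	sum = 0;
	sum += p->a;	/* u32 load at offset 0 */
	sum += p->b;	/* u32 load at offset 4 */
	sum += p->a;	/* offset 0 again, reached via r9 += 8; *(r9 - 8) */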
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index f636e50be259..7daa8f5720b9 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -3,6 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
+extern long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym;
extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
__u32 c, __u64 d) __ksym;
@@ -16,6 +17,24 @@ extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym;
extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym;
extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
+extern u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym;
+
+SEC("tc")
+int kfunc_call_test4(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ long tmp;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ tmp = bpf_kfunc_call_test4(-3, -30, -200, -1000);
+ return (tmp >> 32) + tmp;
+}
SEC("tc")
int kfunc_call_test2(struct __sk_buff *skb)
@@ -163,4 +182,14 @@ int kfunc_call_test_get_mem(struct __sk_buff *skb)
return ret;
}
+SEC("tc")
+int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
+{
+ u32 expected = 5, actual;
+
+ actual = bpf_kfunc_call_test_static_unused_arg(expected, 0xdeadbeef);
+ return actual != expected ? -1 : 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c
index 4ad88da5cda2..4fa4a9b01bde 100644
--- a/tools/testing/selftests/bpf/progs/linked_list.c
+++ b/tools/testing/selftests/bpf/progs/linked_list.c
@@ -260,7 +260,7 @@ int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head
{
int ret;
- ret = list_push_pop_multiple(lock ,head, false);
+ ret = list_push_pop_multiple(lock, head, false);
if (ret)
return ret;
return list_push_pop_multiple(lock, head, true);
diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
index 1d9017240e19..69cdc07cba13 100644
--- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
@@ -54,28 +54,44 @@
return 0; \
}
-CHECK(kptr, push_front, &f->head);
-CHECK(kptr, push_back, &f->head);
CHECK(kptr, pop_front, &f->head);
CHECK(kptr, pop_back, &f->head);
-CHECK(global, push_front, &ghead);
-CHECK(global, push_back, &ghead);
CHECK(global, pop_front, &ghead);
CHECK(global, pop_back, &ghead);
-CHECK(map, push_front, &v->head);
-CHECK(map, push_back, &v->head);
CHECK(map, pop_front, &v->head);
CHECK(map, pop_back, &v->head);
-CHECK(inner_map, push_front, &iv->head);
-CHECK(inner_map, push_back, &iv->head);
CHECK(inner_map, pop_front, &iv->head);
CHECK(inner_map, pop_back, &iv->head);
#undef CHECK
+#define CHECK(test, op, hexpr, nexpr) \
+ SEC("?tc") \
+ int test##_missing_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ void (*p)(void *, void *) = (void *)&bpf_list_##op; \
+ p(hexpr, nexpr); \
+ return 0; \
+ }
+
+CHECK(kptr, push_front, &f->head, b);
+CHECK(kptr, push_back, &f->head, b);
+
+CHECK(global, push_front, &ghead, f);
+CHECK(global, push_back, &ghead, f);
+
+CHECK(map, push_front, &v->head, f);
+CHECK(map, push_back, &v->head, f);
+
+CHECK(inner_map, push_front, &iv->head, f);
+CHECK(inner_map, push_back, &iv->head, f);
+
+#undef CHECK
+
#define CHECK(test, op, lexpr, hexpr) \
SEC("?tc") \
int test##_incorrect_lock_##op(void *ctx) \
@@ -108,13 +124,49 @@ CHECK(inner_map, pop_back, &iv->head);
CHECK(inner_map_global, op, &iv->lock, &ghead); \
CHECK(inner_map_map, op, &iv->lock, &v->head);
-CHECK_OP(push_front);
-CHECK_OP(push_back);
CHECK_OP(pop_front);
CHECK_OP(pop_back);
#undef CHECK
#undef CHECK_OP
+
+#define CHECK(test, op, lexpr, hexpr, nexpr) \
+ SEC("?tc") \
+ int test##_incorrect_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ void (*p)(void *, void *) = (void *)&bpf_list_##op; \
+ bpf_spin_lock(lexpr); \
+ p(hexpr, nexpr); \
+ return 0; \
+ }
+
+#define CHECK_OP(op) \
+ CHECK(kptr_kptr, op, &f1->lock, &f2->head, b); \
+ CHECK(kptr_global, op, &f1->lock, &ghead, f); \
+ CHECK(kptr_map, op, &f1->lock, &v->head, f); \
+ CHECK(kptr_inner_map, op, &f1->lock, &iv->head, f); \
+ \
+ CHECK(global_global, op, &glock2, &ghead, f); \
+ CHECK(global_kptr, op, &glock, &f1->head, b); \
+ CHECK(global_map, op, &glock, &v->head, f); \
+ CHECK(global_inner_map, op, &glock, &iv->head, f); \
+ \
+ CHECK(map_map, op, &v->lock, &v2->head, f); \
+ CHECK(map_kptr, op, &v->lock, &f2->head, b); \
+ CHECK(map_global, op, &v->lock, &ghead, f); \
+ CHECK(map_inner_map, op, &v->lock, &iv->head, f); \
+ \
+ CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, f); \
+ CHECK(inner_map_kptr, op, &iv->lock, &f2->head, b); \
+ CHECK(inner_map_global, op, &iv->lock, &ghead, f); \
+ CHECK(inner_map_map, op, &iv->lock, &v->head, f);
+
+CHECK_OP(push_front);
+CHECK_OP(push_back);
+
+#undef CHECK
+#undef CHECK_OP
#undef INIT
SEC("?kprobe/xyz")
@@ -304,34 +356,6 @@ int direct_write_node(void *ctx)
}
static __always_inline
-int write_after_op(void (*push_op)(void *head, void *node))
-{
- struct foo *f;
-
- f = bpf_obj_new(typeof(*f));
- if (!f)
- return 0;
- bpf_spin_lock(&glock);
- push_op(&ghead, &f->node);
- f->data = 42;
- bpf_spin_unlock(&glock);
-
- return 0;
-}
-
-SEC("?tc")
-int write_after_push_front(void *ctx)
-{
- return write_after_op((void *)bpf_list_push_front);
-}
-
-SEC("?tc")
-int write_after_push_back(void *ctx)
-{
- return write_after_op((void *)bpf_list_push_back);
-}
-
-static __always_inline
int use_after_unlock(void (*op)(void *head, void *node))
{
struct foo *f;
diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
index d8d8af623bc2..dc93887ed34c 100644
--- a/tools/testing/selftests/bpf/progs/lsm.c
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -6,9 +6,10 @@
#include "bpf_misc.h"
#include "vmlinux.h"
+#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include <errno.h>
+#include <errno.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -164,8 +165,8 @@ int copy_test = 0;
SEC("fentry.s/" SYS_PREFIX "sys_setdomainname")
int BPF_PROG(test_sys_setdomainname, struct pt_regs *regs)
{
- void *ptr = (void *)PT_REGS_PARM1(regs);
- int len = PT_REGS_PARM2(regs);
+ void *ptr = (void *)PT_REGS_PARM1_SYSCALL(regs);
+ int len = PT_REGS_PARM2_SYSCALL(regs);
int buf = 0;
long ret;
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
index eb8217803493..228ec45365a8 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -62,21 +62,23 @@ extern struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
+
static void test_kptr_unref(struct map_value *v)
{
struct prog_test_ref_kfunc *p;
p = v->unref_ptr;
/* store untrusted_ptr_or_null_ */
- v->unref_ptr = p;
+ WRITE_ONCE(v->unref_ptr, p);
if (!p)
return;
if (p->a + p->b > 100)
return;
/* store untrusted_ptr_ */
- v->unref_ptr = p;
+ WRITE_ONCE(v->unref_ptr, p);
/* store NULL */
- v->unref_ptr = NULL;
+ WRITE_ONCE(v->unref_ptr, NULL);
}
static void test_kptr_ref(struct map_value *v)
@@ -85,7 +87,7 @@ static void test_kptr_ref(struct map_value *v)
p = v->ref_ptr;
/* store ptr_or_null_ */
- v->unref_ptr = p;
+ WRITE_ONCE(v->unref_ptr, p);
if (!p)
return;
if (p->a + p->b > 100)
@@ -99,7 +101,7 @@ static void test_kptr_ref(struct map_value *v)
return;
}
/* store ptr_ */
- v->unref_ptr = p;
+ WRITE_ONCE(v->unref_ptr, p);
bpf_kfunc_call_test_release(p);
p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_common.h b/tools/testing/selftests/bpf/progs/nested_trust_common.h
new file mode 100644
index 000000000000..83d33931136e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_common.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _NESTED_TRUST_COMMON_H
+#define _NESTED_TRUST_COMMON_H
+
+#include <stdbool.h>
+
+bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
+u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+
+#endif /* _NESTED_TRUST_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
new file mode 100644
index 000000000000..14aff7676436
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "nested_trust_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+SEC("tp_btf/task_newtask")
+__failure __msg("R2 must be referenced or trusted")
+int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_test_cpu(0, task->user_cpus_ptr);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc")
+int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_first_zero(&task->cpus_mask);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_success.c b/tools/testing/selftests/bpf/progs/nested_trust_success.c
new file mode 100644
index 000000000000..886ade4aa99d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_success.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "nested_trust_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_read_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_test_cpu(0, task->cpus_ptr);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 7bd76b9e0f98..875513866032 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -156,10 +156,10 @@ probe_read_lim(void* dst, void* src, unsigned long len, unsigned long max)
{
len = len < max ? len : max;
if (len > 1) {
- if (bpf_probe_read(dst, len, src))
+ if (bpf_probe_read_kernel(dst, len, src))
return 0;
} else if (len == 1) {
- if (bpf_probe_read(dst, 1, src))
+ if (bpf_probe_read_kernel(dst, 1, src))
return 0;
}
return len;
@@ -216,7 +216,8 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
#endif
for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
filepart_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(cgroup_node, name));
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(cgroup_node, name));
if (!cgroup_node)
return payload;
if (cgroup_node == cgroup_root_node)
@@ -303,7 +304,8 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
cgroup_data->cgroup_full_length = 0;
size_t cgroup_root_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(root_kernfs, name));
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(root_kernfs, name));
barrier_var(cgroup_root_length);
if (cgroup_root_length <= MAX_PATH) {
barrier_var(cgroup_root_length);
@@ -312,7 +314,8 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
}
size_t cgroup_proc_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(proc_kernfs, name));
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(proc_kernfs, name));
barrier_var(cgroup_proc_length);
if (cgroup_proc_length <= MAX_PATH) {
barrier_var(cgroup_proc_length);
@@ -395,7 +398,8 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
arr_struct = bpf_map_lookup_elem(&data_heap, &zero);
if (arr_struct == NULL)
return 0;
- bpf_probe_read(&arr_struct->array[0], sizeof(arr_struct->array[0]), kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[0],
+ sizeof(arr_struct->array[0]), kill_data);
} else {
int index = get_var_spid_index(arr_struct, spid);
@@ -409,8 +413,9 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == 0) {
- bpf_probe_read(&arr_struct->array[i],
- sizeof(arr_struct->array[i]), kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[i],
+ sizeof(arr_struct->array[i]),
+ kill_data);
bpf_map_update_elem(&var_tpid_to_data, &tpid,
arr_struct, 0);
@@ -427,17 +432,17 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
if (delta_sec < STALE_INFO) {
kill_data->kill_count++;
kill_data->last_kill_time = bpf_ktime_get_ns();
- bpf_probe_read(&arr_struct->array[index],
- sizeof(arr_struct->array[index]),
- kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
} else {
struct var_kill_data_t* kill_data =
get_var_kill_data(ctx, spid, tpid, sig);
if (kill_data == NULL)
return 0;
- bpf_probe_read(&arr_struct->array[index],
- sizeof(arr_struct->array[index]),
- kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
}
}
bpf_map_update_elem(&var_tpid_to_data, &tpid, arr_struct, 0);
@@ -487,8 +492,9 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
#pragma unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
- filepart_length = bpf_probe_read_str(payload, MAX_PATH,
- BPF_CORE_READ(filp_dentry, d_name.name));
+ filepart_length =
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp_dentry, d_name.name));
barrier_var(filepart_length);
if (filepart_length > MAX_PATH)
break;
@@ -572,7 +578,8 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
sysctl_data->sysctl_val_length = 0;
sysctl_data->sysctl_path_length = 0;
- size_t sysctl_val_length = bpf_probe_read_str(payload, CTL_MAXNAME, buf);
+ size_t sysctl_val_length = bpf_probe_read_kernel_str(payload,
+ CTL_MAXNAME, buf);
barrier_var(sysctl_val_length);
if (sysctl_val_length <= CTL_MAXNAME) {
barrier_var(sysctl_val_length);
@@ -580,8 +587,10 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
payload += sysctl_val_length;
}
- size_t sysctl_path_length = bpf_probe_read_str(payload, MAX_PATH,
- BPF_CORE_READ(filp, f_path.dentry, d_name.name));
+ size_t sysctl_path_length =
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp, f_path.dentry,
+ d_name.name));
barrier_var(sysctl_path_length);
if (sysctl_path_length <= MAX_PATH) {
barrier_var(sysctl_path_length);
@@ -638,7 +647,8 @@ int raw_tracepoint__sched_process_exit(void* ctx)
struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
- bpf_probe_read(kill_data, sizeof(*past_kill_data), past_kill_data);
+ bpf_probe_read_kernel(kill_data, sizeof(*past_kill_data),
+ past_kill_data);
void* payload = kill_data->payload;
size_t offset = kill_data->payload_length;
if (offset >= MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
@@ -656,8 +666,10 @@ int raw_tracepoint__sched_process_exit(void* ctx)
payload += comm_length;
}
- size_t cgroup_proc_length = bpf_probe_read_str(payload, KILL_TARGET_LEN,
- BPF_CORE_READ(proc_kernfs, name));
+ size_t cgroup_proc_length =
+ bpf_probe_read_kernel_str(payload,
+ KILL_TARGET_LEN,
+ BPF_CORE_READ(proc_kernfs, name));
barrier_var(cgroup_proc_length);
if (cgroup_proc_length <= KILL_TARGET_LEN) {
barrier_var(cgroup_proc_length);
@@ -718,7 +730,8 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
proc_exec_data->parent_start_time = BPF_CORE_READ(parent_task, start_time);
const char* filename = BPF_CORE_READ(bprm, filename);
- size_t bin_path_length = bpf_probe_read_str(payload, MAX_FILENAME_LEN, filename);
+ size_t bin_path_length =
+ bpf_probe_read_kernel_str(payload, MAX_FILENAME_LEN, filename);
barrier_var(bin_path_length);
if (bin_path_length <= MAX_FILENAME_LEN) {
barrier_var(bin_path_length);
@@ -922,7 +935,8 @@ int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
filemod_data->payload);
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
- size_t len = bpf_probe_read_str(payload, MAX_FILEPATH_LENGTH, oldname);
+ size_t len = bpf_probe_read_kernel_str(payload, MAX_FILEPATH_LENGTH,
+ oldname);
barrier_var(len);
if (len <= MAX_FILEPATH_LENGTH) {
barrier_var(len);
diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c
new file mode 100644
index 000000000000..e5db1a4287e5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+long less_callback_ran = -1;
+long removed_key = -1;
+long first_data[2] = {-1, -1};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ less_callback_ran = 1;
+
+ return node_a->key < node_b->key;
+}
+
+static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
+{
+ struct node_data *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 5;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m) {
+ bpf_obj_drop(n);
+ return 2;
+ }
+ m->key = 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rbtree_add(&groot, &m->node, less);
+ bpf_spin_unlock(&glock);
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 3;
+ n->key = 3;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("tc")
+long rbtree_add_nodes(void *ctx)
+{
+ return __add_three(&groot, &glock);
+}
+
+SEC("tc")
+long rbtree_add_and_remove(void *ctx)
+{
+ struct bpf_rb_node *res = NULL;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ goto err_out;
+ n->key = 5;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m)
+ goto err_out;
+ m->key = 3;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rbtree_add(&groot, &m->node, less);
+ res = bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+
+ n = container_of(res, struct node_data, node);
+ removed_key = n->key;
+
+ bpf_obj_drop(n);
+
+ return 0;
+err_out:
+ if (n)
+ bpf_obj_drop(n);
+ if (m)
+ bpf_obj_drop(m);
+ return 1;
+}
+
+SEC("tc")
+long rbtree_first_and_remove(void *ctx)
+{
+ struct bpf_rb_node *res = NULL;
+ struct node_data *n, *m, *o;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 3;
+ n->data = 4;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m)
+ goto err_out;
+ m->key = 5;
+ m->data = 6;
+
+ o = bpf_obj_new(typeof(*o));
+ if (!o)
+ goto err_out;
+ o->key = 1;
+ o->data = 2;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rbtree_add(&groot, &m->node, less);
+ bpf_rbtree_add(&groot, &o->node, less);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res) {
+ bpf_spin_unlock(&glock);
+ return 2;
+ }
+
+ o = container_of(res, struct node_data, node);
+ first_data[0] = o->data;
+
+ res = bpf_rbtree_remove(&groot, &o->node);
+ bpf_spin_unlock(&glock);
+
+ o = container_of(res, struct node_data, node);
+ removed_key = o->key;
+
+ bpf_obj_drop(o);
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (!res) {
+ bpf_spin_unlock(&glock);
+ return 3;
+ }
+
+ o = container_of(res, struct node_data, node);
+ first_data[1] = o->data;
+ bpf_spin_unlock(&glock);
+
+ return 0;
+err_out:
+ if (n)
+ bpf_obj_drop(n);
+ if (m)
+ bpf_obj_drop(m);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
new file mode 100644
index 000000000000..60079b202c07
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct node_data {
+ int key;
+ int data;
+ struct bpf_rb_node node;
+};
+
+struct node_data2 {
+ int key;
+ struct bpf_rb_node node;
+ int data;
+};
+
+static bool less2(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data2 *node_a;
+ struct node_data2 *node_b;
+
+ node_a = container_of(a, struct node_data2, node);
+ node_b = container_of(b, struct node_data2, node);
+
+ return node_a->key < node_b->key;
+}
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+SEC("tc")
+long rbtree_api_add__add_wrong_type(void *ctx)
+{
+ struct node_data2 *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less2);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
new file mode 100644
index 000000000000..340f97da1084
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+/* BTF load should fail: the bpf_rb_root __contains tag names this type's
+ * 'node' field, but 'node' is not a bpf_rb_node
+ */
+struct node_data {
+ int key;
+ int data;
+ struct bpf_list_node node;
+};
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_a->key < node_b->key;
+}
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+SEC("tc")
+long rbtree_api_add__wrong_node_type(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_first(&groot);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
new file mode 100644
index 000000000000..bf3cba115897
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+private(A) struct bpf_rb_root groot2 __contains(node_data, node);
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_add(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_rbtree_add(&groot, &n->node, less);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_remove(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_rbtree_remove(&groot, &n->node);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_first(void *ctx)
+{
+ bpf_rbtree_first(&groot);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("rbtree_remove node input must be non-owning ref")
+long rbtree_api_remove_unadded_node(void *ctx)
+{
+ struct node_data *n, *m;
+ struct bpf_rb_node *res;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m) {
+ bpf_obj_drop(n);
+ return 1;
+ }
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+
+ /* This remove should pass verifier */
+ res = bpf_rbtree_remove(&groot, &n->node);
+ n = container_of(res, struct node_data, node);
+
+ /* This remove shouldn't pass, since m isn't in an rbtree */
+ res = bpf_rbtree_remove(&groot, &m->node);
+ m = container_of(res, struct node_data, node);
+ bpf_spin_unlock(&glock);
+
+ if (n)
+ bpf_obj_drop(n);
+ if (m)
+ bpf_obj_drop(m);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=2 alloc_insn=11")
+long rbtree_api_remove_no_drop(void *ctx)
+{
+ struct bpf_rb_node *res;
+ struct node_data *n;
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto unlock_err;
+
+ res = bpf_rbtree_remove(&groot, res);
+
+ n = container_of(res, struct node_data, node);
+ bpf_spin_unlock(&glock);
+
+ /* bpf_obj_drop(n) is missing here */
+ return 0;
+
+unlock_err:
+ bpf_spin_unlock(&glock);
+ return 1;
+}
+
+SEC("?tc")
+__failure __msg("arg#1 expected pointer to allocated object")
+long rbtree_api_add_to_multiple_trees(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+
+ /* This add should fail since n already in groot's tree */
+ bpf_rbtree_add(&groot2, &n->node, less);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("rbtree_remove node input must be non-owning ref")
+long rbtree_api_add_release_unlock_escape(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+ /* After add() in the previous critical section, n should be
+ * release_on_unlock and released after the previous spin_unlock,
+ * so it should not be possible to use it here
+ */
+ bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("rbtree_remove node input must be non-owning ref")
+long rbtree_api_release_aliasing(void *ctx)
+{
+ struct node_data *n, *m, *o;
+ struct bpf_rb_node *res;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+
+ /* m and o point to the same node,
+ * but verifier doesn't know this
+ */
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ return 1;
+ o = container_of(res, struct node_data, node);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ return 1;
+ m = container_of(res, struct node_data, node);
+
+ bpf_rbtree_remove(&groot, &m->node);
+ /* This second remove shouldn't be possible. The previous remove
+ * returned an owning reference to m, which is the same node that
+ * o's non-owning ref points at.
+ *
+ * In order to preserve the properties
+ * * an owning ref must not be in the rbtree
+ * * a non-owning ref must be in the rbtree
+ *
+ * o's ref must be invalidated after the previous remove. Otherwise
+ * we'd have a non-owning ref to a node that isn't in the rbtree, and
+ * the verifier wouldn't be able to use the type system to prevent
+ * removal of a ref that already isn't in any tree. We'd have to do
+ * runtime checks in that case.
+ */
+ bpf_rbtree_remove(&groot, &o->node);
+
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("rbtree_remove node input must be non-owning ref")
+long rbtree_api_first_release_unlock_escape(void *ctx)
+{
+ struct bpf_rb_node *res;
+ struct node_data *n;
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (res)
+ n = container_of(res, struct node_data, node);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+ /* After first() in the previous critical section, n should be
+ * release_on_unlock and released after the previous spin_unlock,
+ * so it should not be possible to use it here
+ */
+ bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+static bool less__bad_fn_call_add(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_add(&groot, &node_a->node, less);
+
+ return node_a->key < node_b->key;
+}
+
+static bool less__bad_fn_call_remove(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_remove(&groot, &node_a->node);
+
+ return node_a->key < node_b->key;
+}
+
+static bool less__bad_fn_call_first_unlock_after(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_first(&groot);
+ bpf_spin_unlock(&glock);
+
+ return node_a->key < node_b->key;
+}
+
+static __always_inline
+long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, cb);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("arg#1 expected pointer to allocated object")
+long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_add);
+}
+
+SEC("?tc")
+__failure __msg("rbtree_remove not allowed in rbtree cb")
+long rbtree_api_add_bad_cb_bad_fn_call_remove(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_remove);
+}
+
+SEC("?tc")
+__failure __msg("can't spin_{lock,unlock} in rbtree cb")
+long rbtree_api_add_bad_cb_bad_fn_call_first_unlock_after(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_first_unlock_after);
+}
+
+char _license[] SEC("license") = "GPL";
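
To summarize the invariant these failure tests enforce: under the lock, a node reference obtained from first() is non-owning, bpf_rbtree_remove() converts it back into an owning reference, and that owning reference must be dropped exactly once. A sketch of the accepted pattern, reusing this file's glock/groot (mirroring rbtree.c above):

SEC("?tc")
long rbtree_remove_then_drop_sketch(void *ctx)
{
	struct bpf_rb_node *res;
	struct node_data *n;

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);	/* non-owning ref, valid under lock */
	if (!res) {
		bpf_spin_unlock(&glock);
		return 1;
	}
	res = bpf_rbtree_remove(&groot, res);	/* now an owning ref */
	bpf_spin_unlock(&glock);

	n = container_of(res, struct node_data, node);
	bpf_obj_drop(n);	/* exactly one drop, outside the critical section */
	return 0;
}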
diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c
index 9523333b8905..7a438600ae98 100644
--- a/tools/testing/selftests/bpf/progs/setget_sockopt.c
+++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c
@@ -22,6 +22,7 @@ int nr_active;
int nr_connect;
int nr_binddev;
int nr_socket_post_create;
+int nr_fin_wait1;
struct sockopt_test {
int opt;
@@ -386,6 +387,13 @@ int skops_sockopt(struct bpf_sock_ops *skops)
nr_passive += !(bpf_test_sockopt(skops, sk) ||
test_tcp_maxseg(skops, sk) ||
test_tcp_saved_syn(skops, sk));
+ bpf_sock_ops_cb_flags_set(skops,
+ skops->bpf_sock_ops_cb_flags |
+ BPF_SOCK_OPS_STATE_CB_FLAG);
+ break;
+ case BPF_SOCK_OPS_STATE_CB:
+ if (skops->args[1] == BPF_TCP_CLOSE_WAIT)
+ nr_fin_wait1 += !bpf_test_sockopt(skops, sk);
break;
}
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index 753718595c26..e562be6356f3 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -135,7 +135,7 @@ struct strobe_value_loc {
* tpidr_el0 for aarch64).
* TLS_IMM_EXEC: absolute address of GOT entry containing offset
* from thread pointer;
- * TLS_GENERAL_DYN: absolute addres of double GOT entry
+ * TLS_GENERAL_DYN: absolute address of double GOT entry
* containing tls_index_t struct;
*/
int64_t offset;
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
index 1b47b94dbca0..f19d54eda4f1 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -5,6 +5,7 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
#include "task_kfunc_common.h"
char _license[] SEC("license") = "GPL";
@@ -27,6 +28,7 @@ static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *ta
}
SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -44,6 +46,7 @@ int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_f
}
SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;
@@ -56,6 +59,7 @@ int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
}
SEC("kretprobe/free_task")
+__failure __msg("reg type unsupported for arg#0 function")
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -68,6 +72,7 @@ int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64
}
SEC("tp_btf/task_newtask")
+__failure __msg("R1 must be referenced or trusted")
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -81,6 +86,7 @@ int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 cl
SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -95,6 +101,7 @@ int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -107,6 +114,7 @@ int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_
}
SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 expected pointer to map value")
int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_flags)
{
struct task_struct *kptr;
@@ -122,6 +130,7 @@ int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_
}
SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 expected pointer to map value")
int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clone_flags)
{
struct task_struct *kptr, *acquired;
@@ -140,6 +149,7 @@ int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clo
}
SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 expected pointer to map value")
int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags)
{
struct task_struct *kptr;
@@ -155,6 +165,7 @@ int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
{
struct task_struct *kptr;
@@ -174,6 +185,7 @@ int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_fla
}
SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flags)
{
struct task_struct *kptr;
@@ -193,6 +205,7 @@ int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flag
}
SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket")
int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
{
struct __tasks_kfunc_map_value *v;
@@ -208,6 +221,7 @@ int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_f
}
SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired = (struct task_struct *)&clone_flags;
@@ -219,6 +233,7 @@ int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket")
int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
{
struct __tasks_kfunc_map_value local, *v;
@@ -251,6 +266,7 @@ int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
+__failure __msg("release kernel function bpf_task_release expects")
int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
{
/* Cannot release trusted task pointer which was not acquired. */
@@ -260,6 +276,7 @@ int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_
}
SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket")
int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -273,6 +290,7 @@ int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 cl
}
SEC("lsm/task_free")
+__failure __msg("reg type unsupported for arg#0 function")
int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
{
struct task_struct *acquired;
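The __failure/__msg/__success annotations used throughout these tests come from the selftests' bpf_misc.h: they let the generic test_loader run each program and match the verifier log, instead of keeping expected-error tables in C runner code. As a rough sketch (the authoritative strings live in bpf_misc.h), the macros are just BTF declaration tags:

    /* sketch of the bpf_misc.h annotation macros */
    #define __failure  __attribute__((btf_decl_tag("comment:test_expect_failure")))
    #define __success  __attribute__((btf_decl_tag("comment:test_expect_success")))
    #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))

test_loader walks these tags in the object's BTF, loads each program, and asserts that the load fails (or succeeds) with the given substring present in the verifier log.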
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index a1e45fec8938..3b5dc34d23e9 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -92,18 +92,19 @@ int handle_uretprobe_byname(struct pt_regs *ctx)
}
SEC("uprobe")
-int handle_uprobe_byname2(struct pt_regs *ctx)
+int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
{
- unsigned int size = PT_REGS_PARM1(ctx);
+ char mode_buf[2] = {};
- /* verify malloc size */
- if (size == 1)
+ /* verify fopen mode */
+ bpf_probe_read_user(mode_buf, sizeof(mode_buf), mode);
+ if (mode_buf[0] == 'r' && mode_buf[1] == 0)
uprobe_byname2_res = 7;
return 0;
}
SEC("uretprobe")
-int handle_uretprobe_byname2(struct pt_regs *ctx)
+int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
{
uretprobe_byname2_res = 8;
return 0;
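BPF_UPROBE and BPF_URETPROBE are new bpf_tracing.h aliases of BPF_KPROBE/BPF_KRETPROBE that hide the pt_regs plumbing. A hand-expanded equivalent of the rewritten uprobe above would look roughly like this (illustrative, not the literal macro output):

    SEC("uprobe")
    int handle_uprobe_byname2(struct pt_regs *ctx)
    {
        const char *mode = (const char *)PT_REGS_PARM2(ctx);
        char mode_buf[2] = {};

        /* verify fopen mode */
        bpf_probe_read_user(mode_buf, sizeof(mode_buf), mode);
        if (mode_buf[0] == 'r' && mode_buf[1] == 0)
            uprobe_byname2_res = 7;
        return 0;
    }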
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
index 227e85e85dda..9fc603c9d673 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
@@ -34,6 +34,11 @@ __be16 dport = 0;
int test_exist_lookup = -ENOENT;
u32 test_exist_lookup_mark = 0;
+enum nf_nat_manip_type___local {
+ NF_NAT_MANIP_SRC___local,
+ NF_NAT_MANIP_DST___local
+};
+
struct nf_conn;
struct bpf_ct_opts___local {
@@ -58,7 +63,7 @@ int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *,
- int port, enum nf_nat_manip_type) __ksym;
+ int port, enum nf_nat_manip_type___local) __ksym;
static __always_inline void
nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
@@ -157,10 +162,10 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
/* snat */
saddr.ip = bpf_get_prandom_u32();
- bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC);
+ bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
/* dnat */
daddr.ip = bpf_get_prandom_u32();
- bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST);
+ bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
ct_ins = bpf_ct_insert_entry(ct);
if (ct_ins) {
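The ___local suffix is a CO-RE "flavor": libbpf ignores everything from the triple underscore onward when matching a local BTF type against kernel BTF, so this program-local enum relocates against the kernel's enum nf_nat_manip_type without pulling in its header. The same trick works for structs, e.g. (illustrative):

    /* local stand-in, matched by name against the kernel's struct nf_conn */
    struct nf_conn___local {
        u32 mark;
    } __attribute__((preserve_access_index));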
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index 2833ad722cb7..66b304982245 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -600,7 +600,7 @@ static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
return TC_ACT_SHOT;
}
- /* Skip the remainig next hops (may be zero). */
+ /* Skip the remaining next hops (may be zero). */
return skip_next_hops(pkt, encap->unigue.hop_count -
encap->unigue.next_hop - 1);
}
@@ -610,8 +610,8 @@ static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
*
* fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
*
- * clang will substitue a costant for sizeof, which allows the verifier
- * to track it's value. Based on this, it can figure out the constant
+ * clang will substitute a constant for sizeof, which allows the verifier
+ * to track its value. Based on this, it can figure out the constant
* return value, and calling code works while still being "generic" to
* IPv4 and IPv6.
*/
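Put differently, callers are expected to pass a sizeof() literal so the branch folds away. An illustrative shape of the helper the comment refers to (a sketch, not the full test code):

    static INLINING uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
                                        uint64_t iphlen, uint16_t sport,
                                        uint16_t dport)
    {
        if (iphlen == sizeof(struct iphdr)) {
            struct iphdr *ipv4 = iph;

            tuple->ipv4.saddr = ipv4->saddr;
            tuple->ipv4.daddr = ipv4->daddr;
            tuple->ipv4.sport = sport;
            tuple->ipv4.dport = dport;
            return sizeof(tuple->ipv4);
        }
        /* IPv6 case elided; returns sizeof(tuple->ipv6) */
        return 0;
    }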
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index 7b42dad187b8..23970a20b324 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -3,10 +3,9 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
-#ifndef MAX_STACK
#define MAX_STACK (512 - 3 * 32 + 8)
-#endif
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
@@ -39,7 +38,8 @@ int f3(int val, struct __sk_buff *skb, int var)
}
SEC("tc")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("combined stack size of 4 calls is 544")
+int global_func1(struct __sk_buff *skb)
{
return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c
index 97b7031d0e22..98327bdbbfd2 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func10.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func10.c
@@ -2,6 +2,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct Small {
int x;
@@ -21,7 +22,8 @@ __noinline int foo(const struct Big *big)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("invalid indirect read from stack")
+int global_func10(struct __sk_buff *skb)
{
const struct Small small = {.x = skb->len };
diff --git a/tools/testing/selftests/bpf/progs/test_global_func11.c b/tools/testing/selftests/bpf/progs/test_global_func11.c
index ef5277d982d9..283e036dc401 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func11.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func11.c
@@ -2,6 +2,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct S {
int x;
@@ -13,7 +14,8 @@ __noinline int foo(const struct S *s)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("Caller passes invalid args into func#1")
+int global_func11(struct __sk_buff *skb)
{
return foo((const void *)skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func12.c b/tools/testing/selftests/bpf/progs/test_global_func12.c
index 62343527cc59..7f159d83c6f6 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func12.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func12.c
@@ -2,6 +2,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct S {
int x;
@@ -13,7 +14,8 @@ __noinline int foo(const struct S *s)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("invalid mem access 'mem_or_null'")
+int global_func12(struct __sk_buff *skb)
{
const struct S s = {.x = skb->len };
diff --git a/tools/testing/selftests/bpf/progs/test_global_func13.c b/tools/testing/selftests/bpf/progs/test_global_func13.c
index ff8897c1ac22..02ea80da75b5 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func13.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func13.c
@@ -2,6 +2,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct S {
int x;
@@ -16,7 +17,8 @@ __noinline int foo(const struct S *s)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("Caller passes invalid args into func#1")
+int global_func13(struct __sk_buff *skb)
{
const struct S *s = (const struct S *)(0xbedabeda);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func14.c b/tools/testing/selftests/bpf/progs/test_global_func14.c
index 698c77199ebf..33b7d5efd7b2 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func14.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func14.c
@@ -2,6 +2,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct S;
@@ -14,7 +15,8 @@ __noinline int foo(const struct S *s)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("reference type('FWD S') size cannot be determined")
+int global_func14(struct __sk_buff *skb)
{
return foo(NULL);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c
index c19c435988d5..b512d6a6c75e 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func15.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func15.c
@@ -2,6 +2,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__noinline int foo(unsigned int *v)
{
@@ -12,7 +13,8 @@ __noinline int foo(unsigned int *v)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("At program exit the register R0 has value")
+int global_func15(struct __sk_buff *skb)
{
unsigned int v = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func16.c b/tools/testing/selftests/bpf/progs/test_global_func16.c
index 0312d1e8d8c0..e7206304632e 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func16.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func16.c
@@ -2,6 +2,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__noinline int foo(int (*arr)[10])
{
@@ -12,7 +13,8 @@ __noinline int foo(int (*arr)[10])
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("invalid indirect read from stack")
+int global_func16(struct __sk_buff *skb)
{
int array[10];
diff --git a/tools/testing/selftests/bpf/progs/test_global_func17.c b/tools/testing/selftests/bpf/progs/test_global_func17.c
index 2b8b9b8ba018..a32e11c7d933 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func17.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func17.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__noinline int foo(int *p)
{
@@ -10,7 +11,8 @@ __noinline int foo(int *p)
const volatile int i;
SEC("tc")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("Caller passes invalid args into func#1")
+int global_func17(struct __sk_buff *skb)
{
return foo((int *)&i);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func2.c b/tools/testing/selftests/bpf/progs/test_global_func2.c
index 2c18d82923a2..3dce97fb52a4 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func2.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func2.c
@@ -1,4 +1,45 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
#define MAX_STACK (512 - 3 * 32)
-#include "test_global_func1.c"
+
+static __attribute__ ((noinline))
+int f0(int var, struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+ volatile char buf[MAX_STACK] = {};
+
+ return f0(0, skb) + skb->len;
+}
+
+int f3(int, struct __sk_buff *skb, int);
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+ return f1(skb) + f3(val, skb, 1);
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb, int var)
+{
+ volatile char buf[MAX_STACK] = {};
+
+ return skb->ifindex * val * var;
+}
+
+SEC("tc")
+__success
+int global_func2(struct __sk_buff *skb)
+{
+ return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
+}
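Compared to test_global_func1.c, the only functional change here is the MAX_STACK value, and the two expected outcomes follow from the verifier's 512-byte combined-stack limit:

    test_global_func1.c: MAX_STACK = 512 - 3*32 + 8 = 424  ->  combined size 544 > 512, rejected
    test_global_func2.c: MAX_STACK = 512 - 3*32     = 416  ->  fits within 512, accepted

(544 is the verifier's own accounting, matched by the __msg annotation in test_global_func1.c.)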
diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c
index 01bf8275dfd6..142b682d3c2f 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func3.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func3.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -46,20 +47,15 @@ int f7(struct __sk_buff *skb)
return f6(skb);
}
-#ifndef NO_FN8
__attribute__ ((noinline))
int f8(struct __sk_buff *skb)
{
return f7(skb);
}
-#endif
SEC("tc")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("the call stack of 8 frames")
+int global_func3(struct __sk_buff *skb)
{
-#ifndef NO_FN8
return f8(skb);
-#else
- return f7(skb);
-#endif
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func4.c b/tools/testing/selftests/bpf/progs/test_global_func4.c
index 610f75edf276..1733d87ad3f3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func4.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func4.c
@@ -1,4 +1,55 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
-#define NO_FN8
-#include "test_global_func3.c"
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+ return f1(skb) + val;
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb, int var)
+{
+ return f2(var, skb) + val;
+}
+
+__attribute__ ((noinline))
+int f4(struct __sk_buff *skb)
+{
+ return f3(1, skb, 2);
+}
+
+__attribute__ ((noinline))
+int f5(struct __sk_buff *skb)
+{
+ return f4(skb);
+}
+
+__attribute__ ((noinline))
+int f6(struct __sk_buff *skb)
+{
+ return f5(skb);
+}
+
+__attribute__ ((noinline))
+int f7(struct __sk_buff *skb)
+{
+ return f6(skb);
+}
+
+SEC("tc")
+__success
+int global_func4(struct __sk_buff *skb)
+{
+ return f7(skb);
+}
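Here the knob is call depth rather than frame size: the verifier rejects chains deeper than its MAX_CALL_FRAMES limit of 8, so test_global_func3.c's f1..f8 chain trips the "call stack of 8 frames" error, while this variant stops at f7 and is expected to load.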
diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c
index 9248d03e0d06..cc55aedaf82d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func5.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func5.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -25,7 +26,8 @@ int f3(int val, struct __sk_buff *skb)
}
SEC("tc")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("expected pointer to ctx, but got PTR")
+int global_func5(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func6.c b/tools/testing/selftests/bpf/progs/test_global_func6.c
index af8c78bdfb25..46c38c8f2cf0 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func6.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func6.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -25,7 +26,8 @@ int f3(int val, struct __sk_buff *skb)
}
SEC("tc")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("modified ctx ptr R2")
+int global_func6(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c
index 6cb8e2f5254c..f182febfde3c 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func7.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func7.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
void foo(struct __sk_buff *skb)
@@ -11,7 +12,8 @@ void foo(struct __sk_buff *skb)
}
SEC("tc")
-int test_cls(struct __sk_buff *skb)
+__failure __msg("foo() doesn't return scalar")
+int global_func7(struct __sk_buff *skb)
{
foo(skb);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func8.c b/tools/testing/selftests/bpf/progs/test_global_func8.c
index d55a6544b1ab..9b9c57fa2dd3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func8.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func8.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__noinline int foo(struct __sk_buff *skb)
{
@@ -10,7 +11,8 @@ __noinline int foo(struct __sk_buff *skb)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__success
+int global_func8(struct __sk_buff *skb)
{
if (!foo(skb))
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func9.c b/tools/testing/selftests/bpf/progs/test_global_func9.c
index bd233ddede98..1f2cb0159b8d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func9.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func9.c
@@ -2,6 +2,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct S {
int x;
@@ -74,7 +75,8 @@ __noinline int quuz(int **p)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__success
+int global_func9(struct __sk_buff *skb)
{
int result = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
new file mode 100644
index 000000000000..7faa8eef0598
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+static long stack[256];
+
+/*
+ * KPROBE contexts
+ */
+
+__weak int kprobe_typedef_ctx_subprog(bpf_user_pt_regs_t *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_typedef_ctx(void *ctx)
+{
+ return kprobe_typedef_ctx_subprog(ctx);
+}
+
+#define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL)))
+
+__weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx)
+{
+ return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_resolved_ctx(void *ctx)
+{
+ return kprobe_struct_ctx_subprog(ctx);
+}
+
+/* this is the current hack to make this work on old kernels */
+struct bpf_user_pt_regs_t {};
+
+__weak int kprobe_workaround_ctx_subprog(struct bpf_user_pt_regs_t *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_workaround_ctx(void *ctx)
+{
+ return kprobe_workaround_ctx_subprog(ctx);
+}
+
+/*
+ * RAW_TRACEPOINT contexts
+ */
+
+__weak int raw_tp_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success
+int raw_tp_ctx(void *ctx)
+{
+ return raw_tp_ctx_subprog(ctx);
+}
+
+/*
+ * RAW_TRACEPOINT_WRITABLE contexts
+ */
+
+__weak int raw_tp_writable_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success
+int raw_tp_writable_ctx(void *ctx)
+{
+ return raw_tp_writable_ctx_subprog(ctx);
+}
+
+/*
+ * PERF_EVENT contexts
+ */
+
+__weak int perf_event_ctx_subprog(struct bpf_perf_event_data *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?perf_event")
+__success
+int perf_event_ctx(void *ctx)
+{
+ return perf_event_ctx_subprog(ctx);
+}
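All of these subprogs are deliberately global (__weak, not static): a global function is verified independently of its callers, so the verifier has to recognize the context argument's type from BTF alone, which is exactly what the typedef/struct/workaround variants exercise. For contrast, a static subprog needs none of this, since it is verified inline with its caller (illustrative):

    static __always_inline int static_ctx_subprog(void *ctx)
    {
        /* fine: checked in the caller's context, no BTF ctx matching */
        return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
    }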
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
index f4a8250329b2..2fbef3cc7ad8 100644
--- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
@@ -10,6 +10,7 @@
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;
@@ -19,6 +20,7 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
} ringbuf SEC(".maps");
struct {
@@ -33,6 +35,7 @@ int err, pid;
char _license[] SEC("license") = "GPL";
SEC("?lsm.s/bpf")
+__failure __msg("cannot pass in dynptr at an offset=-8")
int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size)
{
unsigned long val;
@@ -42,6 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size)
}
SEC("?lsm.s/bpf")
+__failure __msg("arg#0 expected pointer to stack or dynptr_ptr")
int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size)
{
unsigned long val;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
index 98c6493d9b91..21b19b758c4e 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -16,6 +16,16 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#if defined(IPROUTE2_HAVE_LIBBPF)
+/* Use a new-style map definition. */
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __type(key, int);
+ __type(value, __u64);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+ __uint(max_entries, 1);
+} server_map SEC(".maps");
+#else
/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
#define PIN_GLOBAL_NS 2
@@ -35,6 +45,7 @@ struct {
.max_elem = 1,
.pinning = PIN_GLOBAL_NS,
};
+#endif
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
new file mode 100644
index 000000000000..dcf46adfda04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define IPROUTE2_HAVE_LIBBPF
+#include "test_sk_assign.c"
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c
index f8e9256cf18d..a8d602d7c88a 100644
--- a/tools/testing/selftests/bpf/progs/test_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/test_subprogs.c
@@ -47,7 +47,7 @@ static __noinline int sub5(int v)
return sub1(v) - 1; /* compensates sub1()'s + 1 */
}
-/* unfortunately verifier rejects `struct task_struct *t` as an unkown pointer
+/* unfortunately verifier rejects `struct task_struct *t` as an unknown pointer
* type, so we need to accept pointer as integer and then cast it inside the
* function
*/
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index a0e7762b1e5a..e6e678aa9874 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -38,6 +38,10 @@ static const int cfg_udp_src = 20000;
#define VXLAN_FLAGS 0x8
#define VXLAN_VNI 1
+#ifndef NEXTHDR_DEST
+#define NEXTHDR_DEST 60
+#endif
+
/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
MPLS_LS_S_MASK | 0xff);
@@ -363,6 +367,61 @@ static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
}
+static int encap_ipv6_ipip6(struct __sk_buff *skb)
+{
+ struct iphdr iph_inner;
+ struct v6hdr h_outer;
+ struct tcphdr tcph;
+ struct ethhdr eth;
+ __u64 flags;
+ int olen;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
+ sizeof(iph_inner)) < 0)
+ return TC_ACT_OK;
+
+ /* filter only packets we want */
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + (iph_inner.ihl << 2),
+ &tcph, sizeof(tcph)) < 0)
+ return TC_ACT_OK;
+
+ if (tcph.dest != __bpf_constant_htons(cfg_port))
+ return TC_ACT_OK;
+
+ olen = sizeof(h_outer.ip);
+
+ flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6;
+
+ /* add room between mac and network header */
+ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
+ return TC_ACT_SHOT;
+
+ /* prepare new outer network header */
+ memset(&h_outer.ip, 0, sizeof(h_outer.ip));
+ h_outer.ip.version = 6;
+ h_outer.ip.hop_limit = iph_inner.ttl;
+ h_outer.ip.saddr.s6_addr[1] = 0xfd;
+ h_outer.ip.saddr.s6_addr[15] = 1;
+ h_outer.ip.daddr.s6_addr[1] = 0xfd;
+ h_outer.ip.daddr.s6_addr[15] = 2;
+ h_outer.ip.payload_len = iph_inner.tot_len;
+ h_outer.ip.nexthdr = IPPROTO_IPIP;
+
+ /* store new outer network header */
+ if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
+ BPF_F_INVALIDATE_HASH) < 0)
+ return TC_ACT_SHOT;
+
+ /* update eth->h_proto */
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
+ return TC_ACT_SHOT;
+ eth.h_proto = bpf_htons(ETH_P_IPV6);
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
+ return TC_ACT_SHOT;
+
+ return TC_ACT_OK;
+}
+
static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto)
{
@@ -461,6 +520,15 @@ int __encap_ip6tnl_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("encap_ipip6_none")
+int __encap_ipip6_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv6_ipip6(skb);
+ else
+ return TC_ACT_OK;
+}
+
SEC("encap_ip6gre_none")
int __encap_ip6gre_none(struct __sk_buff *skb)
{
@@ -528,13 +596,33 @@ int __encap_ip6vxlan_eth(struct __sk_buff *skb)
static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
{
+ __u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO;
+ struct ipv6_opt_hdr ip6_opt_hdr;
struct gre_hdr greh;
struct udphdr udph;
int olen = len;
switch (proto) {
case IPPROTO_IPIP:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4;
+ break;
case IPPROTO_IPV6:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6;
+ break;
+ case NEXTHDR_DEST:
+ if (bpf_skb_load_bytes(skb, off + len, &ip6_opt_hdr,
+ sizeof(ip6_opt_hdr)) < 0)
+ return TC_ACT_OK;
+ switch (ip6_opt_hdr.nexthdr) {
+ case IPPROTO_IPIP:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4;
+ break;
+ case IPPROTO_IPV6:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6;
+ break;
+ default:
+ return TC_ACT_OK;
+ }
break;
case IPPROTO_GRE:
olen += sizeof(struct gre_hdr);
@@ -569,8 +657,7 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
return TC_ACT_OK;
}
- if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC,
- BPF_F_ADJ_ROOM_FIXED_GSO))
+ if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC, flags))
return TC_ACT_SHOT;
return TC_ACT_OK;
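The new BPF_F_ADJ_ROOM_DECAP_L3_IPV4/IPV6 flags tell bpf_skb_adjust_room() which L3 protocol remains after the outer header is stripped, so the kernel can fix up skb metadata when decap changes the address family. A minimal IPv6-outer/IPv4-inner strip would be (a sketch under the same assumptions as this test):

    __u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_DECAP_L3_IPV4;

    /* drop the outer IPv6 header sitting in front of the inner IPv4 packet */
    if (bpf_skb_adjust_room(skb, -(int)sizeof(struct ipv6hdr),
                            BPF_ADJ_ROOM_MAC, flags))
        return TC_ACT_SHOT;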
diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index 98af55f0bcd3..508da4a23c4f 100644
--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
@@ -82,6 +82,27 @@ int gre_set_tunnel(struct __sk_buff *skb)
}
SEC("tc")
+int gre_set_tunnel_no_key(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER |
+ BPF_F_NO_TUNNEL_KEY);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("tc")
int gre_get_tunnel(struct __sk_buff *skb)
{
int ret;
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
index ab75522e2eeb..da4bf89d004c 100644
--- a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
+++ b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
@@ -6,18 +6,22 @@
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
int uprobe_byname_parm1 = 0;
int uprobe_byname_ran = 0;
int uretprobe_byname_rc = 0;
+int uretprobe_byname_ret = 0;
int uretprobe_byname_ran = 0;
-size_t uprobe_byname2_parm1 = 0;
+u64 uprobe_byname2_parm1 = 0;
int uprobe_byname2_ran = 0;
-char *uretprobe_byname2_rc = NULL;
+u64 uretprobe_byname2_rc = 0;
int uretprobe_byname2_ran = 0;
int test_pid;
+int a[8];
+
/* This program cannot auto-attach, but that should not stop other
* programs from attaching.
*/
@@ -28,44 +32,84 @@ int handle_uprobe_noautoattach(struct pt_regs *ctx)
}
SEC("uprobe//proc/self/exe:autoattach_trigger_func")
-int handle_uprobe_byname(struct pt_regs *ctx)
+int BPF_UPROBE(handle_uprobe_byname
+ , int arg1
+ , int arg2
+ , int arg3
+#if FUNC_REG_ARG_CNT > 3
+ , int arg4
+#endif
+#if FUNC_REG_ARG_CNT > 4
+ , int arg5
+#endif
+#if FUNC_REG_ARG_CNT > 5
+ , int arg6
+#endif
+#if FUNC_REG_ARG_CNT > 6
+ , int arg7
+#endif
+#if FUNC_REG_ARG_CNT > 7
+ , int arg8
+#endif
+)
{
uprobe_byname_parm1 = PT_REGS_PARM1_CORE(ctx);
uprobe_byname_ran = 1;
+
+ a[0] = arg1;
+ a[1] = arg2;
+ a[2] = arg3;
+#if FUNC_REG_ARG_CNT > 3
+ a[3] = arg4;
+#endif
+#if FUNC_REG_ARG_CNT > 4
+ a[4] = arg5;
+#endif
+#if FUNC_REG_ARG_CNT > 5
+ a[5] = arg6;
+#endif
+#if FUNC_REG_ARG_CNT > 6
+ a[6] = arg7;
+#endif
+#if FUNC_REG_ARG_CNT > 7
+ a[7] = arg8;
+#endif
return 0;
}
SEC("uretprobe//proc/self/exe:autoattach_trigger_func")
-int handle_uretprobe_byname(struct pt_regs *ctx)
+int BPF_URETPROBE(handle_uretprobe_byname, int ret)
{
uretprobe_byname_rc = PT_REGS_RC_CORE(ctx);
+ uretprobe_byname_ret = ret;
uretprobe_byname_ran = 2;
+
return 0;
}
-SEC("uprobe/libc.so.6:malloc")
-int handle_uprobe_byname2(struct pt_regs *ctx)
+SEC("uprobe/libc.so.6:fopen")
+int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
{
int pid = bpf_get_current_pid_tgid() >> 32;
/* ignore irrelevant invocations */
if (test_pid != pid)
return 0;
- uprobe_byname2_parm1 = PT_REGS_PARM1_CORE(ctx);
+ uprobe_byname2_parm1 = (u64)(long)pathname;
uprobe_byname2_ran = 3;
return 0;
}
-SEC("uretprobe/libc.so.6:malloc")
-int handle_uretprobe_byname2(struct pt_regs *ctx)
+SEC("uretprobe/libc.so.6:fopen")
+int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
{
int pid = bpf_get_current_pid_tgid() >> 32;
/* ignore irrelevant invocations */
if (test_pid != pid)
return 0;
- uretprobe_byname2_rc = (char *)PT_REGS_RC_CORE(ctx);
+ uretprobe_byname2_rc = (u64)(long)ret;
uretprobe_byname2_ran = 4;
return 0;
}
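The FUNC_REG_ARG_CNT guards exist because pt_regs-based argument fetching can only see register-passed parameters. The constant comes from the newly included bpf_misc.h and reflects the target ABI: 6 integer register arguments on x86-64, 8 on arm64, 5 on s390x. On x86-64, arg7 and arg8 of the trigger function therefore arrive on the stack, the guarded branches compile out, and a[6]/a[7] are expected to stay zero.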
diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
index ce419304ff1f..7748cc23de8a 100644
--- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
+++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
@@ -59,10 +59,14 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
if (!data_val)
return 0;
- bpf_probe_read(&value, sizeof(value), &attr->value);
-
- bpf_copy_from_user(data_val, sizeof(struct data),
- (void *)(unsigned long)value);
+ ret = bpf_probe_read_kernel(&value, sizeof(value), &attr->value);
+ if (ret)
+ return ret;
+
+ ret = bpf_copy_from_user(data_val, sizeof(struct data),
+ (void *)(unsigned long)value);
+ if (ret)
+ return ret;
if (data_val->data_len > sizeof(data_val->data))
return -EINVAL;
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
index e9dfa0313d1b..4b8e37f7fd06 100644
--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
@@ -42,7 +42,7 @@ int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
if (id != __NR_nanosleep)
return 0;
- ts = (void *)PT_REGS_PARM1_CORE(regs);
+ ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
@@ -60,7 +60,7 @@ int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
if (id != __NR_nanosleep)
return 0;
- ts = (void *)PT_REGS_PARM1_CORE(regs);
+ ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
index 53b64c999450..297c260fc364 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
@@ -9,6 +9,12 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
void *data = (void *)(long)xdp->data;
int data_len = bpf_xdp_get_buff_len(xdp);
int offset = 0;
+ /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
+#if defined(__TARGET_ARCH_s390)
+ int tailroom = 512;
+#else
+ int tailroom = 320;
+#endif
/* Data length determines the test case */
@@ -20,7 +26,7 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
offset = 128;
} else if (data_len == 128) {
/* Max tail grow 3520 */
- offset = 4096 - 256 - 320 - data_len;
+ offset = 4096 - 256 - tailroom - data_len;
} else if (data_len == 9000) {
offset = 10;
} else if (data_len == 9001) {
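For reference, the arithmetic behind the magic numbers: the XDP frame lives in a 4096-byte page with 256 bytes of reserved headroom, and the skb_shared_info tailroom depends on SKB_DATA_ALIGN, hence the per-arch constant:

    x86-64 and friends: offset = 4096 - 256 - 320 - 128 = 3392  (max frame 3520)
    s390x:              offset = 4096 - 256 - 512 - 128 = 3200  (max frame 3328)

so the test now grows to the true per-arch maximum instead of hard-coding the 320-byte tailroom.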
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
index 134768f6b788..4ddcb6dfe500 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
@@ -98,7 +98,7 @@ bool parse_eth_frame(struct ethhdr *eth, void *data_end, struct parse_pkt *pkt)
return true;
}
-/* Hint, VLANs are choosen to hit network-byte-order issues */
+/* Hint, VLANs are chosen to hit network-byte-order issues */
#define TESTVLAN 4011 /* 0xFAB */
// #define TO_VLAN 4000 /* 0xFA0 (hint 0x0A0 = 160) */
@@ -195,7 +195,7 @@ int xdp_prognum2(struct xdp_md *ctx)
/* Moving Ethernet header, dest overlap with src, memmove handle this */
dest = data;
- dest+= VLAN_HDR_SZ;
+ dest += VLAN_HDR_SZ;
/*
* Notice: Taking over vlan_hdr->h_vlan_encapsulated_proto, by
* only moving two MAC addrs (12 bytes), not overwriting last 2 bytes
diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
index f3201dc69a60..03ee946c6bf7 100644
--- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
+++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
@@ -16,6 +16,7 @@ struct sample {
struct {
__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
+ __uint(max_entries, 4096);
} user_ringbuf SEC(".maps");
struct {
@@ -39,7 +40,8 @@ bad_access1(struct bpf_dynptr *dynptr, void *context)
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to read before the pointer.
*/
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("negative offset dynptr_ptr ptr")
int user_ringbuf_callback_bad_access1(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, bad_access1, NULL, 0);
@@ -61,7 +63,8 @@ bad_access2(struct bpf_dynptr *dynptr, void *context)
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to read past the end of the pointer.
*/
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("dereference of modified dynptr_ptr ptr")
int user_ringbuf_callback_bad_access2(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, bad_access2, NULL, 0);
@@ -80,7 +83,8 @@ write_forbidden(struct bpf_dynptr *dynptr, void *context)
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to write to that pointer.
*/
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'dynptr_ptr'")
int user_ringbuf_callback_write_forbidden(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, write_forbidden, NULL, 0);
@@ -99,7 +103,8 @@ null_context_write(struct bpf_dynptr *dynptr, void *context)
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to write through a NULL context pointer.
*/
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
int user_ringbuf_callback_null_context_write(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, null_context_write, NULL, 0);
@@ -120,7 +125,8 @@ null_context_read(struct bpf_dynptr *dynptr, void *context)
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to read from a NULL context pointer.
*/
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
int user_ringbuf_callback_null_context_read(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, null_context_read, NULL, 0);
@@ -139,7 +145,8 @@ try_discard_dynptr(struct bpf_dynptr *dynptr, void *context)
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to discard that pointer.
*/
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("cannot release unowned const bpf_dynptr")
int user_ringbuf_callback_discard_dynptr(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_discard_dynptr, NULL, 0);
@@ -158,7 +165,8 @@ try_submit_dynptr(struct bpf_dynptr *dynptr, void *context)
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to submit that pointer.
*/
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("cannot release unowned const bpf_dynptr")
int user_ringbuf_callback_submit_dynptr(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_submit_dynptr, NULL, 0);
@@ -175,7 +183,8 @@ invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context)
/* A bpf_user_ringbuf_drain callback should not be able to return a value
* other than 0 or 1.
*/
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("At callback return the register R0 has value")
int user_ringbuf_callback_invalid_return(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0);
@@ -197,14 +206,16 @@ try_reinit_dynptr_ringbuf(struct bpf_dynptr *dynptr, void *context)
return 0;
}
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("Dynptr has to be an uninitialized dynptr")
int user_ringbuf_callback_reinit_dynptr_mem(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_mem, NULL, 0);
return 0;
}
-SEC("?raw_tp/")
+SEC("?raw_tp")
+__failure __msg("Dynptr has to be an uninitialized dynptr")
int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0);
diff --git a/tools/testing/selftests/bpf/progs/xdp_features.c b/tools/testing/selftests/bpf/progs/xdp_features.c
new file mode 100644
index 000000000000..87c247d56f72
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_features.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/netdev.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <asm-generic/errno-base.h>
+
+#include "xdp_features.h"
+
+#define ipv6_addr_equal(a, b) ((a).s6_addr32[0] == (b).s6_addr32[0] && \
+ (a).s6_addr32[1] == (b).s6_addr32[1] && \
+ (a).s6_addr32[2] == (b).s6_addr32[2] && \
+ (a).s6_addr32[3] == (b).s6_addr32[3])
+
+struct net_device;
+struct bpf_prog;
+
+struct xdp_cpumap_stats {
+ unsigned int redirect;
+ unsigned int pass;
+ unsigned int drop;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} dut_stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CPUMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_cpumap_val));
+ __uint(max_entries, 1);
+} cpu_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 1);
+} dev_map SEC(".maps");
+
+const volatile struct in6_addr tester_addr;
+const volatile struct in6_addr dut_addr;
+
+static __always_inline int
+xdp_process_echo_packet(struct xdp_md *xdp, bool dut)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ struct tlv_hdr *tlv;
+ struct udphdr *uh;
+ __be16 port;
+ __u8 *cmd;
+
+ if (eh + 1 > (struct ethhdr *)data_end)
+ return -EINVAL;
+
+ if (eh->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *ih = (struct iphdr *)(eh + 1);
+ __be32 saddr = dut ? tester_addr.s6_addr32[3]
+ : dut_addr.s6_addr32[3];
+ __be32 daddr = dut ? dut_addr.s6_addr32[3]
+ : tester_addr.s6_addr32[3];
+
+ ih = (struct iphdr *)(eh + 1);
+ if (ih + 1 > (struct iphdr *)data_end)
+ return -EINVAL;
+
+ if (saddr != ih->saddr)
+ return -EINVAL;
+
+ if (daddr != ih->daddr)
+ return -EINVAL;
+
+ if (ih->protocol != IPPROTO_UDP)
+ return -EINVAL;
+
+ uh = (struct udphdr *)(ih + 1);
+ } else if (eh->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct in6_addr saddr = dut ? tester_addr : dut_addr;
+ struct in6_addr daddr = dut ? dut_addr : tester_addr;
+ struct ipv6hdr *ih6 = (struct ipv6hdr *)(eh + 1);
+
+ if (ih6 + 1 > (struct ipv6hdr *)data_end)
+ return -EINVAL;
+
+ if (!ipv6_addr_equal(saddr, ih6->saddr))
+ return -EINVAL;
+
+ if (!ipv6_addr_equal(daddr, ih6->daddr))
+ return -EINVAL;
+
+ if (ih6->nexthdr != IPPROTO_UDP)
+ return -EINVAL;
+
+ uh = (struct udphdr *)(ih6 + 1);
+ } else {
+ return -EINVAL;
+ }
+
+ if (uh + 1 > (struct udphdr *)data_end)
+ return -EINVAL;
+
+ port = dut ? uh->dest : uh->source;
+ if (port != bpf_htons(DUT_ECHO_PORT))
+ return -EINVAL;
+
+ tlv = (struct tlv_hdr *)(uh + 1);
+ if (tlv + 1 > data_end)
+ return -EINVAL;
+
+ return bpf_htons(tlv->type) == CMD_ECHO ? 0 : -EINVAL;
+}
+
+static __always_inline int
+xdp_update_stats(struct xdp_md *xdp, bool tx, bool dut)
+{
+ __u32 *val, key = 0;
+
+ if (xdp_process_echo_packet(xdp, tx))
+ return -EINVAL;
+
+ if (dut)
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ else
+ val = bpf_map_lookup_elem(&stats, &key);
+
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+/* Tester */
+
+SEC("xdp")
+int xdp_tester_check_tx(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, true, false);
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_tester_check_rx(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, false, false);
+
+ return XDP_PASS;
+}
+
+/* DUT */
+
+SEC("xdp")
+int xdp_do_pass(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, true, true);
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_do_drop(struct xdp_md *xdp)
+{
+ if (xdp_update_stats(xdp, true, true))
+ return XDP_PASS;
+
+ return XDP_DROP;
+}
+
+SEC("xdp")
+int xdp_do_aborted(struct xdp_md *xdp)
+{
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ return XDP_ABORTED;
+}
+
+SEC("xdp")
+int xdp_do_tx(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ __u8 tmp_mac[ETH_ALEN];
+
+ if (xdp_update_stats(xdp, true, true))
+ return XDP_PASS;
+
+ __builtin_memcpy(tmp_mac, eh->h_source, ETH_ALEN);
+ __builtin_memcpy(eh->h_source, eh->h_dest, ETH_ALEN);
+ __builtin_memcpy(eh->h_dest, tmp_mac, ETH_ALEN);
+
+ return XDP_TX;
+}
+
+SEC("xdp")
+int xdp_do_redirect(struct xdp_md *xdp)
+{
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ return bpf_redirect_map(&cpu_map, 0, 0);
+}
+
+SEC("tp_btf/xdp_exception")
+int BPF_PROG(xdp_exception, const struct net_device *dev,
+ const struct bpf_prog *xdp, __u32 act)
+{
+ __u32 *val, key = 0;
+
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+SEC("tp_btf/xdp_cpumap_kthread")
+int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,
+ unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
+{
+ __u32 *val, key = 0;
+
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+SEC("xdp/cpumap")
+int xdp_do_redirect_cpumap(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ __u8 tmp_mac[ETH_ALEN];
+
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ __builtin_memcpy(tmp_mac, eh->h_source, ETH_ALEN);
+ __builtin_memcpy(eh->h_source, eh->h_dest, ETH_ALEN);
+ __builtin_memcpy(eh->h_dest, tmp_mac, ETH_ALEN);
+
+ return bpf_redirect_map(&dev_map, 0, 0);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
new file mode 100644
index 000000000000..4c55b4d79d3d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "xdp_metadata.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, 256);
+ __type(key, __u32);
+ __type(value, __u32);
+} xsk SEC(".maps");
+
+extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
+ __u64 *timestamp) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
+ __u32 *hash) __ksym;
+
+SEC("xdp")
+int rx(struct xdp_md *ctx)
+{
+ void *data, *data_meta, *data_end;
+ struct ipv6hdr *ip6h = NULL;
+ struct ethhdr *eth = NULL;
+ struct udphdr *udp = NULL;
+ struct iphdr *iph = NULL;
+ struct xdp_meta *meta;
+ int ret;
+
+ data = (void *)(long)ctx->data;
+ data_end = (void *)(long)ctx->data_end;
+ eth = data;
+ if (eth + 1 < data_end) {
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ iph = (void *)(eth + 1);
+ if (iph + 1 < data_end && iph->protocol == IPPROTO_UDP)
+ udp = (void *)(iph + 1);
+ }
+ if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ ip6h = (void *)(eth + 1);
+ if (ip6h + 1 < data_end && ip6h->nexthdr == IPPROTO_UDP)
+ udp = (void *)(ip6h + 1);
+ }
+ if (udp && udp + 1 > data_end)
+ udp = NULL;
+ }
+
+ if (!udp)
+ return XDP_PASS;
+
+ if (udp->dest != bpf_htons(9091))
+ return XDP_PASS;
+
+ bpf_printk("forwarding UDP:9091 to AF_XDP");
+
+ ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
+ if (ret != 0) {
+ bpf_printk("bpf_xdp_adjust_meta returned %d", ret);
+ return XDP_PASS;
+ }
+
+ data = (void *)(long)ctx->data;
+ data_meta = (void *)(long)ctx->data_meta;
+ meta = data_meta;
+
+ if (meta + 1 > data) {
+ bpf_printk("bpf_xdp_adjust_meta doesn't appear to work");
+ return XDP_PASS;
+ }
+
+ if (!bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
+ bpf_printk("populated rx_timestamp with %llu", meta->rx_timestamp);
+ else
+ meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */
+
+ if (!bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash))
+ bpf_printk("populated rx_hash with %u", meta->rx_hash);
+ else
+ meta->rx_hash = 0; /* Used by AF_XDP as not avail signal */
+
+ return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
+}
+
+char _license[] SEC("license") = "GPL";
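On the AF_XDP side, the metadata written via bpf_xdp_adjust_meta() lands immediately in front of the frame in the UMEM. A consumer-side sketch (hypothetical helper, assuming the struct xdp_meta layout from the selftests' xdp_metadata.h):

    #include <stdio.h>
    #include <linux/types.h>
    #include "xdp_metadata.h"

    static void dump_meta(void *umem_area, __u64 addr)
    {
        void *data = (char *)umem_area + addr;
        struct xdp_meta *meta = (void *)((char *)data - sizeof(*meta));

        if (meta->rx_timestamp)        /* 0 means "not available", see above */
            printf("rx_timestamp: %llu\n",
                   (unsigned long long)meta->rx_timestamp);
        if (meta->rx_hash)
            printf("rx_hash: %u\n", meta->rx_hash);
    }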
diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata.c b/tools/testing/selftests/bpf/progs/xdp_metadata.c
new file mode 100644
index 000000000000..77678b034389
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_metadata.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "xdp_metadata.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, 4);
+ __type(key, __u32);
+ __type(value, __u32);
+} xsk SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} prog_arr SEC(".maps");
+
+extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
+ __u64 *timestamp) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
+ __u32 *hash) __ksym;
+
+SEC("xdp")
+int rx(struct xdp_md *ctx)
+{
+ void *data, *data_meta;
+ struct xdp_meta *meta;
+ u64 timestamp = -1;
+ int ret;
+
+ /* Reserve enough for all custom metadata. */
+
+ ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
+ if (ret != 0)
+ return XDP_DROP;
+
+ data = (void *)(long)ctx->data;
+ data_meta = (void *)(long)ctx->data_meta;
+
+ if (data_meta + sizeof(struct xdp_meta) > data)
+ return XDP_DROP;
+
+ meta = data_meta;
+
+ /* Export metadata. */
+
+ /* We expect veth's bpf_xdp_metadata_rx_timestamp to return a zero HW
+ * timestamp, so put a non-zero value into the AF_XDP frame for
+ * userspace.
+ */
+ bpf_xdp_metadata_rx_timestamp(ctx, &timestamp);
+ if (timestamp == 0)
+ meta->rx_timestamp = 1;
+
+ bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash);
+
+ return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata2.c b/tools/testing/selftests/bpf/progs/xdp_metadata2.c
new file mode 100644
index 000000000000..cf69d05451c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_metadata2.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "xdp_metadata.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
+ __u32 *hash) __ksym;
+
+int called;
+
+SEC("freplace/rx")
+int freplace_rx(struct xdp_md *ctx)
+{
+ u32 hash = 0;
+ /* Call _any_ metadata function to make sure we don't crash. */
+ bpf_xdp_metadata_rx_hash(ctx, &hash);
+ called++;
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
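An freplace program attaches to a BTF-defined function of an already-loaded program rather than to a kernel hook; the point of this test is that kfunc calls still verify and run from inside the replacement. Userspace wires it up roughly like so (libbpf sketch; rx_prog_fd is assumed to be the fd of the rx program from xdp_metadata.c):

    /* before load: point the replacement at the target function */
    bpf_program__set_attach_target(skel->progs.freplace_rx, rx_prog_fd, "rx");
    /* after load: create the freplace link */
    link = bpf_program__attach_freplace(skel->progs.freplace_rx,
                                        rx_prog_fd, "rx");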
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 736686e903f6..07d786329105 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -310,7 +310,7 @@ static __always_inline void values_get_tcpipopts(__u16 *mss, __u8 *wscale,
static __always_inline void values_inc_synacks(void)
{
__u32 key = 1;
- __u32 *value;
+ __u64 *value;
value = bpf_map_lookup_elem(&values, &key);
if (value)
diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
new file mode 100644
index 000000000000..744a01d0e57d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Intel */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} xsk SEC(".maps");
+
+static unsigned int idx;
+
+SEC("xdp") int xsk_def_prog(struct xdp_md *xdp)
+{
+ return bpf_redirect_map(&xsk, 0, XDP_DROP);
+}
+
+SEC("xdp") int xsk_xdp_drop(struct xdp_md *xdp)
+{
+ /* Drop every other packet */
+ if (idx++ % 2)
+ return XDP_DROP;
+
+ return bpf_redirect_map(&xsk, 0, XDP_DROP);
+}
+
+char _license[] SEC("license") = "GPL";
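Note the third argument to bpf_redirect_map(): the low bits of flags double as the return value when the XSKMAP lookup fails, so passing XDP_DROP here means "drop rather than abort when no AF_XDP socket is bound at index 0". The long-hand equivalent would be:

    long ret = bpf_redirect_map(&xsk, 0, 0);

    if (ret != XDP_REDIRECT)    /* lookup failed; default return is XDP_ABORTED */
        return XDP_DROP;
    return ret;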
diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp
index 0bd9990e83fa..f4936834f76f 100644
--- a/tools/testing/selftests/bpf/test_cpp.cpp
+++ b/tools/testing/selftests/bpf/test_cpp.cpp
@@ -91,7 +91,7 @@ static void try_skeleton_template()
skel.detach();
- /* destructor will destory underlying skeleton */
+ /* destructor will destroy underlying skeleton */
}
int main(int argc, char *argv[])
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index b73152822aa2..7fc00e423e4d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1275,7 +1275,7 @@ static void test_map_in_map(void)
goto out_map_in_map;
}
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
if (err) {
printf("Failed to get map info by fd %d: %d", fd,
errno);
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index 7cb1bc05e5cf..40cba8d368d9 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -1039,7 +1039,7 @@ try:
offload = bpf_pinned("/sys/fs/bpf/offload")
ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True)
fail(ret == 0, "attached offloaded XDP program to drv")
- check_extack(err, "Using device-bound program without HW_MODE flag is not supported.", args)
+ check_extack(err, "Using offloaded program without HW_MODE flag is not supported.", args)
rm("/sys/fs/bpf/offload")
sim.wait_for_flush()
@@ -1088,12 +1088,12 @@ try:
ret, _, err = sim.set_xdp(pinned, "offload",
fail=False, include_stderr=True)
fail(ret == 0, "Pinned program loaded for a different device accepted")
- check_extack_nsim(err, "program bound to different dev.", args)
+ check_extack(err, "Program bound to different device.", args)
simdev2.remove()
ret, _, err = sim.set_xdp(pinned, "offload",
fail=False, include_stderr=True)
fail(ret == 0, "Pinned program loaded for a removed device accepted")
- check_extack_nsim(err, "xdpoffload of non-bound program.", args)
+ check_extack(err, "Program bound to different device.", args)
rm(pin_file)
bpftool_prog_list_wait(expected=0)
@@ -1334,12 +1334,12 @@ try:
ret, _, err = simA.set_xdp(progB, "offload", force=True, JSON=False,
fail=False, include_stderr=True)
fail(ret == 0, "cross-ASIC program allowed")
- check_extack_nsim(err, "program bound to different dev.", args)
+ check_extack(err, "Program bound to different device.", args)
for d in simdevB.nsims:
ret, _, err = d.set_xdp(progA, "offload", force=True, JSON=False,
fail=False, include_stderr=True)
fail(ret == 0, "cross-ASIC program allowed")
- check_extack_nsim(err, "program bound to different dev.", args)
+ check_extack(err, "Program bound to different device.", args)
start_test("Test multi-dev ASIC cross-dev map reuse...")
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 4716e38e153a..6d5e3022c75f 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -17,6 +17,7 @@
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>
+#include <bpf/btf.h>
static bool verbose(void)
{
@@ -967,6 +968,43 @@ int write_sysctl(const char *sysctl, const char *value)
return 0;
}
+int get_bpf_max_tramp_links_from(struct btf *btf)
+{
+ const struct btf_enum *e;
+ const struct btf_type *t;
+ __u32 i, type_cnt;
+ const char *name;
+ __u16 j, vlen;
+
+ for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
+ t = btf__type_by_id(btf, i);
+ if (!t || !btf_is_enum(t) || t->name_off)
+ continue;
+ e = btf_enum(t);
+ for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
+ name = btf__str_by_offset(btf, e->name_off);
+ if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
+ return e->val;
+ }
+ }
+
+ return -1;
+}
+
+int get_bpf_max_tramp_links(void)
+{
+ struct btf *vmlinux_btf;
+ int ret;
+
+ vmlinux_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
+ return -1;
+ ret = get_bpf_max_tramp_links_from(vmlinux_btf);
+ btf__free(vmlinux_btf);
+
+ return ret;
+}
+
#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
@@ -975,12 +1013,12 @@ void crash_handler(int signum)
sz = backtrace(bt, ARRAY_SIZE(bt));
+ if (env.stdout)
+ stdio_restore();
if (env.test) {
env.test_state->error_cnt++;
dump_test_log(env.test, env.test_state, true, false);
}
- if (env.stdout)
- stdio_restore();
if (env.worker_id != -1)
fprintf(stderr, "[%d]: ", env.worker_id);
fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
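Typical use of the new helpers in a test then looks like this (sketch):

    int max_links = get_bpf_max_tramp_links();

    if (!ASSERT_GT(max_links, 0, "get_bpf_max_tramp_links"))
        return;
    /* size fentry/fexit attach loops off max_links from here on */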
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 3f058dfadbaf..d5d51ec97ec8 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -394,6 +394,8 @@ int kern_sync_rcu(void);
int trigger_module_test_read(int read_sz);
int trigger_module_test_write(int write_sz);
int write_sysctl(const char *sysctl, const char *value);
+int get_bpf_max_tramp_links_from(struct btf *btf);
+int get_bpf_max_tramp_links(void);
#ifdef __x86_64__
#define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
index 3256de30f563..ed518d075d1d 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -93,7 +93,7 @@ int get_map_fd_by_prog_id(int prog_id)
info.nr_map_ids = 1;
info.map_ids = (__u64) (unsigned long) map_ids;
- if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+ if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
log_err("Failed to get info by prog fd %d", prog_fd);
goto err;
}
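
These call sites move from the generic bpf_obj_get_info_by_fd() to the new type-specific bpf_prog_get_info_by_fd(). A condensed sketch of the new helper in isolation (prog_id_of() is an illustrative name, not part of the patch):

	#include <bpf/bpf.h>

	/* Return the id of the program behind prog_fd, or -1 on failure. */
	static int prog_id_of(int prog_fd)
	{
		struct bpf_prog_info info = {};
		__u32 info_len = sizeof(info);

		if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
			return -1;
		return info.id;
	}
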
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index 334bdfeab940..910044f08908 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -100,6 +100,9 @@ if [[ "$#" -eq "0" ]]; then
echo "ipip"
$0 ipv4 ipip none 100
+ echo "ipip6"
+ $0 ipv4 ipip6 none 100
+
echo "ip6ip6"
$0 ipv6 ip6tnl none 100
@@ -224,6 +227,9 @@ elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then
elif [[ "$tuntype" =~ "vxlan" && "$mac" == "eth" ]]; then
ttype="vxlan"
targs="id 1 dstport 8472 udp6zerocsumrx"
+elif [[ "$tuntype" == "ipip6" ]]; then
+ ttype="ip6tnl"
+ targs=""
else
ttype=$tuntype
targs=""
@@ -233,6 +239,9 @@ fi
if [[ "${tuntype}" == "sit" ]]; then
link_addr1="${ns1_v4}"
link_addr2="${ns2_v4}"
+elif [[ "${tuntype}" == "ipip6" ]]; then
+ link_addr1="${ns1_v6}"
+ link_addr2="${ns2_v6}"
else
link_addr1="${addr1}"
link_addr2="${addr2}"
@@ -287,12 +296,6 @@ else
server_listen
fi
-# bpf_skb_net_shrink does not take tunnel flags yet, cannot update L3.
-if [[ "${tuntype}" == "sit" ]]; then
- echo OK
- exit 0
-fi
-
# serverside, use BPF for decap
ip netns exec "${ns2}" ip link del dev testtun0
ip netns exec "${ns2}" tc qdisc add dev veth2 clsact
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
index 5c8ef062f760..32df93747095 100644
--- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
@@ -96,7 +96,7 @@ static int get_map_fd_by_prog_id(int prog_id, bool *xdp)
info.nr_map_ids = 1;
info.map_ids = (__u64)(unsigned long)map_ids;
- if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+ if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
log_err("Failed to get info by prog fd %d", prog_fd);
goto err;
}
diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
index 2eaedc1d9ed3..06857b689c11 100755
--- a/tools/testing/selftests/bpf/test_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tunnel.sh
@@ -66,15 +66,20 @@ config_device()
add_gre_tunnel()
{
+ tun_key=
+ if [ -n "$1" ]; then
+ tun_key="key $1"
+ fi
+
# at_ns0 namespace
ip netns exec at_ns0 \
- ip link add dev $DEV_NS type $TYPE seq key 2 \
+ ip link add dev $DEV_NS type $TYPE seq $tun_key \
local 172.16.1.100 remote 172.16.1.200
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
# root namespace
- ip link add dev $DEV type $TYPE key 2 external
+ ip link add dev $DEV type $TYPE $tun_key external
ip link set dev $DEV up
ip addr add dev $DEV 10.1.1.200/24
}
@@ -238,7 +243,7 @@ test_gre()
check $TYPE
config_device
- add_gre_tunnel
+ add_gre_tunnel 2
attach_bpf $DEV gre_set_tunnel gre_get_tunnel
ping $PING_ARG 10.1.1.100
check_err $?
@@ -253,6 +258,30 @@ test_gre()
echo -e ${GREEN}"PASS: $TYPE"${NC}
}
+test_gre_no_tunnel_key()
+{
+ TYPE=gre
+ DEV_NS=gre00
+ DEV=gre11
+ ret=0
+
+ check $TYPE
+ config_device
+ add_gre_tunnel
+ attach_bpf $DEV gre_set_tunnel_no_key gre_get_tunnel
+ ping $PING_ARG 10.1.1.100
+ check_err $?
+ ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
+ check_err $?
+ cleanup
+
+ if [ $ret -ne 0 ]; then
+ echo -e ${RED}"FAIL: $TYPE"${NC}
+ return 1
+ fi
+ echo -e ${GREEN}"PASS: $TYPE"${NC}
+}
+
test_ip6gre()
{
TYPE=ip6gre
@@ -589,6 +618,7 @@ cleanup()
ip link del ipip6tnl11 2> /dev/null
ip link del ip6ip6tnl11 2> /dev/null
ip link del gretap11 2> /dev/null
+ ip link del gre11 2> /dev/null
ip link del ip6gre11 2> /dev/null
ip link del ip6gretap11 2> /dev/null
ip link del geneve11 2> /dev/null
@@ -641,6 +671,10 @@ bpf_tunnel_test()
test_gre
errors=$(( $errors + $? ))
+ echo "Testing GRE tunnel (without tunnel keys)..."
+ test_gre_no_tunnel_key
+ errors=$(( $errors + $? ))
+
echo "Testing IP6GRE tunnel..."
test_ip6gre
errors=$(( $errors + $? ))
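
The new test_gre_no_tunnel_key case drives a GRE device created without a key, paired with a gre_set_tunnel_no_key program on the BPF side. A hedged sketch of what such a program looks like (the exact flag combination in test_tunnel_kern.c may differ):

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>

	SEC("tc")
	int gre_set_tunnel_no_key_sketch(struct __sk_buff *skb)
	{
		struct bpf_tunnel_key key = {};

		key.remote_ipv4 = 0xac100164;	/* 172.16.1.100 */
		key.tunnel_ttl = 64;
		/* BPF_F_NO_TUNNEL_KEY leaves TUNNEL_KEY unset, matching a
		 * GRE device configured without "key".
		 */
		if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
					   BPF_F_ZERO_CSUM_TX |
					   BPF_F_SEQ_NUMBER |
					   BPF_F_NO_TUNNEL_KEY))
			return TC_ACT_SHOT;
		return TC_ACT_OK;
	}

	char _license[] SEC("license") = "GPL";
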
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 8c808551dfd7..8b9949bb833d 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -209,7 +209,7 @@ loop:
insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
+ BPF_FUNC_skb_vlan_push);
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
i++;
}
@@ -220,7 +220,7 @@ loop:
i++;
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_pop),
+ BPF_FUNC_skb_vlan_pop);
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
i++;
}
@@ -1239,8 +1239,8 @@ static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
__u32 xlated_prog_len;
__u32 buf_element_size = sizeof(struct bpf_insn);
- if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("bpf_obj_get_info_by_fd failed");
+ if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("bpf_prog_get_info_by_fd failed");
return -1;
}
@@ -1261,8 +1261,8 @@ static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
bzero(&info, sizeof(info));
info.xlated_prog_len = xlated_prog_len;
info.xlated_prog_insns = (__u64)(unsigned long)*buf;
- if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("second bpf_obj_get_info_by_fd failed");
+ if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("second bpf_prog_get_info_by_fd failed");
goto out_free_buf;
}
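
get_xlated_program() keeps the usual two-call pattern, now through the renamed helper: the first bpf_prog_get_info_by_fd() only learns xlated_prog_len, the second fills the caller's buffer. A condensed sketch of that pattern (illustrative, minimal error handling):

	#include <stdlib.h>
	#include <string.h>
	#include <bpf/bpf.h>

	static struct bpf_insn *fetch_xlated(int fd_prog, __u32 *cnt)
	{
		struct bpf_prog_info info = {};
		__u32 info_len = sizeof(info);
		struct bpf_insn *insns;
		__u32 xlated_len;

		/* 1st call: learn the size of the xlated program */
		if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len))
			return NULL;
		xlated_len = info.xlated_prog_len;
		insns = calloc(1, xlated_len);
		if (!insns)
			return NULL;
		/* 2nd call: fetch the instructions into our buffer */
		memset(&info, 0, sizeof(info));
		info.xlated_prog_len = xlated_len;
		info.xlated_prog_insns = (__u64)(unsigned long)insns;
		if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
			free(insns);
			return NULL;
		}
		*cnt = xlated_len / sizeof(*insns);
		return insns;
	}
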
diff --git a/tools/testing/selftests/bpf/test_xdp_features.sh b/tools/testing/selftests/bpf/test_xdp_features.sh
new file mode 100755
index 000000000000..0aa71c4455c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_features.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+readonly NS="ns1-$(mktemp -u XXXXXX)"
+readonly V0_IP4=10.10.0.11
+readonly V1_IP4=10.10.0.1
+readonly V0_IP6=2001:db8::11
+readonly V1_IP6=2001:db8::1
+
+ret=1
+
+setup() {
+ {
+ ip netns add ${NS}
+
+ ip link add v1 type veth peer name v0 netns ${NS}
+
+ ip link set v1 up
+ ip addr add $V1_IP4/24 dev v1
+ ip addr add $V1_IP6/64 nodad dev v1
+ ip -n ${NS} link set dev v0 up
+ ip -n ${NS} addr add $V0_IP4/24 dev v0
+ ip -n ${NS} addr add $V0_IP6/64 nodad dev v0
+
+ # Enable XDP mode and disable checksum offload
+ ethtool -K v1 gro on
+ ethtool -K v1 tx-checksumming off
+ ip netns exec ${NS} ethtool -K v0 gro on
+ ip netns exec ${NS} ethtool -K v0 tx-checksumming off
+ } > /dev/null 2>&1
+}
+
+cleanup() {
+ ip link del v1 2> /dev/null
+ ip netns del ${NS} 2> /dev/null
+ [ "$(pidof xdp_features)" = "" ] || kill $(pidof xdp_features) 2> /dev/null
+}
+
+wait_for_dut_server() {
+ while sleep 1; do
+ ss -tlp | grep -q xdp_features
+ [ $? -eq 0 ] && break
+ done
+}
+
+test_xdp_features() {
+ setup
+
+ ## XDP_PASS
+ ./xdp_features -f XDP_PASS -D $V1_IP6 -T $V0_IP6 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_PASS \
+ -D $V1_IP6 -C $V1_IP6 \
+ -T $V0_IP6 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_DROP
+ ./xdp_features -f XDP_DROP -D ::ffff:$V1_IP4 -T ::ffff:$V0_IP4 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_DROP \
+ -D ::ffff:$V1_IP4 \
+ -C ::ffff:$V1_IP4 \
+ -T ::ffff:$V0_IP4 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_ABORTED
+ ./xdp_features -f XDP_ABORTED -D $V1_IP6 -T $V0_IP6 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_ABORTED \
+ -D $V1_IP6 -C $V1_IP6 \
+ -T $V0_IP6 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_TX
+ ./xdp_features -f XDP_TX -D ::ffff:$V1_IP4 -T ::ffff:$V0_IP4 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_TX \
+ -D ::ffff:$V1_IP4 \
+ -C ::ffff:$V1_IP4 \
+ -T ::ffff:$V0_IP4 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_REDIRECT
+ ./xdp_features -f XDP_REDIRECT -D $V1_IP6 -T $V0_IP6 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_REDIRECT \
+ -D $V1_IP6 -C $V1_IP6 \
+ -T $V0_IP6 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_NDO_XMIT
+ ./xdp_features -f XDP_NDO_XMIT -D ::ffff:$V1_IP4 -T ::ffff:$V0_IP4 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_NDO_XMIT \
+ -D ::ffff:$V1_IP4 \
+ -C ::ffff:$V1_IP4 \
+ -T ::ffff:$V0_IP4 v0
+ ret=$?
+ cleanup
+}
+
+set -e
+trap cleanup 2 3 6 9
+
+test_xdp_features
+
+exit $ret
diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
index d821fd098504..b077cf58f825 100755
--- a/tools/testing/selftests/bpf/test_xsk.sh
+++ b/tools/testing/selftests/bpf/test_xsk.sh
@@ -24,8 +24,6 @@
# ----------- | ----------
# | vethX | --------- | vethY |
# ----------- peer ----------
-# | | |
-# namespaceX | namespaceY
#
# AF_XDP is an address family optimized for high performance packet processing,
# it is XDP’s user-space interface.
@@ -39,10 +37,9 @@
# Prerequisites setup by script:
#
# Set up veth interfaces as per the topology shown ^^:
-# * setup two veth interfaces and one namespace
-# ** veth<xxxx> in root namespace
-# ** veth<yyyy> in af_xdp<xxxx> namespace
-# ** namespace af_xdp<xxxx>
+# * setup two veth interfaces
+# ** veth<xxxx>
+# ** veth<yyyy>
# *** xxxx and yyyy are randomly generated 4 digit numbers used to avoid
# conflict with any existing interface
# * tests the veth and xsk layers of the topology
@@ -74,6 +71,9 @@
# Run and dump packet contents:
# sudo ./test_xsk.sh -D
#
+# Set up veth interfaces and leave them up so xskxceiver can be launched in a debugger:
+# sudo ./test_xsk.sh -d
+#
# Run test suite for physical device in loopback mode
# sudo ./test_xsk.sh -i IFACE
@@ -81,11 +81,12 @@
ETH=""
-while getopts "vDi:" flag
+while getopts "vDi:d" flag
do
case "${flag}" in
v) verbose=1;;
D) dump_pkts=1;;
+ d) debug=1;;
i) ETH=${OPTARG};;
esac
done
@@ -99,28 +100,25 @@ VETH0_POSTFIX=$(cat ${URANDOM} | tr -dc '0-9' | fold -w 256 | head -n 1 | head -
VETH0=ve${VETH0_POSTFIX}
VETH1_POSTFIX=$(cat ${URANDOM} | tr -dc '0-9' | fold -w 256 | head -n 1 | head --bytes 4)
VETH1=ve${VETH1_POSTFIX}
-NS0=root
-NS1=af_xdp${VETH1_POSTFIX}
MTU=1500
trap ctrl_c INT
function ctrl_c() {
- cleanup_exit ${VETH0} ${VETH1} ${NS1}
+ cleanup_exit ${VETH0} ${VETH1}
exit 1
}
setup_vethPairs() {
if [[ $verbose -eq 1 ]]; then
- echo "setting up ${VETH0}: namespace: ${NS0}"
+ echo "setting up ${VETH0}"
fi
- ip netns add ${NS1}
ip link add ${VETH0} numtxqueues 4 numrxqueues 4 type veth peer name ${VETH1} numtxqueues 4 numrxqueues 4
if [ -f /proc/net/if_inet6 ]; then
echo 1 > /proc/sys/net/ipv6/conf/${VETH0}/disable_ipv6
fi
if [[ $verbose -eq 1 ]]; then
- echo "setting up ${VETH1}: namespace: ${NS1}"
+ echo "setting up ${VETH1}"
fi
if [[ $busy_poll -eq 1 ]]; then
@@ -130,18 +128,15 @@ setup_vethPairs() {
echo 200000 > /sys/class/net/${VETH1}/gro_flush_timeout
fi
- ip link set ${VETH1} netns ${NS1}
- ip netns exec ${NS1} ip link set ${VETH1} mtu ${MTU}
+ ip link set ${VETH1} mtu ${MTU}
ip link set ${VETH0} mtu ${MTU}
- ip netns exec ${NS1} ip link set ${VETH1} up
- ip netns exec ${NS1} ip link set dev lo up
+ ip link set ${VETH1} up
ip link set ${VETH0} up
}
if [ ! -z $ETH ]; then
VETH0=${ETH}
VETH1=${ETH}
- NS1=""
else
validate_root_exec
validate_veth_support ${VETH0}
@@ -151,7 +146,7 @@ else
retval=$?
if [ $retval -ne 0 ]; then
test_status $retval "${TEST_NAME}"
- cleanup_exit ${VETH0} ${VETH1} ${NS1}
+ cleanup_exit ${VETH0} ${VETH1}
exit $retval
fi
fi
@@ -174,10 +169,15 @@ statusList=()
TEST_NAME="XSK_SELFTESTS_${VETH0}_SOFTIRQ"
+if [[ $debug -eq 1 ]]; then
+ echo "-i" ${VETH0} "-i" ${VETH1}
+ exit
+fi
+
exec_xskxceiver
if [ -z $ETH ]; then
- cleanup_exit ${VETH0} ${VETH1} ${NS1}
+ cleanup_exit ${VETH0} ${VETH1}
fi
TEST_NAME="XSK_SELFTESTS_${VETH0}_BUSY_POLL"
busy_poll=1
@@ -190,7 +190,7 @@ exec_xskxceiver
## END TESTS
if [ -z $ETH ]; then
- cleanup_exit ${VETH0} ${VETH1} ${NS1}
+ cleanup_exit ${VETH0} ${VETH1}
fi
failures=0
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 9695318e8132..6c44153755e6 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -164,7 +164,7 @@ __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
int err;
memset(info, 0, sizeof(*info));
- err = bpf_obj_get_info_by_fd(bpf_link__fd(link), info, &info_len);
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link), info, &info_len);
if (err) {
printf("failed to get link info: %d\n", -errno);
return 0;
diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
index c2aa6f26738b..bf82b923c5fe 100644
--- a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
+++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
@@ -1,13 +1,14 @@
{
"bounds checks mixing signed and unsigned, positive bounds",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 2),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
@@ -17,20 +18,21 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
@@ -40,20 +42,21 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 2",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
@@ -65,20 +68,21 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 3",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
@@ -89,20 +93,21 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 4",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
@@ -112,19 +117,20 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.result = ACCEPT,
},
{
"bounds checks mixing signed and unsigned, variant 5",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
@@ -135,17 +141,20 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 6",
.insns = {
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_6, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
@@ -163,13 +172,14 @@
{
"bounds checks mixing signed and unsigned, variant 7",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
@@ -179,19 +189,20 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.result = ACCEPT,
},
{
"bounds checks mixing signed and unsigned, variant 8",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -203,20 +214,21 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 9",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -228,19 +240,20 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.result = ACCEPT,
},
{
"bounds checks mixing signed and unsigned, variant 10",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -252,20 +265,21 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 11",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -278,20 +292,21 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 12",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -6),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -303,20 +318,21 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 13",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 2),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -331,7 +347,7 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -340,13 +356,14 @@
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_MOV64_IMM(BPF_REG_8, 2),
@@ -360,20 +377,21 @@
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
- .fixup_map_hash_8b = { 4 },
+ .fixup_map_hash_8b = { 6 },
.errstr = "unbounded min value",
.result = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 15",
.insns = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -6),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -387,7 +405,7 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "unbounded min value",
.result = REJECT,
},
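
Every test in this file gains the same two-instruction prologue: the old BPF_ST_MEM(..., -16, -8) constant store is replaced by storing the unknown return value of bpf_ktime_get_ns(), because the verifier now tracks BPF_ST_MEM immediates precisely and would otherwise see a known constant instead of an unbounded value. The prologue also pushes BPF_LD_MAP_FD (a two-slot instruction) two positions later, which is why each fixup_map_hash_8b index moves from 3 to 5. A minimal sketch of the pattern in the same test format (illustrative, not one of the patch's tests):

	{
		"sketch: unknown stack value via helper call",
		.insns = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),		 /* r0 = unknown scalar */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), /* fp[-16] = r0 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
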
diff --git a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
new file mode 100644
index 000000000000..3af2501082b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
@@ -0,0 +1,67 @@
+{
+ "BPF_ST_MEM stack imm non-zero",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -42),
+ /* if value is tracked correctly R0 is zero */
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ /* Use prog type that requires return value in range [0, 1] */
+ .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+ .expected_attach_type = BPF_SK_LOOKUP,
+ .runs = -1,
+},
+{
+ "BPF_ST_MEM stack imm zero",
+ .insns = {
+ /* mark stack 0000 0000 */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* read and sum a few bytes */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -8),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* if value is tracked correctly R0 is zero */
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ /* Use prog type that requires return value in range [0, 1] */
+ .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+ .expected_attach_type = BPF_SK_LOOKUP,
+ .runs = -1,
+},
+{
+ "BPF_ST_MEM stack imm zero, variable offset",
+ .insns = {
+ /* set fp[-16], fp[-24] to zeros */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+ /* r0 = random value in range [-32, -16] */
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_0, 16, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 32),
+ /* fp[r0] = 0, make a variable offset write of zero,
+ * this should preserve zero marks on stack.
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_10),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ /* r0 = fp[-20], if variable offset write was tracked correctly
+ * r0 would be a known zero.
+ */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_10, -20),
+ /* Would fail return code verification if r0 range is not tracked correctly. */
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ /* Use prog type that requires return value in range [0, 1] */
+ .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+ .expected_attach_type = BPF_SK_LOOKUP,
+ .runs = -1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/sleepable.c b/tools/testing/selftests/bpf/verifier/sleepable.c
new file mode 100644
index 000000000000..1f0d2bdc673f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/sleepable.c
@@ -0,0 +1,91 @@
+{
+ "sleepable fentry accept",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ .kfunc = "bpf_fentry_test1",
+ .result = ACCEPT,
+ .flags = BPF_F_SLEEPABLE,
+ .runs = -1,
+},
+{
+ "sleepable fexit accept",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_TRACE_FEXIT,
+ .kfunc = "bpf_fentry_test1",
+ .result = ACCEPT,
+ .flags = BPF_F_SLEEPABLE,
+ .runs = -1,
+},
+{
+ "sleepable fmod_ret accept",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_MODIFY_RETURN,
+ .kfunc = "bpf_fentry_test1",
+ .result = ACCEPT,
+ .flags = BPF_F_SLEEPABLE,
+ .runs = -1,
+},
+{
+ "sleepable iter accept",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_TRACE_ITER,
+ .kfunc = "task",
+ .result = ACCEPT,
+ .flags = BPF_F_SLEEPABLE,
+ .runs = -1,
+},
+{
+ "sleepable lsm accept",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_LSM,
+ .kfunc = "bpf",
+ .expected_attach_type = BPF_LSM_MAC,
+ .result = ACCEPT,
+ .flags = BPF_F_SLEEPABLE,
+ .runs = -1,
+},
+{
+ "sleepable uprobe accept",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_KPROBE,
+ .kfunc = "bpf_fentry_test1",
+ .result = ACCEPT,
+ .flags = BPF_F_SLEEPABLE,
+ .runs = -1,
+},
+{
+ "sleepable raw tracepoint reject",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_TRACE_RAW_TP,
+ .kfunc = "sched_switch",
+ .result = REJECT,
+ .errstr = "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable",
+ .flags = BPF_F_SLEEPABLE,
+ .runs = -1,
+},
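
The harness sets BPF_F_SLEEPABLE through the .flags field; outside test_verifier the same is done via libbpf before load, or implicitly with a ".s" section suffix such as SEC("fentry.s/..."). A hedged sketch (the object and program names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/libbpf.h>

	/* Mark "my_fentry" (illustrative name) sleepable before load. */
	static int load_sleepable(struct bpf_object *obj)
	{
		struct bpf_program *prog =
			bpf_object__find_program_by_name(obj, "my_fentry");

		if (!prog)
			return -1;
		bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
		bpf_program__set_flags(prog,
				       bpf_program__flags(prog) | BPF_F_SLEEPABLE);
		return bpf_object__load(obj);
	}
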
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index f961b49b8ef4..83231456d3c5 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -144,7 +144,7 @@ static struct env {
struct verif_stats *prog_stats;
int prog_stat_cnt;
- /* baseline_stats is allocated and used only in comparsion mode */
+ /* baseline_stats is allocated and used only in comparison mode */
struct verif_stats *baseline_stats;
int baseline_stat_cnt;
@@ -882,7 +882,7 @@ static int process_obj(const char *filename)
* that BPF object file is incomplete and has to be statically
* linked into a final BPF object file; instead of bailing
* out, report it into stderr, mark it as skipped, and
- * proceeed
+ * proceed
*/
fprintf(stderr, "Failed to open '%s': %d\n", filename, -errno);
env.files_skipped++;
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index 316a56d680f2..685034528018 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -13,7 +13,7 @@ s390x)
QEMU_BINARY=qemu-system-s390x
QEMU_CONSOLE="ttyS1"
QEMU_FLAGS=(-smp 2)
- BZIMAGE="arch/s390/boot/compressed/vmlinux"
+ BZIMAGE="arch/s390/boot/vmlinux"
;;
x86_64)
QEMU_BINARY=qemu-system-x86_64
diff --git a/tools/testing/selftests/bpf/xdp_features.c b/tools/testing/selftests/bpf/xdp_features.c
new file mode 100644
index 000000000000..fce12165213b
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_features.c
@@ -0,0 +1,699 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/netdev.h>
+#include <linux/if_link.h>
+#include <signal.h>
+#include <argp.h>
+#include <net/if.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <pthread.h>
+
+#include <network_helpers.h>
+
+#include "xdp_features.skel.h"
+#include "xdp_features.h"
+
+#define RED(str) "\033[0;31m" str "\033[0m"
+#define GREEN(str) "\033[0;32m" str "\033[0m"
+#define YELLOW(str) "\033[0;33m" str "\033[0m"
+
+static struct env {
+ bool verbosity;
+ int ifindex;
+ bool is_tester;
+ struct {
+ enum netdev_xdp_act drv_feature;
+ enum xdp_action action;
+ } feature;
+ struct sockaddr_storage dut_ctrl_addr;
+ struct sockaddr_storage dut_addr;
+ struct sockaddr_storage tester_addr;
+} env;
+
+#define BUFSIZE 128
+
+void test__fail(void) { /* for network_helpers.c */ }
+
+static int libbpf_print_fn(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !env.verbosity)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+static volatile bool exiting;
+
+static void sig_handler(int sig)
+{
+ exiting = true;
+}
+
+const char *argp_program_version = "xdp-features 0.0";
+const char argp_program_doc[] =
+"XDP features detection application.\n"
+"\n"
+"XDP features application checks the XDP advertised features match detected ones.\n"
+"\n"
+"USAGE: ./xdp-features [-vt] [-f <xdp-feature>] [-D <dut-data-ip>] [-T <tester-data-ip>] [-C <dut-ctrl-ip>] <iface-name>\n"
+"\n"
+"dut-data-ip, tester-data-ip, dut-ctrl-ip: IPv6 or IPv4-mapped-IPv6 addresses;\n"
+"\n"
+"XDP features\n:"
+"- XDP_PASS\n"
+"- XDP_DROP\n"
+"- XDP_ABORTED\n"
+"- XDP_REDIRECT\n"
+"- XDP_NDO_XMIT\n"
+"- XDP_TX\n";
+
+static const struct argp_option opts[] = {
+ { "verbose", 'v', NULL, 0, "Verbose debug output" },
+ { "tester", 't', NULL, 0, "Tester mode" },
+ { "feature", 'f', "XDP-FEATURE", 0, "XDP feature to test" },
+ { "dut_data_ip", 'D', "DUT-DATA-IP", 0, "DUT IP data channel" },
+ { "dut_ctrl_ip", 'C', "DUT-CTRL-IP", 0, "DUT IP control channel" },
+ { "tester_data_ip", 'T', "TESTER-DATA-IP", 0, "Tester IP data channel" },
+ {},
+};
+
+static int get_xdp_feature(const char *arg)
+{
+ if (!strcmp(arg, "XDP_PASS")) {
+ env.feature.action = XDP_PASS;
+ env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
+ } else if (!strcmp(arg, "XDP_DROP")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
+ env.feature.action = XDP_DROP;
+ } else if (!strcmp(arg, "XDP_ABORTED")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
+ env.feature.action = XDP_ABORTED;
+ } else if (!strcmp(arg, "XDP_TX")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
+ env.feature.action = XDP_TX;
+ } else if (!strcmp(arg, "XDP_REDIRECT")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_REDIRECT;
+ env.feature.action = XDP_REDIRECT;
+ } else if (!strcmp(arg, "XDP_NDO_XMIT")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static char *get_xdp_feature_str(void)
+{
+ switch (env.feature.action) {
+ case XDP_PASS:
+ return YELLOW("XDP_PASS");
+ case XDP_DROP:
+ return YELLOW("XDP_DROP");
+ case XDP_ABORTED:
+ return YELLOW("XDP_ABORTED");
+ case XDP_TX:
+ return YELLOW("XDP_TX");
+ case XDP_REDIRECT:
+ return YELLOW("XDP_REDIRECT");
+ default:
+ break;
+ }
+
+ if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT)
+ return YELLOW("XDP_NDO_XMIT");
+
+ return "";
+}
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ switch (key) {
+ case 'v':
+ env.verbosity = true;
+ break;
+ case 't':
+ env.is_tester = true;
+ break;
+ case 'f':
+ if (get_xdp_feature(arg) < 0) {
+ fprintf(stderr, "Invalid xdp feature: %s\n", arg);
+ argp_usage(state);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ case 'D':
+ if (make_sockaddr(AF_INET6, arg, DUT_ECHO_PORT,
+ &env.dut_addr, NULL)) {
+ fprintf(stderr, "Invalid DUT address: %s\n", arg);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ case 'C':
+ if (make_sockaddr(AF_INET6, arg, DUT_CTRL_PORT,
+ &env.dut_ctrl_addr, NULL)) {
+ fprintf(stderr, "Invalid DUT CTRL address: %s\n", arg);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ case 'T':
+ if (make_sockaddr(AF_INET6, arg, 0, &env.tester_addr, NULL)) {
+ fprintf(stderr, "Invalid Tester address: %s\n", arg);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ case ARGP_KEY_ARG:
+ errno = 0;
+ if (strlen(arg) >= IF_NAMESIZE) {
+ fprintf(stderr, "Invalid device name: %s\n", arg);
+ argp_usage(state);
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ env.ifindex = if_nametoindex(arg);
+ if (!env.ifindex)
+ env.ifindex = strtoul(arg, NULL, 0);
+ if (!env.ifindex) {
+ fprintf(stderr,
+ "Bad interface index or name (%d): %s\n",
+ errno, strerror(errno));
+ argp_usage(state);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static const struct argp argp = {
+ .options = opts,
+ .parser = parse_arg,
+ .doc = argp_program_doc,
+};
+
+static void set_env_default(void)
+{
+ env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT;
+ env.feature.action = -EINVAL;
+ env.ifindex = -ENODEV;
+ make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_CTRL_PORT,
+ &env.dut_ctrl_addr, NULL);
+ make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_ECHO_PORT,
+ &env.dut_addr, NULL);
+ make_sockaddr(AF_INET6, "::ffff:127.0.0.1", 0, &env.tester_addr, NULL);
+}
+
+static void *dut_echo_thread(void *arg)
+{
+ unsigned char buf[sizeof(struct tlv_hdr)];
+ int sockfd = *(int *)arg;
+
+ while (!exiting) {
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+ struct sockaddr_storage addr;
+ socklen_t addrlen = sizeof(addr);
+ size_t n;
+
+ n = recvfrom(sockfd, buf, sizeof(buf), MSG_WAITALL,
+ (struct sockaddr *)&addr, &addrlen);
+ if (n != ntohs(tlv->len))
+ continue;
+
+ if (ntohs(tlv->type) != CMD_ECHO)
+ continue;
+
+ sendto(sockfd, buf, sizeof(buf), MSG_NOSIGNAL | MSG_CONFIRM,
+ (struct sockaddr *)&addr, addrlen);
+ }
+
+ close(sockfd);
+ pthread_exit((void *)0);
+
+ return NULL;
+}
+
+static int dut_run_echo_thread(pthread_t *t, int *sockfd)
+{
+ int err;
+
+ sockfd = start_reuseport_server(AF_INET6, SOCK_DGRAM, NULL,
+ DUT_ECHO_PORT, 0, 1);
+ if (!sockfd) {
+ fprintf(stderr, "Failed to create echo socket\n");
+ return -errno;
+ }
+
+ /* start echo channel */
+ err = pthread_create(t, NULL, dut_echo_thread, sockfd);
+ if (err) {
+ fprintf(stderr, "Failed creating dut_echo thread: %s\n",
+ strerror(-err));
+ free_fds(sockfd, 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dut_attach_xdp_prog(struct xdp_features *skel, int flags)
+{
+ enum xdp_action action = env.feature.action;
+ struct bpf_program *prog;
+ unsigned int key = 0;
+ int err, fd = 0;
+
+ if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT) {
+ struct bpf_devmap_val entry = {
+ .ifindex = env.ifindex,
+ };
+
+ err = bpf_map__update_elem(skel->maps.dev_map,
+ &key, sizeof(key),
+ &entry, sizeof(entry), 0);
+ if (err < 0)
+ return err;
+
+ fd = bpf_program__fd(skel->progs.xdp_do_redirect_cpumap);
+ action = XDP_REDIRECT;
+ }
+
+ switch (action) {
+ case XDP_TX:
+ prog = skel->progs.xdp_do_tx;
+ break;
+ case XDP_DROP:
+ prog = skel->progs.xdp_do_drop;
+ break;
+ case XDP_ABORTED:
+ prog = skel->progs.xdp_do_aborted;
+ break;
+ case XDP_PASS:
+ prog = skel->progs.xdp_do_pass;
+ break;
+ case XDP_REDIRECT: {
+ struct bpf_cpumap_val entry = {
+ .qsize = 2048,
+ .bpf_prog.fd = fd,
+ };
+
+ err = bpf_map__update_elem(skel->maps.cpu_map,
+ &key, sizeof(key),
+ &entry, sizeof(entry), 0);
+ if (err < 0)
+ return err;
+
+ prog = skel->progs.xdp_do_redirect;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ err = bpf_xdp_attach(env.ifindex, bpf_program__fd(prog), flags, NULL);
+ if (err)
+ fprintf(stderr,
+ "Failed to attach XDP program to ifindex %d\n",
+ env.ifindex);
+ return err;
+}
+
+static int recv_msg(int sockfd, void *buf, size_t bufsize, void *val,
+ size_t val_size)
+{
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+ size_t len;
+
+ len = recv(sockfd, buf, bufsize, 0);
+ if (len < sizeof(*tlv) || len != ntohs(tlv->len))
+ return -EINVAL;
+
+ if (val) {
+ len -= sizeof(*tlv);
+ if (len > val_size)
+ return -ENOMEM;
+
+ memcpy(val, tlv->data, len);
+ }
+
+ return 0;
+}
+
+static int dut_run(struct xdp_features *skel)
+{
+ int flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE;
+ int state = CMD_STOP, err, *sockfd, ctrl_sockfd, echo_sockfd;
+ struct sockaddr_storage ctrl_addr;
+ pthread_t dut_thread;
+ socklen_t addrlen = sizeof(ctrl_addr);
+
+ sockfd = start_reuseport_server(AF_INET6, SOCK_STREAM, NULL,
+ DUT_CTRL_PORT, 0, 1);
+ if (!sockfd) {
+ fprintf(stderr, "Failed to create DUT socket\n");
+ return -errno;
+ }
+
+ ctrl_sockfd = accept(*sockfd, (struct sockaddr *)&ctrl_addr, &addrlen);
+ if (ctrl_sockfd < 0) {
+ fprintf(stderr, "Failed to accept connection on DUT socket\n");
+ free_fds(sockfd, 1);
+ return -errno;
+ }
+
+ /* CTRL loop */
+ while (!exiting) {
+ unsigned char buf[BUFSIZE] = {};
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+
+ err = recv_msg(ctrl_sockfd, buf, BUFSIZE, NULL, 0);
+ if (err)
+ continue;
+
+ switch (ntohs(tlv->type)) {
+ case CMD_START: {
+ if (state == CMD_START)
+ continue;
+
+ state = CMD_START;
+ /* Load the XDP program on the DUT */
+ err = dut_attach_xdp_prog(skel, flags);
+ if (err)
+ goto out;
+
+ err = dut_run_echo_thread(&dut_thread, &echo_sockfd);
+ if (err < 0)
+ goto out;
+
+ tlv->type = htons(CMD_ACK);
+ tlv->len = htons(sizeof(*tlv));
+ err = send(ctrl_sockfd, buf, sizeof(*tlv), 0);
+ if (err < 0)
+ goto end_thread;
+ break;
+ }
+ case CMD_STOP:
+ if (state != CMD_START)
+ break;
+
+ state = CMD_STOP;
+
+ exiting = true;
+ bpf_xdp_detach(env.ifindex, flags, NULL);
+
+ tlv->type = htons(CMD_ACK);
+ tlv->len = htons(sizeof(*tlv));
+ err = send(ctrl_sockfd, buf, sizeof(*tlv), 0);
+ goto end_thread;
+ case CMD_GET_XDP_CAP: {
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
+ unsigned long long val;
+ size_t n;
+
+ err = bpf_xdp_query(env.ifindex, XDP_FLAGS_DRV_MODE,
+ &opts);
+ if (err) {
+ fprintf(stderr,
+ "Failed to query XDP cap for ifindex %d\n",
+ env.ifindex);
+ goto end_thread;
+ }
+
+ tlv->type = htons(CMD_ACK);
+ n = sizeof(*tlv) + sizeof(opts.feature_flags);
+ tlv->len = htons(n);
+
+ val = htobe64(opts.feature_flags);
+ memcpy(tlv->data, &val, sizeof(val));
+
+ err = send(ctrl_sockfd, buf, n, 0);
+ if (err < 0)
+ goto end_thread;
+ break;
+ }
+ case CMD_GET_STATS: {
+ unsigned int key = 0, val;
+ size_t n;
+
+ err = bpf_map__lookup_elem(skel->maps.dut_stats,
+ &key, sizeof(key),
+ &val, sizeof(val), 0);
+ if (err) {
+ fprintf(stderr, "bpf_map_lookup_elem failed\n");
+ goto end_thread;
+ }
+
+ tlv->type = htons(CMD_ACK);
+ n = sizeof(*tlv) + sizeof(val);
+ tlv->len = htons(n);
+
+ val = htonl(val);
+ memcpy(tlv->data, &val, sizeof(val));
+
+ err = send(ctrl_sockfd, buf, n, 0);
+ if (err < 0)
+ goto end_thread;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+end_thread:
+ pthread_join(dut_thread, NULL);
+out:
+ bpf_xdp_detach(env.ifindex, flags, NULL);
+ close(ctrl_sockfd);
+ free_fds(sockfd, 1);
+
+ return err;
+}
+
+static bool tester_collect_detected_cap(struct xdp_features *skel,
+ unsigned int dut_stats)
+{
+ unsigned int err, key = 0, val;
+
+ if (!dut_stats)
+ return false;
+
+ err = bpf_map__lookup_elem(skel->maps.stats, &key, sizeof(key),
+ &val, sizeof(val), 0);
+ if (err) {
+ fprintf(stderr, "bpf_map_lookup_elem failed\n");
+ return false;
+ }
+
+ switch (env.feature.action) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_REDIRECT:
+ return val > 0;
+ case XDP_DROP:
+ case XDP_ABORTED:
+ return val == 0;
+ default:
+ break;
+ }
+
+ if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT)
+ return val > 0;
+
+ return false;
+}
+
+static int send_and_recv_msg(int sockfd, enum test_commands cmd, void *val,
+ size_t val_size)
+{
+ unsigned char buf[BUFSIZE] = {};
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+ int err;
+
+ tlv->type = htons(cmd);
+ tlv->len = htons(sizeof(*tlv));
+
+ err = send(sockfd, buf, sizeof(*tlv), 0);
+ if (err < 0)
+ return err;
+
+ err = recv_msg(sockfd, buf, BUFSIZE, val, val_size);
+ if (err < 0)
+ return err;
+
+ return ntohs(tlv->type) == CMD_ACK ? 0 : -EINVAL;
+}
+
+static int send_echo_msg(void)
+{
+ unsigned char buf[sizeof(struct tlv_hdr)];
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+ int sockfd, n;
+
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (sockfd < 0) {
+ fprintf(stderr, "Failed to create echo socket\n");
+ return -errno;
+ }
+
+ tlv->type = htons(CMD_ECHO);
+ tlv->len = htons(sizeof(*tlv));
+
+ n = sendto(sockfd, buf, sizeof(*tlv), MSG_NOSIGNAL | MSG_CONFIRM,
+ (struct sockaddr *)&env.dut_addr, sizeof(env.dut_addr));
+ close(sockfd);
+
+ return n == ntohs(tlv->len) ? 0 : -EINVAL;
+}
+
+static int tester_run(struct xdp_features *skel)
+{
+ int flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE;
+ unsigned long long advertised_feature;
+ struct bpf_program *prog;
+ unsigned int stats;
+ int i, err, sockfd;
+ bool detected_cap;
+
+ sockfd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (sockfd < 0) {
+ fprintf(stderr, "Failed to create tester socket\n");
+ return -errno;
+ }
+
+ if (settimeo(sockfd, 1000) < 0)
+ return -EINVAL;
+
+ err = connect(sockfd, (struct sockaddr *)&env.dut_ctrl_addr,
+ sizeof(env.dut_ctrl_addr));
+ if (err) {
+ fprintf(stderr, "Failed to connect to the DUT\n");
+ return -errno;
+ }
+
+ err = send_and_recv_msg(sockfd, CMD_GET_XDP_CAP, &advertised_feature,
+ sizeof(advertised_feature));
+ if (err < 0) {
+ close(sockfd);
+ return err;
+ }
+
+ advertised_feature = be64toh(advertised_feature);
+
+ if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT ||
+ env.feature.action == XDP_TX)
+ prog = skel->progs.xdp_tester_check_tx;
+ else
+ prog = skel->progs.xdp_tester_check_rx;
+
+ err = bpf_xdp_attach(env.ifindex, bpf_program__fd(prog), flags, NULL);
+ if (err) {
+ fprintf(stderr, "Failed to attach XDP program to ifindex %d\n",
+ env.ifindex);
+ goto out;
+ }
+
+ err = send_and_recv_msg(sockfd, CMD_START, NULL, 0);
+ if (err)
+ goto out;
+
+ for (i = 0; i < 10 && !exiting; i++) {
+ err = send_echo_msg();
+ if (err < 0)
+ goto out;
+
+ sleep(1);
+ }
+
+ err = send_and_recv_msg(sockfd, CMD_GET_STATS, &stats, sizeof(stats));
+ if (err)
+ goto out;
+
+ /* stop the test */
+ err = send_and_recv_msg(sockfd, CMD_STOP, NULL, 0);
+ /* send a new echo message to wake echo thread of the dut */
+ send_echo_msg();
+
+ detected_cap = tester_collect_detected_cap(skel, ntohl(stats));
+
+ fprintf(stdout, "Feature %s: [%s][%s]\n", get_xdp_feature_str(),
+ detected_cap ? GREEN("DETECTED") : RED("NOT DETECTED"),
+ env.feature.drv_feature & advertised_feature ? GREEN("ADVERTISED")
+ : RED("NOT ADVERTISED"));
+out:
+ bpf_xdp_detach(env.ifindex, flags, NULL);
+ close(sockfd);
+ return err < 0 ? err : 0;
+}
+
+int main(int argc, char **argv)
+{
+ struct xdp_features *skel;
+ int err;
+
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+ libbpf_set_print(libbpf_print_fn);
+
+ signal(SIGINT, sig_handler);
+ signal(SIGTERM, sig_handler);
+
+ set_env_default();
+
+ /* Parse command line arguments */
+ err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
+ if (err)
+ return err;
+
+ if (env.ifindex < 0) {
+ fprintf(stderr, "Invalid ifindex\n");
+ return -ENODEV;
+ }
+
+ /* Load and verify BPF application */
+ skel = xdp_features__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to open and load BPF skeleton\n");
+ return -EINVAL;
+ }
+
+ skel->rodata->tester_addr =
+ ((struct sockaddr_in6 *)&env.tester_addr)->sin6_addr;
+ skel->rodata->dut_addr =
+ ((struct sockaddr_in6 *)&env.dut_addr)->sin6_addr;
+
+ /* Load & verify BPF programs */
+ err = xdp_features__load(skel);
+ if (err) {
+ fprintf(stderr, "Failed to load and verify BPF skeleton\n");
+ goto cleanup;
+ }
+
+ err = xdp_features__attach(skel);
+ if (err) {
+ fprintf(stderr, "Failed to attach BPF skeleton\n");
+ goto cleanup;
+ }
+
+ if (env.is_tester) {
+ /* Tester */
+ fprintf(stdout, "Starting tester on device %d\n", env.ifindex);
+ err = tester_run(skel);
+ } else {
+ /* DUT */
+ fprintf(stdout, "Starting DUT on device %d\n", env.ifindex);
+ err = dut_run(skel);
+ }
+
+cleanup:
+ xdp_features__destroy(skel);
+
+ return err < 0 ? -err : 0;
+}
diff --git a/tools/testing/selftests/bpf/xdp_features.h b/tools/testing/selftests/bpf/xdp_features.h
new file mode 100644
index 000000000000..2670c541713b
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_features.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* test commands */
+enum test_commands {
+ CMD_STOP, /* CMD */
+ CMD_START, /* CMD */
+ CMD_ECHO, /* CMD */
+ CMD_ACK, /* CMD + data */
+ CMD_GET_XDP_CAP, /* CMD */
+ CMD_GET_STATS, /* CMD */
+};
+
+#define DUT_CTRL_PORT 12345
+#define DUT_ECHO_PORT 12346
+
+struct tlv_hdr {
+ __be16 type;
+ __be16 len;
+ __u8 data[];
+};
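
The control channel speaks this TLV format with both header fields big-endian on the wire; CMD_ACK replies append their payload after the header. A sketch of encoding a stats acknowledgement, mirroring the CMD_GET_STATS case in dut_run() (encode_stats_ack() is an illustrative name):

	#include <arpa/inet.h>
	#include <string.h>
	#include <linux/types.h>

	/* Encode a CMD_ACK carrying one 32-bit counter; returns bytes used. */
	static size_t encode_stats_ack(unsigned char *buf, __u32 stats)
	{
		struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
		__u32 be_stats = htonl(stats);
		size_t n = sizeof(*tlv) + sizeof(be_stats);

		tlv->type = htons(CMD_ACK);
		tlv->len = htons(n);
		memcpy(tlv->data, &be_stats, sizeof(be_stats));
		return n;
	}
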
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
new file mode 100644
index 000000000000..1c8acb68b977
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Reference program for verifying XDP metadata on real HW. Functional test
+ * only, doesn't test the performance.
+ *
+ * RX:
+ * - UDP 9091 packets are diverted into AF_XDP
+ * - Metadata verified:
+ * - rx_timestamp
+ * - rx_hash
+ *
+ * TX:
+ * - TBD
+ */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "xdp_hw_metadata.skel.h"
+#include "xsk.h"
+
+#include <error.h>
+#include <linux/errqueue.h>
+#include <linux/if_link.h>
+#include <linux/net_tstamp.h>
+#include <linux/udp.h>
+#include <linux/sockios.h>
+#include <sys/mman.h>
+#include <net/if.h>
+#include <poll.h>
+
+#include "xdp_metadata.h"
+
+#define UMEM_NUM 16
+#define UMEM_FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE
+#define UMEM_SIZE (UMEM_FRAME_SIZE * UMEM_NUM)
+#define XDP_FLAGS (XDP_FLAGS_DRV_MODE | XDP_FLAGS_REPLACE)
+
+struct xsk {
+ void *umem_area;
+ struct xsk_umem *umem;
+ struct xsk_ring_prod fill;
+ struct xsk_ring_cons comp;
+ struct xsk_ring_prod tx;
+ struct xsk_ring_cons rx;
+ struct xsk_socket *socket;
+};
+
+struct xdp_hw_metadata *bpf_obj;
+struct xsk *rx_xsk;
+const char *ifname;
+int ifindex;
+int rxq;
+
+void test__fail(void) { /* for network_helpers.c */ }
+
+static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id)
+{
+ int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
+ const struct xsk_socket_config socket_config = {
+ .rx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .bind_flags = XDP_COPY,
+ };
+ const struct xsk_umem_config umem_config = {
+ .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+ .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG,
+ };
+ __u32 idx;
+ u64 addr;
+ int ret;
+ int i;
+
+ xsk->umem_area = mmap(NULL, UMEM_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+ if (xsk->umem_area == MAP_FAILED)
+ return -ENOMEM;
+
+ ret = xsk_umem__create(&xsk->umem,
+ xsk->umem_area, UMEM_SIZE,
+ &xsk->fill,
+ &xsk->comp,
+ &umem_config);
+ if (ret)
+ return ret;
+
+ ret = xsk_socket__create(&xsk->socket, ifindex, queue_id,
+ xsk->umem,
+ &xsk->rx,
+ &xsk->tx,
+ &socket_config);
+ if (ret)
+ return ret;
+
+ /* First half of umem is for TX. This way address matches 1-to-1
+ * to the completion queue index.
+ */
+
+ for (i = 0; i < UMEM_NUM / 2; i++) {
+ addr = i * UMEM_FRAME_SIZE;
+ printf("%p: tx_desc[%d] -> %lx\n", xsk, i, addr);
+ }
+
+ /* Second half of umem is for RX. */
+
+ ret = xsk_ring_prod__reserve(&xsk->fill, UMEM_NUM / 2, &idx);
+ for (i = 0; i < UMEM_NUM / 2; i++) {
+ addr = (UMEM_NUM / 2 + i) * UMEM_FRAME_SIZE;
+ printf("%p: rx_desc[%d] -> %lx\n", xsk, i, addr);
+ *xsk_ring_prod__fill_addr(&xsk->fill, i) = addr;
+ }
+ xsk_ring_prod__submit(&xsk->fill, ret);
+
+ return 0;
+}
+
+static void close_xsk(struct xsk *xsk)
+{
+ if (xsk->umem)
+ xsk_umem__delete(xsk->umem);
+ if (xsk->socket)
+ xsk_socket__delete(xsk->socket);
+ munmap(xsk->umem_area, UMEM_SIZE);
+}
+
+static void refill_rx(struct xsk *xsk, __u64 addr)
+{
+ __u32 idx;
+
+ if (xsk_ring_prod__reserve(&xsk->fill, 1, &idx) == 1) {
+ printf("%p: complete idx=%u addr=%llx\n", xsk, idx, addr);
+ *xsk_ring_prod__fill_addr(&xsk->fill, idx) = addr;
+ xsk_ring_prod__submit(&xsk->fill, 1);
+ }
+}
+
+static void verify_xdp_metadata(void *data)
+{
+ struct xdp_meta *meta;
+
+ meta = data - sizeof(*meta);
+
+ printf("rx_timestamp: %llu\n", meta->rx_timestamp);
+ printf("rx_hash: %u\n", meta->rx_hash);
+}
+
+static void verify_skb_metadata(int fd)
+{
+ char cmsg_buf[1024];
+ char packet_buf[128];
+
+ struct scm_timestamping *ts;
+ struct iovec packet_iov;
+ struct cmsghdr *cmsg;
+ struct msghdr hdr;
+
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.msg_iov = &packet_iov;
+ hdr.msg_iovlen = 1;
+ packet_iov.iov_base = packet_buf;
+ packet_iov.iov_len = sizeof(packet_buf);
+
+ hdr.msg_control = cmsg_buf;
+ hdr.msg_controllen = sizeof(cmsg_buf);
+
+ if (recvmsg(fd, &hdr, 0) < 0)
+ error(1, errno, "recvmsg");
+
+ for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != NULL;
+ cmsg = CMSG_NXTHDR(&hdr, cmsg)) {
+
+ if (cmsg->cmsg_level != SOL_SOCKET)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case SCM_TIMESTAMPING:
+ ts = (struct scm_timestamping *)CMSG_DATA(cmsg);
+ if (ts->ts[2].tv_sec || ts->ts[2].tv_nsec) {
+ printf("found skb hwtstamp = %lu.%lu\n",
+ ts->ts[2].tv_sec, ts->ts[2].tv_nsec);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ printf("skb hwtstamp is not found!\n");
+}
+
+static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
+{
+ const struct xdp_desc *rx_desc;
+ struct pollfd fds[rxq + 1];
+ __u64 comp_addr;
+ __u64 addr;
+ __u32 idx;
+ int ret;
+ int i;
+
+ for (i = 0; i < rxq; i++) {
+ fds[i].fd = xsk_socket__fd(rx_xsk[i].socket);
+ fds[i].events = POLLIN;
+ fds[i].revents = 0;
+ }
+
+ fds[rxq].fd = server_fd;
+ fds[rxq].events = POLLIN;
+ fds[rxq].revents = 0;
+
+ while (true) {
+ errno = 0;
+ ret = poll(fds, rxq + 1, 1000);
+ printf("poll: %d (%d)\n", ret, errno);
+ if (ret < 0)
+ break;
+ if (ret == 0)
+ continue;
+
+ if (fds[rxq].revents)
+ verify_skb_metadata(server_fd);
+
+ for (i = 0; i < rxq; i++) {
+ if (fds[i].revents == 0)
+ continue;
+
+ struct xsk *xsk = &rx_xsk[i];
+
+ ret = xsk_ring_cons__peek(&xsk->rx, 1, &idx);
+ printf("xsk_ring_cons__peek: %d\n", ret);
+ if (ret != 1)
+ continue;
+
+ rx_desc = xsk_ring_cons__rx_desc(&xsk->rx, idx);
+ comp_addr = xsk_umem__extract_addr(rx_desc->addr);
+ addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
+ printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
+ xsk, idx, rx_desc->addr, addr, comp_addr);
+ verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr));
+ xsk_ring_cons__release(&xsk->rx, 1);
+ refill_rx(xsk, comp_addr);
+ }
+ }
+
+ return 0;
+}
+
+struct ethtool_channels {
+ __u32 cmd;
+ __u32 max_rx;
+ __u32 max_tx;
+ __u32 max_other;
+ __u32 max_combined;
+ __u32 rx_count;
+ __u32 tx_count;
+ __u32 other_count;
+ __u32 combined_count;
+};
+
+#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */
+
+static int rxq_num(const char *ifname)
+{
+ struct ethtool_channels ch = {
+ .cmd = ETHTOOL_GCHANNELS,
+ };
+
+ struct ifreq ifr = {
+ .ifr_data = (void *)&ch,
+ };
+ strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1);
+ int fd, ret;
+
+ fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+ if (fd < 0)
+ error(1, errno, "socket");
+
+ ret = ioctl(fd, SIOCETHTOOL, &ifr);
+ if (ret < 0)
+ error(1, errno, "ioctl(SIOCETHTOOL)");
+
+ close(fd);
+
+ return ch.rx_count + ch.combined_count;
+}
+
+static void hwtstamp_ioctl(int op, const char *ifname, struct hwtstamp_config *cfg)
+{
+ struct ifreq ifr = {
+ .ifr_data = (void *)cfg,
+ };
+ strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1);
+ int fd, ret;
+
+ fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+ if (fd < 0)
+ error(1, errno, "socket");
+
+ ret = ioctl(fd, op, &ifr);
+ if (ret < 0)
+ error(1, errno, "ioctl(%d)", op);
+
+ close(fd);
+}
+
+static struct hwtstamp_config saved_hwtstamp_cfg;
+static const char *saved_hwtstamp_ifname;
+
+static void hwtstamp_restore(void)
+{
+ hwtstamp_ioctl(SIOCSHWTSTAMP, saved_hwtstamp_ifname, &saved_hwtstamp_cfg);
+}
+
+static void hwtstamp_enable(const char *ifname)
+{
+ struct hwtstamp_config cfg = {
+ .rx_filter = HWTSTAMP_FILTER_ALL,
+ };
+
+ hwtstamp_ioctl(SIOCGHWTSTAMP, ifname, &saved_hwtstamp_cfg);
+ saved_hwtstamp_ifname = strdup(ifname);
+ atexit(hwtstamp_restore);
+
+ hwtstamp_ioctl(SIOCSHWTSTAMP, ifname, &cfg);
+}
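The SIOCGHWTSTAMP/atexit() pair restores whatever timestamping state the NIC had before the test ran. Both ioctls operate on the uapi struct hwtstamp_config from linux/net_tstamp.h; tx_type is left at its zero value (HWTSTAMP_TX_OFF) since only RX stamps matter here:

    struct hwtstamp_config {
    	int flags;	/* no flags defined yet, must be zero */
    	int tx_type;	/* HWTSTAMP_TX_* */
    	int rx_filter;	/* HWTSTAMP_FILTER_*; FILTER_ALL stamps every received packet */
    };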
+
+static void cleanup(void)
+{
+ LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
+ int ret;
+ int i;
+
+ if (bpf_obj) {
+ opts.old_prog_fd = bpf_program__fd(bpf_obj->progs.rx);
+ if (opts.old_prog_fd >= 0) {
+ printf("detaching bpf program....\n");
+ ret = bpf_xdp_detach(ifindex, XDP_FLAGS, &opts);
+ if (ret)
+ printf("failed to detach XDP program: %d\n", ret);
+ }
+ }
+
+ for (i = 0; i < rxq; i++)
+ close_xsk(&rx_xsk[i]);
+
+ if (bpf_obj)
+ xdp_hw_metadata__destroy(bpf_obj);
+}
+
+static void handle_signal(int sig)
+{
+ /* interrupting poll() is all we need */
+}
+
+static void timestamping_enable(int fd, int val)
+{
+ int ret;
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
+ if (ret < 0)
+ error(1, errno, "setsockopt(SO_TIMESTAMPING)");
+}
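SOF_TIMESTAMPING_SOFTWARE and SOF_TIMESTAMPING_RAW_HARDWARE are reporting flags: they make the kernel deliver whatever software/hardware receive timestamps exist via the SCM_TIMESTAMPING control message parsed in verify_skb_metadata() above. Generation of the hardware stamps themselves is enabled per device through hwtstamp_enable()/SIOCSHWTSTAMP, not through this socket option, so main() below combines both mechanisms:

    /* sketch of the call made in main() below */
    timestamping_enable(server_fd,
    		    SOF_TIMESTAMPING_SOFTWARE |		/* report sw stamps (ts[0]) */
    		    SOF_TIMESTAMPING_RAW_HARDWARE);	/* report hw stamps (ts[2]) */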
+
+int main(int argc, char *argv[])
+{
+ int server_fd = -1;
+ int ret;
+ int i;
+
+ struct bpf_program *prog;
+
+ if (argc != 2) {
+ fprintf(stderr, "pass device name\n");
+ return -1;
+ }
+
+ ifname = argv[1];
+ ifindex = if_nametoindex(ifname);
+ rxq = rxq_num(ifname);
+
+ printf("rxq: %d\n", rxq);
+
+ hwtstamp_enable(ifname);
+
+ rx_xsk = malloc(sizeof(struct xsk) * rxq);
+ if (!rx_xsk)
+ error(1, ENOMEM, "malloc");
+
+ for (i = 0; i < rxq; i++) {
+ printf("open_xsk(%s, %p, %d)\n", ifname, &rx_xsk[i], i);
+ ret = open_xsk(ifindex, &rx_xsk[i], i);
+ if (ret)
+ error(1, -ret, "open_xsk");
+
+ printf("xsk_socket__fd() -> %d\n", xsk_socket__fd(rx_xsk[i].socket));
+ }
+
+ printf("open bpf program...\n");
+ bpf_obj = xdp_hw_metadata__open();
+ if (libbpf_get_error(bpf_obj))
+ error(1, -libbpf_get_error(bpf_obj), "xdp_hw_metadata__open");
+
+ prog = bpf_object__find_program_by_name(bpf_obj->obj, "rx");
+ bpf_program__set_ifindex(prog, ifindex);
+ bpf_program__set_flags(prog, BPF_F_XDP_DEV_BOUND_ONLY);
+
+ printf("load bpf program...\n");
+ ret = xdp_hw_metadata__load(bpf_obj);
+ if (ret)
+ error(1, -ret, "xdp_hw_metadata__load");
+
+ printf("prepare skb endpoint...\n");
+ server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 9092, 1000);
+ if (server_fd < 0)
+ error(1, errno, "start_server");
+ timestamping_enable(server_fd,
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE);
+
+ printf("prepare xsk map...\n");
+ for (i = 0; i < rxq; i++) {
+ int sock_fd = xsk_socket__fd(rx_xsk[i].socket);
+ __u32 queue_id = i;
+
+ printf("map[%d] = %d\n", queue_id, sock_fd);
+ ret = bpf_map_update_elem(bpf_map__fd(bpf_obj->maps.xsk), &queue_id, &sock_fd, 0);
+ if (ret)
+ error(1, -ret, "bpf_map_update_elem");
+ }
+
+ printf("attach bpf program...\n");
+ ret = bpf_xdp_attach(ifindex,
+ bpf_program__fd(bpf_obj->progs.rx),
+ XDP_FLAGS, NULL);
+ if (ret)
+ error(1, -ret, "bpf_xdp_attach");
+
+ signal(SIGINT, handle_signal);
+ ret = verify_metadata(rx_xsk, rxq, server_fd);
+ close(server_fd);
+ cleanup();
+ if (ret)
+ error(1, -ret, "verify_metadata");
+}
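A hypothetical end-to-end run (interface name, peer address, and traffic generator are placeholders; the UDP port matches the start_server() call above):

    # machine under test
    ./xdp_hw_metadata eth0
    # peer machine
    echo -n xdp | nc -u <dut-addr> 9092

Packets that reach the UDP socket exercise the skb path above and print the SCM_TIMESTAMPING hardware stamp; packets that the rx program (not part of this hunk) redirects instead land on the AF_XDP sockets and print the rx_timestamp/rx_hash carried in front of the frame.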
diff --git a/tools/testing/selftests/bpf/xdp_metadata.h b/tools/testing/selftests/bpf/xdp_metadata.h
new file mode 100644
index 000000000000..f6780fbb0a21
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_metadata.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#pragma once
+
+#ifndef ETH_P_IP
+#define ETH_P_IP 0x0800
+#endif
+
+#ifndef ETH_P_IPV6
+#define ETH_P_IPV6 0x86DD
+#endif
+
+struct xdp_meta {
+ __u64 rx_timestamp;
+ __u32 rx_hash;
+};
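This struct is the contract between the BPF producer and the AF_XDP consumer: the XDP program reserves sizeof(struct xdp_meta) bytes of metadata in front of the frame and fills it from the device hints. A minimal sketch of such a producer, assuming the kfunc-based RX hints this series exposes (the extern declarations are modeled on the selftests, not quoted from this hunk):

    /* illustrative producer, not the series' actual program */
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "xdp_metadata.h"

    extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
    					     __u64 *timestamp) __ksym;
    extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
    					__u32 *hash) __ksym;

    SEC("xdp")
    int rx(struct xdp_md *ctx)
    {
    	struct xdp_meta *meta;
    	void *data, *data_meta;

    	/* reserve space for the metadata right in front of the frame */
    	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta)))
    		return XDP_PASS;

    	data = (void *)(long)ctx->data;
    	data_meta = (void *)(long)ctx->data_meta;
    	meta = data_meta;
    	if ((void *)(meta + 1) > data)	/* verifier bounds check */
    		return XDP_PASS;

    	if (bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
    		meta->rx_timestamp = 0;	/* driver has no timestamp hint */
    	if (bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash))
    		meta->rx_hash = 0;	/* driver has no hash hint */
    	return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";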
diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c
index 410a1385a01d..ce68c342b56f 100644
--- a/tools/testing/selftests/bpf/xdp_synproxy.c
+++ b/tools/testing/selftests/bpf/xdp_synproxy.c
@@ -116,6 +116,7 @@ static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *
*tcpipopts = 0;
*ports = NULL;
*single = false;
+ *tc = false;
while (true) {
int opt;
@@ -216,9 +217,10 @@ static int syncookie_attach(const char *argv0, unsigned int ifindex, bool tc)
prog_fd = bpf_program__fd(prog);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err < 0) {
- fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
+ fprintf(stderr, "Error: bpf_prog_get_info_by_fd: %s\n",
+ strerror(-err));
goto out;
}
attached_tc = tc;
@@ -291,9 +293,10 @@ static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports
};
info_len = sizeof(prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
if (err != 0) {
- fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
+ fprintf(stderr, "Error: bpf_prog_get_info_by_fd: %s\n",
+ strerror(-err));
goto out;
}
@@ -316,9 +319,10 @@ static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports
map_fd = err;
info_len = sizeof(map_info);
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
if (err != 0) {
- fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
+ fprintf(stderr, "Error: bpf_map_get_info_by_fd: %s\n",
+ strerror(-err));
close(map_fd);
goto err_close_map_fds;
}
diff --git a/tools/testing/selftests/bpf/xsk.c b/tools/testing/selftests/bpf/xsk.c
index 39d349509ba4..687d83e707f8 100644
--- a/tools/testing/selftests/bpf/xsk.c
+++ b/tools/testing/selftests/bpf/xsk.c
@@ -49,10 +49,7 @@
#define pr_warn(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
-enum xsk_prog {
- XSK_PROG_FALLBACK,
- XSK_PROG_REDIRECT_FLAGS,
-};
+#define XSKMAP_SIZE 1
struct xsk_umem {
struct xsk_ring_prod *fill_save;
@@ -74,43 +71,16 @@ struct xsk_ctx {
int refcount;
int ifindex;
struct list_head list;
- int prog_fd;
- int link_fd;
- int xsks_map_fd;
- char ifname[IFNAMSIZ];
- bool has_bpf_link;
};
struct xsk_socket {
struct xsk_ring_cons *rx;
struct xsk_ring_prod *tx;
- __u64 outstanding_tx;
struct xsk_ctx *ctx;
struct xsk_socket_config config;
int fd;
};
-struct xsk_nl_info {
- bool xdp_prog_attached;
- int ifindex;
- int fd;
-};
-
-/* Up until and including Linux 5.3 */
-struct xdp_ring_offset_v1 {
- __u64 producer;
- __u64 consumer;
- __u64 desc;
-};
-
-/* Up until and including Linux 5.3 */
-struct xdp_mmap_offsets_v1 {
- struct xdp_ring_offset_v1 rx;
- struct xdp_ring_offset_v1 tx;
- struct xdp_ring_offset_v1 fr;
- struct xdp_ring_offset_v1 cr;
-};
-
int xsk_umem__fd(const struct xsk_umem *umem)
{
return umem ? umem->fd : -EINVAL;
@@ -153,55 +123,17 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
if (!usr_cfg) {
cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
- cfg->libbpf_flags = 0;
- cfg->xdp_flags = 0;
cfg->bind_flags = 0;
return 0;
}
- if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
- return -EINVAL;
-
cfg->rx_size = usr_cfg->rx_size;
cfg->tx_size = usr_cfg->tx_size;
- cfg->libbpf_flags = usr_cfg->libbpf_flags;
- cfg->xdp_flags = usr_cfg->xdp_flags;
cfg->bind_flags = usr_cfg->bind_flags;
return 0;
}
-static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
-{
- struct xdp_mmap_offsets_v1 off_v1;
-
- /* getsockopt on a kernel <= 5.3 has no flags fields.
- * Copy over the offsets to the correct places in the >=5.4 format
- * and put the flags where they would have been on that kernel.
- */
- memcpy(&off_v1, off, sizeof(off_v1));
-
- off->rx.producer = off_v1.rx.producer;
- off->rx.consumer = off_v1.rx.consumer;
- off->rx.desc = off_v1.rx.desc;
- off->rx.flags = off_v1.rx.consumer + sizeof(__u32);
-
- off->tx.producer = off_v1.tx.producer;
- off->tx.consumer = off_v1.tx.consumer;
- off->tx.desc = off_v1.tx.desc;
- off->tx.flags = off_v1.tx.consumer + sizeof(__u32);
-
- off->fr.producer = off_v1.fr.producer;
- off->fr.consumer = off_v1.fr.consumer;
- off->fr.desc = off_v1.fr.desc;
- off->fr.flags = off_v1.fr.consumer + sizeof(__u32);
-
- off->cr.producer = off_v1.cr.producer;
- off->cr.consumer = off_v1.cr.consumer;
- off->cr.desc = off_v1.cr.desc;
- off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
-}
-
static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
socklen_t optlen;
@@ -215,11 +147,6 @@ static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
if (optlen == sizeof(*off))
return 0;
- if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
- xsk_mmap_offsets_v1(off);
- return 0;
- }
-
return -EINVAL;
}
@@ -340,531 +267,56 @@ out_umem_alloc:
return err;
}
-struct xsk_umem_config_v1 {
- __u32 fill_size;
- __u32 comp_size;
- __u32 frame_size;
- __u32 frame_headroom;
-};
-
-static enum xsk_prog get_xsk_prog(void)
-{
- enum xsk_prog detected = XSK_PROG_FALLBACK;
- char data_in = 0, data_out;
- struct bpf_insn insns[] = {
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, XDP_PASS),
- BPF_EMIT_CALL(BPF_FUNC_redirect_map),
- BPF_EXIT_INSN(),
- };
- LIBBPF_OPTS(bpf_test_run_opts, opts,
- .data_in = &data_in,
- .data_size_in = 1,
- .data_out = &data_out,
- );
-
- int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns);
-
- map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL);
- if (map_fd < 0)
- return detected;
-
- insns[0].imm = map_fd;
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
- if (prog_fd < 0) {
- close(map_fd);
- return detected;
- }
-
- ret = bpf_prog_test_run_opts(prog_fd, &opts);
- if (!ret && opts.retval == XDP_PASS)
- detected = XSK_PROG_REDIRECT_FLAGS;
- close(prog_fd);
- close(map_fd);
- return detected;
-}
-
-static int xsk_load_xdp_prog(struct xsk_socket *xsk)
-{
- static const int log_buf_size = 16 * 1024;
- struct xsk_ctx *ctx = xsk->ctx;
- char log_buf[log_buf_size];
- int prog_fd;
-
- /* This is the fallback C-program:
- * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
- * {
- * int ret, index = ctx->rx_queue_index;
- *
- * // A set entry here means that the correspnding queue_id
- * // has an active AF_XDP socket bound to it.
- * ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
- * if (ret > 0)
- * return ret;
- *
- * // Fallback for pre-5.3 kernels, not supporting default
- * // action in the flags parameter.
- * if (bpf_map_lookup_elem(&xsks_map, &index))
- * return bpf_redirect_map(&xsks_map, index, 0);
- * return XDP_PASS;
- * }
- */
- struct bpf_insn prog[] = {
- /* r2 = *(u32 *)(r1 + 16) */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
- /* *(u32 *)(r10 - 4) = r2 */
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
- /* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
- /* r3 = XDP_PASS */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- /* call bpf_redirect_map */
- BPF_EMIT_CALL(BPF_FUNC_redirect_map),
- /* if w0 != 0 goto pc+13 */
- BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
- /* r2 = r10 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- /* r2 += -4 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- /* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
- /* call bpf_map_lookup_elem */
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- /* r1 = r0 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- /* r0 = XDP_PASS */
- BPF_MOV64_IMM(BPF_REG_0, 2),
- /* if r1 == 0 goto pc+5 */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
- /* r2 = *(u32 *)(r10 - 4) */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
- /* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
- /* r3 = 0 */
- BPF_MOV64_IMM(BPF_REG_3, 0),
- /* call bpf_redirect_map */
- BPF_EMIT_CALL(BPF_FUNC_redirect_map),
- /* The jumps are to this instruction */
- BPF_EXIT_INSN(),
- };
-
- /* This is the post-5.3 kernel C-program:
- * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
- * {
- * return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
- * }
- */
- struct bpf_insn prog_redirect_flags[] = {
- /* r2 = *(u32 *)(r1 + 16) */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
- /* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
- /* r3 = XDP_PASS */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- /* call bpf_redirect_map */
- BPF_EMIT_CALL(BPF_FUNC_redirect_map),
- BPF_EXIT_INSN(),
- };
- size_t insns_cnt[] = {ARRAY_SIZE(prog),
- ARRAY_SIZE(prog_redirect_flags),
- };
- struct bpf_insn *progs[] = {prog, prog_redirect_flags};
- enum xsk_prog option = get_xsk_prog();
- LIBBPF_OPTS(bpf_prog_load_opts, opts,
- .log_buf = log_buf,
- .log_size = log_buf_size,
- );
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "LGPL-2.1 or BSD-2-Clause",
- progs[option], insns_cnt[option], &opts);
- if (prog_fd < 0) {
- pr_warn("BPF log buffer:\n%s", log_buf);
- return prog_fd;
- }
-
- ctx->prog_fd = prog_fd;
- return 0;
-}
-
-static int xsk_create_bpf_link(struct xsk_socket *xsk)
-{
- DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
- struct xsk_ctx *ctx = xsk->ctx;
- __u32 prog_id = 0;
- int link_fd;
- int err;
-
- err = bpf_xdp_query_id(ctx->ifindex, xsk->config.xdp_flags, &prog_id);
- if (err) {
- pr_warn("getting XDP prog id failed\n");
- return err;
- }
-
- /* if there's a netlink-based XDP prog loaded on interface, bail out
- * and ask user to do the removal by himself
- */
- if (prog_id) {
- pr_warn("Netlink-based XDP prog detected, please unload it in order to launch AF_XDP prog\n");
- return -EINVAL;
- }
-
- opts.flags = xsk->config.xdp_flags & ~(XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_REPLACE);
-
- link_fd = bpf_link_create(ctx->prog_fd, ctx->ifindex, BPF_XDP, &opts);
- if (link_fd < 0) {
- pr_warn("bpf_link_create failed: %s\n", strerror(errno));
- return link_fd;
- }
-
- ctx->link_fd = link_fd;
- return 0;
-}
-
-static int xsk_get_max_queues(struct xsk_socket *xsk)
-{
- struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
- struct xsk_ctx *ctx = xsk->ctx;
- struct ifreq ifr = {};
- int fd, err, ret;
-
- fd = socket(AF_LOCAL, SOCK_DGRAM | SOCK_CLOEXEC, 0);
- if (fd < 0)
- return -errno;
-
- ifr.ifr_data = (void *)&channels;
- bpf_strlcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ);
- err = ioctl(fd, SIOCETHTOOL, &ifr);
- if (err && errno != EOPNOTSUPP) {
- ret = -errno;
- goto out;
- }
-
- if (err) {
- /* If the device says it has no channels, then all traffic
- * is sent to a single stream, so max queues = 1.
- */
- ret = 1;
- } else {
- /* Take the max of rx, tx, combined. Drivers return
- * the number of channels in different ways.
- */
- ret = max(channels.max_rx, channels.max_tx);
- ret = max(ret, (int)channels.max_combined);
- }
-
-out:
- close(fd);
- return ret;
-}
-
-static int xsk_create_bpf_maps(struct xsk_socket *xsk)
-{
- struct xsk_ctx *ctx = xsk->ctx;
- int max_queues;
- int fd;
-
- max_queues = xsk_get_max_queues(xsk);
- if (max_queues < 0)
- return max_queues;
-
- fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map",
- sizeof(int), sizeof(int), max_queues, NULL);
- if (fd < 0)
- return fd;
-
- ctx->xsks_map_fd = fd;
-
- return 0;
-}
-
-static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
-{
- struct xsk_ctx *ctx = xsk->ctx;
-
- bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
- close(ctx->xsks_map_fd);
-}
-
-static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
-{
- __u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
- __u32 map_len = sizeof(struct bpf_map_info);
- struct bpf_prog_info prog_info = {};
- struct xsk_ctx *ctx = xsk->ctx;
- struct bpf_map_info map_info;
- int fd, err;
-
- err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
- if (err)
- return err;
-
- num_maps = prog_info.nr_map_ids;
-
- map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
- if (!map_ids)
- return -ENOMEM;
-
- memset(&prog_info, 0, prog_len);
- prog_info.nr_map_ids = num_maps;
- prog_info.map_ids = (__u64)(unsigned long)map_ids;
-
- err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
- if (err)
- goto out_map_ids;
-
- ctx->xsks_map_fd = -1;
-
- for (i = 0; i < prog_info.nr_map_ids; i++) {
- fd = bpf_map_get_fd_by_id(map_ids[i]);
- if (fd < 0)
- continue;
-
- memset(&map_info, 0, map_len);
- err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
- if (err) {
- close(fd);
- continue;
- }
-
- if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) {
- ctx->xsks_map_fd = fd;
- break;
- }
-
- close(fd);
- }
-
- if (ctx->xsks_map_fd == -1)
- err = -ENOENT;
-
-out_map_ids:
- free(map_ids);
- return err;
-}
-
-static int xsk_set_bpf_maps(struct xsk_socket *xsk)
-{
- struct xsk_ctx *ctx = xsk->ctx;
-
- return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
- &xsk->fd, 0);
-}
-
-static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)
+bool xsk_is_in_mode(u32 ifindex, int mode)
{
- struct bpf_link_info link_info;
- __u32 link_len;
- __u32 id = 0;
- int err;
- int fd;
-
- while (true) {
- err = bpf_link_get_next_id(id, &id);
- if (err) {
- if (errno == ENOENT) {
- err = 0;
- break;
- }
- pr_warn("can't get next link: %s\n", strerror(errno));
- break;
- }
-
- fd = bpf_link_get_fd_by_id(id);
- if (fd < 0) {
- if (errno == ENOENT)
- continue;
- pr_warn("can't get link by id (%u): %s\n", id, strerror(errno));
- err = -errno;
- break;
- }
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
+ int ret;
- link_len = sizeof(struct bpf_link_info);
- memset(&link_info, 0, link_len);
- err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len);
- if (err) {
- pr_warn("can't get link info: %s\n", strerror(errno));
- close(fd);
- break;
- }
- if (link_info.type == BPF_LINK_TYPE_XDP) {
- if (link_info.xdp.ifindex == ifindex) {
- *link_fd = fd;
- if (prog_id)
- *prog_id = link_info.prog_id;
- break;
- }
- }
- close(fd);
+ ret = bpf_xdp_query(ifindex, mode, &opts);
+ if (ret) {
+ printf("XDP mode query returned error %s\n", strerror(errno));
+ return false;
}
- return err;
-}
+ if (mode == XDP_FLAGS_DRV_MODE)
+ return opts.attach_mode == XDP_ATTACHED_DRV;
+ else if (mode == XDP_FLAGS_SKB_MODE)
+ return opts.attach_mode == XDP_ATTACHED_SKB;
-static bool xsk_probe_bpf_link(void)
-{
- LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = XDP_FLAGS_SKB_MODE);
- struct bpf_insn insns[2] = {
- BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
- BPF_EXIT_INSN()
- };
- int prog_fd, link_fd = -1, insn_cnt = ARRAY_SIZE(insns);
- int ifindex_lo = 1;
- bool ret = false;
- int err;
-
- err = xsk_link_lookup(ifindex_lo, NULL, &link_fd);
- if (err)
- return ret;
-
- if (link_fd >= 0)
- return true;
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
- if (prog_fd < 0)
- return ret;
-
- link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts);
- close(prog_fd);
-
- if (link_fd >= 0) {
- ret = true;
- close(link_fd);
- }
-
- return ret;
+ return false;
}
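Callers use this predicate to assert that an attach actually landed in the requested mode, e.g. (sketch matching xsk_reattach_xdp() in xskxceiver.c further down):

    if (!xsk_is_in_mode(ifindex, XDP_FLAGS_DRV_MODE))
    	exit_with_error(EINVAL);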
-static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
+int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags)
{
- char ifname[IFNAMSIZ];
- struct xsk_ctx *ctx;
- char *interface;
-
- ctx = calloc(1, sizeof(*ctx));
- if (!ctx)
- return -ENOMEM;
-
- interface = if_indextoname(ifindex, &ifname[0]);
- if (!interface) {
- free(ctx);
- return -errno;
- }
-
- ctx->ifindex = ifindex;
- bpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);
-
- xsk->ctx = ctx;
- xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
+ int prog_fd;
- return 0;
+ prog_fd = bpf_program__fd(prog);
+ return bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL);
}
-static int xsk_init_xdp_res(struct xsk_socket *xsk,
- int *xsks_map_fd)
+void xsk_detach_xdp_program(int ifindex, u32 xdp_flags)
{
- struct xsk_ctx *ctx = xsk->ctx;
- int err;
-
- err = xsk_create_bpf_maps(xsk);
- if (err)
- return err;
-
- err = xsk_load_xdp_prog(xsk);
- if (err)
- goto err_load_xdp_prog;
-
- if (ctx->has_bpf_link)
- err = xsk_create_bpf_link(xsk);
- else
- err = bpf_xdp_attach(xsk->ctx->ifindex, ctx->prog_fd,
- xsk->config.xdp_flags, NULL);
-
- if (err)
- goto err_attach_xdp_prog;
-
- if (!xsk->rx)
- return err;
-
- err = xsk_set_bpf_maps(xsk);
- if (err)
- goto err_set_bpf_maps;
-
- return err;
-
-err_set_bpf_maps:
- if (ctx->has_bpf_link)
- close(ctx->link_fd);
- else
- bpf_xdp_detach(ctx->ifindex, 0, NULL);
-err_attach_xdp_prog:
- close(ctx->prog_fd);
-err_load_xdp_prog:
- xsk_delete_bpf_maps(xsk);
- return err;
+ bpf_xdp_detach(ifindex, xdp_flags, NULL);
}
-static int xsk_lookup_xdp_res(struct xsk_socket *xsk, int *xsks_map_fd, int prog_id)
+void xsk_clear_xskmap(struct bpf_map *map)
{
- struct xsk_ctx *ctx = xsk->ctx;
- int err;
-
- ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
- if (ctx->prog_fd < 0) {
- err = -errno;
- goto err_prog_fd;
- }
- err = xsk_lookup_bpf_maps(xsk);
- if (err)
- goto err_lookup_maps;
-
- if (!xsk->rx)
- return err;
-
- err = xsk_set_bpf_maps(xsk);
- if (err)
- goto err_set_maps;
+ u32 index = 0;
+ int map_fd;
- return err;
-
-err_set_maps:
- close(ctx->xsks_map_fd);
-err_lookup_maps:
- close(ctx->prog_fd);
-err_prog_fd:
- if (ctx->has_bpf_link)
- close(ctx->link_fd);
- return err;
+ map_fd = bpf_map__fd(map);
+ bpf_map_delete_elem(map_fd, &index);
}
-static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd)
+int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk)
{
- struct xsk_socket *xsk = _xdp;
- struct xsk_ctx *ctx = xsk->ctx;
- __u32 prog_id = 0;
- int err;
+ int map_fd, sock_fd;
+ u32 index = 0;
- if (ctx->has_bpf_link)
- err = xsk_link_lookup(ctx->ifindex, &prog_id, &ctx->link_fd);
- else
- err = bpf_xdp_query_id(ctx->ifindex, xsk->config.xdp_flags, &prog_id);
+ map_fd = bpf_map__fd(map);
+ sock_fd = xsk_socket__fd(xsk);
- if (err)
- return err;
-
- err = !prog_id ? xsk_init_xdp_res(xsk, xsks_map_fd) :
- xsk_lookup_xdp_res(xsk, xsks_map_fd, prog_id);
-
- if (!err && xsks_map_fd)
- *xsks_map_fd = ctx->xsks_map_fd;
-
- return err;
-}
-
-int xsk_setup_xdp_prog_xsk(struct xsk_socket *xsk, int *xsks_map_fd)
-{
- return __xsk_setup_xdp_prog(xsk, xsks_map_fd);
+ return bpf_map_update_elem(map_fd, &index, &sock_fd, 0);
}
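Together with xsk_attach_xdp_program() above, this replaces the removed auto-load/auto-attach machinery: the caller now owns the XDP program, the xskmap, and their lifetimes. A condensed sketch of the new flow, mirroring how xskxceiver.c wires things up below (error handling elided):

    skel = xsk_xdp_progs__open_and_load();
    xsk_attach_xdp_program(skel->progs.xsk_def_prog, ifindex, XDP_FLAGS_DRV_MODE);
    xsk_socket__create(&xsk, ifindex, queue_id, umem, &rx, &tx, &cfg);
    xsk_update_xskmap(skel->maps.xsk, xsk);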
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
@@ -913,7 +365,7 @@ out_free:
static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
struct xsk_umem *umem, int ifindex,
- const char *ifname, __u32 queue_id,
+ __u32 queue_id,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp)
{
@@ -940,51 +392,15 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
ctx->refcount = 1;
ctx->umem = umem;
ctx->queue_id = queue_id;
- bpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);
ctx->fill = fill;
ctx->comp = comp;
list_add(&ctx->list, &umem->ctx_list);
- ctx->has_bpf_link = xsk_probe_bpf_link();
return ctx;
}
-static void xsk_destroy_xsk_struct(struct xsk_socket *xsk)
-{
- free(xsk->ctx);
- free(xsk);
-}
-
-int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd)
-{
- xsk->ctx->xsks_map_fd = fd;
- return xsk_set_bpf_maps(xsk);
-}
-
-int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd)
-{
- struct xsk_socket *xsk;
- int res;
-
- xsk = calloc(1, sizeof(*xsk));
- if (!xsk)
- return -ENOMEM;
-
- res = xsk_create_xsk_struct(ifindex, xsk);
- if (res) {
- free(xsk);
- return -EINVAL;
- }
-
- res = __xsk_setup_xdp_prog(xsk, xsks_map_fd);
-
- xsk_destroy_xsk_struct(xsk);
-
- return res;
-}
-
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
- const char *ifname,
+ int ifindex,
__u32 queue_id, struct xsk_umem *umem,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
@@ -998,7 +414,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
struct xdp_mmap_offsets off;
struct xsk_socket *xsk;
struct xsk_ctx *ctx;
- int err, ifindex;
+ int err;
if (!umem || !xsk_ptr || !(rx || tx))
return -EFAULT;
@@ -1013,13 +429,6 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
if (err)
goto out_xsk_alloc;
- xsk->outstanding_tx = 0;
- ifindex = if_nametoindex(ifname);
- if (!ifindex) {
- err = -errno;
- goto out_xsk_alloc;
- }
-
if (umem->refcount++ > 0) {
xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
if (xsk->fd < 0) {
@@ -1039,8 +448,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
goto out_socket;
}
- ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
- fill, comp);
+ ctx = xsk_create_ctx(xsk, umem, ifindex, queue_id, fill, comp);
if (!ctx) {
err = -ENOMEM;
goto out_socket;
@@ -1138,12 +546,6 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
goto out_mmap_tx;
}
- if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
- err = __xsk_setup_xdp_prog(xsk, NULL);
- if (err)
- goto out_mmap_tx;
- }
-
*xsk_ptr = xsk;
umem->fill_save = NULL;
umem->comp_save = NULL;
@@ -1167,7 +569,7 @@ out_xsk_alloc:
return err;
}
-int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+int xsk_socket__create(struct xsk_socket **xsk_ptr, int ifindex,
__u32 queue_id, struct xsk_umem *umem,
struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
const struct xsk_socket_config *usr_config)
@@ -1175,7 +577,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
if (!umem)
return -EFAULT;
- return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
+ return xsk_socket__create_shared(xsk_ptr, ifindex, queue_id, umem,
rx, tx, umem->fill_save,
umem->comp_save, usr_config);
}
@@ -1219,13 +621,6 @@ void xsk_socket__delete(struct xsk_socket *xsk)
ctx = xsk->ctx;
umem = ctx->umem;
- if (ctx->refcount == 1) {
- xsk_delete_bpf_maps(xsk);
- close(ctx->prog_fd);
- if (ctx->has_bpf_link)
- close(ctx->link_fd);
- }
-
xsk_put_ctx(ctx, true);
err = xsk_get_mmap_offsets(xsk->fd, &off);
diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h
index 997723b0bfb2..04ed8b544712 100644
--- a/tools/testing/selftests/bpf/xsk.h
+++ b/tools/testing/selftests/bpf/xsk.h
@@ -23,77 +23,6 @@
extern "C" {
#endif
-/* This whole API has been deprecated and moved to libxdp that can be found at
- * https://github.com/xdp-project/xdp-tools. The APIs are exactly the same so
- * it should just be linking with libxdp instead of libbpf for this set of
- * functionality. If not, please submit a bug report on the aforementioned page.
- */
-
-/* Load-Acquire Store-Release barriers used by the XDP socket
- * library. The following macros should *NOT* be considered part of
- * the xsk.h API, and is subject to change anytime.
- *
- * LIBRARY INTERNAL
- */
-
-#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x)
-#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
-
-#if defined(__i386__) || defined(__x86_64__)
-# define libbpf_smp_store_release(p, v) \
- do { \
- asm volatile("" : : : "memory"); \
- __XSK_WRITE_ONCE(*p, v); \
- } while (0)
-# define libbpf_smp_load_acquire(p) \
- ({ \
- typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
- asm volatile("" : : : "memory"); \
- ___p1; \
- })
-#elif defined(__aarch64__)
-# define libbpf_smp_store_release(p, v) \
- asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
-# define libbpf_smp_load_acquire(p) \
- ({ \
- typeof(*p) ___p1; \
- asm volatile ("ldar %w0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
- ___p1; \
- })
-#elif defined(__riscv)
-# define libbpf_smp_store_release(p, v) \
- do { \
- asm volatile ("fence rw,w" : : : "memory"); \
- __XSK_WRITE_ONCE(*p, v); \
- } while (0)
-# define libbpf_smp_load_acquire(p) \
- ({ \
- typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
- asm volatile ("fence r,rw" : : : "memory"); \
- ___p1; \
- })
-#endif
-
-#ifndef libbpf_smp_store_release
-#define libbpf_smp_store_release(p, v) \
- do { \
- __sync_synchronize(); \
- __XSK_WRITE_ONCE(*p, v); \
- } while (0)
-#endif
-
-#ifndef libbpf_smp_load_acquire
-#define libbpf_smp_load_acquire(p) \
- ({ \
- typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
- __sync_synchronize(); \
- ___p1; \
- })
-#endif
-
-/* LIBRARY INTERNAL -- END */
-
/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
@@ -168,7 +97,7 @@ static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
* this function. Without this optimization it would have been
* free_entries = r->cached_prod - r->cached_cons + r->size.
*/
- r->cached_cons = libbpf_smp_load_acquire(r->consumer);
+ r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE);
r->cached_cons += r->size;
return r->cached_cons - r->cached_prod;
@@ -179,7 +108,7 @@ static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
__u32 entries = r->cached_prod - r->cached_cons;
if (entries == 0) {
- r->cached_prod = libbpf_smp_load_acquire(r->producer);
+ r->cached_prod = __atomic_load_n(r->producer, __ATOMIC_ACQUIRE);
entries = r->cached_prod - r->cached_cons;
}
@@ -202,7 +131,7 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
/* Make sure everything has been written to the ring before indicating
* this to the kernel by writing the producer pointer.
*/
- libbpf_smp_store_release(prod->producer, *prod->producer + nb);
+ __atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE);
}
static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
@@ -227,8 +156,7 @@ static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
/* Make sure data has been read before indicating we are done
* with the entries by updating the consumer pointer.
*/
- libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
-
+ __atomic_store_n(cons->consumer, *cons->consumer + nb, __ATOMIC_RELEASE);
}
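The __atomic builtins carry the same one-sided ordering the removed per-arch libbpf_smp_* macros implemented by hand: acquire when loading the peer's ring index, release when publishing our own. Sketch of the pairing on a consumer ring:

    /* see every descriptor the producer wrote before bumping its index */
    r->cached_prod = __atomic_load_n(r->producer, __ATOMIC_ACQUIRE);
    /* ... read the descriptors ... */
    /* make those reads visible before the producer can reuse the slots */
    __atomic_store_n(r->consumer, *r->consumer + nb, __ATOMIC_RELEASE);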
static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
@@ -269,18 +197,15 @@ struct xsk_umem_config {
__u32 flags;
};
-int xsk_setup_xdp_prog_xsk(struct xsk_socket *xsk, int *xsks_map_fd);
-int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd);
-int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd);
-
-/* Flags for the libbpf_flags field. */
-#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)
+int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags);
+void xsk_detach_xdp_program(int ifindex, u32 xdp_flags);
+int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk);
+void xsk_clear_xskmap(struct bpf_map *map);
+bool xsk_is_in_mode(u32 ifindex, int mode);
struct xsk_socket_config {
__u32 rx_size;
__u32 tx_size;
- __u32 libbpf_flags;
- __u32 xdp_flags;
__u16 bind_flags;
};
@@ -291,13 +216,13 @@ int xsk_umem__create(struct xsk_umem **umem,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
int xsk_socket__create(struct xsk_socket **xsk,
- const char *ifname, __u32 queue_id,
+ int ifindex, __u32 queue_id,
struct xsk_umem *umem,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
const struct xsk_socket_config *config);
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
- const char *ifname,
+ int ifindex,
__u32 queue_id, struct xsk_umem *umem,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
diff --git a/tools/testing/selftests/bpf/xsk_prereqs.sh b/tools/testing/selftests/bpf/xsk_prereqs.sh
index a0b71723a818..ae697a10a056 100755
--- a/tools/testing/selftests/bpf/xsk_prereqs.sh
+++ b/tools/testing/selftests/bpf/xsk_prereqs.sh
@@ -55,21 +55,13 @@ test_exit()
clear_configs()
{
- if [ $(ip netns show | grep $3 &>/dev/null; echo $?;) == 0 ]; then
- [ $(ip netns exec $3 ip link show $2 &>/dev/null; echo $?;) == 0 ] &&
- { ip netns exec $3 ip link del $2; }
- ip netns del $3
- fi
#Once we delete a veth pair node, the entire veth pair is removed.
#This is just to be cautious: in case the NS does not exist, the
#veth node inside the NS won't get removed, so we explicitly remove it
[ $(ip link show $1 &>/dev/null; echo $?;) == 0 ] &&
{ ip link del $1; }
}
cleanup_exit()
{
- clear_configs $1 $2 $3
+ clear_configs $1 $2
}
validate_ip_utility()
@@ -83,7 +75,7 @@ exec_xskxceiver()
ARGS+="-b "
fi
- ./${XSKOBJ} -i ${VETH0} -i ${VETH1},${NS1} ${ARGS}
+ ./${XSKOBJ} -i ${VETH0} -i ${VETH1} ${ARGS}
retval=$?
test_status $retval "${TEST_NAME}"
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 162d3a516f2c..a17655107a94 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -55,12 +55,11 @@
* Flow:
* -----
* - Single process spawns two threads: Tx and Rx
- * - Each of these two threads attach to a veth interface within their assigned
- * namespaces
- * - Each thread Creates one AF_XDP socket connected to a unique umem for each
+ * - Each of these two threads attaches to a veth interface
+ * - Each thread creates one AF_XDP socket connected to a unique umem for each
* veth interface
- * - Tx thread Transmits 10k packets from veth<xxxx> to veth<yyyy>
- * - Rx thread verifies if all 10k packets were received and delivered in-order,
+ * - Tx thread transmits a number of packets from veth<xxxx> to veth<yyyy>
+ * - Rx thread verifies that all packets were received and delivered in-order,
* and have the right content
*
* Enable/disable packet dump mode:
@@ -97,18 +96,14 @@
#include <time.h>
#include <unistd.h>
#include <stdatomic.h>
+
+#include "xsk_xdp_progs.skel.h"
#include "xsk.h"
#include "xskxceiver.h"
#include <bpf/bpf.h>
#include <linux/filter.h>
#include "../kselftest.h"
-/* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf.
- * Until xskxceiver is either moved or re-writed into libxdp, suppress
- * deprecation warnings in this file
- */
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
static const char *IP1 = "192.168.100.162";
@@ -269,6 +264,11 @@ static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr)
udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr);
}
+static u32 mode_to_xdp_flags(enum test_mode mode)
+{
+ return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
+}
+
static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size)
{
struct xsk_umem_config cfg = {
@@ -322,15 +322,13 @@ static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_i
xsk->umem = umem;
cfg.rx_size = xsk->rxqsize;
cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
- cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
- cfg.xdp_flags = ifobject->xdp_flags;
cfg.bind_flags = ifobject->bind_flags;
if (shared)
cfg.bind_flags |= XDP_SHARED_UMEM;
txr = ifobject->tx_on ? &xsk->tx : NULL;
rxr = ifobject->rx_on ? &xsk->rx : NULL;
- return xsk_socket__create(&xsk->xsk, ifobject->ifname, 0, umem->umem, rxr, txr, &cfg);
+ return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
}
static bool ifobj_zc_avail(struct ifobject *ifobject)
@@ -350,7 +348,7 @@ static bool ifobj_zc_avail(struct ifobject *ifobject)
umem = calloc(1, sizeof(struct xsk_umem_info));
if (!umem) {
munmap(bufs, umem_sz);
- exit_with_error(-ENOMEM);
+ exit_with_error(ENOMEM);
}
umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
ret = xsk_configure_umem(umem, bufs, umem_sz);
@@ -360,8 +358,6 @@ static bool ifobj_zc_avail(struct ifobject *ifobject)
xsk = calloc(1, sizeof(struct xsk_socket_info));
if (!xsk)
goto out;
- ifobject->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
- ifobject->xdp_flags |= XDP_FLAGS_DRV_MODE;
ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
ifobject->rx_on = true;
xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
@@ -399,28 +395,6 @@ static void usage(const char *prog)
ksft_print_msg(str, prog);
}
-static int switch_namespace(const char *nsname)
-{
- char fqns[26] = "/var/run/netns/";
- int nsfd;
-
- if (!nsname || strlen(nsname) == 0)
- return -1;
-
- strncat(fqns, nsname, sizeof(fqns) - strlen(fqns) - 1);
- nsfd = open(fqns, O_RDONLY);
-
- if (nsfd == -1)
- exit_with_error(errno);
-
- if (setns(nsfd, 0) == -1)
- exit_with_error(errno);
-
- print_verbose("NS switched: %s\n", nsname);
-
- return nsfd;
-}
-
static bool validate_interface(struct ifobject *ifobj)
{
if (!strcmp(ifobj->ifname, ""))
@@ -438,8 +412,6 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
opterr = 0;
for (;;) {
- char *sptr, *token;
-
c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index);
if (c == -1)
break;
@@ -453,11 +425,13 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
else
break;
- sptr = strndupa(optarg, strlen(optarg));
- memcpy(ifobj->ifname, strsep(&sptr, ","), MAX_INTERFACE_NAME_CHARS);
- token = strsep(&sptr, ",");
- if (token)
- memcpy(ifobj->nsname, token, MAX_INTERFACES_NAMESPACE_CHARS);
+ memcpy(ifobj->ifname, optarg,
+ min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg)));
+
+ ifobj->ifindex = if_nametoindex(ifobj->ifname);
+ if (!ifobj->ifindex)
+ exit_with_error(errno);
+
interface_nb++;
break;
case 'D':
@@ -520,6 +494,10 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
test->total_steps = 1;
test->nb_sockets = 1;
test->fail = false;
+ test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
+ test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
+ test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
+ test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
}
static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
@@ -538,12 +516,6 @@ static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
for (i = 0; i < MAX_INTERFACES; i++) {
struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
- ifobj->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
- if (mode == TEST_MODE_SKB)
- ifobj->xdp_flags |= XDP_FLAGS_SKB_MODE;
- else
- ifobj->xdp_flags |= XDP_FLAGS_DRV_MODE;
-
ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
if (mode == TEST_MODE_ZC)
ifobj->bind_flags |= XDP_ZEROCOPY;
@@ -565,6 +537,16 @@ static void test_spec_set_name(struct test_spec *test, const char *name)
strncpy(test->name, name, MAX_TEST_NAME_SIZE);
}
+static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
+ struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
+ struct bpf_map *xskmap_tx)
+{
+ test->xdp_prog_rx = xdp_prog_rx;
+ test->xdp_prog_tx = xdp_prog_tx;
+ test->xskmap_rx = xskmap_rx;
+ test->xskmap_tx = xskmap_tx;
+}
+
static void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
if (pkt_stream)
@@ -767,7 +749,7 @@ static void pkt_dump(void *pkt, u32 len)
struct ethhdr *ethhdr;
struct udphdr *udphdr;
struct iphdr *iphdr;
- int payload, i;
+ u32 payload, i;
ethhdr = pkt;
iphdr = pkt + sizeof(*ethhdr);
@@ -792,7 +774,7 @@ static void pkt_dump(void *pkt, u32 len)
fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
/*extract L5 frame */
- payload = *((uint32_t *)(pkt + PKT_HDR_SIZE));
+ payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE)));
fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
fprintf(stdout, "---------------------------------------\n");
@@ -936,7 +918,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
if (ifobj->use_poll) {
ret = poll(fds, 1, POLL_TMOUT);
if (ret < 0)
- exit_with_error(-ret);
+ exit_with_error(errno);
if (!ret) {
if (!is_umem_valid(test->ifobj_tx))
@@ -963,7 +945,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
ret = poll(fds, 1, POLL_TMOUT);
if (ret < 0)
- exit_with_error(-ret);
+ exit_with_error(errno);
}
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
@@ -1015,7 +997,7 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd
if (timeout) {
if (ret < 0) {
ksft_print_msg("ERROR: [%s] Poll error %d\n",
- __func__, ret);
+ __func__, errno);
return TEST_FAILURE;
}
if (ret == 0)
@@ -1024,7 +1006,7 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd
}
if (ret <= 0) {
ksft_print_msg("ERROR: [%s] Poll error %d\n",
- __func__, ret);
+ __func__, errno);
return TEST_FAILURE;
}
}
@@ -1240,7 +1222,7 @@ static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobje
{
xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
ifobject->xsk = &ifobject->xsk_arr[0];
- ifobject->xsk_map_fd = test->ifobj_rx->xsk_map_fd;
+ ifobject->xskmap = test->ifobj_rx->xskmap;
memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
}
@@ -1272,7 +1254,7 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
}
- xsk_ring_prod__submit(&umem->fq, buffers_to_fill);
+ xsk_ring_prod__submit(&umem->fq, i);
}
static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
@@ -1280,10 +1262,8 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
LIBBPF_OPTS(bpf_xdp_query_opts, opts);
- int ret, ifindex;
void *bufs;
-
- ifobject->ns_fd = switch_namespace(ifobject->nsname);
+ int ret;
if (ifobject->umem->unaligned_mode)
mmap_flags |= MAP_HUGETLB;
@@ -1308,33 +1288,9 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (!ifobject->rx_on)
return;
- ifindex = if_nametoindex(ifobject->ifname);
- if (!ifindex)
- exit_with_error(errno);
-
- ret = xsk_setup_xdp_prog_xsk(ifobject->xsk->xsk, &ifobject->xsk_map_fd);
- if (ret)
- exit_with_error(-ret);
-
- ret = bpf_xdp_query(ifindex, ifobject->xdp_flags, &opts);
- if (ret)
- exit_with_error(-ret);
-
- if (ifobject->xdp_flags & XDP_FLAGS_SKB_MODE) {
- if (opts.attach_mode != XDP_ATTACHED_SKB) {
- ksft_print_msg("ERROR: [%s] XDP prog not in SKB mode\n");
- exit_with_error(-EINVAL);
- }
- } else if (ifobject->xdp_flags & XDP_FLAGS_DRV_MODE) {
- if (opts.attach_mode != XDP_ATTACHED_DRV) {
- ksft_print_msg("ERROR: [%s] XDP prog not in DRV mode\n");
- exit_with_error(-EINVAL);
- }
- }
-
- ret = xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
+ ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
if (ret)
- exit_with_error(-ret);
+ exit_with_error(errno);
}
static void *worker_testapp_validate_tx(void *arg)
@@ -1367,14 +1323,17 @@ static void *worker_testapp_validate_rx(void *arg)
struct test_spec *test = (struct test_spec *)arg;
struct ifobject *ifobject = test->ifobj_rx;
struct pollfd fds = { };
- int id = 0;
int err;
if (test->current_step == 1) {
thread_common_ops(test, ifobject);
} else {
- bpf_map_delete_elem(ifobject->xsk_map_fd, &id);
- xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
+ xsk_clear_xskmap(ifobject->xskmap);
+ err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
+ if (err) {
+ printf("Error: Failed to update xskmap, error %s\n", strerror(-err));
+ exit_with_error(-err);
+ }
}
fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
@@ -1412,84 +1371,106 @@ static void handler(int signum)
pthread_exit(NULL);
}
-static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj,
- enum test_type type)
+static bool xdp_prog_changed(struct test_spec *test, struct ifobject *ifobj)
{
- bool old_shared_umem = ifobj->shared_umem;
- pthread_t t0;
-
- if (pthread_barrier_init(&barr, NULL, 2))
- exit_with_error(errno);
-
- test->current_step++;
- if (type == TEST_TYPE_POLL_RXQ_TMOUT)
- pkt_stream_reset(ifobj->pkt_stream);
- pkts_in_flight = 0;
-
- test->ifobj_rx->shared_umem = false;
- test->ifobj_tx->shared_umem = false;
+ return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
+}
- signal(SIGUSR1, handler);
- /* Spawn thread */
- pthread_create(&t0, NULL, ifobj->func_ptr, test);
+static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
+ struct bpf_map *xskmap, enum test_mode mode)
+{
+ int err;
- if (type != TEST_TYPE_POLL_TXQ_TMOUT)
- pthread_barrier_wait(&barr);
+ xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
+ err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
+ if (err) {
+ printf("Error attaching XDP program\n");
+ exit_with_error(-err);
+ }
- if (pthread_barrier_destroy(&barr))
- exit_with_error(errno);
+ if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
+ if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
+ ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
+ exit_with_error(EINVAL);
+ }
- pthread_kill(t0, SIGUSR1);
- pthread_join(t0, NULL);
+ ifobj->xdp_prog = xdp_prog;
+ ifobj->xskmap = xskmap;
+ ifobj->mode = mode;
+}
- if (test->total_steps == test->current_step || test->fail) {
- xsk_socket__delete(ifobj->xsk->xsk);
- testapp_clean_xsk_umem(ifobj);
- }
+static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
+ struct ifobject *ifobj_tx)
+{
+ if (xdp_prog_changed(test, ifobj_rx))
+ xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
- test->ifobj_rx->shared_umem = old_shared_umem;
- test->ifobj_tx->shared_umem = old_shared_umem;
+ if (!ifobj_tx || ifobj_tx->shared_umem)
+ return;
- return !!test->fail;
+ if (xdp_prog_changed(test, ifobj_tx))
+ xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
}
-static int testapp_validate_traffic(struct test_spec *test)
+static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
+ struct ifobject *ifobj2)
{
- struct ifobject *ifobj_tx = test->ifobj_tx;
- struct ifobject *ifobj_rx = test->ifobj_rx;
pthread_t t0, t1;
- if (pthread_barrier_init(&barr, NULL, 2))
- exit_with_error(errno);
+ if (ifobj2)
+ if (pthread_barrier_init(&barr, NULL, 2))
+ exit_with_error(errno);
test->current_step++;
- pkt_stream_reset(ifobj_rx->pkt_stream);
+ pkt_stream_reset(ifobj1->pkt_stream);
pkts_in_flight = 0;
+ signal(SIGUSR1, handler);
/*Spawn RX thread */
- pthread_create(&t0, NULL, ifobj_rx->func_ptr, test);
+ pthread_create(&t0, NULL, ifobj1->func_ptr, test);
- pthread_barrier_wait(&barr);
- if (pthread_barrier_destroy(&barr))
- exit_with_error(errno);
+ if (ifobj2) {
+ pthread_barrier_wait(&barr);
+ if (pthread_barrier_destroy(&barr))
+ exit_with_error(errno);
- /*Spawn TX thread */
- pthread_create(&t1, NULL, ifobj_tx->func_ptr, test);
+ /*Spawn TX thread */
+ pthread_create(&t1, NULL, ifobj2->func_ptr, test);
- pthread_join(t1, NULL);
- pthread_join(t0, NULL);
+ pthread_join(t1, NULL);
+ }
+
+ if (!ifobj2)
+ pthread_kill(t0, SIGUSR1);
+ else
+ pthread_join(t0, NULL);
if (test->total_steps == test->current_step || test->fail) {
- xsk_socket__delete(ifobj_tx->xsk->xsk);
- xsk_socket__delete(ifobj_rx->xsk->xsk);
- testapp_clean_xsk_umem(ifobj_rx);
- if (!ifobj_tx->shared_umem)
- testapp_clean_xsk_umem(ifobj_tx);
+ if (ifobj2)
+ xsk_socket__delete(ifobj2->xsk->xsk);
+ xsk_socket__delete(ifobj1->xsk->xsk);
+ testapp_clean_xsk_umem(ifobj1);
+ if (ifobj2 && !ifobj2->shared_umem)
+ testapp_clean_xsk_umem(ifobj2);
}
return !!test->fail;
}
+static int testapp_validate_traffic(struct test_spec *test)
+{
+ struct ifobject *ifobj_rx = test->ifobj_rx;
+ struct ifobject *ifobj_tx = test->ifobj_tx;
+
+ xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx);
+ return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
+}
+
+static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
+{
+ return __testapp_validate_traffic(test, ifobj, NULL);
+}
+
static void testapp_teardown(struct test_spec *test)
{
int i;
@@ -1525,7 +1506,7 @@ static void testapp_bidi(struct test_spec *test)
print_verbose("Switching Tx/Rx vectors\n");
swap_directions(&test->ifobj_rx, &test->ifobj_tx);
- testapp_validate_traffic(test);
+ __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
swap_directions(&test->ifobj_rx, &test->ifobj_tx);
}
@@ -1539,9 +1520,9 @@ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj
ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];
- ret = xsk_socket__update_xskmap(ifobj_rx->xsk->xsk, ifobj_rx->xsk_map_fd);
+ ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
if (ret)
- exit_with_error(-ret);
+ exit_with_error(errno);
}
static void testapp_bpf_res(struct test_spec *test)
@@ -1580,8 +1561,6 @@ static void testapp_stats_tx_invalid_descs(struct test_spec *test)
pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
test->ifobj_tx->validation_func = validate_tx_invalid_descs;
testapp_validate_traffic(test);
-
- pkt_stream_restore_default(test);
}
static void testapp_stats_rx_full(struct test_spec *test)
@@ -1597,8 +1576,6 @@ static void testapp_stats_rx_full(struct test_spec *test)
test->ifobj_rx->release_rx = false;
test->ifobj_rx->validation_func = validate_rx_full;
testapp_validate_traffic(test);
-
- pkt_stream_restore_default(test);
}
static void testapp_stats_fill_empty(struct test_spec *test)
@@ -1613,8 +1590,6 @@ static void testapp_stats_fill_empty(struct test_spec *test)
test->ifobj_rx->use_fill_ring = false;
test->ifobj_rx->validation_func = validate_fill_empty;
testapp_validate_traffic(test);
-
- pkt_stream_restore_default(test);
}
/* Simple test */
@@ -1647,7 +1622,6 @@ static bool testapp_unaligned(struct test_spec *test)
test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
testapp_validate_traffic(test);
- pkt_stream_restore_default(test);
return true;
}
@@ -1657,7 +1631,6 @@ static void testapp_single_pkt(struct test_spec *test)
pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
testapp_validate_traffic(test);
- pkt_stream_restore_default(test);
}
static void testapp_invalid_desc(struct test_spec *test)
@@ -1698,7 +1671,51 @@ static void testapp_invalid_desc(struct test_spec *test)
pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
testapp_validate_traffic(test);
- pkt_stream_restore_default(test);
+}
+
+static void testapp_xdp_drop(struct test_spec *test)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+ test_spec_set_name(test, "XDP_DROP_HALF");
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ pkt_stream_receive_half(test);
+ testapp_validate_traffic(test);
+}
+
+static void testapp_poll_txq_tmout(struct test_spec *test)
+{
+ test_spec_set_name(test, "POLL_TXQ_FULL");
+
+ test->ifobj_tx->use_poll = true;
+ /* create an invalid frame by setting umem frame_size and pkt length equal to 2048 */
+ test->ifobj_tx->umem->frame_size = 2048;
+ pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
+ testapp_validate_traffic_single_thread(test, test->ifobj_tx);
+}
+
+static void testapp_poll_rxq_tmout(struct test_spec *test)
+{
+ test_spec_set_name(test, "POLL_RXQ_EMPTY");
+ test->ifobj_rx->use_poll = true;
+ testapp_validate_traffic_single_thread(test, test->ifobj_rx);
+}
+
+static int xsk_load_xdp_programs(struct ifobject *ifobj)
+{
+ ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
+ if (libbpf_get_error(ifobj->xdp_progs))
+ return libbpf_get_error(ifobj->xdp_progs);
+
+ return 0;
+}
+
+static void xsk_unload_xdp_programs(struct ifobject *ifobj)
+{
+ xsk_xdp_progs__destroy(ifobj->xdp_progs);
}
static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
@@ -1706,6 +1723,7 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
const u16 src_port, thread_func_t func_ptr)
{
struct in_addr ip;
+ int err;
memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
memcpy(ifobj->src_mac, src_mac, ETH_ALEN);
@@ -1720,6 +1738,12 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
ifobj->src_port = src_port;
ifobj->func_ptr = func_ptr;
+
+ err = xsk_load_xdp_programs(ifobj);
+ if (err) {
+ printf("Error loading XDP program\n");
+ exit_with_error(err);
+ }
}
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
@@ -1764,8 +1788,6 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
test->ifobj_rx->umem->frame_size = 2048;
pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE);
testapp_validate_traffic(test);
-
- pkt_stream_restore_default(test);
break;
case TEST_TYPE_RX_POLL:
test->ifobj_rx->use_poll = true;
@@ -1778,18 +1800,10 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
testapp_validate_traffic(test);
break;
case TEST_TYPE_POLL_TXQ_TMOUT:
- test_spec_set_name(test, "POLL_TXQ_FULL");
- test->ifobj_tx->use_poll = true;
- /* create invalid frame by set umem frame_size and pkt length equal to 2048 */
- test->ifobj_tx->umem->frame_size = 2048;
- pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
- testapp_validate_traffic_single_thread(test, test->ifobj_tx, type);
- pkt_stream_restore_default(test);
+ testapp_poll_txq_tmout(test);
break;
case TEST_TYPE_POLL_RXQ_TMOUT:
- test_spec_set_name(test, "POLL_RXQ_EMPTY");
- test->ifobj_rx->use_poll = true;
- testapp_validate_traffic_single_thread(test, test->ifobj_rx, type);
+ testapp_poll_rxq_tmout(test);
break;
case TEST_TYPE_ALIGNED_INV_DESC:
test_spec_set_name(test, "ALIGNED_INV_DESC");
@@ -1818,6 +1832,9 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
case TEST_TYPE_HEADROOM:
testapp_headroom(test);
break;
+ case TEST_TYPE_XDP_DROP_HALF:
+ testapp_xdp_drop(test);
+ break;
default:
break;
}
@@ -1825,6 +1842,7 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
if (!test->fail)
ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
test->name);
+ pkt_stream_restore_default(test);
}
static struct ifobject *ifobject_create(void)
@@ -1843,8 +1861,6 @@ static struct ifobject *ifobject_create(void)
if (!ifobj->umem)
goto out_umem;
- ifobj->ns_fd = -1;
-
return ifobj;
out_umem:
@@ -1856,14 +1872,12 @@ out_xsk_arr:
static void ifobject_delete(struct ifobject *ifobj)
{
- if (ifobj->ns_fd != -1)
- close(ifobj->ns_fd);
free(ifobj->umem);
free(ifobj->xsk_arr);
free(ifobj);
}
-static bool is_xdp_supported(struct ifobject *ifobject)
+static bool is_xdp_supported(int ifindex)
{
int flags = XDP_FLAGS_DRV_MODE;
@@ -1872,7 +1886,6 @@ static bool is_xdp_supported(struct ifobject *ifobject)
BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
BPF_EXIT_INSN()
};
- int ifindex = if_nametoindex(ifobject->ifname);
int prog_fd, insn_cnt = ARRAY_SIZE(insns);
int err;
@@ -1900,7 +1913,7 @@ int main(int argc, char **argv)
int modes = TEST_MODE_SKB + 1;
u32 i, j, failed_tests = 0;
struct test_spec test;
- bool shared_umem;
+ bool shared_netdev;
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
@@ -1915,27 +1928,27 @@ int main(int argc, char **argv)
setlocale(LC_ALL, "");
parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
- shared_umem = !strcmp(ifobj_tx->ifname, ifobj_rx->ifname);
- ifobj_tx->shared_umem = shared_umem;
- ifobj_rx->shared_umem = shared_umem;
+ shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
+ ifobj_tx->shared_umem = shared_netdev;
+ ifobj_rx->shared_umem = shared_netdev;
if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
usage(basename(argv[0]));
ksft_exit_xfail();
}
- init_iface(ifobj_tx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
- worker_testapp_validate_tx);
- init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
- worker_testapp_validate_rx);
-
- if (is_xdp_supported(ifobj_tx)) {
+ if (is_xdp_supported(ifobj_tx->ifindex)) {
modes++;
if (ifobj_zc_avail(ifobj_tx))
modes++;
}
+ init_iface(ifobj_rx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
+ worker_testapp_validate_rx);
+ init_iface(ifobj_tx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
+ worker_testapp_validate_tx);
+
test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
@@ -1946,7 +1959,7 @@ int main(int argc, char **argv)
ksft_set_plan(modes * TEST_TYPE_MAX);
- for (i = 0; i < modes; i++)
+ for (i = 0; i < modes; i++) {
for (j = 0; j < TEST_TYPE_MAX; j++) {
test_spec_init(&test, ifobj_tx, ifobj_rx, i);
run_pkt_test(&test, i, j);
@@ -1955,9 +1968,12 @@ int main(int argc, char **argv)
if (test.fail)
failed_tests++;
}
+ }
pkt_stream_delete(tx_pkt_stream_default);
pkt_stream_delete(rx_pkt_stream_default);
+ xsk_unload_xdp_programs(ifobj_tx);
+ xsk_unload_xdp_programs(ifobj_rx);
ifobject_delete(ifobj_tx);
ifobject_delete(ifobj_rx);
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index edb76d2def9f..3e8ec7d8ec32 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -5,6 +5,8 @@
#ifndef XSKXCEIVER_H_
#define XSKXCEIVER_H_
+#include "xsk_xdp_progs.skel.h"
+
#ifndef SOL_XDP
#define SOL_XDP 283
#endif
@@ -30,7 +32,6 @@
#define TEST_CONTINUE 1
#define MAX_INTERFACES 2
#define MAX_INTERFACE_NAME_CHARS 16
-#define MAX_INTERFACES_NAMESPACE_CHARS 16
#define MAX_SOCKETS 2
#define MAX_TEST_NAME_SIZE 32
#define MAX_TEARDOWN_ITER 10
@@ -86,6 +87,7 @@ enum test_type {
TEST_TYPE_STATS_RX_FULL,
TEST_TYPE_STATS_FILL_EMPTY,
TEST_TYPE_BPF_RES,
+ TEST_TYPE_XDP_DROP_HALF,
TEST_TYPE_MAX
};
@@ -133,18 +135,19 @@ typedef void *(*thread_func_t)(void *arg);
struct ifobject {
char ifname[MAX_INTERFACE_NAME_CHARS];
- char nsname[MAX_INTERFACES_NAMESPACE_CHARS];
struct xsk_socket_info *xsk;
struct xsk_socket_info *xsk_arr;
struct xsk_umem_info *umem;
thread_func_t func_ptr;
validation_func_t validation_func;
struct pkt_stream *pkt_stream;
- int ns_fd;
- int xsk_map_fd;
+ struct xsk_xdp_progs *xdp_progs;
+ struct bpf_map *xskmap;
+ struct bpf_program *xdp_prog;
+ enum test_mode mode;
+ int ifindex;
u32 dst_ip;
u32 src_ip;
- u32 xdp_flags;
u32 bind_flags;
u16 src_port;
u16 dst_port;
@@ -164,6 +167,10 @@ struct test_spec {
struct ifobject *ifobj_rx;
struct pkt_stream *tx_pkt_stream_default;
struct pkt_stream *rx_pkt_stream_default;
+ struct bpf_program *xdp_prog_rx;
+ struct bpf_program *xdp_prog_tx;
+ struct bpf_map *xskmap_rx;
+ struct bpf_map *xskmap_tx;
u16 total_steps;
u16 current_step;
u16 nb_sockets;
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
index 71066bc4b886..5492fa5550d7 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
@@ -5,18 +5,18 @@
# prioritized according to the default priority specified at the port.
# rx_octets_prio_* counters are used to verify the prioritization.
#
-# +-----------------------+
-# | H1 |
-# | + $h1 |
-# | | 192.0.2.1/28 |
-# +----|------------------+
+# +----------------------------------+
+# | H1 |
+# | + $h1 |
+# | | 192.0.2.1/28 |
+# +----|-----------------------------+
# |
-# +----|------------------+
-# | SW | |
-# | + $swp1 |
-# | 192.0.2.2/28 |
-# | APP=<prio>,1,0 |
-# +-----------------------+
+# +----|-----------------------------+
+# | SW | |
+# | + $swp1 |
+# | 192.0.2.2/28 |
+# | dcb app default-prio <prio> |
+# +----------------------------------+
ALL_TESTS="
ping_ipv4
@@ -29,42 +29,6 @@ NUM_NETIFS=2
: ${HIT_TIMEOUT:=1000} # ms
source $lib_dir/lib.sh
-declare -a APP
-
-defprio_install()
-{
- local dev=$1; shift
- local prio=$1; shift
- local app="app=$prio,1,0"
-
- lldptool -T -i $dev -V APP $app >/dev/null
- lldpad_app_wait_set $dev
- APP[$prio]=$app
-}
-
-defprio_uninstall()
-{
- local dev=$1; shift
- local prio=$1; shift
- local app=${APP[$prio]}
-
- lldptool -T -i $dev -V APP -d $app >/dev/null
- lldpad_app_wait_del
- unset APP[$prio]
-}
-
-defprio_flush()
-{
- local dev=$1; shift
- local prio
-
- if ((${#APP[@]})); then
- lldptool -T -i $dev -V APP -d ${APP[@]} >/dev/null
- fi
- lldpad_app_wait_del
- APP=()
-}
-
h1_create()
{
simple_if_init $h1 192.0.2.1/28
@@ -83,7 +47,7 @@ switch_create()
switch_destroy()
{
- defprio_flush $swp1
+ dcb app flush dev $swp1 default-prio
ip addr del dev $swp1 192.0.2.2/28
ip link set dev $swp1 down
}
@@ -124,7 +88,7 @@ __test_defprio()
RET=0
- defprio_install $swp1 $prio_install
+ dcb app add dev $swp1 default-prio $prio_install
local t0=$(ethtool_stats_get $swp1 rx_frames_prio_$prio_observe)
mausezahn -q $h1 -d 100m -c 10 -t arp reply
@@ -134,7 +98,7 @@ __test_defprio()
check_err $? "Default priority $prio_install/$prio_observe: Expected to capture 10 packets, got $((t1 - t0))."
log_test "Default priority $prio_install/$prio_observe"
- defprio_uninstall $swp1 $prio_install
+ dcb app del dev $swp1 default-prio $prio_install
}
test_defprio()
@@ -145,7 +109,7 @@ test_defprio()
__test_defprio $prio $prio
done
- defprio_install $swp1 3
+ dcb app add dev $swp1 default-prio 3
__test_defprio 0 3
__test_defprio 1 3
__test_defprio 2 3
@@ -153,7 +117,7 @@ test_defprio()
__test_defprio 5 5
__test_defprio 6 6
__test_defprio 7 7
- defprio_uninstall $swp1 3
+ dcb app del dev $swp1 default-prio 3
}
trap cleanup EXIT
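
The conversion above swaps the lldptool/lldpad flow, which is asynchronous and needed the wait helpers removed from lib.sh further down, for the iproute2 `dcb' tool, which talks to the kernel DCB interface directly and completes synchronously. A minimal sketch of the new command shapes, with the port name as a placeholder:

    # Set, inspect and remove a default port priority.
    dcb app add dev swp1 default-prio 3
    dcb app show dev swp1 default-prio
    dcb app del dev swp1 default-prio 3
    # Or drop every default-prio entry on the port at once:
    dcb app flush dev swp1 default-prio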
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
index 28a570006d4d..87c41f5727c9 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -20,7 +20,7 @@
# | SW | | |
# | +-|----------------------------------------------------------------|-+ |
# | | + $swp1 BR $swp2 + | |
-# | | APP=0,5,10 .. 7,5,17 APP=0,5,20 .. 7,5,27 | |
+# | | dcb dscp-prio 10:0...17:7 dcb dscp-prio 20:0...27:7 | |
# | +--------------------------------------------------------------------+ |
# +---------------------------------------------------------------------------+
@@ -62,16 +62,6 @@ h2_destroy()
simple_if_fini $h2 192.0.2.2/28
}
-dscp_map()
-{
- local base=$1; shift
- local prio
-
- for prio in {0..7}; do
- echo app=$prio,5,$((base + prio))
- done
-}
-
switch_create()
{
ip link add name br1 type bridge vlan_filtering 1
@@ -81,17 +71,14 @@ switch_create()
ip link set dev $swp2 master br1
ip link set dev $swp2 up
- lldptool -T -i $swp1 -V APP $(dscp_map 10) >/dev/null
- lldptool -T -i $swp2 -V APP $(dscp_map 20) >/dev/null
- lldpad_app_wait_set $swp1
- lldpad_app_wait_set $swp2
+ dcb app add dev $swp1 dscp-prio 10:0 11:1 12:2 13:3 14:4 15:5 16:6 17:7
+ dcb app add dev $swp2 dscp-prio 20:0 21:1 22:2 23:3 24:4 25:5 26:6 27:7
}
switch_destroy()
{
- lldptool -T -i $swp2 -V APP -d $(dscp_map 20) >/dev/null
- lldptool -T -i $swp1 -V APP -d $(dscp_map 10) >/dev/null
- lldpad_app_wait_del
+ dcb app del dev $swp2 dscp-prio 20:0 21:1 22:2 23:3 24:4 25:5 26:6 27:7
+ dcb app del dev $swp1 dscp-prio 10:0 11:1 12:2 13:3 14:4 15:5 16:6 17:7
ip link set dev $swp2 down
ip link set dev $swp2 nomaster
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
index 4cb2aa65278a..f6c23f84423e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
@@ -94,16 +94,6 @@ h2_destroy()
simple_if_fini $h2 192.0.2.18/28
}
-dscp_map()
-{
- local base=$1; shift
- local prio
-
- for prio in {0..7}; do
- echo app=$prio,5,$((base + prio))
- done
-}
-
switch_create()
{
simple_if_init $swp1 192.0.2.2/28
@@ -112,17 +102,14 @@ switch_create()
tc qdisc add dev $swp1 clsact
tc qdisc add dev $swp2 clsact
- lldptool -T -i $swp1 -V APP $(dscp_map 0) >/dev/null
- lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null
- lldpad_app_wait_set $swp1
- lldpad_app_wait_set $swp2
+ dcb app add dev $swp1 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+ dcb app add dev $swp2 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
}
switch_destroy()
{
- lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null
- lldptool -T -i $swp1 -V APP -d $(dscp_map 0) >/dev/null
- lldpad_app_wait_del
+ dcb app del dev $swp2 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+ dcb app del dev $swp1 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
tc qdisc del dev $swp2 clsact
tc qdisc del dev $swp1 clsact
@@ -265,13 +252,11 @@ test_dscp_leftover()
{
echo "Test that last removed DSCP rule is deconfigured correctly"
- lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null
- lldpad_app_wait_del
+ dcb app del dev $swp2 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
__test_update 0 zero
- lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null
- lldpad_app_wait_set $swp2
+ dcb app add dev $swp2 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
}
trap cleanup EXIT
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index a08c02abde12..7f7d20f22207 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -17,6 +17,18 @@ SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV_NAME/net/
DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV_NAME/
DL_HANDLE=netdevsim/$DEV_NAME
+wait_for_devlink()
+{
+ "$@" | grep -q $DL_HANDLE
+}
+
+devlink_wait()
+{
+ local timeout=$1
+
+ busywait "$timeout" wait_for_devlink devlink dev
+}
+
fw_flash_test()
{
RET=0
@@ -256,6 +268,9 @@ netns_reload_test()
ip netns del testns2
ip netns del testns1
+ # Wait until netns async cleanup is done.
+ devlink_wait 2000
+
log_test "netns reload test"
}
@@ -348,6 +363,9 @@ resource_test()
ip netns del testns2
ip netns del testns1
+ # Wait until netns async cleanup is done.
+ devlink_wait 2000
+
log_test "resource test"
}
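
The devlink_wait helper added above builds on lib.sh's busywait, which re-runs its command until the command exits 0 or the timeout (in milliseconds) elapses. A standalone sketch of the same pattern, with a hypothetical predicate:

    # busywait TIMEOUT CMD [ARGS...]: poll CMD until it succeeds or time runs out.
    wait_for_file()
    {
        [ -e "$1" ]
    }
    busywait 2000 wait_for_file /tmp/ready || echo "timed out waiting for /tmp/ready"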
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 3007e98a6d64..6cd8993454d7 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -38,6 +38,7 @@ TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
TEST_PROGS += srv6_hencap_red_l3vpn_test.sh
TEST_PROGS += srv6_hl2encap_red_l2vpn_test.sh
TEST_PROGS += srv6_end_next_csid_l3vpn_test.sh
+TEST_PROGS += srv6_end_flavors_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
TEST_PROGS += arp_ndisc_evict_nocarrier.sh
TEST_PROGS += ndisc_unsolicited_na_test.sh
@@ -45,6 +46,8 @@ TEST_PROGS += arp_ndisc_untracked_subnets.sh
TEST_PROGS += stress_reuseport_listen.sh
TEST_PROGS += l2_tos_ttl_inherit.sh
TEST_PROGS += bind_bhash.sh
+TEST_PROGS += ip_local_port_range.sh
+TEST_PROGS += rps_default_mask.sh
TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
@@ -75,14 +78,61 @@ TEST_GEN_PROGS += so_incoming_cpu
TEST_PROGS += sctp_vrf.sh
TEST_GEN_FILES += sctp_hello
TEST_GEN_FILES += csum
+TEST_GEN_FILES += nat6to4.o
+TEST_GEN_FILES += ip_local_port_range
TEST_FILES := settings
include ../lib.mk
-include bpf/Makefile
-
$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
$(OUTPUT)/tcp_mmap: LDLIBS += -lpthread
$(OUTPUT)/tcp_inq: LDLIBS += -lpthread
$(OUTPUT)/bind_bhash: LDLIBS += -lpthread
+
+# Rules to generate bpf obj nat6to4.o
+CLANG ?= clang
+SCRATCH_DIR := $(OUTPUT)/tools
+BUILD_DIR := $(SCRATCH_DIR)/build
+BPFDIR := $(abspath ../../../lib/bpf)
+APIDIR := $(abspath ../../../include/uapi)
+
+CCINCLUDE += -I../bpf
+CCINCLUDE += -I../../../../usr/include/
+CCINCLUDE += -I$(SCRATCH_DIR)/include
+
+BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
+
+MAKE_DIRS := $(BUILD_DIR)/libbpf
+$(MAKE_DIRS):
+ mkdir -p $@
+
+# Get Clang's default includes on this system, as opposed to those seen by
+# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
+#
+# Use '-idirafter': Don't interfere with include mechanics except where the
+# build would have failed anyways.
+define get_sys_includes
+$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
+ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
+endef
+
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
+
+$(OUTPUT)/nat6to4.o: nat6to4.c $(BPFOBJ) | $(MAKE_DIRS)
+ $(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) $(CLANG_SYS_INCLUDES) -o $@
+
+$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
+ $(APIDIR)/linux/bpf.h \
+ | $(BUILD_DIR)/libbpf
+ $(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
+ EXTRA_CFLAGS='-g -O0' \
+ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
+
+EXTRA_CLEAN := $(SCRATCH_DIR)
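
The get_sys_includes probe above can be reproduced by hand: it asks the host compiler for its default include search path and rewrites each directory into an -idirafter option (the exact output varies by toolchain):

    clang -v -E - </dev/null 2>&1 |
        sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }'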
diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
deleted file mode 100644
index 4abaf16d2077..000000000000
--- a/tools/testing/selftests/net/bpf/Makefile
+++ /dev/null
@@ -1,51 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-CLANG ?= clang
-SCRATCH_DIR := $(OUTPUT)/tools
-BUILD_DIR := $(SCRATCH_DIR)/build
-BPFDIR := $(abspath ../../../lib/bpf)
-APIDIR := $(abspath ../../../include/uapi)
-
-CCINCLUDE += -I../../bpf
-CCINCLUDE += -I../../../../../usr/include/
-CCINCLUDE += -I$(SCRATCH_DIR)/include
-
-BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
-
-MAKE_DIRS := $(BUILD_DIR)/libbpf $(OUTPUT)/bpf
-$(MAKE_DIRS):
- mkdir -p $@
-
-TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
-all: $(TEST_CUSTOM_PROGS)
-
-# Get Clang's default includes on this system, as opposed to those seen by
-# '-target bpf'. This fixes "missing" files on some architectures/distros,
-# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
-#
-# Use '-idirafter': Don't interfere with include mechanics except where the
-# build would have failed anyways.
-define get_sys_includes
-$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
- | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
-$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
-endef
-
-ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
-endif
-
-CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
-
-$(TEST_CUSTOM_PROGS): $(OUTPUT)/%.o: %.c $(BPFOBJ) | $(MAKE_DIRS)
- $(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) $(CLANG_SYS_INCLUDES) -o $@
-
-$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
- $(APIDIR)/linux/bpf.h \
- | $(BUILD_DIR)/libbpf
- $(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
- EXTRA_CFLAGS='-g -O0' \
- DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
-
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR)
-
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index bd89198cd817..cc9fd55ab869 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -3,6 +3,9 @@ CONFIG_NET_NS=y
CONFIG_BPF_SYSCALL=y
CONFIG_TEST_BPF=m
CONFIG_NUMA=y
+CONFIG_RPS=y
+CONFIG_SYSFS=y
+CONFIG_PROC_SYSCTL=y
CONFIG_NET_VRF=y
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_IPV6=y
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 5637b5dadabd..70ea8798b1f6 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -2065,6 +2065,8 @@ EOF
################################################################################
# main
+trap cleanup EXIT
+
while getopts :t:pPhv o
do
case $o in
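
The trap registered above ensures the test's teardown runs even when a test case aborts partway through. The idiom, sketched with a hypothetical namespace:

    cleanup()
    {
        ip netns del testns 2>/dev/null
    }
    trap cleanup EXIT    # fires on normal exit and on early exit paths alike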
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index 453ae006fbcf..91201ab3c4fc 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -4,6 +4,7 @@ TEST_PROGS = bridge_igmp.sh \
bridge_locked_port.sh \
bridge_mdb.sh \
bridge_mdb_host.sh \
+ bridge_mdb_max.sh \
bridge_mdb_port_down.sh \
bridge_mld.sh \
bridge_port_isolation.sh \
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
index 2fa5973c0c28..ae3f9462a2b6 100755
--- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
@@ -742,10 +742,109 @@ cfg_test_port()
cfg_test_port_l2
}
+ipv4_grps_get()
+{
+ local max_grps=$1; shift
+ local i
+
+ for i in $(seq 0 $((max_grps - 1))); do
+ echo "239.1.1.$i"
+ done
+}
+
+ipv6_grps_get()
+{
+ local max_grps=$1; shift
+ local i
+
+ for i in $(seq 0 $((max_grps - 1))); do
+ echo "ff0e::$(printf %x $i)"
+ done
+}
+
+l2_grps_get()
+{
+ local max_grps=$1; shift
+ local i
+
+ for i in $(seq 0 $((max_grps - 1))); do
+ echo "01:00:00:00:00:$(printf %02x $i)"
+ done
+}
+
+cfg_test_dump_common()
+{
+ local name=$1; shift
+ local fn=$1; shift
+ local max_bridges=2
+ local max_grps=256
+ local max_ports=32
+ local num_entries
+ local batch_file
+ local grp
+ local i j
+
+ RET=0
+
+ # Create net devices.
+ for i in $(seq 1 $max_bridges); do
+ ip link add name br-test${i} up type bridge vlan_filtering 1 \
+ mcast_snooping 1
+ for j in $(seq 1 $max_ports); do
+ ip link add name br-test${i}-du${j} up \
+ master br-test${i} type dummy
+ done
+ done
+
+ # Create batch file with MDB entries.
+ batch_file=$(mktemp)
+ for i in $(seq 1 $max_bridges); do
+ for j in $(seq 1 $max_ports); do
+ for grp in $($fn $max_grps); do
+ echo "mdb add dev br-test${i} \
+ port br-test${i}-du${j} grp $grp \
+ permanent vid 1" >> $batch_file
+ done
+ done
+ done
+
+ # Program the batch file and check for expected number of entries.
+ bridge -b $batch_file
+ for i in $(seq 1 $max_bridges); do
+ num_entries=$(bridge mdb show dev br-test${i} | \
+ grep "permanent" | wc -l)
+ [[ $num_entries -eq $((max_grps * max_ports)) ]]
+ check_err $? "Wrong number of entries in br-test${i}"
+ done
+
+ # Cleanup.
+ rm $batch_file
+ for i in $(seq 1 $max_bridges); do
+ ip link del dev br-test${i}
+ for j in $(seq $max_ports); do
+ ip link del dev br-test${i}-du${j}
+ done
+ done
+
+ log_test "$name large scale dump tests"
+}
+
+# Check large scale dump.
+cfg_test_dump()
+{
+ echo
+ log_info "# Large scale dump tests"
+
+ cfg_test_dump_common "IPv4" ipv4_grps_get
+ cfg_test_dump_common "IPv6" ipv6_grps_get
+ cfg_test_dump_common "L2" l2_grps_get
+}
+
cfg_test()
{
cfg_test_host
cfg_test_port
+ cfg_test_dump
}
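
cfg_test_dump_common above leans on bridge(8) batch mode: `bridge -b FILE' reads one command per line, written without the leading "bridge", so the 2 bridges x 32 ports x 256 groups worth of entries are programmed in a single invocation. A condensed sketch of the idiom, with placeholder device names:

    batch=$(mktemp)
    for i in $(seq 0 255); do
        echo "mdb add dev br-test1 port br-test1-du1 grp 239.1.1.$i permanent vid 1"
    done > "$batch"
    bridge -b "$batch"
    rm "$batch"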
__fwd_test_host_ip()
@@ -1018,26 +1117,6 @@ fwd_test()
ip -6 address del fe80::1/64 dev br0
}
-igmpv3_is_in_get()
-{
- local igmpv3
-
- igmpv3=$(:
- )"22:"$( : Type - Membership Report
- )"00:"$( : Reserved
- )"2a:f8:"$( : Checksum
- )"00:00:"$( : Reserved
- )"00:01:"$( : Number of Group Records
- )"01:"$( : Record Type - IS_IN
- )"00:"$( : Aux Data Len
- )"00:01:"$( : Number of Sources
- )"ef:01:01:01:"$( : Multicast Address - 239.1.1.1
- )"c0:00:02:02"$( : Source Address - 192.0.2.2
- )
-
- echo $igmpv3
-}
-
ctrl_igmpv3_is_in_test()
{
RET=0
@@ -1049,7 +1128,7 @@ ctrl_igmpv3_is_in_test()
# IS_IN ( 192.0.2.2 )
$MZ $h1.10 -c 1 -A 192.0.2.1 -B 239.1.1.1 \
- -t ip proto=2,p=$(igmpv3_is_in_get) -q
+ -t ip proto=2,p=$(igmpv3_is_in_get 239.1.1.1 192.0.2.2) -q
bridge -d mdb show dev br0 vid 10 | grep 239.1.1.1 | grep -q 192.0.2.2
check_fail $? "Permanent entry affected by IGMP packet"
@@ -1062,7 +1141,7 @@ ctrl_igmpv3_is_in_test()
# IS_IN ( 192.0.2.2 )
$MZ $h1.10 -c 1 -A 192.0.2.1 -B 239.1.1.1 \
- -t ip proto=2,p=$(igmpv3_is_in_get) -q
+ -t ip proto=2,p=$(igmpv3_is_in_get 239.1.1.1 192.0.2.2) -q
bridge -d mdb show dev br0 vid 10 | grep 239.1.1.1 | grep -v "src" | \
grep -q 192.0.2.2
@@ -1074,36 +1153,7 @@ ctrl_igmpv3_is_in_test()
bridge mdb del dev br0 port $swp1 grp 239.1.1.1 vid 10
- log_test "IGMPv3 MODE_IS_INCLUE tests"
-}
-
-mldv2_is_in_get()
-{
- local hbh
- local icmpv6
-
- hbh=$(:
- )"3a:"$( : Next Header - ICMPv6
- )"00:"$( : Hdr Ext Len
- )"00:00:00:00:00:00:"$( : Options and Padding
- )
-
- icmpv6=$(:
- )"8f:"$( : Type - MLDv2 Report
- )"00:"$( : Code
- )"45:39:"$( : Checksum
- )"00:00:"$( : Reserved
- )"00:01:"$( : Number of Group Records
- )"01:"$( : Record Type - IS_IN
- )"00:"$( : Aux Data Len
- )"00:01:"$( : Number of Sources
- )"ff:0e:00:00:00:00:00:00:"$( : Multicast address - ff0e::1
- )"00:00:00:00:00:00:00:01:"$( :
- )"20:01:0d:b8:00:01:00:00:"$( : Source Address - 2001:db8:1::2
- )"00:00:00:00:00:00:00:02:"$( :
- )
-
- echo ${hbh}${icmpv6}
+ log_test "IGMPv3 MODE_IS_INCLUDE tests"
}
ctrl_mldv2_is_in_test()
@@ -1116,8 +1166,9 @@ ctrl_mldv2_is_in_test()
filter_mode include source_list 2001:db8:1::1
# IS_IN ( 2001:db8:1::2 )
+ local p=$(mldv2_is_in_get fe80::1 ff0e::1 2001:db8:1::2)
$MZ -6 $h1.10 -c 1 -A fe80::1 -B ff0e::1 \
- -t ip hop=1,next=0,p=$(mldv2_is_in_get) -q
+ -t ip hop=1,next=0,p="$p" -q
bridge -d mdb show dev br0 vid 10 | grep ff0e::1 | \
grep -q 2001:db8:1::2
@@ -1131,7 +1182,7 @@ ctrl_mldv2_is_in_test()
# IS_IN ( 2001:db8:1::2 )
$MZ -6 $h1.10 -c 1 -A fe80::1 -B ff0e::1 \
- -t ip hop=1,next=0,p=$(mldv2_is_in_get) -q
+ -t ip hop=1,next=0,p="$p" -q
bridge -d mdb show dev br0 vid 10 | grep ff0e::1 | grep -v "src" | \
grep -q 2001:db8:1::2
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb_max.sh b/tools/testing/selftests/net/forwarding/bridge_mdb_max.sh
new file mode 100755
index 000000000000..ae255b662ba3
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb_max.sh
@@ -0,0 +1,1336 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-----------------------+ +------------------------+
+# | H1 (vrf) | | H2 (vrf) |
+# | + $h1.10 | | + $h2.10 |
+# | | 192.0.2.1/28 | | | 192.0.2.2/28 |
+# | | 2001:db8:1::1/64 | | | 2001:db8:1::2/64 |
+# | | | | | |
+# | | + $h1.20 | | | + $h2.20 |
+# | \ | 198.51.100.1/24 | | \ | 198.51.100.2/24 |
+# | \ | 2001:db8:2::1/64 | | \ | 2001:db8:2::2/64 |
+# | \| | | \| |
+# | + $h1 | | + $h2 |
+# +----|------------------+ +----|-------------------+
+# | |
+# +----|--------------------------------------------------|-------------------+
+# | SW | | |
+# | +--|--------------------------------------------------|-----------------+ |
+# | | + $swp1 BR0 (802.1q) + $swp2 | |
+# | | vid 10 vid 10 | |
+# | | vid 20 vid 20 | |
+# | | | |
+# | +-----------------------------------------------------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ test_8021d
+ test_8021q
+ test_8021qvs
+"
+
+NUM_NETIFS=4
+source lib.sh
+source tc_common.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ vlan_create $h1 10 v$h1 192.0.2.1/28 2001:db8:1::1/64
+ vlan_create $h1 20 v$h1 198.51.100.1/24 2001:db8:2::1/64
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 20
+ vlan_destroy $h1 10
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ vlan_create $h2 10 v$h2 192.0.2.2/28
+ vlan_create $h2 20 v$h2 198.51.100.2/24
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 20
+ vlan_destroy $h2 10
+ simple_if_fini $h2
+}
+
+switch_create_8021d()
+{
+ log_info "802.1d tests"
+
+ ip link add name br0 type bridge vlan_filtering 0 \
+ mcast_snooping 1 \
+ mcast_igmp_version 3 mcast_mld_version 2
+ ip link set dev br0 up
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp1 up
+ bridge link set dev $swp1 fastleave on
+
+ ip link set dev $swp2 master br0
+ ip link set dev $swp2 up
+}
+
+switch_create_8021q()
+{
+ local br_flags=$1; shift
+
+ log_info "802.1q $br_flags${br_flags:+ }tests"
+
+ ip link add name br0 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 1 $br_flags \
+ mcast_igmp_version 3 mcast_mld_version 2
+ bridge vlan add vid 10 dev br0 self
+ bridge vlan add vid 20 dev br0 self
+ ip link set dev br0 up
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp1 up
+ bridge link set dev $swp1 fastleave on
+ bridge vlan add vid 10 dev $swp1
+ bridge vlan add vid 20 dev $swp1
+
+ ip link set dev $swp2 master br0
+ ip link set dev $swp2 up
+ bridge vlan add vid 10 dev $swp2
+ bridge vlan add vid 20 dev $swp2
+}
+
+switch_create_8021qvs()
+{
+ switch_create_8021q "mcast_vlan_snooping 1"
+ bridge vlan global set dev br0 vid 10 mcast_igmp_version 3
+ bridge vlan global set dev br0 vid 10 mcast_mld_version 2
+ bridge vlan global set dev br0 vid 20 mcast_igmp_version 3
+ bridge vlan global set dev br0 vid 20 mcast_mld_version 2
+}
+
+switch_destroy()
+{
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ ip link set dev br0 down
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy 2>/dev/null
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+cfg_src_list()
+{
+ local IPs=("$@")
+ local IPstr=$(echo ${IPs[@]} | tr '[:space:]' , | sed 's/,$//')
+
+ echo ${IPstr:+source_list }${IPstr}
+}
+
+cfg_group_op()
+{
+ local op=$1; shift
+ local locus=$1; shift
+ local GRP=$1; shift
+ local state=$1; shift
+ local IPs=("$@")
+
+ local source_list=$(cfg_src_list ${IPs[@]})
+
+ # Everything besides `bridge mdb' uses the "dev X vid Y" syntax,
+ # so we use it here as well and convert.
+ local br_locus=$(echo "$locus" | sed 's/^dev /port /')
+
+ bridge mdb $op dev br0 $br_locus grp $GRP $state \
+ filter_mode include $source_list
+}
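+
+For illustration, a hypothetical invocation of the helper above and the command it expands to (the port name is a placeholder):
+
+    # cfg_group_op add "dev swp1 vid 10" 239.1.1.1 temp 192.0.2.1
+    # expands to:
+    # bridge mdb add dev br0 port swp1 vid 10 grp 239.1.1.1 temp \
+    #         filter_mode include source_list 192.0.2.1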
+
+cfg4_entries_op()
+{
+ local op=$1; shift
+ local locus=$1; shift
+ local state=$1; shift
+ local n=$1; shift
+ local grp=${1:-1}; shift
+
+ local GRP=239.1.1.${grp}
+ local IPs=$(seq -f 192.0.2.%g 1 $((n - 1)))
+ cfg_group_op "$op" "$locus" "$GRP" "$state" ${IPs[@]}
+}
+
+cfg4_entries_add()
+{
+ cfg4_entries_op add "$@"
+}
+
+cfg4_entries_del()
+{
+ cfg4_entries_op del "$@"
+}
+
+cfg6_entries_op()
+{
+ local op=$1; shift
+ local locus=$1; shift
+ local state=$1; shift
+ local n=$1; shift
+ local grp=${1:-1}; shift
+
+ local GRP=ff0e::${grp}
+ local IPs=$(printf "2001:db8:1::%x\n" $(seq 1 $((n - 1))))
+ cfg_group_op "$op" "$locus" "$GRP" "$state" ${IPs[@]}
+}
+
+cfg6_entries_add()
+{
+ cfg6_entries_op add "$@"
+}
+
+cfg6_entries_del()
+{
+ cfg6_entries_op del "$@"
+}
+
+locus_dev_peer()
+{
+ local dev_kw=$1; shift
+ local dev=$1; shift
+ local vid_kw=$1; shift
+ local vid=$1; shift
+
+ echo "$h1.${vid:-10}"
+}
+
+locus_dev()
+{
+ local dev_kw=$1; shift
+ local dev=$1; shift
+
+ echo $dev
+}
+
+ctl4_entries_add()
+{
+ local locus=$1; shift
+ local state=$1; shift
+ local n=$1; shift
+ local grp=${1:-1}; shift
+
+ local IPs=$(seq -f 192.0.2.%g 1 $((n - 1)))
+ local peer=$(locus_dev_peer $locus)
+ local GRP=239.1.1.${grp}
+ $MZ $peer -c 1 -A 192.0.2.1 -B $GRP \
+ -t ip proto=2,p=$(igmpv3_is_in_get $GRP $IPs) -q
+ sleep 1
+
+ local nn=$(bridge mdb show dev br0 | grep $GRP | wc -l)
+ if ((nn != n)); then
+ echo mcast_max_groups > /dev/stderr
+ false
+ fi
+}
+
+ctl4_entries_del()
+{
+ local locus=$1; shift
+ local state=$1; shift
+ local n=$1; shift
+ local grp=${1:-1}; shift
+
+ local peer=$(locus_dev_peer $locus)
+ local GRP=239.1.1.${grp}
+ $MZ $peer -c 1 -A 192.0.2.1 -B 224.0.0.2 \
+ -t ip proto=2,p=$(igmpv2_leave_get $GRP) -q
+ sleep 1
+ ! bridge mdb show dev br0 | grep -q $GRP
+}
+
+ctl6_entries_add()
+{
+ local locus=$1; shift
+ local state=$1; shift
+ local n=$1; shift
+ local grp=${1:-1}; shift
+
+ local IPs=$(printf "2001:db8:1::%x\n" $(seq 1 $((n - 1))))
+ local peer=$(locus_dev_peer $locus)
+ local SIP=fe80::1
+ local GRP=ff0e::${grp}
+ local p=$(mldv2_is_in_get $SIP $GRP $IPs)
+ $MZ -6 $peer -c 1 -A $SIP -B $GRP -t ip hop=1,next=0,p="$p" -q
+ sleep 1
+
+ local nn=$(bridge mdb show dev br0 | grep $GRP | wc -l)
+ if ((nn != n)); then
+ echo mcast_max_groups > /dev/stderr
+ false
+ fi
+}
+
+ctl6_entries_del()
+{
+ local locus=$1; shift
+ local state=$1; shift
+ local n=$1; shift
+ local grp=${1:-1}; shift
+
+ local peer=$(locus_dev_peer $locus)
+ local SIP=fe80::1
+ local GRP=ff0e::${grp}
+ local p=$(mldv1_done_get $SIP $GRP)
+ $MZ -6 $peer -c 1 -A $SIP -B $GRP -t ip hop=1,next=0,p="$p" -q
+ sleep 1
+ ! bridge mdb show dev br0 | grep -q $GRP
+}
+
+bridge_maxgroups_errmsg_check_cfg()
+{
+ local msg=$1; shift
+ local needle=$1; shift
+
+ echo "$msg" | grep -q mcast_max_groups
+ check_err $? "Adding MDB entries failed for the wrong reason: $msg"
+}
+
+bridge_maxgroups_errmsg_check_cfg4()
+{
+ bridge_maxgroups_errmsg_check_cfg "$@"
+}
+
+bridge_maxgroups_errmsg_check_cfg6()
+{
+ bridge_maxgroups_errmsg_check_cfg "$@"
+}
+
+bridge_maxgroups_errmsg_check_ctl4()
+{
+ :
+}
+
+bridge_maxgroups_errmsg_check_ctl6()
+{
+ :
+}
+
+bridge_port_ngroups_get()
+{
+ local locus=$1; shift
+
+ bridge -j -d link show $locus |
+ jq '.[].mcast_n_groups'
+}
+
+bridge_port_maxgroups_get()
+{
+ local locus=$1; shift
+
+ bridge -j -d link show $locus |
+ jq '.[].mcast_max_groups'
+}
+
+bridge_port_maxgroups_set()
+{
+ local locus=$1; shift
+ local max=$1; shift
+
+ bridge link set dev $(locus_dev $locus) mcast_max_groups $max
+}
+
+bridge_port_vlan_ngroups_get()
+{
+ local locus=$1; shift
+
+ bridge -j -d vlan show $locus |
+ jq '.[].vlans[].mcast_n_groups'
+}
+
+bridge_port_vlan_maxgroups_get()
+{
+ local locus=$1; shift
+
+ bridge -j -d vlan show $locus |
+ jq '.[].vlans[].mcast_max_groups'
+}
+
+bridge_port_vlan_maxgroups_set()
+{
+ local locus=$1; shift
+ local max=$1; shift
+
+ bridge vlan set $locus mcast_max_groups $max
+}
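+
+The getters and setters above wrap the user-visible interface to the new per-port and per-port-VLAN group limits. Assuming a port swp1 and VLAN 10, the underlying commands look like:
+
+    bridge link set dev swp1 mcast_max_groups 100           # per-port cap
+    bridge vlan set dev swp1 vid 10 mcast_max_groups 100    # per-port-VLAN cap
+    bridge -j -d link show dev swp1 | jq '.[].mcast_n_groups'
+    bridge -j -d vlan show dev swp1 vid 10 | jq '.[].vlans[].mcast_max_groups'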
+
+test_ngroups_reporting()
+{
+ local CFG=$1; shift
+ local context=$1; shift
+ local locus=$1; shift
+
+ RET=0
+
+ local n0=$(bridge_${context}_ngroups_get "$locus")
+ ${CFG}_entries_add "$locus" temp 5
+ check_err $? "Couldn't add MDB entries"
+ local n1=$(bridge_${context}_ngroups_get "$locus")
+
+ ((n1 == n0 + 5))
+ check_err $? "Number of groups was $n0, now is $n1, but $((n0 + 5)) expected"
+
+ ${CFG}_entries_del "$locus" temp 5
+ check_err $? "Couldn't delete MDB entries"
+ local n2=$(bridge_${context}_ngroups_get "$locus")
+
+ ((n2 == n0))
+ check_err $? "Number of groups was $n0, now is $n2, but should be back to $n0"
+
+ log_test "$CFG: $context: ngroups reporting"
+}
+
+test_8021d_ngroups_reporting_cfg4()
+{
+ test_ngroups_reporting cfg4 port "dev $swp1"
+}
+
+test_8021d_ngroups_reporting_ctl4()
+{
+ test_ngroups_reporting ctl4 port "dev $swp1"
+}
+
+test_8021d_ngroups_reporting_cfg6()
+{
+ test_ngroups_reporting cfg6 port "dev $swp1"
+}
+
+test_8021d_ngroups_reporting_ctl6()
+{
+ test_ngroups_reporting ctl6 port "dev $swp1"
+}
+
+test_8021q_ngroups_reporting_cfg4()
+{
+ test_ngroups_reporting cfg4 port "dev $swp1 vid 10"
+}
+
+test_8021q_ngroups_reporting_ctl4()
+{
+ test_ngroups_reporting ctl4 port "dev $swp1 vid 10"
+}
+
+test_8021q_ngroups_reporting_cfg6()
+{
+ test_ngroups_reporting cfg6 port "dev $swp1 vid 10"
+}
+
+test_8021q_ngroups_reporting_ctl6()
+{
+ test_ngroups_reporting ctl6 port "dev $swp1 vid 10"
+}
+
+test_8021qvs_ngroups_reporting_cfg4()
+{
+ test_ngroups_reporting cfg4 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_ngroups_reporting_ctl4()
+{
+ test_ngroups_reporting ctl4 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_ngroups_reporting_cfg6()
+{
+ test_ngroups_reporting cfg6 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_ngroups_reporting_ctl6()
+{
+ test_ngroups_reporting ctl6 port_vlan "dev $swp1 vid 10"
+}
+
+test_ngroups_cross_vlan()
+{
+ local CFG=$1; shift
+
+ local locus1="dev $swp1 vid 10"
+ local locus2="dev $swp1 vid 20"
+
+ RET=0
+
+ local n10=$(bridge_port_vlan_ngroups_get "$locus1")
+ local n20=$(bridge_port_vlan_ngroups_get "$locus2")
+ ${CFG}_entries_add "$locus1" temp 5 111
+ check_err $? "Couldn't add MDB entries to VLAN 10"
+ local n11=$(bridge_port_vlan_ngroups_get "$locus1")
+ local n21=$(bridge_port_vlan_ngroups_get "$locus2")
+
+ ((n11 == n10 + 5))
+ check_err $? "Number of groups at VLAN 10 was $n10, now is $n11, but 5 entries added on VLAN 10, $((n10 + 5)) expected"
+
+ ((n21 == n20))
+ check_err $? "Number of groups at VLAN 20 was $n20, now is $n21, but no change expected on VLAN 20"
+
+ ${CFG}_entries_add "$locus2" temp 5 112
+ check_err $? "Couldn't add MDB entries to VLAN 20"
+ local n12=$(bridge_port_vlan_ngroups_get "$locus1")
+ local n22=$(bridge_port_vlan_ngroups_get "$locus2")
+
+ ((n12 == n11))
+ check_err $? "Number of groups at VLAN 10 was $n11, now is $n12, but no change expected on VLAN 10"
+
+ ((n22 == n21 + 5))
+ check_err $? "Number of groups at VLAN 20 was $n21, now is $n22, but 5 entries added on VLAN 20, $((n21 + 5)) expected"
+
+ ${CFG}_entries_del "$locus1" temp 5 111
+ check_err $? "Couldn't delete MDB entries from VLAN 10"
+ ${CFG}_entries_del "$locus2" temp 5 112
+ check_err $? "Couldn't delete MDB entries from VLAN 20"
+ local n13=$(bridge_port_vlan_ngroups_get "$locus1")
+ local n23=$(bridge_port_vlan_ngroups_get "$locus2")
+
+ ((n13 == n10))
+ check_err $? "Number of groups at VLAN 10 was $n10, now is $n13, but should be back to $n10"
+
+ ((n23 == n20))
+ check_err $? "Number of groups at VLAN 20 was $n20, now is $n23, but should be back to $n20"
+
+ log_test "$CFG: port_vlan: isolation of port and per-VLAN ngroups"
+}
+
+test_8021qvs_ngroups_cross_vlan_cfg4()
+{
+ test_ngroups_cross_vlan cfg4
+}
+
+test_8021qvs_ngroups_cross_vlan_ctl4()
+{
+ test_ngroups_cross_vlan ctl4
+}
+
+test_8021qvs_ngroups_cross_vlan_cfg6()
+{
+ test_ngroups_cross_vlan cfg6
+}
+
+test_8021qvs_ngroups_cross_vlan_ctl6()
+{
+ test_ngroups_cross_vlan ctl6
+}
+
+test_maxgroups_zero()
+{
+ local CFG=$1; shift
+ local context=$1; shift
+ local locus=$1; shift
+
+ RET=0
+ local max
+
+ max=$(bridge_${context}_maxgroups_get "$locus")
+ ((max == 0))
+ check_err $? "Max groups on $locus should be 0, but $max reported"
+
+ bridge_${context}_maxgroups_set "$locus" 100
+ check_err $? "Failed to set max to 100"
+ max=$(bridge_${context}_maxgroups_get "$locus")
+ ((max == 100))
+ check_err $? "Max groups expected to be 100, but $max reported"
+
+ bridge_${context}_maxgroups_set "$locus" 0
+ check_err $? "Couldn't set maximum to 0"
+
+ # Test that setting 0 explicitly still serves as infinity.
+ ${CFG}_entries_add "$locus" temp 5
+ check_err $? "Adding 5 MDB entries failed but should have passed"
+ ${CFG}_entries_del "$locus" temp 5
+ check_err $? "Couldn't delete MDB entries"
+
+ log_test "$CFG: $context maxgroups: reporting and treatment of 0"
+}
+
+test_8021d_maxgroups_zero_cfg4()
+{
+ test_maxgroups_zero cfg4 port "dev $swp1"
+}
+
+test_8021d_maxgroups_zero_ctl4()
+{
+ test_maxgroups_zero ctl4 port "dev $swp1"
+}
+
+test_8021d_maxgroups_zero_cfg6()
+{
+ test_maxgroups_zero cfg6 port "dev $swp1"
+}
+
+test_8021d_maxgroups_zero_ctl6()
+{
+ test_maxgroups_zero ctl6 port "dev $swp1"
+}
+
+test_8021q_maxgroups_zero_cfg4()
+{
+ test_maxgroups_zero cfg4 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_zero_ctl4()
+{
+ test_maxgroups_zero ctl4 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_zero_cfg6()
+{
+ test_maxgroups_zero cfg6 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_zero_ctl6()
+{
+ test_maxgroups_zero ctl6 port "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_zero_cfg4()
+{
+ test_maxgroups_zero cfg4 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_zero_ctl4()
+{
+ test_maxgroups_zero ctl4 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_zero_cfg6()
+{
+ test_maxgroups_zero cfg6 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_zero_ctl6()
+{
+ test_maxgroups_zero ctl6 port_vlan "dev $swp1 vid 10"
+}
+
+test_maxgroups_zero_cross_vlan()
+{
+ local CFG=$1; shift
+
+ local locus0="dev $swp1"
+ local locus1="dev $swp1 vid 10"
+ local locus2="dev $swp1 vid 20"
+ local max
+
+ RET=0
+
+ bridge_port_vlan_maxgroups_set "$locus1" 100
+ check_err $? "$locus1: Failed to set max to 100"
+
+ max=$(bridge_port_maxgroups_get "$locus0")
+ ((max == 0))
+ check_err $? "$locus0: Max groups expected to be 0, but $max reported"
+
+ max=$(bridge_port_vlan_maxgroups_get "$locus2")
+ ((max == 0))
+ check_err $? "$locus2: Max groups expected to be 0, but $max reported"
+
+ bridge_port_vlan_maxgroups_set "$locus2" 100
+ check_err $? "$locus2: Failed to set max to 100"
+
+ max=$(bridge_port_maxgroups_get "$locus0")
+ ((max == 0))
+ check_err $? "$locus0: Max groups expected to be 0, but $max reported"
+
+ max=$(bridge_port_vlan_maxgroups_get "$locus2")
+ ((max == 100))
+ check_err $? "$locus2: Max groups expected to be 100, but $max reported"
+
+ bridge_port_maxgroups_set "$locus0" 100
+ check_err $? "$locus0: Failed to set max to 100"
+
+ max=$(bridge_port_maxgroups_get "$locus0")
+ ((max == 100))
+ check_err $? "$locus0: Max groups expected to be 100, but $max reported"
+
+ max=$(bridge_port_vlan_maxgroups_get "$locus2")
+ ((max == 100))
+ check_err $? "$locus2: Max groups expected to be 100, but $max reported"
+
+ bridge_port_vlan_maxgroups_set "$locus1" 0
+ check_err $? "$locus1: Failed to set max to 0"
+
+ max=$(bridge_port_maxgroups_get "$locus0")
+ ((max == 100))
+ check_err $? "$locus0: Max groups expected to be 100, but $max reported"
+
+ max=$(bridge_port_vlan_maxgroups_get "$locus2")
+ ((max == 100))
+ check_err $? "$locus2: Max groups expected to be 100, but $max reported"
+
+ bridge_port_vlan_maxgroups_set "$locus2" 0
+ check_err $? "$locus2: Failed to set max to 0"
+
+ max=$(bridge_port_maxgroups_get "$locus0")
+ ((max == 100))
+ check_err $? "$locus0: Max groups expected to be 100, but $max reported"
+
+ max=$(bridge_port_vlan_maxgroups_get "$locus2")
+ ((max == 0))
+ check_err $? "$locus2: Max groups expected to be 0 but $max reported"
+
+ bridge_port_maxgroups_set "$locus0" 0
+ check_err $? "$locus0: Failed to set max to 0"
+
+ max=$(bridge_port_maxgroups_get "$locus0")
+ ((max == 0))
+ check_err $? "$locus0: Max groups expected to be 0, but $max reported"
+
+ max=$(bridge_port_vlan_maxgroups_get "$locus2")
+ ((max == 0))
+ check_err $? "$locus2: Max groups expected to be 0, but $max reported"
+
+ log_test "$CFG: port_vlan maxgroups: isolation of port and per-VLAN maximums"
+}
+
+test_8021qvs_maxgroups_zero_cross_vlan_cfg4()
+{
+ test_maxgroups_zero_cross_vlan cfg4
+}
+
+test_8021qvs_maxgroups_zero_cross_vlan_ctl4()
+{
+ test_maxgroups_zero_cross_vlan ctl4
+}
+
+test_8021qvs_maxgroups_zero_cross_vlan_cfg6()
+{
+ test_maxgroups_zero_cross_vlan cfg6
+}
+
+test_8021qvs_maxgroups_zero_cross_vlan_ctl6()
+{
+ test_maxgroups_zero_cross_vlan ctl6
+}
+
+test_maxgroups_too_low()
+{
+ local CFG=$1; shift
+ local context=$1; shift
+ local locus=$1; shift
+
+ RET=0
+
+ local n=$(bridge_${context}_ngroups_get "$locus")
+ local msg
+
+ ${CFG}_entries_add "$locus" temp 5 111
+ check_err $? "$locus: Couldn't add MDB entries"
+
+ bridge_${context}_maxgroups_set "$locus" $((n+2))
+ check_err $? "$locus: Setting maxgroups to $((n+2)) failed"
+
+ msg=$(${CFG}_entries_add "$locus" temp 2 112 2>&1)
+ check_fail $? "$locus: Adding more entries passed when max<n"
+ bridge_maxgroups_errmsg_check_cfg "$msg"
+
+ ${CFG}_entries_del "$locus" temp 5 111
+ check_err $? "$locus: Couldn't delete MDB entries"
+
+ ${CFG}_entries_add "$locus" temp 2 112
+ check_err $? "$locus: Adding more entries failed"
+
+ ${CFG}_entries_del "$locus" temp 2 112
+ check_err $? "$locus: Deleting more entries failed"
+
+ bridge_${context}_maxgroups_set "$locus" 0
+ check_err $? "$locus: Couldn't set maximum to 0"
+
+ log_test "$CFG: $context maxgroups: configure below ngroups"
+}
+
+test_8021d_maxgroups_too_low_cfg4()
+{
+ test_maxgroups_too_low cfg4 port "dev $swp1"
+}
+
+test_8021d_maxgroups_too_low_ctl4()
+{
+ test_maxgroups_too_low ctl4 port "dev $swp1"
+}
+
+test_8021d_maxgroups_too_low_cfg6()
+{
+ test_maxgroups_too_low cfg6 port "dev $swp1"
+}
+
+test_8021d_maxgroups_too_low_ctl6()
+{
+ test_maxgroups_too_low ctl6 port "dev $swp1"
+}
+
+test_8021q_maxgroups_too_low_cfg4()
+{
+ test_maxgroups_too_low cfg4 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_too_low_ctl4()
+{
+ test_maxgroups_too_low ctl4 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_too_low_cfg6()
+{
+ test_maxgroups_too_low cfg6 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_too_low_ctl6()
+{
+ test_maxgroups_too_low ctl6 port "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_too_low_cfg4()
+{
+ test_maxgroups_too_low cfg4 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_too_low_ctl4()
+{
+ test_maxgroups_too_low ctl4 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_too_low_cfg6()
+{
+ test_maxgroups_too_low cfg6 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_too_low_ctl6()
+{
+ test_maxgroups_too_low ctl6 port_vlan "dev $swp1 vid 10"
+}
+
+test_maxgroups_too_many_entries()
+{
+ local CFG=$1; shift
+ local context=$1; shift
+ local locus=$1; shift
+
+ RET=0
+
+ local n=$(bridge_${context}_ngroups_get "$locus")
+ local msg
+
+ # Configure a low maximum
+ bridge_${context}_maxgroups_set "$locus" $((n+1))
+ check_err $? "$locus: Couldn't set maximum"
+
+ # Try to add more entries than the configured maximum
+ msg=$(${CFG}_entries_add "$locus" temp 5 2>&1)
+ check_fail $? "Adding 5 MDB entries passed, but should have failed"
+ bridge_maxgroups_errmsg_check_${CFG} "$msg"
+
+ # When adding entries through the control path, as many as possible
+ # get created, which is consistent with the mcast_hash_max behavior.
+ # In that case, drop the entries explicitly.
+ if [[ ${CFG%[46]} == ctl ]]; then
+ ${CFG}_entries_del "$locus" temp 17 2>&1
+ fi
+
+ local n2=$(bridge_${context}_ngroups_get "$locus")
+ ((n2 == n))
+ check_err $? "Number of groups was $n, but after a failed attempt to add MDB entries it changed to $n2"
+
+ bridge_${context}_maxgroups_set "$locus" 0
+ check_err $? "$locus: Couldn't set maximum to 0"
+
+ log_test "$CFG: $context maxgroups: add too many MDB entries"
+}
+
+test_8021d_maxgroups_too_many_entries_cfg4()
+{
+ test_maxgroups_too_many_entries cfg4 port "dev $swp1"
+}
+
+test_8021d_maxgroups_too_many_entries_ctl4()
+{
+ test_maxgroups_too_many_entries ctl4 port "dev $swp1"
+}
+
+test_8021d_maxgroups_too_many_entries_cfg6()
+{
+ test_maxgroups_too_many_entries cfg6 port "dev $swp1"
+}
+
+test_8021d_maxgroups_too_many_entries_ctl6()
+{
+ test_maxgroups_too_many_entries ctl6 port "dev $swp1"
+}
+
+test_8021q_maxgroups_too_many_entries_cfg4()
+{
+ test_maxgroups_too_many_entries cfg4 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_too_many_entries_ctl4()
+{
+ test_maxgroups_too_many_entries ctl4 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_too_many_entries_cfg6()
+{
+ test_maxgroups_too_many_entries cfg6 port "dev $swp1 vid 10"
+}
+
+test_8021q_maxgroups_too_many_entries_ctl6()
+{
+ test_maxgroups_too_many_entries ctl6 port "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_too_many_entries_cfg4()
+{
+ test_maxgroups_too_many_entries cfg4 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_too_many_entries_ctl4()
+{
+ test_maxgroups_too_many_entries ctl4 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_too_many_entries_cfg6()
+{
+ test_maxgroups_too_many_entries cfg6 port_vlan "dev $swp1 vid 10"
+}
+
+test_8021qvs_maxgroups_too_many_entries_ctl6()
+{
+ test_maxgroups_too_many_entries ctl6 port_vlan "dev $swp1 vid 10"
+}
+
+test_maxgroups_too_many_cross_vlan()
+{
+ local CFG=$1; shift
+
+ RET=0
+
+ local locus0="dev $swp1"
+ local locus1="dev $swp1 vid 10"
+ local locus2="dev $swp1 vid 20"
+ local n1=$(bridge_port_vlan_ngroups_get "$locus1")
+ local n2=$(bridge_port_vlan_ngroups_get "$locus2")
+ local msg
+
+ if ((n1 > n2)); then
+ local tmp=$n1
+ n1=$n2
+ n2=$tmp
+
+ tmp="$locus1"
+ locus1="$locus2"
+ locus2="$tmp"
+ fi
+
+ # Now 0 <= n1 <= n2.
+ ${CFG}_entries_add "$locus2" temp 5 112
+ check_err $? "Couldn't add 5 entries"
+
+ n2=$(bridge_port_vlan_ngroups_get "$locus2")
+ # Now 0 <= n1 < n2-1.
+
+ # Setting locus1's maxgroups to n2-1 should pass. The number is
+ # smaller than the absolute number of MDB entries, and in
+ # particular smaller than locus2's number of entries, but it is
+ # large enough to cover locus1's entries. Thus we check that
+ # individual VLANs' ngroups are independent.
+ bridge_port_vlan_maxgroups_set "$locus1" $((n2-1))
+ check_err $? "Setting ${locus1}'s maxgroups to $((n2-1)) failed"
+
+ msg=$(${CFG}_entries_add "$locus1" temp $n2 111 2>&1)
+ check_fail $? "$locus1: Adding $n2 MDB entries passed, but should have failed"
+ bridge_maxgroups_errmsg_check_${CFG} "$msg"
+
+ bridge_port_maxgroups_set "$locus0" $((n1 + n2 + 2))
+ check_err $? "$locus0: Couldn't set maximum"
+
+ msg=$(${CFG}_entries_add "$locus1" temp 5 111 2>&1)
+ check_fail $? "$locus1: Adding 5 MDB entries passed, but should have failed"
+ bridge_maxgroups_errmsg_check_${CFG} "$msg"
+
+ # IGMP/MLD packets can cause several entries to be added, before
+ # the maximum is hit and the rest is then bounced. Remove what was
+ # committed, if anything.
+ ${CFG}_entries_del "$locus1" temp 5 111 2>/dev/null
+
+ ${CFG}_entries_add "$locus1" temp 2 111
+ check_err $? "$locus1: Adding 2 MDB entries failed, but should have passed"
+
+ ${CFG}_entries_del "$locus1" temp 2 111
+ check_err $? "Couldn't delete MDB entries"
+
+ ${CFG}_entries_del "$locus2" temp 5 112
+ check_err $? "Couldn't delete MDB entries"
+
+ bridge_port_vlan_maxgroups_set "$locus1" 0
+ check_err $? "$locus1: Couldn't set maximum to 0"
+
+ bridge_port_maxgroups_set "$locus0" 0
+ check_err $? "$locus0: Couldn't set maximum to 0"
+
+ log_test "$CFG: port_vlan maxgroups: isolation of port and per-VLAN ngroups"
+}
+
+test_8021qvs_maxgroups_too_many_cross_vlan_cfg4()
+{
+ test_maxgroups_too_many_cross_vlan cfg4
+}
+
+test_8021qvs_maxgroups_too_many_cross_vlan_ctl4()
+{
+ test_maxgroups_too_many_cross_vlan ctl4
+}
+
+test_8021qvs_maxgroups_too_many_cross_vlan_cfg6()
+{
+ test_maxgroups_too_many_cross_vlan cfg6
+}
+
+test_8021qvs_maxgroups_too_many_cross_vlan_ctl6()
+{
+ test_maxgroups_too_many_cross_vlan ctl6
+}
+
+test_vlan_attributes()
+{
+ local locus=$1; shift
+ local expect=$1; shift
+
+ RET=0
+
+ local max=$(bridge_port_vlan_maxgroups_get "$locus")
+ local n=$(bridge_port_vlan_ngroups_get "$locus")
+
+ eval "[[ $max $expect ]]"
+ check_err $? "$locus: maxgroups attribute expected to be $expect, but was $max"
+
+ eval "[[ $n $expect ]]"
+ check_err $? "$locus: ngroups attribute expected to be $expect, but was $n"
+
+ log_test "port_vlan: presence of ngroups and maxgroups attributes"
+}
+
+test_8021q_vlan_attributes()
+{
+ test_vlan_attributes "dev $swp1 vid 10" "== null"
+}
+
+test_8021qvs_vlan_attributes()
+{
+ test_vlan_attributes "dev $swp1 vid 10" "-ge 0"
+}
+
+test_toggle_vlan_snooping()
+{
+ local mode=$1; shift
+
+ RET=0
+
+ local CFG=cfg4
+ local context=port_vlan
+ local locus="dev $swp1 vid 10"
+
+ ${CFG}_entries_add "$locus" $mode 5
+ check_err $? "Couldn't add MDB entries"
+
+ bridge_${context}_maxgroups_set "$locus" 100
+ check_err $? "Failed to set max to 100"
+
+ ip link set dev br0 type bridge mcast_vlan_snooping 0
+ sleep 1
+ ip link set dev br0 type bridge mcast_vlan_snooping 1
+
+ local n=$(bridge_${context}_ngroups_get "$locus")
+ local nn=$(bridge mdb show dev br0 | grep $swp1 | wc -l)
+ ((nn == n))
+ check_err $? "mcast_n_groups expected to be $nn, but $n reported"
+
+ local max=$(bridge_${context}_maxgroups_get "$locus")
+ ((max == 100))
+ check_err $? "Max groups expected to be 100 but $max reported"
+
+ bridge_${context}_maxgroups_set "$locus" 0
+ check_err $? "Failed to set max to 0"
+
+ log_test "$CFG: $context: $mode: mcast_vlan_snooping toggle"
+}
+
+test_toggle_vlan_snooping_temp()
+{
+ test_toggle_vlan_snooping temp
+}
+
+test_toggle_vlan_snooping_permanent()
+{
+ test_toggle_vlan_snooping permanent
+}
+
+# ngroup test suites
+
+test_8021d_ngroups_cfg4()
+{
+ test_8021d_ngroups_reporting_cfg4
+}
+
+test_8021d_ngroups_ctl4()
+{
+ test_8021d_ngroups_reporting_ctl4
+}
+
+test_8021d_ngroups_cfg6()
+{
+ test_8021d_ngroups_reporting_cfg6
+}
+
+test_8021d_ngroups_ctl6()
+{
+ test_8021d_ngroups_reporting_ctl6
+}
+
+test_8021q_ngroups_cfg4()
+{
+ test_8021q_ngroups_reporting_cfg4
+}
+
+test_8021q_ngroups_ctl4()
+{
+ test_8021q_ngroups_reporting_ctl4
+}
+
+test_8021q_ngroups_cfg6()
+{
+ test_8021q_ngroups_reporting_cfg6
+}
+
+test_8021q_ngroups_ctl6()
+{
+ test_8021q_ngroups_reporting_ctl6
+}
+
+test_8021qvs_ngroups_cfg4()
+{
+ test_8021qvs_ngroups_reporting_cfg4
+ test_8021qvs_ngroups_cross_vlan_cfg4
+}
+
+test_8021qvs_ngroups_ctl4()
+{
+ test_8021qvs_ngroups_reporting_ctl4
+ test_8021qvs_ngroups_cross_vlan_ctl4
+}
+
+test_8021qvs_ngroups_cfg6()
+{
+ test_8021qvs_ngroups_reporting_cfg6
+ test_8021qvs_ngroups_cross_vlan_cfg6
+}
+
+test_8021qvs_ngroups_ctl6()
+{
+ test_8021qvs_ngroups_reporting_ctl6
+ test_8021qvs_ngroups_cross_vlan_ctl6
+}
+
+# maxgroups test suites
+
+test_8021d_maxgroups_cfg4()
+{
+ test_8021d_maxgroups_zero_cfg4
+ test_8021d_maxgroups_too_low_cfg4
+ test_8021d_maxgroups_too_many_entries_cfg4
+}
+
+test_8021d_maxgroups_ctl4()
+{
+ test_8021d_maxgroups_zero_ctl4
+ test_8021d_maxgroups_too_low_ctl4
+ test_8021d_maxgroups_too_many_entries_ctl4
+}
+
+test_8021d_maxgroups_cfg6()
+{
+ test_8021d_maxgroups_zero_cfg6
+ test_8021d_maxgroups_too_low_cfg6
+ test_8021d_maxgroups_too_many_entries_cfg6
+}
+
+test_8021d_maxgroups_ctl6()
+{
+ test_8021d_maxgroups_zero_ctl6
+ test_8021d_maxgroups_too_low_ctl6
+ test_8021d_maxgroups_too_many_entries_ctl6
+}
+
+test_8021q_maxgroups_cfg4()
+{
+ test_8021q_maxgroups_zero_cfg4
+ test_8021q_maxgroups_too_low_cfg4
+ test_8021q_maxgroups_too_many_entries_cfg4
+}
+
+test_8021q_maxgroups_ctl4()
+{
+ test_8021q_maxgroups_zero_ctl4
+ test_8021q_maxgroups_too_low_ctl4
+ test_8021q_maxgroups_too_many_entries_ctl4
+}
+
+test_8021q_maxgroups_cfg6()
+{
+ test_8021q_maxgroups_zero_cfg6
+ test_8021q_maxgroups_too_low_cfg6
+ test_8021q_maxgroups_too_many_entries_cfg6
+}
+
+test_8021q_maxgroups_ctl6()
+{
+ test_8021q_maxgroups_zero_ctl6
+ test_8021q_maxgroups_too_low_ctl6
+ test_8021q_maxgroups_too_many_entries_ctl6
+}
+
+test_8021qvs_maxgroups_cfg4()
+{
+ test_8021qvs_maxgroups_zero_cfg4
+ test_8021qvs_maxgroups_zero_cross_vlan_cfg4
+ test_8021qvs_maxgroups_too_low_cfg4
+ test_8021qvs_maxgroups_too_many_entries_cfg4
+ test_8021qvs_maxgroups_too_many_cross_vlan_cfg4
+}
+
+test_8021qvs_maxgroups_ctl4()
+{
+ test_8021qvs_maxgroups_zero_ctl4
+ test_8021qvs_maxgroups_zero_cross_vlan_ctl4
+ test_8021qvs_maxgroups_too_low_ctl4
+ test_8021qvs_maxgroups_too_many_entries_ctl4
+ test_8021qvs_maxgroups_too_many_cross_vlan_ctl4
+}
+
+test_8021qvs_maxgroups_cfg6()
+{
+ test_8021qvs_maxgroups_zero_cfg6
+ test_8021qvs_maxgroups_zero_cross_vlan_cfg6
+ test_8021qvs_maxgroups_too_low_cfg6
+ test_8021qvs_maxgroups_too_many_entries_cfg6
+ test_8021qvs_maxgroups_too_many_cross_vlan_cfg6
+}
+
+test_8021qvs_maxgroups_ctl6()
+{
+ test_8021qvs_maxgroups_zero_ctl6
+ test_8021qvs_maxgroups_zero_cross_vlan_ctl6
+ test_8021qvs_maxgroups_too_low_ctl6
+ test_8021qvs_maxgroups_too_many_entries_ctl6
+ test_8021qvs_maxgroups_too_many_cross_vlan_ctl6
+}
+
+# other test suites
+
+test_8021qvs_toggle_vlan_snooping()
+{
+ test_toggle_vlan_snooping_temp
+ test_toggle_vlan_snooping_permanent
+}
+
+# test groups
+
+test_8021d()
+{
+ # Tests for vlan_filtering 0 mcast_vlan_snooping 0.
+
+ switch_create_8021d
+ setup_wait
+
+ test_8021d_ngroups_cfg4
+ test_8021d_ngroups_ctl4
+ test_8021d_ngroups_cfg6
+ test_8021d_ngroups_ctl6
+ test_8021d_maxgroups_cfg4
+ test_8021d_maxgroups_ctl4
+ test_8021d_maxgroups_cfg6
+ test_8021d_maxgroups_ctl6
+
+ switch_destroy
+}
+
+test_8021q()
+{
+ # Tests for vlan_filtering 1 mcast_vlan_snooping 0.
+
+ switch_create_8021q
+ setup_wait
+
+ test_8021q_vlan_attributes
+ test_8021q_ngroups_cfg4
+ test_8021q_ngroups_ctl4
+ test_8021q_ngroups_cfg6
+ test_8021q_ngroups_ctl6
+ test_8021q_maxgroups_cfg4
+ test_8021q_maxgroups_ctl4
+ test_8021q_maxgroups_cfg6
+ test_8021q_maxgroups_ctl6
+
+ switch_destroy
+}
+
+test_8021qvs()
+{
+ # Tests for vlan_filtering 1 mcast_vlan_snooping 1.
+
+ switch_create_8021qvs
+ setup_wait
+
+ test_8021qvs_vlan_attributes
+ test_8021qvs_ngroups_cfg4
+ test_8021qvs_ngroups_ctl4
+ test_8021qvs_ngroups_cfg6
+ test_8021qvs_ngroups_ctl6
+ test_8021qvs_maxgroups_cfg4
+ test_8021qvs_maxgroups_ctl4
+ test_8021qvs_maxgroups_cfg6
+ test_8021qvs_maxgroups_ctl6
+ test_8021qvs_toggle_vlan_snooping
+
+ switch_destroy
+}
+
+trap cleanup EXIT
+
+setup_prepare
+tests_run
+
+exit $EXIT_STATUS
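
Like the other forwarding selftests, the script takes its four netdevs from lib.sh's NETIFS map, normally filled in from forwarding.config. A sketch with placeholder veth names:

    # forwarding.config
    NETIFS[p1]=veth0
    NETIFS[p2]=veth1
    NETIFS[p3]=veth2
    NETIFS[p4]=veth3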
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 3d8e4ebda1b6..d47499ba81c7 100755
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -524,27 +524,6 @@ cmd_jq()
[ ! -z "$output" ]
}
-lldpad_app_wait_set()
-{
- local dev=$1; shift
-
- while lldptool -t -i $dev -V APP -c app | grep -Eq "pending|unknown"; do
- echo "$dev: waiting for lldpad to push pending APP updates"
- sleep 5
- done
-}
-
-lldpad_app_wait_del()
-{
- # Give lldpad a chance to push down the changes. If the device is downed
- # too soon, the updates will be left pending. However, they will have
- # been struck off the lldpad's DB already, so we won't be able to tell
- # they are pending. Then on next test iteration this would cause
- # weirdness as newly-added APP rules conflict with the old ones,
- # sometimes getting stuck in an "unknown" state.
- sleep 5
-}
-
pre_cleanup()
{
if [ "${PAUSE_ON_CLEANUP}" = "yes" ]; then
@@ -1692,3 +1671,219 @@ hw_stats_monitor_test()
log_test "${type}_stats notifications"
}
+
+ipv4_to_bytes()
+{
+ local IP=$1; shift
+
+ printf '%02x:' ${IP//./ } |
+ sed 's/:$//'
+}
+
+# Expand a given IPv6 address `IP': the :: token, if present, is expanded,
+# and each 16-bit group is zero-padded to 4 hexadecimal digits. An optional
+# `BYTESEP' parameter can be given to further separate individual bytes of
+# each 16-bit group.
+expand_ipv6()
+{
+ local IP=$1; shift
+ local bytesep=$1; shift
+
+ local cvt_ip=${IP/::/_}
+ local colons=${cvt_ip//[^:]/}
+ local allcol=:::::::
+ # IP where :: -> the appropriate number of colons:
+ local allcol_ip=${cvt_ip/_/${allcol:${#colons}}}
+
+ echo $allcol_ip | tr : '\n' |
+ sed s/^/0000/ |
+ sed 's/.*\(..\)\(..\)/\1'"$bytesep"'\2/' |
+ tr '\n' : |
+ sed 's/:$//'
+}
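+
+A worked example, obtained by tracing the function above:
+
+    # expand_ipv6 2001:db8::1     -> 2001:0db8:0000:0000:0000:0000:0000:0001
+    # expand_ipv6 2001:db8::1 :   -> 20:01:0d:b8:00:00:00:00:00:00:00:00:00:00:00:01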
+
+ipv6_to_bytes()
+{
+ local IP=$1; shift
+
+ expand_ipv6 "$IP" :
+}
+
+u16_to_bytes()
+{
+ local u16=$1; shift
+
+ printf "%04x" $u16 | sed 's/^/000/;s/^.*\(..\)\(..\)$/\1:\2/'
+}
+
+# Given a mausezahn-formatted payload (colon-separated bytes given as %02x),
+# possibly with a keyword CHECKSUM stashed where a 16-bit checksum should be,
+# calculate checksum as per RFC 1071, assuming the CHECKSUM field (if any)
+# stands for 00:00.
+payload_template_calc_checksum()
+{
+ local payload=$1; shift
+
+ (
+ # Set input radix.
+ echo "16i"
+ # Push zero for the initial checksum.
+ echo 0
+
+ # Pad the payload with a terminating 00: in case we get an odd
+ # number of bytes.
+ echo "${payload%:}:00:" |
+ sed 's/CHECKSUM/00:00/g' |
+ tr '[:lower:]' '[:upper:]' |
+ # Add the word to the checksum.
+ sed 's/\(..\):\(..\):/\1\2+\n/g' |
+ # Strip the extra odd byte we pushed if left unconverted.
+ sed 's/\(..\):$//'
+
+ echo "10000 ~ +" # Calculate and add carry.
+ echo "FFFF r - p" # Bit-flip and print.
+ ) |
+ dc |
+ tr '[:upper:]' '[:lower:]'
+}
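+
+A worked example, traced by hand through the dc program above, using the IGMPv2 Leave layout generated by igmpv2_leave_get below:
+
+    # payload_template_calc_checksum "17:00:CHECKSUM:ef:01:01:01"
+    #   words:  0x1700 + 0x0000 + 0xef01 + 0x0101 = 0x10702
+    #   fold:   0x0702 + 0x1 = 0x0703
+    #   invert: 0xffff - 0x0703 = 0xf8fc    -> prints "f8fc"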
+
+payload_template_expand_checksum()
+{
+ local payload=$1; shift
+ local checksum=$1; shift
+
+ local ckbytes=$(u16_to_bytes $checksum)
+
+ echo "$payload" | sed "s/CHECKSUM/$ckbytes/g"
+}
+
+payload_template_nbytes()
+{
+ local payload=$1; shift
+
+ payload_template_expand_checksum "${payload%:}" 0 |
+ sed 's/:/\n/g' | wc -l
+}
+
+igmpv3_is_in_get()
+{
+ local GRP=$1; shift
+ local sources=("$@")
+
+ local igmpv3
+ local nsources=$(u16_to_bytes ${#sources[@]})
+
+ # IS_IN ( $sources )
+ igmpv3=$(:
+ )"22:"$( : Type - Membership Report
+ )"00:"$( : Reserved
+ )"CHECKSUM:"$( : Checksum
+ )"00:00:"$( : Reserved
+ )"00:01:"$( : Number of Group Records
+ )"01:"$( : Record Type - IS_IN
+ )"00:"$( : Aux Data Len
+ )"${nsources}:"$( : Number of Sources
+ )"$(ipv4_to_bytes $GRP):"$( : Multicast Address
+ )"$(for src in "${sources[@]}"; do
+ ipv4_to_bytes $src
+ echo -n :
+ done)"$( : Source Addresses
+ )
+ local checksum=$(payload_template_calc_checksum "$igmpv3")
+
+ payload_template_expand_checksum "$igmpv3" $checksum
+}
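+
+The resulting hex string is handed to mausezahn as a raw IGMP payload (IP protocol 2), as the callers in bridge_mdb.sh and bridge_mdb_max.sh above do. A standalone sketch, with a placeholder interface and the addresses used by the tests:
+
+    p=$(igmpv3_is_in_get 239.1.1.1 192.0.2.2)
+    mausezahn h1.10 -c 1 -A 192.0.2.1 -B 239.1.1.1 -t ip proto=2,p="$p" -q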
+
+igmpv2_leave_get()
+{
+ local GRP=$1; shift
+
+ local payload=$(:
+ )"17:"$( : Type - Leave Group
+ )"00:"$( : Max Resp Time - not meaningful
+ )"CHECKSUM:"$( : Checksum
+ )"$(ipv4_to_bytes $GRP)"$( : Group Address
+ )
+ local checksum=$(payload_template_calc_checksum "$payload")
+
+ payload_template_expand_checksum "$payload" $checksum
+}
+
+mldv2_is_in_get()
+{
+ local SIP=$1; shift
+ local GRP=$1; shift
+ local sources=("$@")
+
+ local hbh
+ local icmpv6
+ local nsources=$(u16_to_bytes ${#sources[@]})
+
+ hbh=$(:
+ )"3a:"$( : Next Header - ICMPv6
+ )"00:"$( : Hdr Ext Len
+ )"00:00:00:00:00:00:"$( : Options and Padding
+ )
+
+ icmpv6=$(:
+ )"8f:"$( : Type - MLDv2 Report
+ )"00:"$( : Code
+ )"CHECKSUM:"$( : Checksum
+ )"00:00:"$( : Reserved
+ )"00:01:"$( : Number of Group Records
+ )"01:"$( : Record Type - IS_IN
+ )"00:"$( : Aux Data Len
+ )"${nsources}:"$( : Number of Sources
+ )"$(ipv6_to_bytes $GRP):"$( : Multicast address
+ )"$(for src in "${sources[@]}"; do
+ ipv6_to_bytes $src
+ echo -n :
+ done)"$( : Source Addresses
+ )
+
+ local len=$(u16_to_bytes $(payload_template_nbytes $icmpv6))
+ local pseudohdr=$(:
+ )"$(ipv6_to_bytes $SIP):"$( : SIP
+ )"$(ipv6_to_bytes $GRP):"$( : DIP is multicast address
+ )"${len}:"$( : Upper-layer length
+ )"00:3a:"$( : Zero and next-header
+ )
+ local checksum=$(payload_template_calc_checksum ${pseudohdr}${icmpv6})
+
+ payload_template_expand_checksum "$hbh$icmpv6" $checksum
+}
+
+mldv1_done_get()
+{
+ local SIP=$1; shift
+ local GRP=$1; shift
+
+ local hbh
+ local icmpv6
+
+ hbh=$(:
+ )"3a:"$( : Next Header - ICMPv6
+ )"00:"$( : Hdr Ext Len
+ )"00:00:00:00:00:00:"$( : Options and Padding
+ )
+
+ icmpv6=$(:
+ )"84:"$( : Type - MLDv1 Done
+ )"00:"$( : Code
+ )"CHECKSUM:"$( : Checksum
+ )"00:00:"$( : Max Resp Delay - not meaningful
+ )"00:00:"$( : Reserved
+ )"$(ipv6_to_bytes $GRP):"$( : Multicast address
+ )
+
+ local len=$(u16_to_bytes $(payload_template_nbytes $icmpv6))
+ local pseudohdr=$(:
+ )"$(ipv6_to_bytes $SIP):"$( : SIP
+ )"$(ipv6_to_bytes $GRP):"$( : DIP is multicast address
+ )"${len}:"$( : Upper-layer length
+ )"00:3a:"$( : Zero and next-header
+ )
+ local checksum=$(payload_template_calc_checksum ${pseudohdr}${icmpv6})
+
+ payload_template_expand_checksum "$hbh$icmpv6" $checksum
+}
diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
index 1e0a62f638fe..a96cff8e7219 100755
--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
+++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
@@ -3,7 +3,8 @@
ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \
mirred_egress_mirror_test matchall_mirred_egress_mirror_test \
- gact_trap_test mirred_egress_to_ingress_test"
+ gact_trap_test mirred_egress_to_ingress_test \
+ mirred_egress_to_ingress_tcp_test"
NUM_NETIFS=4
source tc_common.sh
source lib.sh
@@ -198,6 +199,52 @@ mirred_egress_to_ingress_test()
log_test "mirred_egress_to_ingress ($tcflags)"
}
+mirred_egress_to_ingress_tcp_test()
+{
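+ # Hairpin a local TCP flow on $h1: the egress rules NAT the flow in both
+ # directions and mirred-redirect it back to $h1 ingress, so the nc client
+ # and server below talk to each other on the same host over the
+ # rewritten addresses.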
+ mirred_e2i_tf1=$(mktemp) mirred_e2i_tf2=$(mktemp)
+
+ RET=0
+ dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$mirred_e2i_tf1
+ tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \
+ $tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \
+ action ct commit nat src addr 192.0.2.2 pipe \
+ action ct clear pipe \
+ action ct commit nat dst addr 192.0.2.1 pipe \
+ action ct clear pipe \
+ action skbedit ptype host pipe \
+ action mirred ingress redirect dev $h1
+ tc filter add dev $h1 protocol ip pref 101 handle 101 egress flower \
+ $tcflags ip_proto icmp \
+ action mirred ingress redirect dev $h1
+ tc filter add dev $h1 protocol ip pref 102 handle 102 ingress flower \
+ ip_proto icmp \
+ action drop
+
+ ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 &
+ local rpid=$!
+ ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1
+ wait -n $rpid
+ cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2
+ check_err $? "server output check failed"
+
+ $MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \
+ -t icmp "ping,id=42,seq=5" -q
+ tc_check_packets "dev $h1 egress" 101 10
+ check_err $? "didn't mirred redirect ICMP"
+ tc_check_packets "dev $h1 ingress" 102 10
+ check_err $? "didn't drop mirred ICMP"
+ local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
+ test ${overlimits} = 10
+ check_err $? "wrong overlimits, expected 10 got ${overlimits}"
+
+ tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
+ tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
+ tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower
+
+ rm -f $mirred_e2i_tf1 $mirred_e2i_tf2
+ log_test "mirred_egress_to_ingress_tcp ($tcflags)"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -223,6 +270,8 @@ setup_prepare()
cleanup()
{
+ local tf
+
pre_cleanup
switch_destroy
@@ -233,6 +282,8 @@ cleanup()
ip link set $swp2 address $swp2origmac
ip link set $swp1 address $swp1origmac
+
+ for tf in $mirred_e2i_tf1 $mirred_e2i_tf2; do rm -f $tf; done
}
mirred_egress_redirect_test()
diff --git a/tools/testing/selftests/net/ip_local_port_range.c b/tools/testing/selftests/net/ip_local_port_range.c
new file mode 100644
index 000000000000..75e3fdacdf73
--- /dev/null
+++ b/tools/testing/selftests/net/ip_local_port_range.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// Copyright (c) 2023 Cloudflare
+
+/* Test IP_LOCAL_PORT_RANGE socket option: IPv4 + IPv6, TCP + UDP.
+ *
+ * Tests assume that net.ipv4.ip_local_port_range is [40000, 49999].
+ * Don't run these directly but with ip_local_port_range.sh script.
+ */
+
+#include <fcntl.h>
+#include <netinet/ip.h>
+
+#include "../kselftest_harness.h"
+
+#ifndef IP_LOCAL_PORT_RANGE
+#define IP_LOCAL_PORT_RANGE 51
+#endif
+
+static __u32 pack_port_range(__u16 lo, __u16 hi)
+{
+ return (hi << 16) | (lo << 0);
+}
+
+static void unpack_port_range(__u32 range, __u16 *lo, __u16 *hi)
+{
+ *lo = range & 0xffff;
+ *hi = range >> 16;
+}
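+
+/* For example, pack_port_range(40000, 40100) yields 0x9ca49c40: the low
+ * halfword carries the lower bound and the high halfword the upper bound,
+ * which unpack_port_range() splits back apart.
+ */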
+
+static int get_so_domain(int fd)
+{
+ int domain, err;
+ socklen_t len;
+
+ len = sizeof(domain);
+ err = getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len);
+ if (err)
+ return -1;
+
+ return domain;
+}
+
+static int bind_to_loopback_any_port(int fd)
+{
+ union {
+ struct sockaddr sa;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } addr;
+ socklen_t addr_len;
+
+ memset(&addr, 0, sizeof(addr));
+ switch (get_so_domain(fd)) {
+ case AF_INET:
+ addr.v4.sin_family = AF_INET;
+ addr.v4.sin_port = htons(0);
+ addr.v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ addr_len = sizeof(addr.v4);
+ break;
+ case AF_INET6:
+ addr.v6.sin6_family = AF_INET6;
+ addr.v6.sin6_port = htons(0);
+ addr.v6.sin6_addr = in6addr_loopback;
+ addr_len = sizeof(addr.v6);
+ break;
+ default:
+ return -1;
+ }
+
+ return bind(fd, &addr.sa, addr_len);
+}
+
+static int get_sock_port(int fd)
+{
+ union {
+ struct sockaddr sa;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } addr;
+ socklen_t addr_len;
+ int err;
+
+ addr_len = sizeof(addr);
+ memset(&addr, 0, sizeof(addr));
+ err = getsockname(fd, &addr.sa, &addr_len);
+ if (err)
+ return -1;
+
+ switch (addr.sa.sa_family) {
+ case AF_INET:
+ return ntohs(addr.v4.sin_port);
+ case AF_INET6:
+ return ntohs(addr.v6.sin6_port);
+ default:
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+}
+
+static int get_ip_local_port_range(int fd, __u32 *range)
+{
+ socklen_t len;
+ __u32 val;
+ int err;
+
+ len = sizeof(val);
+ err = getsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &val, &len);
+ if (err)
+ return -1;
+
+ *range = val;
+ return 0;
+}
+
+FIXTURE(ip_local_port_range) {};
+
+FIXTURE_SETUP(ip_local_port_range)
+{
+}
+
+FIXTURE_TEARDOWN(ip_local_port_range)
+{
+}
+
+FIXTURE_VARIANT(ip_local_port_range) {
+ int so_domain;
+ int so_type;
+ int so_protocol;
+};
+
+FIXTURE_VARIANT_ADD(ip_local_port_range, ip4_tcp) {
+ .so_domain = AF_INET,
+ .so_type = SOCK_STREAM,
+ .so_protocol = 0,
+};
+
+FIXTURE_VARIANT_ADD(ip_local_port_range, ip4_udp) {
+ .so_domain = AF_INET,
+ .so_type = SOCK_DGRAM,
+ .so_protocol = 0,
+};
+
+FIXTURE_VARIANT_ADD(ip_local_port_range, ip4_sctp) {
+ .so_domain = AF_INET,
+ .so_type = SOCK_STREAM,
+ .so_protocol = IPPROTO_SCTP,
+};
+
+FIXTURE_VARIANT_ADD(ip_local_port_range, ip6_tcp) {
+ .so_domain = AF_INET6,
+ .so_type = SOCK_STREAM,
+ .so_protocol = 0,
+};
+
+FIXTURE_VARIANT_ADD(ip_local_port_range, ip6_udp) {
+ .so_domain = AF_INET6,
+ .so_type = SOCK_DGRAM,
+ .so_protocol = 0,
+};
+
+FIXTURE_VARIANT_ADD(ip_local_port_range, ip6_sctp) {
+ .so_domain = AF_INET6,
+ .so_type = SOCK_STREAM,
+ .so_protocol = IPPROTO_SCTP,
+};
+
+TEST_F(ip_local_port_range, invalid_option_value)
+{
+ __u16 val16;
+ __u32 val32;
+ __u64 val64;
+ int fd, err;
+
+ fd = socket(variant->so_domain, variant->so_type, variant->so_protocol);
+ ASSERT_GE(fd, 0) TH_LOG("socket failed");
+
+ /* Too few bytes */
+ val16 = 40000;
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &val16, sizeof(val16));
+ EXPECT_TRUE(err) TH_LOG("expected setsockopt(IP_LOCAL_PORT_RANGE) to fail");
+ EXPECT_EQ(errno, EINVAL);
+
+ /* Empty range: low port > high port */
+ val32 = pack_port_range(40222, 40111);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &val32, sizeof(val32));
+ EXPECT_TRUE(err) TH_LOG("expected setsockopt(IP_LOCAL_PORT_RANGE) to fail");
+ EXPECT_EQ(errno, EINVAL);
+
+ /* Too many bytes */
+ val64 = pack_port_range(40333, 40444);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &val64, sizeof(val64));
+ EXPECT_TRUE(err) TH_LOG("expected setsockopt(IP_LOCAL_PORT_RANGE) to fail");
+ EXPECT_EQ(errno, EINVAL);
+
+ err = close(fd);
+ ASSERT_TRUE(!err) TH_LOG("close failed");
+}
+
+TEST_F(ip_local_port_range, port_range_out_of_netns_range)
+{
+ const struct test {
+ __u16 range_lo;
+ __u16 range_hi;
+ } tests[] = {
+ { 30000, 39999 }, /* socket range below netns range */
+ { 50000, 59999 }, /* socket range above netns range */
+ };
+ const struct test *t;
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ /* Bind a couple of sockets, not just one, to check
+ * that the range wasn't clamped to a single port from
+ * the netns range. That is [40000, 40000] or [49999,
+ * 49999], respectively for each test case.
+ */
+ int fds[2], i;
+
+ TH_LOG("lo %5hu, hi %5hu", t->range_lo, t->range_hi);
+
+ for (i = 0; i < ARRAY_SIZE(fds); i++) {
+ int fd, err, port;
+ __u32 range;
+
+ fd = socket(variant->so_domain, variant->so_type, variant->so_protocol);
+ ASSERT_GE(fd, 0) TH_LOG("#%d: socket failed", i);
+
+ range = pack_port_range(t->range_lo, t->range_hi);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
+ ASSERT_TRUE(!err) TH_LOG("#%d: setsockopt(IP_LOCAL_PORT_RANGE) failed", i);
+
+ err = bind_to_loopback_any_port(fd);
+ ASSERT_TRUE(!err) TH_LOG("#%d: bind failed", i);
+
+ /* Check that socket port range outside of ephemeral range is ignored */
+ port = get_sock_port(fd);
+ ASSERT_GE(port, 40000) TH_LOG("#%d: expected port within netns range", i);
+ ASSERT_LE(port, 49999) TH_LOG("#%d: expected port within netns range", i);
+
+ fds[i] = fd;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fds); i++)
+ ASSERT_TRUE(close(fds[i]) == 0) TH_LOG("#%d: close failed", i);
+ }
+}
+
+TEST_F(ip_local_port_range, single_port_range)
+{
+ const struct test {
+ __u16 range_lo;
+ __u16 range_hi;
+ __u16 expected;
+ } tests[] = {
+ /* single port range within ephemeral range */
+ { 45000, 45000, 45000 },
+ /* first port in the ephemeral range (clamp from above) */
+ { 0, 40000, 40000 },
+ /* last port in the ephemeral range (clamp from below) */
+ { 49999, 0, 49999 },
+ };
+ const struct test *t;
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ int fd, err, port;
+ __u32 range;
+
+ TH_LOG("lo %5hu, hi %5hu, expected %5hu",
+ t->range_lo, t->range_hi, t->expected);
+
+ fd = socket(variant->so_domain, variant->so_type, variant->so_protocol);
+ ASSERT_GE(fd, 0) TH_LOG("socket failed");
+
+ range = pack_port_range(t->range_lo, t->range_hi);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
+ ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ err = bind_to_loopback_any_port(fd);
+ ASSERT_TRUE(!err) TH_LOG("bind failed");
+
+ port = get_sock_port(fd);
+ ASSERT_EQ(port, t->expected) TH_LOG("unexpected local port");
+
+ err = close(fd);
+ ASSERT_TRUE(!err) TH_LOG("close failed");
+ }
+}
+
+TEST_F(ip_local_port_range, exhaust_8_port_range)
+{
+ __u8 port_set = 0;
+ int i, fd, err;
+ __u32 range;
+ __u16 port;
+ int fds[8];
+
+ for (i = 0; i < ARRAY_SIZE(fds); i++) {
+ fd = socket(variant->so_domain, variant->so_type, variant->so_protocol);
+ ASSERT_GE(fd, 0) TH_LOG("socket failed");
+
+ range = pack_port_range(40000, 40007);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
+ ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ err = bind_to_loopback_any_port(fd);
+ ASSERT_TRUE(!err) TH_LOG("bind failed");
+
+ port = get_sock_port(fd);
+ ASSERT_GE(port, 40000) TH_LOG("expected port within sockopt range");
+ ASSERT_LE(port, 40007) TH_LOG("expected port within sockopt range");
+
+ port_set |= 1 << (port - 40000);
+ fds[i] = fd;
+ }
+
+ /* Check that every port from the test range is in use */
+ ASSERT_EQ(port_set, 0xff) TH_LOG("expected all ports to be busy");
+
+ /* Check that bind() fails because the whole range is busy */
+ fd = socket(variant->so_domain, variant->so_type, variant->so_protocol);
+ ASSERT_GE(fd, 0) TH_LOG("socket failed");
+
+ range = pack_port_range(40000, 40007);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
+ ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ err = bind_to_loopback_any_port(fd);
+ ASSERT_TRUE(err) TH_LOG("expected bind to fail");
+ ASSERT_EQ(errno, EADDRINUSE);
+
+ err = close(fd);
+ ASSERT_TRUE(!err) TH_LOG("close failed");
+
+ for (i = 0; i < ARRAY_SIZE(fds); i++) {
+ err = close(fds[i]);
+ ASSERT_TRUE(!err) TH_LOG("close failed");
+ }
+}
+
+TEST_F(ip_local_port_range, late_bind)
+{
+ union {
+ struct sockaddr sa;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } addr;
+ socklen_t addr_len;
+ const int one = 1;
+ int fd, err;
+ __u32 range;
+ __u16 port;
+
+ if (variant->so_protocol == IPPROTO_SCTP)
+ SKIP(return, "SCTP doesn't support IP_BIND_ADDRESS_NO_PORT");
+
+ fd = socket(variant->so_domain, variant->so_type, 0);
+ ASSERT_GE(fd, 0) TH_LOG("socket failed");
+
+ range = pack_port_range(40100, 40199);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
+ ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ err = setsockopt(fd, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
+ ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_BIND_ADDRESS_NO_PORT) failed");
+
+ err = bind_to_loopback_any_port(fd);
+ ASSERT_TRUE(!err) TH_LOG("bind failed");
+
+ port = get_sock_port(fd);
+ ASSERT_EQ(port, 0) TH_LOG("expected no port assigned before connect");
+
+ /* Invalid destination */
+ memset(&addr, 0, sizeof(addr));
+ switch (variant->so_domain) {
+ case AF_INET:
+ addr.v4.sin_family = AF_INET;
+ addr.v4.sin_port = htons(0);
+ addr.v4.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr_len = sizeof(addr.v4);
+ break;
+ case AF_INET6:
+ addr.v6.sin6_family = AF_INET6;
+ addr.v6.sin6_port = htons(0);
+ addr.v6.sin6_addr = in6addr_any;
+ addr_len = sizeof(addr.v6);
+ break;
+ default:
+ ASSERT_TRUE(false) TH_LOG("unsupported socket domain");
+ }
+
+ /* connect() doesn't need to succeed for late bind to happen */
+ connect(fd, &addr.sa, addr_len);
+
+ port = get_sock_port(fd);
+ ASSERT_GE(port, 40100);
+ ASSERT_LE(port, 40199);
+
+ err = close(fd);
+ ASSERT_TRUE(!err) TH_LOG("close failed");
+}
+
+TEST_F(ip_local_port_range, get_port_range)
+{
+ __u16 lo, hi;
+ __u32 range;
+ int fd, err;
+
+ fd = socket(variant->so_domain, variant->so_type, variant->so_protocol);
+ ASSERT_GE(fd, 0) TH_LOG("socket failed");
+
+ /* Get range before it will be set */
+ err = get_ip_local_port_range(fd, &range);
+ ASSERT_TRUE(!err) TH_LOG("getsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ unpack_port_range(range, &lo, &hi);
+ ASSERT_EQ(lo, 0) TH_LOG("unexpected low port");
+ ASSERT_EQ(hi, 0) TH_LOG("unexpected high port");
+
+ range = pack_port_range(12345, 54321);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
+ ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ /* Get range after it has been set */
+ err = get_ip_local_port_range(fd, &range);
+ ASSERT_TRUE(!err) TH_LOG("getsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ unpack_port_range(range, &lo, &hi);
+ ASSERT_EQ(lo, 12345) TH_LOG("unexpected low port");
+ ASSERT_EQ(hi, 54321) TH_LOG("unexpected high port");
+
+ /* Unset the port range */
+ range = pack_port_range(0, 0);
+ err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
+ ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ /* Get range after it has been unset */
+ err = get_ip_local_port_range(fd, &range);
+ ASSERT_TRUE(!err) TH_LOG("getsockopt(IP_LOCAL_PORT_RANGE) failed");
+
+ unpack_port_range(range, &lo, &hi);
+ ASSERT_EQ(lo, 0) TH_LOG("unexpected low port");
+ ASSERT_EQ(hi, 0) TH_LOG("unexpected high port");
+
+ err = close(fd);
+ ASSERT_TRUE(!err) TH_LOG("close failed");
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/ip_local_port_range.sh b/tools/testing/selftests/net/ip_local_port_range.sh
new file mode 100755
index 000000000000..6c6ad346eaa0
--- /dev/null
+++ b/tools/testing/selftests/net/ip_local_port_range.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+./in_netns.sh \
+ sh -c 'sysctl -q -w net.ipv4.ip_local_port_range="40000 49999" && ./ip_local_port_range'
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 24bcd7b9bdb2..ef628b16fe9b 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -17,6 +17,11 @@ flush_pids()
sleep 1.1
ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
+
+ for _ in $(seq 10); do
+ [ -z "$(ip netns pids "${ns}")" ] && break
+ sleep 0.1
+ done
}
cleanup()
@@ -37,15 +42,20 @@ if [ $? -ne 0 ];then
exit $ksft_skip
fi
+get_msk_inuse()
+{
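+	# the third column of /proc/net/protocols is the per-protocol count
+	# of sockets currently in use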
+ ip netns exec $ns cat /proc/net/protocols | awk '$1~/^MPTCP$/{print $3}'
+}
+
__chk_nr()
{
- local condition="$1"
+ local command="$1"
local expected=$2
local msg nr
shift 2
msg=$*
- nr=$(ss -inmHMN $ns | $condition)
+ nr=$(eval $command)
printf "%-50s" "$msg"
if [ $nr != $expected ]; then
@@ -57,9 +67,17 @@ __chk_nr()
test_cnt=$((test_cnt+1))
}
+__chk_msk_nr()
+{
+ local condition=$1
+ shift 1
+
+ __chk_nr "ss -inmHMN $ns | $condition" $*
+}
+
chk_msk_nr()
{
- __chk_nr "grep -c token:" $*
+ __chk_msk_nr "grep -c token:" $*
}
wait_msk_nr()
@@ -97,12 +115,12 @@ wait_msk_nr()
chk_msk_fallback_nr()
{
- __chk_nr "grep -c fallback" $*
+ __chk_msk_nr "grep -c fallback" $*
}
chk_msk_remote_key_nr()
{
- __chk_nr "grep -c remote_key" $*
+ __chk_msk_nr "grep -c remote_key" $*
}
__chk_listen()
@@ -142,6 +160,26 @@ chk_msk_listen()
nr=$(ss -Ml $filter | wc -l)
}
+chk_msk_inuse()
+{
+ local expected=$1
+ local listen_nr
+
+ shift 1
+
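+	# listening msk sockets also count as "in use", so fold them into
+	# the expected total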
+ listen_nr=$(ss -N "${ns}" -Ml | grep -c LISTEN)
+ expected=$((expected + listen_nr))
+
+ for _ in $(seq 10); do
+ if [ $(get_msk_inuse) -eq $expected ];then
+ break
+ fi
+ sleep 0.1
+ done
+
+ __chk_nr get_msk_inuse $expected $*
+}
+
# $1: ns, $2: port
wait_local_port_listen()
{
@@ -195,8 +233,10 @@ wait_connected $ns 10000
chk_msk_nr 2 "after MPC handshake "
chk_msk_remote_key_nr 2 "....chk remote_key"
chk_msk_fallback_nr 0 "....chk no fallback"
+chk_msk_inuse 2 "....chk 2 msk in use"
flush_pids
+chk_msk_inuse 0 "....chk 0 msk in use after flush"
echo "a" | \
timeout ${timeout_test} \
@@ -211,8 +251,11 @@ echo "b" | \
127.0.0.1 >/dev/null &
wait_connected $ns 10001
chk_msk_fallback_nr 1 "check fallback"
+chk_msk_inuse 1 "....chk 1 msk in use"
flush_pids
+chk_msk_inuse 0 "....chk 0 msk in use after flush"
+
NR_CLIENTS=100
for I in `seq 1 $NR_CLIENTS`; do
echo "a" | \
@@ -232,6 +275,9 @@ for I in `seq 1 $NR_CLIENTS`; do
done
wait_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
+chk_msk_inuse $((NR_CLIENTS*2)) "....chk many msk in use"
flush_pids
+chk_msk_inuse 0 "....chk 0 msk in use after flush"
+
exit $ret
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 8a8266957bc5..b25a31445ded 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -627,7 +627,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd,
char rbuf[8192];
ssize_t len;
- if (fds.events == 0)
+ if (fds.events == 0 || quit)
break;
switch (poll(&fds, 1, poll_timeout)) {
@@ -733,7 +733,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd,
}
/* leave some time for late join/announce */
- if (cfg_remove)
+ if (cfg_remove && !quit)
usleep(cfg_wait);
return 0;
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 079f8f46849d..42e3bd1a05f5 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -780,24 +780,17 @@ do_transfer()
addr_nr_ns2=${addr_nr_ns2:9}
fi
- local local_addr
- if is_v6 "${connect_addr}"; then
- local_addr="::"
- else
- local_addr="0.0.0.0"
- fi
-
extra_srv_args="$extra_args $extra_srv_args"
if [ "$test_link_fail" -gt 1 ];then
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_srv_args ${local_addr} < "$sinfail" > "$sout" &
+ $extra_srv_args "::" < "$sinfail" > "$sout" &
else
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_srv_args ${local_addr} < "$sin" > "$sout" &
+ $extra_srv_args "::" < "$sin" > "$sout" &
fi
local spid=$!
@@ -2460,6 +2453,47 @@ v4mapped_tests()
fi
}
+mixed_tests()
+{
+ if reset "IPv4 sockets do not use IPv6 addresses"; then
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ chk_join_nr 0 0 0
+ fi
+
+ # Need an IPv6 mptcp socket to allow subflows of both families
+ if reset "simult IPv4 and IPv6 subflows"; then
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
+ run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
+ chk_join_nr 1 1 1
+ fi
+
+ # cross-family subflows will not be created even in fullmesh mode
+ if reset "simult IPv4 and IPv6 subflows, fullmesh 1x1"; then
+ pm_nl_set_limits $ns1 0 4
+ pm_nl_set_limits $ns2 1 4
+ pm_nl_add_endpoint $ns2 dead:beef:2::2 flags subflow,fullmesh
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
+ run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
+ chk_join_nr 1 1 1
+ fi
+
+ # fullmesh still tries to create all the possible subflows with a
+ # matching family
+ if reset "simult IPv4 and IPv6 subflows, fullmesh 2x2"; then
+ pm_nl_set_limits $ns1 0 4
+ pm_nl_set_limits $ns2 2 4
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+ run_tests $ns1 $ns2 dead:beef:1::1 0 0 fullmesh_1 slow
+ chk_join_nr 4 4 4
+ fi
+}
+
backup_tests()
{
# single subflow, backup
@@ -3132,6 +3166,7 @@ all_tests_sorted=(
a@add_tests
6@ipv6_tests
4@v4mapped_tests
+ M@mixed_tests
b@backup_tests
p@add_addr_ports_tests
k@syncookies_tests
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index ab2d581f28a1..66c5be25c13d 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -43,41 +43,40 @@ rndh=$(printf %x "$sec")-$(mktemp -u XXXXXX)
ns1="ns1-$rndh"
ns2="ns2-$rndh"
+print_title()
+{
+ stdbuf -o0 -e0 printf "INFO: %s\n" "${1}"
+}
+
kill_wait()
{
+ [ $1 -eq 0 ] && return 0
+
+ kill -SIGUSR1 $1 > /dev/null 2>&1
kill $1 > /dev/null 2>&1
wait $1 2>/dev/null
}
cleanup()
{
- echo "cleanup"
-
- rm -rf $file $client_evts $server_evts
+ print_title "Cleanup"
# Terminate the MPTCP connection and related processes
- if [ $client4_pid -ne 0 ]; then
- kill -SIGUSR1 $client4_pid > /dev/null 2>&1
- fi
- if [ $server4_pid -ne 0 ]; then
- kill_wait $server4_pid
- fi
- if [ $client6_pid -ne 0 ]; then
- kill -SIGUSR1 $client6_pid > /dev/null 2>&1
- fi
- if [ $server6_pid -ne 0 ]; then
- kill_wait $server6_pid
- fi
- if [ $server_evts_pid -ne 0 ]; then
- kill_wait $server_evts_pid
- fi
- if [ $client_evts_pid -ne 0 ]; then
- kill_wait $client_evts_pid
- fi
+ local pid
+ for pid in $client4_pid $server4_pid $client6_pid $server6_pid\
+ $server_evts_pid $client_evts_pid
+ do
+ kill_wait $pid
+ done
+
local netns
for netns in "$ns1" "$ns2" ;do
ip netns del "$netns"
done
+
+ rm -rf $file $client_evts $server_evts
+
+ stdbuf -o0 -e0 printf "Done\n"
}
trap cleanup EXIT
@@ -108,6 +107,7 @@ ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth1 nodad
ip -net "$ns2" link set ns2eth1 up
+print_title "Init"
stdbuf -o0 -e0 printf "Created network namespaces ns1, ns2 \t\t\t[OK]\n"
make_file()
@@ -193,11 +193,16 @@ make_connection()
server_serverside=$(grep "type:1," "$server_evts" |
sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q')
+ stdbuf -o0 -e0 printf "Established IP%s MPTCP Connection ns2 => ns1 \t\t" $is_v6
if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
[ "$server_serverside" = 1 ]
then
- stdbuf -o0 -e0 printf "Established IP%s MPTCP Connection ns2 => ns1 \t\t[OK]\n" $is_v6
+ stdbuf -o0 -e0 printf "[OK]\n"
else
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ stdbuf -o0 -e0 printf "\tExpected tokens (c:%s - s:%s) and server (c:%d - s:%d)\n" \
+ "${client_token}" "${server_token}" \
+ "${client_serverside}" "${server_serverside}"
exit 1
fi
@@ -217,6 +222,48 @@ make_connection()
fi
}
+# $1: var name ; $2: prev ret
+check_expected_one()
+{
+ local var="${1}"
+ local exp="e_${var}"
+ local prev_ret="${2}"
+
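+	# ${!var} and ${!exp} are bash indirect expansions: compare the value
+	# of the named variable against its "e_"-prefixed expected counterpart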
+ if [ "${!var}" = "${!exp}" ]
+ then
+ return 0
+ fi
+
+ if [ "${prev_ret}" = "0" ]
+ then
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ fi
+
+ stdbuf -o0 -e0 printf "\tExpected value for '%s': '%s', got '%s'.\n" \
+ "${var}" "${!var}" "${!exp}"
+ return 1
+}
+
+# $@: all var names to check
+check_expected()
+{
+ local ret=0
+ local var
+
+ for var in "${@}"
+ do
+ check_expected_one "${var}" "${ret}" || ret=1
+ done
+
+ if [ ${ret} -eq 0 ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ return 0
+ fi
+
+ exit 1
+}
+
verify_announce_event()
{
local evt=$1
@@ -242,19 +289,14 @@ verify_announce_event()
fi
dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
- [ "$addr" = "$e_addr" ] && [ "$dport" = "$e_dport" ] &&
- [ "$id" = "$e_id" ]
- then
- stdbuf -o0 -e0 printf "[OK]\n"
- return 0
- fi
- stdbuf -o0 -e0 printf "[FAIL]\n"
- exit 1
+
+ check_expected "type" "token" "addr" "dport" "id"
}
test_announce()
{
+ print_title "Announce tests"
+
# Capture events on the network namespace running the server
:>"$server_evts"
@@ -270,7 +312,7 @@ test_announce()
then
stdbuf -o0 -e0 printf "[OK]\n"
else
- stdbuf -o0 -e0 printf "[FAIL]\n"
+ stdbuf -o0 -e0 printf "[FAIL]\n\ttype defined: %s\n" "${type}"
exit 1
fi
@@ -347,18 +389,14 @@ verify_remove_event()
type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
- [ "$id" = "$e_id" ]
- then
- stdbuf -o0 -e0 printf "[OK]\n"
- return 0
- fi
- stdbuf -o0 -e0 printf "[FAIL]\n"
- exit 1
+
+ check_expected "type" "token" "id"
}
test_remove()
{
+ print_title "Remove tests"
+
# Capture events on the network namespace running the server
:>"$server_evts"
@@ -507,20 +545,13 @@ verify_subflow_events()
daddr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
fi
- if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
- [ "$daddr" = "$e_daddr" ] && [ "$e_dport" = "$dport" ] &&
- [ "$family" = "$e_family" ] && [ "$saddr" = "$e_saddr" ] &&
- [ "$e_locid" = "$locid" ] && [ "$e_remid" = "$remid" ]
- then
- stdbuf -o0 -e0 printf "[OK]\n"
- return 0
- fi
- stdbuf -o0 -e0 printf "[FAIL]\n"
- exit 1
+ check_expected "type" "token" "daddr" "dport" "family" "saddr" "locid" "remid"
}
test_subflows()
{
+ print_title "Subflows v4 or v6 only tests"
+
# Capture events on the network namespace running the server
:>"$server_evts"
@@ -754,6 +785,8 @@ test_subflows()
test_subflows_v4_v6_mix()
{
+ print_title "Subflows v4 and v6 mix tests"
+
# Attempt to add a listener at 10.0.2.1:<subflow-port>
ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
$app6_port > /dev/null 2>&1 &
@@ -800,6 +833,8 @@ test_subflows_v4_v6_mix()
test_prio()
{
+ print_title "Prio tests"
+
local count
# Send MP_PRIO signal from client to server machine
@@ -811,7 +846,7 @@ test_prio()
count=$(ip netns exec "$ns2" nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}')
[ -z "$count" ] && count=0
if [ $count != 1 ]; then
- stdbuf -o0 -e0 printf "[FAIL]\n"
+ stdbuf -o0 -e0 printf "[FAIL]\n\tCount != 1: %d\n" "${count}"
exit 1
else
stdbuf -o0 -e0 printf "[OK]\n"
@@ -822,7 +857,7 @@ test_prio()
count=$(ip netns exec "$ns1" nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}')
[ -z "$count" ] && count=0
if [ $count != 1 ]; then
- stdbuf -o0 -e0 printf "[FAIL]\n"
+ stdbuf -o0 -e0 printf "[FAIL]\n\tCount != 1: %d\n" "${count}"
exit 1
else
stdbuf -o0 -e0 printf "[OK]\n"
@@ -863,19 +898,13 @@ verify_listener_events()
sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q')
fi
- if [ $type ] && [ $type = $e_type ] &&
- [ $family ] && [ $family = $e_family ] &&
- [ $saddr ] && [ $saddr = $e_saddr ] &&
- [ $sport ] && [ $sport = $e_sport ]; then
- stdbuf -o0 -e0 printf "[OK]\n"
- return 0
- fi
- stdbuf -o0 -e0 printf "[FAIL]\n"
- exit 1
+ check_expected "type" "family" "saddr" "sport"
}
test_listener()
{
+ print_title "Listener tests"
+
# Capture events on the network namespace running the client
:>$client_evts
@@ -902,8 +931,10 @@ test_listener()
verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
}
+print_title "Make connections"
make_connection
make_connection "v6"
+
test_announce
test_remove
test_subflows
diff --git a/tools/testing/selftests/net/bpf/nat6to4.c b/tools/testing/selftests/net/nat6to4.c
index ac54c36b25fc..ac54c36b25fc 100644
--- a/tools/testing/selftests/net/bpf/nat6to4.c
+++ b/tools/testing/selftests/net/nat6to4.c
diff --git a/tools/testing/selftests/net/rps_default_mask.sh b/tools/testing/selftests/net/rps_default_mask.sh
new file mode 100755
index 000000000000..0fd0d2db3abc
--- /dev/null
+++ b/tools/testing/selftests/net/rps_default_mask.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+readonly ksft_skip=4
+readonly cpus=$(nproc)
+ret=0
+
+[ $cpus -gt 2 ] || exit $ksft_skip
+
+readonly INITIAL_RPS_DEFAULT_MASK=$(cat /proc/sys/net/core/rps_default_mask)
+readonly TAG="$(mktemp -u XXXXXX)"
+readonly VETH="veth${TAG}"
+readonly NETNS="ns-${TAG}"
+
+setup() {
+ ip netns add "${NETNS}"
+ ip -netns "${NETNS}" link set lo up
+}
+
+cleanup() {
+ echo $INITIAL_RPS_DEFAULT_MASK > /proc/sys/net/core/rps_default_mask
+ ip netns del $NETNS
+}
+
+chk_rps() {
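+	# compare the rx-0 rps_cpus mask of a device, optionally inside a
+	# netns, against the expected mask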
+ local rps_mask expected_rps_mask=$4
+ local dev_name=$3
+ local netns=$2
+ local cmd="cat"
+ local msg=$1
+
+ [ -n "$netns" ] && cmd="ip netns exec $netns $cmd"
+
+ rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus)
+ printf "%-60s" "$msg"
+ if [ $rps_mask -eq $expected_rps_mask ]; then
+ echo "[ ok ]"
+ else
+ echo "[fail] expected $expected_rps_mask found $rps_mask"
+ ret=1
+ fi
+}
+
+trap cleanup EXIT
+
+echo 0 > /proc/sys/net/core/rps_default_mask
+setup
+chk_rps "empty rps_default_mask" $NETNS lo 0
+cleanup
+
+echo 1 > /proc/sys/net/core/rps_default_mask
+setup
+chk_rps "changing rps_default_mask dont affect existing devices" "" lo $INITIAL_RPS_DEFAULT_MASK
+
+echo 3 > /proc/sys/net/core/rps_default_mask
+chk_rps "changing rps_default_mask dont affect existing netns" $NETNS lo 0
+
+ip link add name $VETH type veth peer netns $NETNS name $VETH
+ip link set dev $VETH up
+ip -n $NETNS link set dev $VETH up
+chk_rps "changing rps_default_mask affect newly created devices" "" $VETH 3
+chk_rps "changing rps_default_mask don't affect newly child netns[II]" $NETNS $VETH 0
+ip netns del $NETNS
+
+setup
+chk_rps "rps_default_mask is 0 by default in child netns" "$NETNS" lo 0
+
+ip netns exec $NETNS sysctl -qw net.core.rps_default_mask=1
+ip link add name $VETH type veth peer netns $NETNS name $VETH
+chk_rps "changing rps_default_mask in child ns don't affect the main one" "" lo $INITIAL_RPS_DEFAULT_MASK
+chk_rps "changing rps_default_mask in child ns affects new childns devices" $NETNS $VETH 1
+chk_rps "changing rps_default_mask in child ns don't affect existing devices" $NETNS lo 0
+
+exit $ret
diff --git a/tools/testing/selftests/net/srv6_end_flavors_test.sh b/tools/testing/selftests/net/srv6_end_flavors_test.sh
new file mode 100755
index 000000000000..50563443a4ad
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_end_flavors_test.sh
@@ -0,0 +1,869 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+# author: Paolo Lungaroni <paolo.lungaroni@uniroma2.it>
+#
+# This script is designed to test the support for "flavors" in the SRv6 End
+# behavior.
+#
+# Flavors defined in RFC8986 [1] represent additional operations that can modify
+# or extend the existing SRv6 End, End.X and End.T behaviors. For the sake of
+# convenience, we report the list of flavors described in [1] hereafter:
+# - Penultimate Segment Pop (PSP);
+# - Ultimate Segment Pop (USP);
+# - Ultimate Segment Decapsulation (USD).
+#
+# The End, End.X, and End.T behaviors can support these flavors either
+# individually or in combinations.
+# Currently in this selftest we consider only the PSP flavor for the SRv6 End
+# behavior. However, it is possible to extend the script once other flavors
+# are supported in the kernel.
+#
+# The purpose of the PSP flavor is to instruct the penultimate node listed in
+# the SRv6 policy to remove (i.e. pop) the outermost SRH from the extension
+# headers following the IPv6 header.
+# A PSP enabled SRv6 End behavior instance processes the SRH by:
+# - decrementing the Segment Left (SL) value from 1 to 0;
+# - copying the last SID from the SID List into the IPv6 Destination Address
+# (DA);
+# - removing the SRH from the extension headers following the IPv6 header.
+#
+# Once the SRH is removed, the IPv6 packet is forwarded to the destination using
+# the IPv6 DA updated during the PSP operation (i.e. the IPv6 DA corresponding
+# to the last SID carried by the removed SRH).
+#
+# Although the PSP flavor can be set for any SRv6 End behavior instance on any
+# SR node, it is active only on behaviors bound to the penultimate SID of a
+# given SRv6 policy.
+# SL=2 SL=1 SL=0
+# | | |
+# For example, given the SRv6 policy (SID List := <X, Y, Z>):
+# - a PSP enabled SRv6 End behavior bound to SID Y will apply the PSP operation
+# as Segment Left (SL) is 1, corresponding to the Penultimate Segment of the
+# SID List;
+# - a PSP enabled SRv6 End behavior bound to SID X will *NOT* apply the PSP
+# operation as the Segment Left is 2. This behavior instance will apply the
+# "standard" End packet processing, ignoring the configured PSP flavor at
+# all.
+#
+# [1] RFC8986: https://datatracker.ietf.org/doc/html/rfc8986
+#
+# Network topology
+# ================
+#
+# The network topology used in this selftest is depicted hereafter, composed of
+# two hosts (hs-1, hs-2) and four routers (rt-1, rt-2, rt-3, rt-4).
+# Hosts hs-1 and hs-2 are connected to routers rt-1 and rt-2, respectively,
+# allowing them to communicate with each other.
+# Traffic exchanged between hs-1 and hs-2 can follow different network paths.
+# The network operator, through specific SRv6 Policies can steer traffic to one
+# path rather than another. In this selftest this is implemented as follows:
+#
+# i) The SRv6 H.Insert behavior applies SRv6 Policies on traffic received by
+# connected hosts. It pushes the Segment Routing Header (SRH) after the
+# IPv6 header. The SRH contains the SID List (i.e. SRv6 Policy) needed for
+# steering traffic across the segments/waypoints specified in that list;
+#
+# ii) The SRv6 End behavior advances the active SID in the SID List carried by
+# the SRH;
+#
+# iii) The PSP enabled SRv6 End behavior is used to remove the SRH when such
+# behavior is configured on a node bound to the Penultimate Segment carried
+# by the SID List.
+#
+# cafe::1 cafe::2
+# +--------+ +--------+
+# | | | |
+# | hs-1 | | hs-2 |
+# | | | |
+# +---+----+ +----+---+
+# cafe::/64 | | cafe::/64
+# | |
+# +---+----+ +----+---+
+# | | fcf0:0:1:2::/64 | |
+# | rt-1 +-------------------+ rt-2 |
+# | | | |
+# +---+----+ +----+---+
+# | . . |
+# | fcf0:0:1:3::/64 . |
+# | . . |
+# | . . |
+# fcf0:0:1:4::/64 | . | fcf0:0:2:3::/64
+# | . . |
+# | . . |
+# | fcf0:0:2:4::/64 . |
+# | . . |
+# +---+----+ +----+---+
+# | | | |
+# | rt-4 +-------------------+ rt-3 |
+# | | fcf0:0:3:4::/64 | |
+# +---+----+ +----+---+
+#
+# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y in
+# the IPv6 operator network.
+#
+#
+# Local SID table
+# ===============
+#
+# Each SRv6 router is configured with a Local SID table in which SIDs are
+# stored. Considering the given SRv6 router rt-x, at least two SIDs are
+# configured in the Local SID table:
+#
+# Local SID table for SRv6 router rt-x
+# +---------------------------------------------------------------------+
+# |fcff:x::e is associated with the SRv6 End behavior |
+# |fcff:x::ef1 is associated with the SRv6 End behavior with PSP flavor |
+# +---------------------------------------------------------------------+
+#
+# The fcff::/16 prefix is reserved by the operator for the SIDs. Reachability of
+# SIDs is ensured by proper configuration of the IPv6 operator's network and
+# SRv6 routers.
+#
+#
+# SRv6 Policies
+# =============
+#
+# An SRv6 ingress router applies different SRv6 Policies to the traffic received
+# from connected hosts on the basis of the destination addresses.
+# In case of SRv6 H.Insert behavior, the SRv6 Policy enforcement consists of
+# pushing the SRH (carrying a given SID List) after the existing IPv6 header.
+# Note that in insert mode, there is no encapsulation at all.
+#
+# Before applying an SRv6 Policy using the SRv6 H.Insert behavior
+# +------+---------+
+# | IPv6 | Payload |
+# +------+---------+
+#
+# After applying an SRv6 Policy using the SRv6 H.Insert behavior
+# +------+-----+---------+
+# | IPv6 | SRH | Payload |
+# +------+-----+---------+
+#
+# Traffic from hs-1 to hs-2
+# -------------------------
+#
+# Packets generated from hs-1 and directed towards hs-2 are
+# handled by rt-1 which applies the following SRv6 Policy:
+#
+# i.a) IPv6 traffic, SID List=fcff:3::e,fcff:4::ef1,fcff:2::ef1,cafe::2
+#
+# Router rt-1 is configured to enforce the Policy (i.a) through the SRv6
+# H.Insert behavior which pushes the SRH after the existing IPv6 header. This
+# Policy steers the traffic from hs-1 across rt-3, rt-4, rt-2 and finally to the
+# destination hs-2.
+#
+# As the packet reaches the router rt-3, the SRv6 End behavior bound to SID
+# fcff:3::e is triggered. The behavior updates the Segment Left (from SL=3 to
+# SL=2) in the SRH, the IPv6 DA with fcff:4::ef1 and forwards the packet to the
+# next router on the path, i.e. rt-4.
+#
+# When router rt-4 receives the packet, the PSP enabled SRv6 End behavior bound
+# to SID fcff:4::ef1 is executed. Since SL=2, the PSP operation does *NOT*
+# kick in and the behavior applies the default End processing: the Segment
+# Left is decreased (from SL=2 to SL=1), the IPv6 DA is updated with the SID
+# fcff:2::ef1 and the packet is forwarded to router rt-2.
+#
+# The PSP enabled SRv6 End behavior on rt-2 is associated with SID fcff:2::ef1
+# and is executed as the packet is received. Because SL=1, the behavior applies
+# the PSP processing on the packet as follows: i) SL is decreased, i.e. from
+# SL=1 to SL=0; ii) last SID (cafe::2) is copied into the IPv6 DA; iii) the
+# outermost SRH is removed from the extension headers following the IPv6 header.
+# Once the PSP processing is completed, the packet is forwarded to the host hs-2
+# (destination).
+#
+# Traffic from hs-2 to hs-1
+# -------------------------
+#
+# Packets generated from hs-2 and directed to hs-1 are handled by rt-2 which
+# applies the following SRv6 Policy:
+#
+# i.b) IPv6 traffic, SID List=fcff:1::ef1,cafe::1
+#
+# Router rt-2 is configured to enforce the Policy (i.b) through the SRv6
+# H.Insert behavior which pushes the SRH after the existing IPv6 header. This
+# Policy steers the traffic from hs-2 across rt-1 and finally to the
+# destination hs-1.
+#
+#
+# When the router rt-1 receives the packet, the PSP enabled SRv6 End behavior
+# associated with the SID fcff:1::ef1 is triggered. Since SL=1,
+# the PSP operation takes place: i) the SL is decremented; ii) the IPv6 DA is
+# set with the last SID; iii) the SRH is removed from the extension headers
+# after the IPv6 header. At this point, the packet with IPv6 DA=cafe::1 is sent
+# to the destination, i.e. hs-1.
+
+# Kselftest framework requirement - SKIP code is 4.
+readonly ksft_skip=4
+
+readonly RDMSUFF="$(mktemp -u XXXXXXXX)"
+readonly DUMMY_DEVNAME="dum0"
+readonly RT2HS_DEVNAME="veth1"
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fcf0:0
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv6_TESTS_ADDR=2001:db8::1
+readonly LOCATOR_SERVICE=fcff
+readonly END_FUNC=000e
+readonly END_PSP_FUNC=0ef1
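+# e.g. the plain End SID of rt-2 is fcff:2::e and its PSP-flavored End SID
+# is fcff:2::ef1, following the LOCATOR_SERVICE:<rtid>::<function> scheme.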
+
+PING_TIMEOUT_SEC=4
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+# IDs of routers and hosts are initialized during the setup of the testing
+# network
+ROUTERS=''
+HOSTS=''
+
+SETUP_ERR=1
+
+ret=${ksft_skip}
+nsuccess=0
+nfail=0
+
+log_test()
+{
+ local rc="$1"
+ local expected="$2"
+ local msg="$3"
+
+ if [ "${rc}" -eq "${expected}" ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ printf "\nTests passed: %3d\n" "${nsuccess}"
+ printf "Tests failed: %3d\n" "${nfail}"
+
+ # when a test fails, the value of 'ret' is set to 1 (error code).
+ # Conversely, when all tests pass successfully, the 'ret' value is
+ # set to 0 (success code).
+ if [ "${ret}" -ne 1 ]; then
+ ret=0
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+test_command_or_ksft_skip()
+{
+ local cmd="$1"
+
+ if [ ! -x "$(command -v "${cmd}")" ]; then
+ echo "SKIP: Could not run test without \"${cmd}\" tool";
+ exit "${ksft_skip}"
+ fi
+}
+
+get_nodename()
+{
+ local name="$1"
+
+ echo "${name}-${RDMSUFF}"
+}
+
+get_rtname()
+{
+ local rtid="$1"
+
+ get_nodename "rt-${rtid}"
+}
+
+get_hsname()
+{
+ local hsid="$1"
+
+ get_nodename "hs-${hsid}"
+}
+
+__create_namespace()
+{
+ local name="$1"
+
+ ip netns add "${name}"
+}
+
+create_router()
+{
+ local rtid="$1"
+ local nsname
+
+ nsname="$(get_rtname "${rtid}")"
+
+ __create_namespace "${nsname}"
+}
+
+create_host()
+{
+ local hsid="$1"
+ local nsname
+
+ nsname="$(get_hsname "${hsid}")"
+
+ __create_namespace "${nsname}"
+}
+
+cleanup()
+{
+ local nsname
+ local i
+
+ # destroy routers
+ for i in ${ROUTERS}; do
+ nsname="$(get_rtname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # destroy hosts
+ for i in ${HOSTS}; do
+ nsname="$(get_hsname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # check whether the setup phase was completed successfully or not. In
+ # case of an error during the setup phase of the testing environment,
+ # the selftest is considered as "skipped".
+ if [ "${SETUP_ERR}" -ne 0 ]; then
+ echo "SKIP: Setting up the testing environment failed"
+ exit "${ksft_skip}"
+ fi
+
+ exit "${ret}"
+}
+
+add_link_rt_pairs()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local neigh
+ local nsname
+ local neigh_nsname
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ neigh_nsname="$(get_rtname "${neigh}")"
+
+ ip link add "veth-rt-${rt}-${neigh}" netns "${nsname}" \
+ type veth peer name "veth-rt-${neigh}-${rt}" \
+ netns "${neigh_nsname}"
+ done
+}
+
+get_network_prefix()
+{
+ local rt="$1"
+ local neigh="$2"
+ local p="${rt}"
+ local q="${neigh}"
+
+ if [ "${p}" -gt "${q}" ]; then
+ p="${q}"; q="${rt}"
+ fi
+
+ echo "${IPv6_RT_NETWORK}:${p}:${q}"
+}
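+
+# e.g. get_network_prefix 3 1 prints "fcf0:0:1:3": the lower router ID always
+# comes first, matching the fcf0:0:x:y::/64 link naming used above.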
+
+# Given the description of a router <id:op> as an input, the function returns
+# the <id> token which represents the ID of the router.
+# i.e. input: "12:psp"
+# output: "12"
+__get_srv6_rtcfg_id()
+{
+ local element="$1"
+
+ echo "${element}" | cut -d':' -f1
+}
+
+# Given the description of a router <id:op> as an input, the function returns
+# the <op> token which represents the operation (e.g. End behavior with or
+# without flavors) configured for the node.
+#
+# Note that when the operation represents an End behavior with a list of
+# flavors, the output is the ordered version of that list.
+# i.e. input: "5:usp,psp,usd"
+# output: "psp,usd,usp"
+__get_srv6_rtcfg_op()
+{
+ local element="$1"
+
+ # return the lexicographically ordered flavors
+ echo "${element}" | cut -d':' -f2 | sed 's/,/\n/g' | sort | \
+ xargs | sed 's/ /,/g'
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local nsname
+ local net_prefix
+ local devname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ ip -netns "${nsname}" addr \
+ add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+
+ ip -netns "${nsname}" link set "${devname}" up
+ done
+
+ ip -netns "${nsname}" link set lo up
+
+ ip -netns "${nsname}" link add ${DUMMY_DEVNAME} type dummy
+ ip -netns "${nsname}" link set ${DUMMY_DEVNAME} up
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.forwarding=1
+}
+
+# Setup local SIDs for an SRv6 router
+setup_rt_local_sids()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local net_prefix
+ local devname
+ local nsname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ # set underlay network routes for SIDs reachability
+ ip -netns "${nsname}" -6 route \
+ add "${LOCATOR_SERVICE}:${neigh}::/32" \
+ table "${LOCALSID_TABLE_ID}" \
+ via "${net_prefix}::${neigh}" dev "${devname}"
+ done
+
+ # Local End behavior (note that "dev" is a dummy interface chosen for
+ # the sake of simplicity).
+ ip -netns "${nsname}" -6 route \
+ add "${LOCATOR_SERVICE}:${rt}::${END_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End dev "${DUMMY_DEVNAME}"
+
+ # All SIDs start with a common locator. Routes and SRv6 Endpoint
+ # behavior instances are grouped together in the 'localsid' table.
+ ip -netns "${nsname}" -6 rule \
+ add to "${LOCATOR_SERVICE}::/16" \
+ lookup "${LOCALSID_TABLE_ID}" prio 999
+
+ # set default routes to unreachable
+ ip -netns "${nsname}" -6 route \
+ add unreachable default metric 4278198272 \
+ dev "${DUMMY_DEVNAME}"
+}
+
+# This helper function builds and installs the SID List (i.e. SRv6 Policy)
+# to be applied on incoming packets at the ingress node. Moreover, it
+# configures the SRv6 nodes specified in the SID List to process the traffic
+# according to the operations required by the Policy itself.
+# args:
+# $1 - destination host (i.e. cafe::x host)
+# $2 - SRv6 router configured for enforcing the SRv6 Policy
+# $3 - compact way to represent a list of SRv6 routers with their operations
+# (i.e. behaviors) that each of them needs to perform. Every <nodeid:op>
+# element constructs a SID that is associated with the behavior <op> on
+# the <nodeid> node. The list of such elements forms an SRv6 Policy.
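+# e.g. __setup_rt_policy 2 1 "3:noflv 4:psp 2:psp" steers hs-2 bound traffic
+# entering rt-1 through rt-3 (plain End), then rt-4 and rt-2 (PSP End).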
+__setup_rt_policy()
+{
+ local dst="$1"
+ local encap_rt="$2"
+ local policy_rts="$3"
+ local behavior_cfg
+ local in_nsname
+ local rt_nsname
+ local policy=''
+ local function
+ local fullsid
+ local op_type
+ local node
+ local n
+
+ in_nsname="$(get_rtname "${encap_rt}")"
+
+ for n in ${policy_rts}; do
+ node="$(__get_srv6_rtcfg_id "${n}")"
+ op_type="$(__get_srv6_rtcfg_op "${n}")"
+ rt_nsname="$(get_rtname "${node}")"
+
+ case "${op_type}" in
+ "noflv")
+ policy="${policy}${LOCATOR_SERVICE}:${node}::${END_FUNC},"
+ function="${END_FUNC}"
+ behavior_cfg="End"
+ ;;
+
+ "psp")
+ policy="${policy}${LOCATOR_SERVICE}:${node}::${END_PSP_FUNC},"
+ function="${END_PSP_FUNC}"
+ behavior_cfg="End flavors psp"
+ ;;
+
+ *)
+ break
+ ;;
+ esac
+
+ fullsid="${LOCATOR_SERVICE}:${node}::${function}"
+
+ # add SRv6 Endpoint behavior to the selected router
+ if ! ip -netns "${rt_nsname}" -6 route get "${fullsid}" \
+ &>/dev/null; then
+ ip -netns "${rt_nsname}" -6 route \
+ add "${fullsid}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action ${behavior_cfg} \
+ dev "${DUMMY_DEVNAME}"
+ fi
+ done
+
+ # we need to remove the trailing comma to avoid inserting an empty
+ # address (::0) in the SID List.
+ policy="${policy%,}"
+
+ # add SRv6 policy to incoming traffic sent by connected hosts
+ ip -netns "${in_nsname}" -6 route \
+ add "${IPv6_HS_NETWORK}::${dst}" \
+ encap seg6 mode inline segs "${policy}" \
+ dev "${DUMMY_DEVNAME}"
+
+ ip -netns "${in_nsname}" -6 neigh \
+ add proxy "${IPv6_HS_NETWORK}::${dst}" \
+ dev "${RT2HS_DEVNAME}"
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv6()
+{
+ __setup_rt_policy "$1" "$2" "$3"
+}
+
+setup_hs()
+{
+ local hs="$1"
+ local rt="$2"
+ local hsname
+ local rtname
+
+ hsname="$(get_hsname "${hs}")"
+ rtname="$(get_rtname "${rt}")"
+
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip -netns "${hsname}" link add veth0 type veth \
+ peer name "${RT2HS_DEVNAME}" netns "${rtname}"
+
+ ip -netns "${hsname}" addr \
+ add "${IPv6_HS_NETWORK}::${hs}/64" dev veth0 nodad
+
+ ip -netns "${hsname}" link set veth0 up
+ ip -netns "${hsname}" link set lo up
+
+ ip -netns "${rtname}" addr \
+ add "${IPv6_HS_NETWORK}::254/64" dev "${RT2HS_DEVNAME}" nodad
+
+ ip -netns "${rtname}" link set "${RT2HS_DEVNAME}" up
+
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv6.conf."${RT2HS_DEVNAME}".proxy_ndp=1
+}
+
+setup()
+{
+ local i
+
+ # create routers
+ ROUTERS="1 2 3 4"; readonly ROUTERS
+ for i in ${ROUTERS}; do
+ create_router "${i}"
+ done
+
+ # create hosts
+ HOSTS="1 2"; readonly HOSTS
+ for i in ${HOSTS}; do
+ create_host "${i}"
+ done
+
+ # set up the links for connecting routers
+ add_link_rt_pairs 1 "2 3 4"
+ add_link_rt_pairs 2 "3 4"
+ add_link_rt_pairs 3 "4"
+
+ # set up the basic connectivity of routers and routes required for
+ # reachability of SIDs.
+ setup_rt_networking 1 "2 3 4"
+ setup_rt_networking 2 "1 3 4"
+ setup_rt_networking 3 "1 2 4"
+ setup_rt_networking 4 "1 2 3"
+
+ # set up the hosts connected to routers
+ setup_hs 1 1
+ setup_hs 2 2
+
+ # set up default SRv6 Endpoints (i.e. SRv6 End behavior)
+ setup_rt_local_sids 1 "2 3 4"
+ setup_rt_local_sids 2 "1 3 4"
+ setup_rt_local_sids 3 "1 2 4"
+ setup_rt_local_sids 4 "1 2 3"
+
+ # set up SRv6 policies
+ # create a connection between hosts hs-1 and hs-2.
+ # The path between hs-1 and hs-2 traverses SRv6-aware routers.
+ # One path is chosen for each direction:
+ #
+ # Direction hs-1 -> hs-2 (PSP flavor)
+ # - rt-1 (SRv6 H.Insert policy)
+ # - rt-3 (SRv6 End behavior)
+ # - rt-4 (SRv6 End flavor PSP with SL>1, acting as End behavior)
+ # - rt-2 (SRv6 End flavor PSP with SL=1)
+ #
+ # Direction hs-2 -> hs-1 (PSP flavor)
+ # - rt-2 (SRv6 H.Insert policy)
+ # - rt-1 (SRv6 End flavor PSP with SL=1)
+ setup_rt_policy_ipv6 2 1 "3:noflv 4:psp 2:psp"
+ setup_rt_policy_ipv6 1 2 "1:psp"
+
+ # testing environment was set up successfully
+ SETUP_ERR=0
+}
+
+check_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+ local prefix
+ local rtsrc_nsname
+
+ rtsrc_nsname="$(get_rtname "${rtsrc}")"
+
+ prefix="$(get_network_prefix "${rtsrc}" "${rtdst}")"
+
+ ip netns exec "${rtsrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${prefix}::${rtdst}" >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+
+ check_rt_connectivity "${rtsrc}" "${rtdst}"
+ log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv6_HS_NETWORK}::${hsdst}" >/dev/null 2>&1
+}
+
+check_and_log_hs2gw_connectivity()
+{
+ local hssrc="$1"
+
+ check_hs_ipv6_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> gw"
+}
+
+check_and_log_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_and_log_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+}
+
+router_tests()
+{
+ local i
+ local j
+
+ log_section "IPv6 routers connectivity test"
+
+ for i in ${ROUTERS}; do
+ for j in ${ROUTERS}; do
+ if [ "${i}" -eq "${j}" ]; then
+ continue
+ fi
+
+ check_and_log_rt_connectivity "${i}" "${j}"
+ done
+ done
+}
+
+host2gateway_tests()
+{
+ local hs
+
+ log_section "IPv6 connectivity test among hosts and gateways"
+
+ for hs in ${HOSTS}; do
+ check_and_log_hs2gw_connectivity "${hs}"
+ done
+}
+
+host_srv6_end_flv_psp_tests()
+{
+ log_section "SRv6 connectivity test hosts (h1 <-> h2, PSP flavor)"
+
+ check_and_log_hs_connectivity 1 2
+ check_and_log_hs_connectivity 2 1
+}
+
+test_iproute2_supp_or_ksft_skip()
+{
+ local flavor="$1"
+
+ if ! ip route help 2>&1 | grep -qo "${flavor}"; then
+ echo "SKIP: Missing SRv6 ${flavor} flavor support in iproute2"
+ exit "${ksft_skip}"
+ fi
+}
+
+test_kernel_supp_or_ksft_skip()
+{
+ local flavor="$1"
+ local test_netns
+
+ test_netns="kflv-$(mktemp -u XXXXXXXX)"
+
+ if ! ip netns add "${test_netns}"; then
+ echo "SKIP: Cannot set up netns to test kernel support for flavors"
+ exit "${ksft_skip}"
+ fi
+
+ if ! ip -netns "${test_netns}" link \
+ add "${DUMMY_DEVNAME}" type dummy; then
+ echo "SKIP: Cannot set up dummy dev to test kernel support for flavors"
+
+ ip netns del "${test_netns}"
+ exit "${ksft_skip}"
+ fi
+
+ if ! ip -netns "${test_netns}" link \
+ set "${DUMMY_DEVNAME}" up; then
+ echo "SKIP: Cannot activate dummy dev to test kernel support for flavors"
+
+ ip netns del "${test_netns}"
+ exit "${ksft_skip}"
+ fi
+
+ if ! ip -netns "${test_netns}" -6 route \
+ add "${IPv6_TESTS_ADDR}" encap seg6local \
+ action End flavors "${flavor}" dev "${DUMMY_DEVNAME}"; then
+ echo "SKIP: ${flavor} flavor not supported in kernel"
+
+ ip netns del "${test_netns}"
+ exit "${ksft_skip}"
+ fi
+
+ ip netns del "${test_netns}"
+}
+
+test_dummy_dev_or_ksft_skip()
+{
+ local test_netns
+
+ test_netns="dummy-$(mktemp -u XXXXXXXX)"
+
+ if ! ip netns add "${test_netns}"; then
+ echo "SKIP: Cannot set up netns for testing dummy dev support"
+ exit "${ksft_skip}"
+ fi
+
+ modprobe dummy &>/dev/null || true
+ if ! ip -netns "${test_netns}" link \
+ add "${DUMMY_DEVNAME}" type dummy; then
+ echo "SKIP: dummy dev not supported"
+
+ ip netns del "${test_netns}"
+ exit "${ksft_skip}"
+ fi
+
+ ip netns del "${test_netns}"
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "SKIP: Need root privileges"
+ exit "${ksft_skip}"
+fi
+
+# required programs to carry out this selftest
+test_command_or_ksft_skip ip
+test_command_or_ksft_skip ping
+test_command_or_ksft_skip sysctl
+test_command_or_ksft_skip grep
+test_command_or_ksft_skip cut
+test_command_or_ksft_skip sed
+test_command_or_ksft_skip sort
+test_command_or_ksft_skip xargs
+
+test_dummy_dev_or_ksft_skip
+test_iproute2_supp_or_ksft_skip psp
+test_kernel_supp_or_ksft_skip psp
+
+set -e
+trap cleanup EXIT
+
+setup
+set +e
+
+router_tests
+host2gateway_tests
+host_srv6_end_flv_psp_tests
+
+print_log_test_results
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index 00f837c9bc6c..46a02bbd31d0 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -137,7 +137,8 @@ static void *mmap_large_buffer(size_t need, size_t *allocated)
if (buffer == (void *)-1) {
sz = need;
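+		/* prefault the fallback mapping up front (MAP_POPULATE),
+		 * presumably so page faults don't skew the measurement */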
buffer = mmap(NULL, sz, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE,
+ -1, 0);
if (buffer != (void *)-1)
fprintf(stderr, "MAP_HUGETLB attempt failed, look at /sys/kernel/mm/hugepages for optimal performance\n");
}
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
index c9c4b9d65839..0a6359bed0b9 100755
--- a/tools/testing/selftests/net/udpgro_frglist.sh
+++ b/tools/testing/selftests/net/udpgro_frglist.sh
@@ -40,8 +40,8 @@ run_one() {
ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
tc -n "${PEER_NS}" qdisc add dev veth1 clsact
- tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file ../bpf/nat6to4.o section schedcls/ingress6/nat_6 direct-action
- tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file ../bpf/nat6to4.o section schedcls/egress4/snat4 direct-action
+ tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file nat6to4.o section schedcls/ingress6/nat_6 direct-action
+ tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file nat6to4.o section schedcls/egress4/snat4 direct-action
echo ${rx_args}
ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
@@ -88,8 +88,8 @@ if [ ! -f ${BPF_FILE} ]; then
exit -1
fi
-if [ ! -f bpf/nat6to4.o ]; then
- echo "Missing nat6to4 helper. Build bpfnat6to4.o selftest first"
+if [ ! -f nat6to4.o ]; then
+ echo "Missing nat6to4 helper. Build bpf nat6to4.o selftest first"
exit -1
fi
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 4058c7451e70..f35a924d4a30 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -214,11 +214,10 @@ static void do_verify_udp(const char *data, int len)
static int recv_msg(int fd, char *buf, int len, int *gso_size)
{
- char control[CMSG_SPACE(sizeof(uint16_t))] = {0};
+ char control[CMSG_SPACE(sizeof(int))] = {0};
struct msghdr msg = {0};
struct iovec iov = {0};
struct cmsghdr *cmsg;
- uint16_t *gsosizeptr;
int ret;
iov.iov_base = buf;
@@ -237,8 +236,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size)
cmsg = CMSG_NXTHDR(&msg, cmsg)) {
if (cmsg->cmsg_level == SOL_UDP
&& cmsg->cmsg_type == UDP_GRO) {
- gsosizeptr = (uint16_t *) CMSG_DATA(cmsg);
- *gso_size = *gsosizeptr;
+ *gso_size = *(int *)CMSG_DATA(cmsg);
break;
}
}
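
The hunk above fixes the UDP_GRO cmsg handling: the kernel stores the GRO
segment size as an int, so it must be read back as an int (a uint16_t view
would see only part of the value) and the control buffer sized accordingly.
A self-contained sketch of the corrected receive path; the SOL_UDP/UDP_GRO
fallback defines are an assumption for toolchains whose headers lack them:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_UDP
#define SOL_UDP 17      /* assumption: equals IPPROTO_UDP */
#endif
#ifndef UDP_GRO
#define UDP_GRO 104     /* assumption: value from linux/udp.h */
#endif

/* Receive one datagram; report the GRO segment size via *gso_size (-1 if absent). */
static int recv_with_gro_size(int fd, char *buf, int len, int *gso_size)
{
        char control[CMSG_SPACE(sizeof(int))] = {0};  /* int-sized, matching the kernel */
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {0};
        struct cmsghdr *cmsg;
        int ret;

        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = control;
        msg.msg_controllen = sizeof(control);

        *gso_size = -1;
        ret = recvmsg(fd, &msg, 0);
        if (ret < 0)
                return ret;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO) {
                        memcpy(gso_size, CMSG_DATA(cmsg), sizeof(int));
                        break;
                }
        }
        return ret;
}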
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/rsvp.json b/tools/testing/selftests/tc-testing/tc-tests/filters/rsvp.json
deleted file mode 100644
index bdcbaa4c5663..000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/rsvp.json
+++ /dev/null
@@ -1,203 +0,0 @@
-[
- {
- "id": "2141",
- "name": "Add rsvp filter with tcp proto and specific IP address",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto tcp session 198.168.10.64",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*session 198.168.10.64 ipproto tcp",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "5267",
- "name": "Add rsvp filter with udp proto and specific IP address",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*session 1.1.1.1 ipproto udp",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "2819",
- "name": "Add rsvp filter with src ip and src port",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1 sender 2.2.2.2/5021 classid 1:1",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*flowid 1:1 session 1.1.1.1 ipproto udp sender 2.2.2.2/5021",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "c967",
- "name": "Add rsvp filter with tunnelid and continue action",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1 tunnelid 2 classid 1:1 action continue",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*flowid 1:1 session 1.1.1.1 ipproto udp tunnelid 2.*action order [0-9]+: gact action continue",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "5463",
- "name": "Add rsvp filter with tunnel and pipe action",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1 tunnel 2 skip 1 action pipe",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*tunnel 2 skip 1 session 1.1.1.1 ipproto udp.*action order [0-9]+: gact action pipe",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "2332",
- "name": "Add rsvp filter with miltiple actions",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 7 rsvp ipproto udp session 1.1.1.1 classid 1:1 action skbedit mark 7 pipe action gact drop",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*flowid 1:1 session 1.1.1.1 ipproto udp.*action order [0-9]+: skbedit mark 7 pipe.*action order [0-9]+: gact action drop",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "8879",
- "name": "Add rsvp filter with tunnel and skp flag",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1 tunnel 2 skip 1 action pipe",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*tunnel 2 skip 1 session 1.1.1.1 ipproto udp.*action order [0-9]+: gact action pipe",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "8261",
- "name": "List rsvp filters",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress",
- "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1/1234 classid 1:1",
- "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto tcp session 2.2.2.2/1234 classid 2:1"
- ],
- "cmdUnderTest": "$TC filter show dev $DEV1 parent ffff:",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh",
- "matchCount": "2",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "8989",
- "name": "Delete rsvp filter",
- "category": [
- "filter",
- "rsvp"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress",
- "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1/1234 tunnelid 9 classid 2:1"
- ],
- "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1/1234 tunnelid 9 classid 2:1",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*flowid 2:1 session 1.1.1.1/1234 ipproto udp tunnelid 9",
- "matchCount": "0",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- }
-]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json
deleted file mode 100644
index 44901db70376..000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json
+++ /dev/null
@@ -1,227 +0,0 @@
-[
- {
- "id": "8293",
- "name": "Add tcindex filter with default action",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
- "matchPattern": "^filter parent ffff: protocol ip pref 1 tcindex chain 0 handle 0x0001 classid 1:1",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "7281",
- "name": "Add tcindex filter with hash size and pass action",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 fall_through classid 1:1 action pass",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
- "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "b294",
- "name": "Add tcindex filter with mask shift and reclassify action",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action reclassify",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
- "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action reclassify",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "0532",
- "name": "Add tcindex filter with pass_on and continue actions",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 pass_on classid 1:1 action continue",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
- "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action continue",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "d473",
- "name": "Add tcindex filter with pipe action",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action pipe",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
- "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action pipe",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "2940",
- "name": "Add tcindex filter with miltiple actions",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress"
- ],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 7 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action skbedit mark 7 pipe action gact drop",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 7 protocol ip tcindex",
- "matchPattern": "^filter parent ffff: protocol ip pref 7 tcindex.*handle 0x0001.*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "1893",
- "name": "List tcindex filters",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress",
- "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1",
- "$TC filter add dev $DEV1 parent ffff: handle 2 protocol ip prio 1 tcindex classid 1:1"
- ],
- "cmdUnderTest": "$TC filter show dev $DEV1 parent ffff:",
- "expExitCode": "0",
- "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "handle 0x000[0-9]+ classid 1:1",
- "matchCount": "2",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "2041",
- "name": "Change tcindex filter with pass action",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress",
- "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
- ],
- "cmdUnderTest": "$TC filter change dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action pass",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
- "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "9203",
- "name": "Replace tcindex filter with pass action",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress",
- "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
- ],
- "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action pass",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
- "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- },
- {
- "id": "7957",
- "name": "Delete tcindex filter with drop action",
- "category": [
- "filter",
- "tcindex"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$TC qdisc add dev $DEV1 ingress",
- "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
- ],
- "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop",
- "expExitCode": "0",
- "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
- "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action drop",
- "matchCount": "0",
- "teardown": [
- "$TC qdisc del dev $DEV1 ingress"
- ]
- }
-]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json
deleted file mode 100644
index f5bc8670a67d..000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json
+++ /dev/null
@@ -1,94 +0,0 @@
-[
- {
- "id": "7628",
- "name": "Create ATM with default setting",
- "category": [
- "qdisc",
- "atm"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc atm 1: root refcnt",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "390a",
- "name": "Delete ATM with valid handle",
- "category": [
- "qdisc",
- "atm"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
- "$TC qdisc add dev $DUMMY handle 1: root atm"
- ],
- "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc atm 1: root refcnt",
- "matchCount": "0",
- "teardown": [
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "32a0",
- "name": "Show ATM class",
- "category": [
- "qdisc",
- "atm"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
- "expExitCode": "0",
- "verifyCmd": "$TC class show dev $DUMMY",
- "matchPattern": "class atm 1: parent 1:",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "6310",
- "name": "Dump ATM stats",
- "category": [
- "qdisc",
- "atm"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
- "expExitCode": "0",
- "verifyCmd": "$TC -s qdisc show dev $DUMMY",
- "matchPattern": "qdisc atm 1: root refcnt",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- }
-]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json
deleted file mode 100644
index 1ab21c83a122..000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json
+++ /dev/null
@@ -1,184 +0,0 @@
-[
- {
- "id": "3460",
- "name": "Create CBQ with default setting",
- "category": [
- "qdisc",
- "cbq"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "0592",
- "name": "Create CBQ with mpu",
- "category": [
- "qdisc",
- "cbq"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 mpu 1000",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "4684",
- "name": "Create CBQ with valid cell num",
- "category": [
- "qdisc",
- "cbq"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 cell 128",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "4345",
- "name": "Create CBQ with invalid cell num",
- "category": [
- "qdisc",
- "cbq"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 cell 100",
- "expExitCode": "1",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
- "matchCount": "0",
- "teardown": [
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "4525",
- "name": "Create CBQ with valid ewma",
- "category": [
- "qdisc",
- "cbq"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 ewma 16",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "6784",
- "name": "Create CBQ with invalid ewma",
- "category": [
- "qdisc",
- "cbq"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 ewma 128",
- "expExitCode": "1",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
- "matchCount": "0",
- "teardown": [
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "5468",
- "name": "Delete CBQ with handle",
- "category": [
- "qdisc",
- "cbq"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
- "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000"
- ],
- "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
- "matchCount": "0",
- "teardown": [
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "492a",
- "name": "Show CBQ class",
- "category": [
- "qdisc",
- "cbq"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000",
- "expExitCode": "0",
- "verifyCmd": "$TC class show dev $DUMMY",
- "matchPattern": "class cbq 1: root rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- }
-]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json
deleted file mode 100644
index c030795f9c37..000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json
+++ /dev/null
@@ -1,140 +0,0 @@
-[
- {
- "id": "6345",
- "name": "Create DSMARK with default setting",
- "category": [
- "qdisc",
- "dsmark"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "3462",
- "name": "Create DSMARK with default_index setting",
- "category": [
- "qdisc",
- "dsmark"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 512",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 default_index 0x0200",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "ca95",
- "name": "Create DSMARK with set_tc_index flag",
- "category": [
- "qdisc",
- "dsmark"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 set_tc_index",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 set_tc_index",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "a950",
- "name": "Create DSMARK with multiple setting",
- "category": [
- "qdisc",
- "dsmark"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 1024 set_tc_index",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 default_index 0x0400 set_tc_index",
- "matchCount": "1",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "4092",
- "name": "Delete DSMARK with handle",
- "category": [
- "qdisc",
- "dsmark"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
- "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 1024"
- ],
- "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
- "expExitCode": "0",
- "verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400",
- "matchCount": "0",
- "teardown": [
- "$IP link del dev $DUMMY type dummy"
- ]
- },
- {
- "id": "5930",
- "name": "Show DSMARK class",
- "category": [
- "qdisc",
- "dsmark"
- ],
- "plugins": {
- "requires": "nsPlugin"
- },
- "setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
- ],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024",
- "expExitCode": "0",
- "verifyCmd": "$TC class show dev $DUMMY",
- "matchPattern": "class dsmark 1:",
- "matchCount": "0",
- "teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
- ]
- }
-]
diff --git a/tools/testing/vsock/Makefile b/tools/testing/vsock/Makefile
index f8293c6910c9..43a254f0e14d 100644
--- a/tools/testing/vsock/Makefile
+++ b/tools/testing/vsock/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-all: test
+all: test vsock_perf
test: vsock_test vsock_diag_test
vsock_test: vsock_test.o timeout.o control.o util.o
vsock_diag_test: vsock_diag_test.o timeout.o control.o util.o
+vsock_perf: vsock_perf.o
CFLAGS += -g -O2 -Werror -Wall -I. -I../../include -I../../../usr/include -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -D_GNU_SOURCE
.PHONY: all test clean
diff --git a/tools/testing/vsock/README b/tools/testing/vsock/README
index 4d5045e7d2c3..84ee217ba8ee 100644
--- a/tools/testing/vsock/README
+++ b/tools/testing/vsock/README
@@ -35,3 +35,37 @@ Invoke test binaries in both directions as follows:
--control-port=$GUEST_IP \
--control-port=1234 \
--peer-cid=3
+
+vsock_perf utility
+------------------
+'vsock_perf' is a simple tool to measure vsock performance. It works in
+sender/receiver modes: the sender connects to the peer at the specified
+port and starts transmitting data to the receiver. When the transfer
+completes, it prints several metrics (see below).
+
+Usage:
+# run as sender
+# connect to CID 2, port 1234, send 1G of data, tx buf size is 1M
+./vsock_perf --sender 2 --port 1234 --bytes 1G --buf-size 1M
+
+Output:
+tx performance: A Gbits/s
+
+Output explanation:
+A is calculated as "number of bits to send" / "time in tx loop"
+
+# run as receiver
+# listen on port 1234, rx buf size is 1M, socket buf size is 1G, SO_RCVLOWAT is 64K
+./vsock_perf --port 1234 --buf-size 1M --vsk-size 1G --rcvlowat 64K
+
+Output:
+rx performance: A Gbits/s
+total time in 'read()': B sec
+POLLIN wakeups: C
+average time in 'read()': D ns
+
+Output explanation:
+A is calculated as "number of received bits" / "time in rx loop".
+B is the time spent in the 'read()' system call (excluding 'poll()').
+C is the number of 'poll()' wakeups with the POLLIN bit set.
+D is B / C, i.e. the average time spent in a single 'read()' call.
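+
+For example, with B = 0.5 sec spent in 'read()' across C = 250000 POLLIN
+wakeups, D = 0.5 * 10^9 / 250000 = 2000 ns per 'read()' call.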
diff --git a/tools/testing/vsock/control.c b/tools/testing/vsock/control.c
index 4874872fc5a3..d2deb4b15b94 100644
--- a/tools/testing/vsock/control.c
+++ b/tools/testing/vsock/control.c
@@ -141,6 +141,34 @@ void control_writeln(const char *str)
timeout_end();
}
+void control_writeulong(unsigned long value)
+{
+ char str[32];
+
+ if (snprintf(str, sizeof(str), "%lu", value) >= sizeof(str)) {
+ perror("snprintf");
+ exit(EXIT_FAILURE);
+ }
+
+ control_writeln(str);
+}
+
+unsigned long control_readulong(void)
+{
+ unsigned long value;
+ char *str;
+
+ str = control_readln();
+
+ if (!str)
+ exit(EXIT_FAILURE);
+
+ value = strtoul(str, NULL, 10);
+ free(str);
+
+ return value;
+}
+
/* Return the next line from the control socket (without the trailing newline).
*
* The program terminates if a timeout occurs.
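
The new helpers serialize an unsigned long as a decimal line over the
existing control channel, pairing one control_writeulong() with one
control_readulong() on the peer. A short sketch of the intended usage,
assuming the channel is already connected via control_init();
publish_hash()/verify_hash() are illustrative names, not part of the API:

#include <stdlib.h>
#include "control.h"    /* tools/testing/vsock/control.h */

/* one side: publish the locally computed checksum */
static void publish_hash(unsigned long curr_hash)
{
        control_writeulong(curr_hash);
}

/* other side: read the peer's checksum and compare */
static void verify_hash(unsigned long curr_hash)
{
        if (control_readulong() != curr_hash)
                exit(EXIT_FAILURE);     /* data or message bounds corrupted */
}

The seqpacket message-bounds test below uses exactly this pairing to
compare the sender's and receiver's hash sums.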
diff --git a/tools/testing/vsock/control.h b/tools/testing/vsock/control.h
index 51814b4f9ac1..c1f77fdb2c7a 100644
--- a/tools/testing/vsock/control.h
+++ b/tools/testing/vsock/control.h
@@ -9,7 +9,9 @@ void control_init(const char *control_host, const char *control_port,
void control_cleanup(void);
void control_writeln(const char *str);
char *control_readln(void);
+unsigned long control_readulong(void);
void control_expectln(const char *str);
bool control_cmpln(char *line, const char *str, bool fail);
+void control_writeulong(unsigned long value);
#endif /* CONTROL_H */
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index 2acbb7703c6a..01b636d3039a 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -395,3 +395,16 @@ void skip_test(struct test_case *test_cases, size_t test_cases_len,
test_cases[test_id].skip = true;
}
+
+unsigned long hash_djb2(const void *data, size_t len)
+{
+ unsigned long hash = 5381;
+ int i = 0;
+
+ while (i < len) {
+ hash = ((hash << 5) + hash) + ((unsigned char *)data)[i];
+ i++;
+ }
+
+ return hash;
+}
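
hash_djb2() is the classic djb2 recurrence seeded with 5381; the
shift-and-add form (hash << 5) + hash is simply hash * 33, so each byte
folds in as h = h * 33 + byte. A standalone check of the same recurrence
(the expected constant in the comment is hand-computed):

#include <stdio.h>
#include <stddef.h>

static unsigned long djb2(const void *data, size_t len)
{
        const unsigned char *p = data;
        unsigned long hash = 5381;
        size_t i;

        /* h = h * 33 + byte, written as a shift-and-add */
        for (i = 0; i < len; i++)
                hash = ((hash << 5) + hash) + p[i];

        return hash;
}

int main(void)
{
        /* 5381*33+'a' = 177670; *33+'b' = 5863208; *33+'c' = 193485963 */
        printf("%lu\n", djb2("abc", 3));        /* prints 193485963 */
        return 0;
}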
diff --git a/tools/testing/vsock/util.h b/tools/testing/vsock/util.h
index a3375ad2fb7f..fb99208a95ea 100644
--- a/tools/testing/vsock/util.h
+++ b/tools/testing/vsock/util.h
@@ -49,4 +49,5 @@ void run_tests(const struct test_case *test_cases,
void list_tests(const struct test_case *test_cases);
void skip_test(struct test_case *test_cases, size_t test_cases_len,
const char *test_id_str);
+unsigned long hash_djb2(const void *data, size_t len);
#endif /* UTIL_H */
diff --git a/tools/testing/vsock/vsock_perf.c b/tools/testing/vsock/vsock_perf.c
new file mode 100644
index 000000000000..a72520338f84
--- /dev/null
+++ b/tools/testing/vsock/vsock_perf.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vsock_perf - benchmark utility for vsock.
+ *
+ * Copyright (C) 2022 SberDevices.
+ *
+ * Author: Arseniy Krasnov <AVKrasnov@sberdevices.ru>
+ */
+#include <getopt.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <time.h>
+#include <stdint.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <linux/vm_sockets.h>
+
+#define DEFAULT_BUF_SIZE_BYTES (128 * 1024)
+#define DEFAULT_TO_SEND_BYTES (64 * 1024)
+#define DEFAULT_VSOCK_BUF_BYTES (256 * 1024)
+#define DEFAULT_RCVLOWAT_BYTES 1
+#define DEFAULT_PORT 1234
+
+#define BYTES_PER_GB (1024 * 1024 * 1024ULL)
+#define NSEC_PER_SEC (1000000000ULL)
+
+static unsigned int port = DEFAULT_PORT;
+static unsigned long buf_size_bytes = DEFAULT_BUF_SIZE_BYTES;
+static unsigned long vsock_buf_bytes = DEFAULT_VSOCK_BUF_BYTES;
+
+static void error(const char *s)
+{
+ perror(s);
+ exit(EXIT_FAILURE);
+}
+
+static time_t current_nsec(void)
+{
+ struct timespec ts;
+
+ if (clock_gettime(CLOCK_REALTIME, &ts))
+ error("clock_gettime");
+
+ return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
+}
+
+/* From lib/cmdline.c. */
+static unsigned long memparse(const char *ptr)
+{
+ char *endptr;
+
+ unsigned long long ret = strtoull(ptr, &endptr, 0);
+
+ switch (*endptr) {
+ case 'E':
+ case 'e':
+ ret <<= 10;
+ case 'P':
+ case 'p':
+ ret <<= 10;
+ case 'T':
+ case 't':
+ ret <<= 10;
+ case 'G':
+ case 'g':
+ ret <<= 10;
+ case 'M':
+ case 'm':
+ ret <<= 10;
+ case 'K':
+ case 'k':
+ ret <<= 10;
+ endptr++;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void vsock_increase_buf_size(int fd)
+{
+ if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
+ &vsock_buf_bytes, sizeof(vsock_buf_bytes)))
+ error("setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
+
+ if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+ &vsock_buf_bytes, sizeof(vsock_buf_bytes)))
+ error("setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+}
+
+static int vsock_connect(unsigned int cid, unsigned int port)
+{
+ union {
+ struct sockaddr sa;
+ struct sockaddr_vm svm;
+ } addr = {
+ .svm = {
+ .svm_family = AF_VSOCK,
+ .svm_port = port,
+ .svm_cid = cid,
+ },
+ };
+ int fd;
+
+ fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+
+ if (fd < 0) {
+ perror("socket");
+ return -1;
+ }
+
+ if (connect(fd, &addr.sa, sizeof(addr.svm)) < 0) {
+ perror("connect");
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+static float get_gbps(unsigned long bits, time_t ns_delta)
+{
+ return ((float)bits / 1000000000ULL) /
+ ((float)ns_delta / NSEC_PER_SEC);
+}
+
+static void run_receiver(unsigned long rcvlowat_bytes)
+{
+ unsigned int read_cnt;
+ time_t rx_begin_ns;
+ time_t in_read_ns;
+ size_t total_recv;
+ int client_fd;
+ char *data;
+ int fd;
+ union {
+ struct sockaddr sa;
+ struct sockaddr_vm svm;
+ } addr = {
+ .svm = {
+ .svm_family = AF_VSOCK,
+ .svm_port = port,
+ .svm_cid = VMADDR_CID_ANY,
+ },
+ };
+ union {
+ struct sockaddr sa;
+ struct sockaddr_vm svm;
+ } clientaddr;
+
+ socklen_t clientaddr_len = sizeof(clientaddr.svm);
+
+ printf("Run as receiver\n");
+ printf("Listen port %u\n", port);
+ printf("RX buffer %lu bytes\n", buf_size_bytes);
+ printf("vsock buffer %lu bytes\n", vsock_buf_bytes);
+ printf("SO_RCVLOWAT %lu bytes\n", rcvlowat_bytes);
+
+ fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+
+ if (fd < 0)
+ error("socket");
+
+ if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0)
+ error("bind");
+
+ if (listen(fd, 1) < 0)
+ error("listen");
+
+ client_fd = accept(fd, &clientaddr.sa, &clientaddr_len);
+
+ if (client_fd < 0)
+ error("accept");
+
+ vsock_increase_buf_size(client_fd);
+
+ if (setsockopt(client_fd, SOL_SOCKET, SO_RCVLOWAT,
+ &rcvlowat_bytes,
+ sizeof(rcvlowat_bytes)))
+ error("setsockopt(SO_RCVLOWAT)");
+
+ data = malloc(buf_size_bytes);
+
+ if (!data) {
+ fprintf(stderr, "'malloc()' failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ read_cnt = 0;
+ in_read_ns = 0;
+ total_recv = 0;
+ rx_begin_ns = current_nsec();
+
+ while (1) {
+ struct pollfd fds = { 0 };
+
+ fds.fd = client_fd;
+ fds.events = POLLIN | POLLERR |
+ POLLHUP | POLLRDHUP;
+
+ if (poll(&fds, 1, -1) < 0)
+ error("poll");
+
+ if (fds.revents & POLLERR) {
+ fprintf(stderr, "'poll()' error\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (fds.revents & POLLIN) {
+ ssize_t bytes_read;
+ time_t t;
+
+ t = current_nsec();
+ bytes_read = read(fds.fd, data, buf_size_bytes);
+ in_read_ns += (current_nsec() - t);
+ read_cnt++;
+
+ if (!bytes_read)
+ break;
+
+ if (bytes_read < 0) {
+ perror("read");
+ exit(EXIT_FAILURE);
+ }
+
+ total_recv += bytes_read;
+ }
+
+ if (fds.revents & (POLLHUP | POLLRDHUP))
+ break;
+ }
+
+ printf("total bytes received: %zu\n", total_recv);
+ printf("rx performance: %f Gbits/s\n",
+ get_gbps(total_recv * 8, current_nsec() - rx_begin_ns));
+ printf("total time in 'read()': %f sec\n", (float)in_read_ns / NSEC_PER_SEC);
+ printf("average time in 'read()': %f ns\n", (float)in_read_ns / read_cnt);
+ printf("POLLIN wakeups: %i\n", read_cnt);
+
+ free(data);
+ close(client_fd);
+ close(fd);
+}
+
+static void run_sender(int peer_cid, unsigned long to_send_bytes)
+{
+ time_t tx_begin_ns;
+ time_t tx_total_ns;
+ size_t total_send;
+ void *data;
+ int fd;
+
+ printf("Run as sender\n");
+ printf("Connect to %i:%u\n", peer_cid, port);
+ printf("Send %lu bytes\n", to_send_bytes);
+ printf("TX buffer %lu bytes\n", buf_size_bytes);
+
+ fd = vsock_connect(peer_cid, port);
+
+ if (fd < 0)
+ exit(EXIT_FAILURE);
+
+ data = malloc(buf_size_bytes);
+
+ if (!data) {
+ fprintf(stderr, "'malloc()' failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(data, 0, buf_size_bytes);
+ total_send = 0;
+ tx_begin_ns = current_nsec();
+
+ while (total_send < to_send_bytes) {
+ ssize_t sent;
+
+ sent = write(fd, data, buf_size_bytes);
+
+ if (sent <= 0)
+ error("write");
+
+ total_send += sent;
+ }
+
+ tx_total_ns = current_nsec() - tx_begin_ns;
+
+ printf("total bytes sent: %zu\n", total_send);
+ printf("tx performance: %f Gbits/s\n",
+ get_gbps(total_send * 8, tx_total_ns));
+ printf("total time in 'write()': %f sec\n",
+ (float)tx_total_ns / NSEC_PER_SEC);
+
+ close(fd);
+ free(data);
+}
+
+static const char optstring[] = "";
+static const struct option longopts[] = {
+ {
+ .name = "help",
+ .has_arg = no_argument,
+ .val = 'H',
+ },
+ {
+ .name = "sender",
+ .has_arg = required_argument,
+ .val = 'S',
+ },
+ {
+ .name = "port",
+ .has_arg = required_argument,
+ .val = 'P',
+ },
+ {
+ .name = "bytes",
+ .has_arg = required_argument,
+ .val = 'M',
+ },
+ {
+ .name = "buf-size",
+ .has_arg = required_argument,
+ .val = 'B',
+ },
+ {
+ .name = "vsk-size",
+ .has_arg = required_argument,
+ .val = 'V',
+ },
+ {
+ .name = "rcvlowat",
+ .has_arg = required_argument,
+ .val = 'R',
+ },
+ {},
+};
+
+static void usage(void)
+{
+ printf("Usage: ./vsock_perf [--help] [options]\n"
+ "\n"
+ "This is benchmarking utility, to test vsock performance.\n"
+ "It runs in two modes: sender or receiver. In sender mode, it\n"
+ "connects to the specified CID and starts data transmission.\n"
+ "\n"
+ "Options:\n"
+ " --help This message\n"
+ " --sender <cid> Sender mode (receiver default)\n"
+ " <cid> of the receiver to connect to\n"
+ " --port <port> Port (default %d)\n"
+ " --bytes <bytes>KMG Bytes to send (default %d)\n"
+ " --buf-size <bytes>KMG Data buffer size (default %d). In sender mode\n"
+ " it is the buffer size, passed to 'write()'. In\n"
+ " receiver mode it is the buffer size passed to 'read()'.\n"
+ " --vsk-size <bytes>KMG Socket buffer size (default %d)\n"
+ " --rcvlowat <bytes>KMG SO_RCVLOWAT value (default %d)\n"
+ "\n", DEFAULT_PORT, DEFAULT_TO_SEND_BYTES,
+ DEFAULT_BUF_SIZE_BYTES, DEFAULT_VSOCK_BUF_BYTES,
+ DEFAULT_RCVLOWAT_BYTES);
+ exit(EXIT_FAILURE);
+}
+
+static long strtolx(const char *arg)
+{
+ long value;
+ char *end;
+
+ value = strtol(arg, &end, 10);
+
+ if (end != arg + strlen(arg))
+ usage();
+
+ return value;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned long to_send_bytes = DEFAULT_TO_SEND_BYTES;
+ unsigned long rcvlowat_bytes = DEFAULT_RCVLOWAT_BYTES;
+ int peer_cid = -1;
+ bool sender = false;
+
+ while (1) {
+ int opt = getopt_long(argc, argv, optstring, longopts, NULL);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'V': /* Peer buffer size. */
+ vsock_buf_bytes = memparse(optarg);
+ break;
+ case 'R': /* SO_RCVLOWAT value. */
+ rcvlowat_bytes = memparse(optarg);
+ break;
+ case 'P': /* Port to connect to. */
+ port = strtolx(optarg);
+ break;
+ case 'M': /* Bytes to send. */
+ to_send_bytes = memparse(optarg);
+ break;
+ case 'B': /* Size of rx/tx buffer. */
+ buf_size_bytes = memparse(optarg);
+ break;
+ case 'S': /* Sender mode. CID to connect to. */
+ peer_cid = strtolx(optarg);
+ sender = true;
+ break;
+ case 'H': /* Help. */
+ usage();
+ break;
+ default:
+ usage();
+ }
+ }
+
+ if (!sender)
+ run_receiver(rcvlowat_bytes);
+ else
+ run_sender(peer_cid, to_send_bytes);
+
+ return 0;
+}
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index bb6d691cb30d..67e9f9df3a8c 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -284,10 +284,14 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
close(fd);
}
-#define MESSAGES_CNT 7
-#define MSG_EOR_IDX (MESSAGES_CNT / 2)
+#define SOCK_BUF_SIZE (2 * 1024 * 1024)
+#define MAX_MSG_SIZE (32 * 1024)
+
static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
{
+ unsigned long curr_hash;
+ int page_size;
+ int msg_count;
int fd;
fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
@@ -296,18 +300,79 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
exit(EXIT_FAILURE);
}
- /* Send several messages, one with MSG_EOR flag */
- for (int i = 0; i < MESSAGES_CNT; i++)
- send_byte(fd, 1, (i == MSG_EOR_IDX) ? MSG_EOR : 0);
+	/* Wait until the receiver sets the buffer size. */
+ control_expectln("SRVREADY");
+
+ curr_hash = 0;
+ page_size = getpagesize();
+ msg_count = SOCK_BUF_SIZE / MAX_MSG_SIZE;
+
+ for (int i = 0; i < msg_count; i++) {
+ ssize_t send_size;
+ size_t buf_size;
+ int flags;
+ void *buf;
+
+ /* Use "small" buffers and "big" buffers. */
+ if (i & 1)
+ buf_size = page_size +
+ (rand() % (MAX_MSG_SIZE - page_size));
+ else
+ buf_size = 1 + (rand() % page_size);
+
+ buf = malloc(buf_size);
+
+ if (!buf) {
+ perror("malloc");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(buf, rand() & 0xff, buf_size);
+		/* Set MSG_EOR on at least one message, plus some at random. */
+ if (i == (msg_count / 2) || (rand() & 1)) {
+ flags = MSG_EOR;
+ curr_hash++;
+ } else {
+ flags = 0;
+ }
+
+ send_size = send(fd, buf, buf_size, flags);
+
+ if (send_size < 0) {
+ perror("send");
+ exit(EXIT_FAILURE);
+ }
+
+ if (send_size != buf_size) {
+ fprintf(stderr, "Invalid send size\n");
+ exit(EXIT_FAILURE);
+ }
+
+		/*
+		 * The hash sum is computed at both the client and the
+		 * server in the same way:
+		 *   H += hash('message data')
+		 * Such a hash checks both data integrity and message
+		 * bounds. After the data exchange, both sums are compared
+		 * over the control socket; if no message bound was
+		 * broken, the two values must be equal.
+		 */
+ curr_hash += hash_djb2(buf, buf_size);
+ free(buf);
+ }
control_writeln("SENDDONE");
+ control_writeulong(curr_hash);
close(fd);
}
static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
{
+ unsigned long sock_buf_size;
+ unsigned long remote_hash;
+ unsigned long curr_hash;
int fd;
- char buf[16];
+ char buf[MAX_MSG_SIZE];
struct msghdr msg = {0};
struct iovec iov = {0};
@@ -317,25 +382,57 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
exit(EXIT_FAILURE);
}
+ sock_buf_size = SOCK_BUF_SIZE;
+
+ if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
+ &sock_buf_size, sizeof(sock_buf_size))) {
+ perror("setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
+ exit(EXIT_FAILURE);
+ }
+
+ if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+ &sock_buf_size, sizeof(sock_buf_size))) {
+ perror("setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Ready to receive data. */
+ control_writeln("SRVREADY");
+	/* Wait until the peer has sent all the data. */
control_expectln("SENDDONE");
iov.iov_base = buf;
iov.iov_len = sizeof(buf);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
- for (int i = 0; i < MESSAGES_CNT; i++) {
- if (recvmsg(fd, &msg, 0) != 1) {
- perror("message bound violated");
- exit(EXIT_FAILURE);
- }
+ curr_hash = 0;
+
+ while (1) {
+ ssize_t recv_size;
- if ((i == MSG_EOR_IDX) ^ !!(msg.msg_flags & MSG_EOR)) {
- perror("MSG_EOR");
+ recv_size = recvmsg(fd, &msg, 0);
+
+ if (!recv_size)
+ break;
+
+ if (recv_size < 0) {
+ perror("recvmsg");
exit(EXIT_FAILURE);
}
+
+ if (msg.msg_flags & MSG_EOR)
+ curr_hash++;
+
+ curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size);
}
close(fd);
+ remote_hash = control_readulong();
+
+ if (curr_hash != remote_hash) {
+ fprintf(stderr, "Message bounds broken\n");
+ exit(EXIT_FAILURE);
+ }
}
#define MESSAGE_TRUNC_SZ 32
@@ -427,7 +524,7 @@ static void test_seqpacket_timeout_client(const struct test_opts *opts)
tv.tv_usec = 0;
if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv)) == -1) {
- perror("setsockopt 'SO_RCVTIMEO'");
+ perror("setsockopt(SO_RCVTIMEO)");
exit(EXIT_FAILURE);
}
@@ -472,6 +569,70 @@ static void test_seqpacket_timeout_server(const struct test_opts *opts)
close(fd);
}
+static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
+{
+ unsigned long sock_buf_size;
+ ssize_t send_size;
+ socklen_t len;
+ void *data;
+ int fd;
+
+ len = sizeof(sock_buf_size);
+
+ fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ if (fd < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ if (getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+ &sock_buf_size, &len)) {
+ perror("getsockopt");
+ exit(EXIT_FAILURE);
+ }
+
+ sock_buf_size++;
+
+ data = malloc(sock_buf_size);
+ if (!data) {
+ perror("malloc");
+ exit(EXIT_FAILURE);
+ }
+
+ send_size = send(fd, data, sock_buf_size, 0);
+ if (send_size != -1) {
+ fprintf(stderr, "expected 'send(2)' failure, got %zi\n",
+ send_size);
+ exit(EXIT_FAILURE);
+ }
+
+ if (errno != EMSGSIZE) {
+ fprintf(stderr, "expected EMSGSIZE in 'errno', got %i\n",
+ errno);
+ exit(EXIT_FAILURE);
+ }
+
+ control_writeln("CLISENT");
+
+ free(data);
+ close(fd);
+}
+
+static void test_seqpacket_bigmsg_server(const struct test_opts *opts)
+{
+ int fd;
+
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+ control_expectln("CLISENT");
+
+ close(fd);
+}
+
#define BUF_PATTERN_1 'a'
#define BUF_PATTERN_2 'b'
@@ -644,7 +805,7 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
&lowat_val, sizeof(lowat_val))) {
- perror("setsockopt");
+ perror("setsockopt(SO_RCVLOWAT)");
exit(EXIT_FAILURE);
}
@@ -754,6 +915,11 @@ static struct test_case test_cases[] = {
.run_client = test_stream_poll_rcvlowat_client,
.run_server = test_stream_poll_rcvlowat_server,
},
+ {
+ .name = "SOCK_SEQPACKET big message",
+ .run_client = test_seqpacket_bigmsg_client,
+ .run_server = test_seqpacket_bigmsg_server,
+ },
{},
};
@@ -837,6 +1003,7 @@ int main(int argc, char **argv)
.peer_cid = VMADDR_CID_ANY,
};
+ srand(time(NULL));
init_signals();
for (;;) {